From bfecf5e9c6f9af79d107031d5632314472c993c2 Mon Sep 17 00:00:00 2001 From: therealssj Date: Sat, 28 Mar 2020 16:23:08 +0530 Subject: [PATCH 1/2] example implementation --- cmd/multicoin/multicoin.go | 6 +- cmd/proxy/proxy.go | 74 + go.mod | 14 +- go.sum | 214 +- pkg/api/gateway.go | 4 +- pkg/api/http.go | 8 +- pkg/coin/btc/btc.go | 51 +- pkg/coin/btc/rpc/client.go | 132 + pkg/coin/coin.go | 2 +- pkg/multicoin/multicoin.go | 16 +- pkg/wallet/deterministic_wallet.go | 1 + pkg/wallet/readable.go | 10 +- .../SkycoinProject/skycoin/src/api/README.md | 4528 ----------------- .../SkycoinProject/skycoin/src/api/address.go | 56 - .../skycoin/src/api/blockchain.go | 395 -- .../SkycoinProject/skycoin/src/api/client.go | 1314 ----- .../SkycoinProject/skycoin/src/api/csrf.go | 165 - .../skycoin/src/api/explorer.go | 242 - .../SkycoinProject/skycoin/src/api/gateway.go | 120 - .../SkycoinProject/skycoin/src/api/health.go | 118 - .../SkycoinProject/skycoin/src/api/http.go | 703 --- .../SkycoinProject/skycoin/src/api/metrics.go | 85 - .../skycoin/src/api/middleware.go | 208 - .../SkycoinProject/skycoin/src/api/network.go | 228 - .../SkycoinProject/skycoin/src/api/outputs.go | 77 - .../SkycoinProject/skycoin/src/api/spend.go | 748 --- .../SkycoinProject/skycoin/src/api/storage.go | 190 - .../skycoin/src/api/transaction.go | 685 --- .../SkycoinProject/skycoin/src/api/uxout.go | 87 - .../SkycoinProject/skycoin/src/api/version.go | 22 - .../SkycoinProject/skycoin/src/api/wallet.go | 986 ---- .../skycoin/src/cipher/encoder/README.md | 8 - .../skycoin/src/cipher/encoder/encoder.go | 1027 ---- .../SkycoinProject/skycoin/src/coin/block.go | 236 - .../skycoin/src/coin/block_body_skyencoder.go | 407 -- .../src/coin/block_header_skyencoder.go | 168 - .../skycoin/src/coin/outputs.go | 327 -- .../src/coin/transaction_inputs_skyencoder.go | 135 - .../coin/transaction_outputs_skyencoder.go | 179 - .../src/coin/transaction_skyencoder.go | 357 -- .../skycoin/src/coin/transactions.go | 788 --- 
.../skycoin/src/coin/ux_body_skyencoder.go | 138 - .../skycoin/src/coin/ux_head_skyencoder.go | 93 - .../announce_blocks_message_skyencoder.go | 78 - .../announce_txns_message_skyencoder.go | 135 - .../skycoin/src/daemon/announced_txns.go | 44 - .../skycoin/src/daemon/connections.go | 554 -- .../skycoin/src/daemon/daemon.go | 1798 ------- .../daemon/disconnect_message_skyencoder.go | 118 - .../skycoin/src/daemon/errors.go | 112 - .../daemon/get_blocks_message_skyencoder.go | 93 - .../src/daemon/get_txns_message_skyencoder.go | 135 - .../daemon/give_blocks_message_skyencoder.go | 579 --- .../daemon/give_peers_message_skyencoder.go | 149 - .../daemon/give_txns_message_skyencoder.go | 408 -- .../skycoin/src/daemon/gnet/README.md | 7 - .../skycoin/src/daemon/gnet/dispatcher.go | 209 - .../skycoin/src/daemon/gnet/message.go | 140 - .../skycoin/src/daemon/gnet/pool.go | 1066 ---- .../daemon/introduction_message_skyencoder.go | 162 - .../skycoin/src/daemon/ip_addr_skyencoder.go | 93 - .../skycoin/src/daemon/messages.go | 1390 ----- .../skycoin/src/daemon/pex/README.md | 8 - .../skycoin/src/daemon/pex/peerlist.go | 417 -- .../skycoin/src/daemon/pex/pex.go | 761 --- .../SkycoinProject/skycoin/src/daemon/pool.go | 124 - .../src/daemon/signed_block_skyencoder.go | 528 -- .../skycoin/src/daemon/strand/strand.go | 135 - .../src/daemon/transaction_skyencoder.go | 358 -- .../skycoin/src/kvstorage/empty.go | 5 - .../skycoin/src/kvstorage/error.go | 16 - .../skycoin/src/kvstorage/kvstorage.go | 162 - .../skycoin/src/kvstorage/manager.go | 252 - .../skycoin/src/kvstorage/manager_config.go | 15 - .../skycoin/src/kvstorage/map.go | 12 - .../skycoin/src/params/distribution.go | 153 - .../skycoin/src/params/droplet.go | 34 - .../SkycoinProject/skycoin/src/params/init.go | 78 - .../skycoin/src/params/params.go | 128 - .../skycoin/src/params/verify_txn.go | 55 - .../skycoin/src/readable/block.go | 145 - .../skycoin/src/readable/blockchain.go | 58 - .../skycoin/src/readable/fiber.go | 16 - 
.../skycoin/src/readable/network.go | 73 - .../skycoin/src/readable/output.go | 276 - .../skycoin/src/readable/richlist.go | 32 - .../skycoin/src/readable/transaction.go | 252 - .../skycoin/src/readable/verbose.go | 258 - .../skycoin/src/readable/version.go | 20 - .../skycoin/src/readable/wallet.go | 68 - .../skycoin/src/testutil/testutil.go | 120 - .../skycoin/src/transaction/choose.go | 278 - .../skycoin/src/transaction/create.go | 504 -- .../skycoin/src/transaction/hours.go | 165 - .../skycoin/src/transaction/params.go | 155 - .../skycoin/src/util/apputil/apputil.go | 56 + .../skycoin/src/util/elapse/elapser.go | 72 - .../skycoin/src/util/fee/fee.go | 99 - .../skycoin/src/util/iputil/iputil.go | 66 - .../skycoin/src/util/timeutil/timeutil.go | 15 - .../skycoin/src/util/useragent/useragent.go | 222 - .../skycoin/src/visor/blockchain.go | 852 ---- .../src/visor/blockdb/block_skyencoder.go | 513 -- .../skycoin/src/visor/blockdb/block_tree.go | 269 - .../skycoin/src/visor/blockdb/blockchain.go | 285 -- .../skycoin/src/visor/blockdb/blocksigs.go | 69 - .../skycoin/src/visor/blockdb/chain_meta.go | 29 - .../blockdb/hash_pairs_wrapper_skyencoder.go | 141 - .../blockdb/hashes_wrapper_skyencoder.go | 126 - .../visor/blockdb/sig_wrapper_skyencoder.go | 78 - .../skycoin/src/visor/blockdb/unspent.go | 541 -- .../src/visor/blockdb/ux_out_skyencoder.go | 171 - .../skycoin/src/visor/blockdb/verify.go | 156 - .../skycoin/src/visor/config.go | 119 - .../SkycoinProject/skycoin/src/visor/db.go | 401 -- .../skycoin/src/visor/dbutil/dbutil.go | 360 -- .../skycoin/src/visor/distribution.go | 24 - .../src/visor/historydb/address_txn.go | 79 - .../src/visor/historydb/address_uxout.go | 66 - .../historydb/hashes_wrapper_skyencoder.go | 126 - .../src/visor/historydb/history_meta.go | 36 - .../skycoin/src/visor/historydb/historydb.go | 395 -- .../skycoin/src/visor/historydb/output.go | 102 - .../src/visor/historydb/transaction.go | 107 - .../visor/historydb/transaction_skyencoder.go | 373 
-- .../src/visor/historydb/ux_out_skyencoder.go | 198 - .../skycoin/src/visor/historydb/verify.go | 129 - .../skycoin/src/visor/interfaces.go | 71 - .../SkycoinProject/skycoin/src/visor/meta.go | 71 - .../skycoin/src/visor/objects.go | 213 - .../skycoin/src/visor/richlist.go | 64 - .../skycoin/src/visor/unconfirmed.go | 561 -- .../unconfirmed_transaction_skyencoder.go | 418 -- .../skycoin/src/visor/ux_array_skyencoder.go | 216 - .../skycoin/src/visor/verify.go | 336 -- .../SkycoinProject/skycoin/src/visor/visor.go | 2469 --------- .../skycoin/src/visor/visor_wallet.go | 559 -- .../skycoin/src/wallet/balance.go | 87 - .../skycoin/src/wallet/bip44_wallet.go | 535 -- .../skycoin/src/wallet/collection_wallet.go | 207 - .../skycoin/src/wallet/crypto.go | 66 - .../src/wallet/deterministic_wallet.go | 321 -- .../skycoin/src/wallet/entry.go | 126 - .../SkycoinProject/skycoin/src/wallet/meta.go | 357 -- .../skycoin/src/wallet/readable.go | 200 - .../skycoin/src/wallet/secrets.go | 36 - .../skycoin/src/wallet/service.go | 616 --- .../skycoin/src/wallet/transaction.go | 286 -- .../skycoin/src/wallet/wallet.go | 727 --- .../skycoin/src/wallet/wallets.go | 126 - .../skycoin/src/wallet/xpub_wallet.go | 348 -- vendor/github.com/StackExchange/wmi/LICENSE | 20 - vendor/github.com/StackExchange/wmi/README.md | 6 - .../StackExchange/wmi/swbemservices.go | 260 - vendor/github.com/StackExchange/wmi/wmi.go | 486 -- .../VictoriaMetrics/fastcache/LICENSE | 22 - .../VictoriaMetrics/fastcache/README.md | 116 - .../VictoriaMetrics/fastcache/bigcache.go | 152 - .../VictoriaMetrics/fastcache/fastcache.go | 415 -- .../VictoriaMetrics/fastcache/file.go | 400 -- .../VictoriaMetrics/fastcache/go.mod | 11 - .../VictoriaMetrics/fastcache/go.sum | 24 - .../VictoriaMetrics/fastcache/malloc_heap.go | 11 - .../VictoriaMetrics/fastcache/malloc_mmap.go | 52 - .../aristanetworks/goarista/AUTHORS | 25 - .../aristanetworks/goarista/COPYING | 177 - .../goarista/monotime/issue15006.s | 6 - 
.../goarista/monotime/nanotime.go | 31 - vendor/github.com/beorn7/perks/LICENSE | 20 - .../beorn7/perks/quantile/exampledata.txt | 2388 --------- .../beorn7/perks/quantile/stream.go | 316 -- vendor/github.com/blang/semver/.travis.yml | 21 - vendor/github.com/blang/semver/LICENSE | 22 - vendor/github.com/blang/semver/README.md | 194 - vendor/github.com/blang/semver/json.go | 23 - vendor/github.com/blang/semver/package.json | 17 - vendor/github.com/blang/semver/range.go | 416 -- vendor/github.com/blang/semver/semver.go | 418 -- vendor/github.com/blang/semver/sort.go | 28 - vendor/github.com/blang/semver/sql.go | 30 - vendor/github.com/boltdb/bolt/.gitignore | 4 - vendor/github.com/boltdb/bolt/LICENSE | 20 - vendor/github.com/boltdb/bolt/Makefile | 18 - vendor/github.com/boltdb/bolt/README.md | 916 ---- vendor/github.com/boltdb/bolt/appveyor.yml | 18 - vendor/github.com/boltdb/bolt/bolt_386.go | 10 - vendor/github.com/boltdb/bolt/bolt_amd64.go | 10 - vendor/github.com/boltdb/bolt/bolt_arm.go | 28 - vendor/github.com/boltdb/bolt/bolt_arm64.go | 12 - vendor/github.com/boltdb/bolt/bolt_linux.go | 10 - vendor/github.com/boltdb/bolt/bolt_openbsd.go | 27 - vendor/github.com/boltdb/bolt/bolt_ppc.go | 9 - vendor/github.com/boltdb/bolt/bolt_ppc64.go | 12 - vendor/github.com/boltdb/bolt/bolt_ppc64le.go | 12 - vendor/github.com/boltdb/bolt/bolt_s390x.go | 12 - vendor/github.com/boltdb/bolt/bolt_unix.go | 89 - .../boltdb/bolt/bolt_unix_solaris.go | 90 - vendor/github.com/boltdb/bolt/bolt_windows.go | 144 - .../github.com/boltdb/bolt/boltsync_unix.go | 8 - vendor/github.com/boltdb/bolt/bucket.go | 777 --- vendor/github.com/boltdb/bolt/cursor.go | 400 -- vendor/github.com/boltdb/bolt/db.go | 1039 ---- vendor/github.com/boltdb/bolt/doc.go | 44 - vendor/github.com/boltdb/bolt/errors.go | 71 - vendor/github.com/boltdb/bolt/freelist.go | 252 - vendor/github.com/boltdb/bolt/node.go | 604 --- vendor/github.com/boltdb/bolt/page.go | 197 - vendor/github.com/boltdb/bolt/tx.go | 684 --- 
vendor/github.com/cenkalti/backoff/.gitignore | 22 - .../github.com/cenkalti/backoff/.travis.yml | 10 - vendor/github.com/cenkalti/backoff/LICENSE | 20 - vendor/github.com/cenkalti/backoff/README.md | 30 - vendor/github.com/cenkalti/backoff/backoff.go | 66 - vendor/github.com/cenkalti/backoff/context.go | 63 - .../cenkalti/backoff/exponential.go | 153 - vendor/github.com/cenkalti/backoff/retry.go | 82 - vendor/github.com/cenkalti/backoff/ticker.go | 82 - vendor/github.com/cenkalti/backoff/tries.go | 35 - .../github.com/cespare/xxhash/v2/.travis.yml | 8 - .../github.com/cespare/xxhash/v2/LICENSE.txt | 22 - vendor/github.com/cespare/xxhash/v2/README.md | 67 - vendor/github.com/cespare/xxhash/v2/go.mod | 3 - vendor/github.com/cespare/xxhash/v2/go.sum | 0 vendor/github.com/cespare/xxhash/v2/xxhash.go | 236 - .../cespare/xxhash/v2/xxhash_amd64.go | 13 - .../cespare/xxhash/v2/xxhash_amd64.s | 215 - .../cespare/xxhash/v2/xxhash_other.go | 76 - .../cespare/xxhash/v2/xxhash_safe.go | 15 - .../cespare/xxhash/v2/xxhash_unsafe.go | 46 - .../github.com/elastic/gosigar/.appveyor.yml | 84 - vendor/github.com/elastic/gosigar/.gitignore | 41 - vendor/github.com/elastic/gosigar/.travis.yml | 36 - .../github.com/elastic/gosigar/CHANGELOG.md | 113 - vendor/github.com/elastic/gosigar/LICENSE | 201 - vendor/github.com/elastic/gosigar/NOTICE | 9 - vendor/github.com/elastic/gosigar/README.md | 58 - vendor/github.com/elastic/gosigar/Vagrantfile | 25 - vendor/github.com/elastic/gosigar/codecov.yml | 21 - .../elastic/gosigar/concrete_sigar.go | 89 - .../elastic/gosigar/sigar_darwin.go | 498 -- .../elastic/gosigar/sigar_format.go | 126 - .../elastic/gosigar/sigar_freebsd.go | 113 - .../elastic/gosigar/sigar_interface.go | 207 - .../github.com/elastic/gosigar/sigar_linux.go | 108 - .../elastic/gosigar/sigar_linux_common.go | 482 -- .../elastic/gosigar/sigar_openbsd.go | 426 -- .../github.com/elastic/gosigar/sigar_stub.go | 75 - .../github.com/elastic/gosigar/sigar_unix.go | 69 - 
.../github.com/elastic/gosigar/sigar_util.go | 22 - .../elastic/gosigar/sigar_windows.go | 441 -- .../elastic/gosigar/sys/windows/doc.go | 2 - .../elastic/gosigar/sys/windows/ntquery.go | 132 - .../elastic/gosigar/sys/windows/privileges.go | 272 - .../gosigar/sys/windows/syscall_windows.go | 385 -- .../elastic/gosigar/sys/windows/version.go | 43 - .../gosigar/sys/windows/zsyscall_windows.go | 262 - .../ethereum/go-ethereum/.dockerignore | 5 - .../ethereum/go-ethereum/.gitattributes | 3 - .../ethereum/go-ethereum/.gitignore | 49 - .../ethereum/go-ethereum/.gitmodules | 3 - .../ethereum/go-ethereum/.golangci.yml | 50 - .../github.com/ethereum/go-ethereum/.mailmap | 123 - .../ethereum/go-ethereum/.travis.yml | 241 - .../ethereum/go-ethereum/Dockerfile | 16 - .../ethereum/go-ethereum/Dockerfile.alltools | 15 - .../github.com/ethereum/go-ethereum/Makefile | 146 - .../github.com/ethereum/go-ethereum/README.md | 348 -- .../ethereum/go-ethereum/SECURITY.md | 120 - .../ethereum/go-ethereum/appveyor.yml | 40 - .../ethereum/go-ethereum/circle.yml | 32 - .../go-ethereum/common/mclock/mclock.go | 123 - .../go-ethereum/common/mclock/simclock.go | 209 - .../go-ethereum/common/prque/lazyqueue.go | 182 - .../go-ethereum/common/prque/prque.go | 78 - .../go-ethereum/common/prque/sstack.go | 114 - .../ethereum/go-ethereum/core/types/block.go | 395 -- .../ethereum/go-ethereum/core/types/bloom9.go | 136 - .../go-ethereum/core/types/derive_sha.go | 41 - .../go-ethereum/core/types/gen_header_json.go | 138 - .../go-ethereum/core/types/gen_log_json.go | 92 - .../core/types/gen_receipt_json.go | 104 - .../go-ethereum/core/types/gen_tx_json.go | 101 - .../ethereum/go-ethereum/core/types/log.go | 143 - .../go-ethereum/core/types/receipt.go | 336 -- .../go-ethereum/core/types/transaction.go | 419 -- .../core/types/transaction_signing.go | 260 - .../ethereum/go-ethereum/ethdb/batch.go | 46 - .../ethereum/go-ethereum/ethdb/database.go | 131 - .../ethereum/go-ethereum/ethdb/iterator.go | 66 - 
.../ethereum/go-ethereum/fuzzbuzz.yaml | 44 - vendor/github.com/ethereum/go-ethereum/go.mod | 71 - vendor/github.com/ethereum/go-ethereum/go.sum | 236 - .../ethereum/go-ethereum/interfaces.go | 211 - .../ethereum/go-ethereum/log/CONTRIBUTORS | 11 - .../ethereum/go-ethereum/log/LICENSE | 13 - .../ethereum/go-ethereum/log/README.md | 77 - .../go-ethereum/log/README_ETHEREUM.md | 5 - .../ethereum/go-ethereum/log/doc.go | 333 -- .../ethereum/go-ethereum/log/format.go | 406 -- .../ethereum/go-ethereum/log/handler.go | 359 -- .../ethereum/go-ethereum/log/handler_glog.go | 232 - .../ethereum/go-ethereum/log/handler_go13.go | 26 - .../ethereum/go-ethereum/log/handler_go14.go | 23 - .../ethereum/go-ethereum/log/logger.go | 245 - .../ethereum/go-ethereum/log/root.go | 70 - .../ethereum/go-ethereum/log/syslog.go | 57 - .../ethereum/go-ethereum/metrics/FORK.md | 1 - .../ethereum/go-ethereum/metrics/LICENSE | 29 - .../ethereum/go-ethereum/metrics/README.md | 166 - .../ethereum/go-ethereum/metrics/counter.go | 144 - .../ethereum/go-ethereum/metrics/cpu.go | 36 - .../go-ethereum/metrics/cpu_syscall.go | 35 - .../go-ethereum/metrics/cpu_windows.go | 23 - .../ethereum/go-ethereum/metrics/debug.go | 76 - .../ethereum/go-ethereum/metrics/disk.go | 25 - .../go-ethereum/metrics/disk_linux.go | 72 - .../ethereum/go-ethereum/metrics/disk_nop.go | 26 - .../ethereum/go-ethereum/metrics/doc.go | 4 - .../ethereum/go-ethereum/metrics/ewma.go | 115 - .../ethereum/go-ethereum/metrics/gauge.go | 158 - .../go-ethereum/metrics/gauge_float64.go | 127 - .../ethereum/go-ethereum/metrics/graphite.go | 113 - .../go-ethereum/metrics/healthcheck.go | 61 - .../ethereum/go-ethereum/metrics/histogram.go | 202 - .../ethereum/go-ethereum/metrics/json.go | 31 - .../ethereum/go-ethereum/metrics/log.go | 80 - .../ethereum/go-ethereum/metrics/memory.md | 285 -- .../ethereum/go-ethereum/metrics/meter.go | 300 -- .../ethereum/go-ethereum/metrics/metrics.go | 126 - .../ethereum/go-ethereum/metrics/opentsdb.go | 119 
- .../ethereum/go-ethereum/metrics/registry.go | 358 -- .../go-ethereum/metrics/resetting_timer.go | 241 - .../ethereum/go-ethereum/metrics/runtime.go | 212 - .../go-ethereum/metrics/runtime_cgo.go | 10 - .../metrics/runtime_gccpufraction.go | 9 - .../go-ethereum/metrics/runtime_no_cgo.go | 7 - .../metrics/runtime_no_gccpufraction.go | 9 - .../ethereum/go-ethereum/metrics/sample.go | 616 --- .../ethereum/go-ethereum/metrics/syslog.go | 78 - .../ethereum/go-ethereum/metrics/timer.go | 326 -- .../ethereum/go-ethereum/metrics/validate.sh | 10 - .../ethereum/go-ethereum/metrics/writer.go | 100 - .../ethereum/go-ethereum/params/bootnodes.go | 84 - .../ethereum/go-ethereum/params/config.go | 590 --- .../ethereum/go-ethereum/params/dao.go | 158 - .../go-ethereum/params/denomination.go | 28 - .../go-ethereum/params/network_params.go | 61 - .../go-ethereum/params/protocol_params.go | 140 - .../ethereum/go-ethereum/params/version.go | 67 - .../ethereum/go-ethereum/trie/committer.go | 279 - .../ethereum/go-ethereum/trie/database.go | 851 ---- .../ethereum/go-ethereum/trie/encoding.go | 116 - .../ethereum/go-ethereum/trie/errors.go | 35 - .../ethereum/go-ethereum/trie/hasher.go | 215 - .../ethereum/go-ethereum/trie/iterator.go | 577 --- .../ethereum/go-ethereum/trie/node.go | 225 - .../ethereum/go-ethereum/trie/proof.go | 152 - .../ethereum/go-ethereum/trie/secure_trie.go | 196 - .../ethereum/go-ethereum/trie/sync.go | 342 -- .../ethereum/go-ethereum/trie/sync_bloom.go | 207 - .../ethereum/go-ethereum/trie/trie.go | 475 -- vendor/github.com/go-ole/go-ole/.travis.yml | 9 - vendor/github.com/go-ole/go-ole/ChangeLog.md | 49 - vendor/github.com/go-ole/go-ole/LICENSE | 21 - vendor/github.com/go-ole/go-ole/README.md | 46 - vendor/github.com/go-ole/go-ole/appveyor.yml | 54 - vendor/github.com/go-ole/go-ole/com.go | 329 -- vendor/github.com/go-ole/go-ole/com_func.go | 174 - vendor/github.com/go-ole/go-ole/connect.go | 192 - vendor/github.com/go-ole/go-ole/constants.go | 153 - 
vendor/github.com/go-ole/go-ole/error.go | 51 - vendor/github.com/go-ole/go-ole/error_func.go | 8 - .../github.com/go-ole/go-ole/error_windows.go | 24 - vendor/github.com/go-ole/go-ole/guid.go | 284 -- .../go-ole/go-ole/iconnectionpoint.go | 20 - .../go-ole/go-ole/iconnectionpoint_func.go | 21 - .../go-ole/go-ole/iconnectionpoint_windows.go | 43 - .../go-ole/iconnectionpointcontainer.go | 17 - .../go-ole/iconnectionpointcontainer_func.go | 11 - .../iconnectionpointcontainer_windows.go | 25 - vendor/github.com/go-ole/go-ole/idispatch.go | 94 - .../go-ole/go-ole/idispatch_func.go | 19 - .../go-ole/go-ole/idispatch_windows.go | 197 - .../github.com/go-ole/go-ole/ienumvariant.go | 19 - .../go-ole/go-ole/ienumvariant_func.go | 19 - .../go-ole/go-ole/ienumvariant_windows.go | 63 - .../github.com/go-ole/go-ole/iinspectable.go | 18 - .../go-ole/go-ole/iinspectable_func.go | 15 - .../go-ole/go-ole/iinspectable_windows.go | 72 - .../go-ole/go-ole/iprovideclassinfo.go | 21 - .../go-ole/go-ole/iprovideclassinfo_func.go | 7 - .../go-ole/iprovideclassinfo_windows.go | 21 - vendor/github.com/go-ole/go-ole/itypeinfo.go | 34 - .../go-ole/go-ole/itypeinfo_func.go | 7 - .../go-ole/go-ole/itypeinfo_windows.go | 21 - vendor/github.com/go-ole/go-ole/iunknown.go | 57 - .../github.com/go-ole/go-ole/iunknown_func.go | 19 - .../go-ole/go-ole/iunknown_windows.go | 58 - vendor/github.com/go-ole/go-ole/ole.go | 157 - .../go-ole/go-ole/oleutil/connection.go | 100 - .../go-ole/go-ole/oleutil/connection_func.go | 10 - .../go-ole/oleutil/connection_windows.go | 58 - .../go-ole/go-ole/oleutil/go-get.go | 6 - .../go-ole/go-ole/oleutil/oleutil.go | 127 - vendor/github.com/go-ole/go-ole/safearray.go | 27 - .../go-ole/go-ole/safearray_func.go | 211 - .../go-ole/go-ole/safearray_windows.go | 337 -- .../go-ole/go-ole/safearrayconversion.go | 140 - .../go-ole/go-ole/safearrayslices.go | 33 - vendor/github.com/go-ole/go-ole/utility.go | 101 - vendor/github.com/go-ole/go-ole/variables.go | 16 - 
vendor/github.com/go-ole/go-ole/variant.go | 105 - .../github.com/go-ole/go-ole/variant_386.go | 11 - .../github.com/go-ole/go-ole/variant_amd64.go | 12 - .../github.com/go-ole/go-ole/variant_s390x.go | 12 - vendor/github.com/go-ole/go-ole/vt_string.go | 58 - vendor/github.com/go-ole/go-ole/winrt.go | 99 - vendor/github.com/go-ole/go-ole/winrt_doc.go | 36 - vendor/github.com/go-stack/stack/.travis.yml | 15 - vendor/github.com/go-stack/stack/LICENSE.md | 21 - vendor/github.com/go-stack/stack/README.md | 38 - vendor/github.com/go-stack/stack/go.mod | 1 - vendor/github.com/go-stack/stack/stack.go | 400 -- vendor/github.com/golang/protobuf/AUTHORS | 3 - .../github.com/golang/protobuf/CONTRIBUTORS | 3 - vendor/github.com/golang/protobuf/LICENSE | 28 - .../github.com/golang/protobuf/proto/clone.go | 253 - .../golang/protobuf/proto/decode.go | 427 -- .../golang/protobuf/proto/deprecated.go | 63 - .../golang/protobuf/proto/discard.go | 350 -- .../golang/protobuf/proto/encode.go | 203 - .../github.com/golang/protobuf/proto/equal.go | 301 -- .../golang/protobuf/proto/extensions.go | 607 --- .../github.com/golang/protobuf/proto/lib.go | 965 ---- .../golang/protobuf/proto/message_set.go | 181 - .../golang/protobuf/proto/pointer_reflect.go | 360 -- .../golang/protobuf/proto/pointer_unsafe.go | 313 -- .../golang/protobuf/proto/properties.go | 544 -- .../golang/protobuf/proto/table_marshal.go | 2776 ---------- .../golang/protobuf/proto/table_merge.go | 654 --- .../golang/protobuf/proto/table_unmarshal.go | 2053 -------- .../github.com/golang/protobuf/proto/text.go | 843 --- .../golang/protobuf/proto/text_parser.go | 880 ---- vendor/github.com/golang/snappy/.gitignore | 16 - vendor/github.com/golang/snappy/AUTHORS | 15 - vendor/github.com/golang/snappy/CONTRIBUTORS | 37 - vendor/github.com/golang/snappy/LICENSE | 27 - vendor/github.com/golang/snappy/README | 107 - vendor/github.com/golang/snappy/decode.go | 237 - .../github.com/golang/snappy/decode_amd64.go | 14 - 
.../github.com/golang/snappy/decode_amd64.s | 490 -- .../github.com/golang/snappy/decode_other.go | 101 - vendor/github.com/golang/snappy/encode.go | 285 -- .../github.com/golang/snappy/encode_amd64.go | 29 - .../github.com/golang/snappy/encode_amd64.s | 730 --- .../github.com/golang/snappy/encode_other.go | 238 - vendor/github.com/golang/snappy/go.mod | 1 - vendor/github.com/golang/snappy/snappy.go | 98 - .../golang_protobuf_extensions/LICENSE | 201 - .../golang_protobuf_extensions/NOTICE | 1 - .../pbutil/.gitignore | 1 - .../pbutil/Makefile | 7 - .../pbutil/decode.go | 75 - .../golang_protobuf_extensions/pbutil/doc.go | 16 - .../pbutil/encode.go | 46 - vendor/github.com/pkg/errors/.gitignore | 24 - vendor/github.com/pkg/errors/.travis.yml | 15 - vendor/github.com/pkg/errors/LICENSE | 23 - vendor/github.com/pkg/errors/README.md | 52 - vendor/github.com/pkg/errors/appveyor.yml | 32 - vendor/github.com/pkg/errors/errors.go | 282 - vendor/github.com/pkg/errors/stack.go | 147 - .../prometheus/client_golang/LICENSE | 201 - .../prometheus/client_golang/NOTICE | 23 - .../client_golang/prometheus/.gitignore | 1 - .../client_golang/prometheus/README.md | 1 - .../client_golang/prometheus/collector.go | 120 - .../client_golang/prometheus/counter.go | 277 - .../client_golang/prometheus/desc.go | 184 - .../client_golang/prometheus/doc.go | 201 - .../prometheus/expvar_collector.go | 119 - .../client_golang/prometheus/fnv.go | 42 - .../client_golang/prometheus/gauge.go | 286 -- .../client_golang/prometheus/go_collector.go | 301 -- .../client_golang/prometheus/histogram.go | 614 --- .../client_golang/prometheus/http.go | 504 -- .../prometheus/internal/metric.go | 85 - .../client_golang/prometheus/labels.go | 87 - .../client_golang/prometheus/metric.go | 174 - .../client_golang/prometheus/observer.go | 52 - .../prometheus/process_collector.go | 204 - .../prometheus/promhttp/delegator.go | 199 - .../prometheus/promhttp/delegator_1_8.go | 181 - 
.../prometheus/promhttp/delegator_pre_1_8.go | 44 - .../client_golang/prometheus/promhttp/http.go | 311 -- .../prometheus/promhttp/instrument_client.go | 97 - .../promhttp/instrument_client_1_8.go | 144 - .../prometheus/promhttp/instrument_server.go | 447 -- .../client_golang/prometheus/registry.go | 931 ---- .../client_golang/prometheus/summary.go | 626 --- .../client_golang/prometheus/timer.go | 51 - .../client_golang/prometheus/untyped.go | 42 - .../client_golang/prometheus/value.go | 162 - .../client_golang/prometheus/vec.go | 472 -- .../client_golang/prometheus/wrap.go | 179 - .../prometheus/client_model/LICENSE | 201 - .../github.com/prometheus/client_model/NOTICE | 5 - .../prometheus/client_model/go/metrics.pb.go | 633 --- vendor/github.com/prometheus/common/LICENSE | 201 - vendor/github.com/prometheus/common/NOTICE | 5 - .../prometheus/common/expfmt/decode.go | 429 -- .../prometheus/common/expfmt/encode.go | 88 - .../prometheus/common/expfmt/expfmt.go | 38 - .../prometheus/common/expfmt/fuzz.go | 36 - .../prometheus/common/expfmt/text_create.go | 468 -- .../prometheus/common/expfmt/text_parse.go | 757 --- .../bitbucket.org/ww/goautoneg/README.txt | 67 - .../bitbucket.org/ww/goautoneg/autoneg.go | 162 - .../prometheus/common/model/alert.go | 136 - .../prometheus/common/model/fingerprinting.go | 105 - .../github.com/prometheus/common/model/fnv.go | 42 - .../prometheus/common/model/labels.go | 210 - .../prometheus/common/model/labelset.go | 169 - .../prometheus/common/model/metric.go | 103 - .../prometheus/common/model/model.go | 16 - .../prometheus/common/model/signature.go | 144 - .../prometheus/common/model/silence.go | 106 - .../prometheus/common/model/time.go | 264 - .../prometheus/common/model/value.go | 416 -- .../github.com/prometheus/procfs/.gitignore | 1 - .../prometheus/procfs/CONTRIBUTING.md | 18 - vendor/github.com/prometheus/procfs/LICENSE | 201 - .../prometheus/procfs/MAINTAINERS.md | 1 - vendor/github.com/prometheus/procfs/Makefile | 77 - 
vendor/github.com/prometheus/procfs/NOTICE | 7 - vendor/github.com/prometheus/procfs/README.md | 11 - .../github.com/prometheus/procfs/buddyinfo.go | 95 - vendor/github.com/prometheus/procfs/doc.go | 45 - .../prometheus/procfs/fixtures.ttar | 462 -- vendor/github.com/prometheus/procfs/fs.go | 82 - .../prometheus/procfs/internal/util/parse.go | 59 - .../procfs/internal/util/sysreadfile_linux.go | 45 - vendor/github.com/prometheus/procfs/ipvs.go | 259 - vendor/github.com/prometheus/procfs/mdstat.go | 151 - .../prometheus/procfs/mountstats.go | 606 --- .../github.com/prometheus/procfs/net_dev.go | 216 - .../github.com/prometheus/procfs/nfs/nfs.go | 263 - .../github.com/prometheus/procfs/nfs/parse.go | 317 -- .../prometheus/procfs/nfs/parse_nfs.go | 67 - .../prometheus/procfs/nfs/parse_nfsd.go | 89 - vendor/github.com/prometheus/procfs/proc.go | 258 - .../github.com/prometheus/procfs/proc_io.go | 65 - .../prometheus/procfs/proc_limits.go | 150 - .../github.com/prometheus/procfs/proc_ns.go | 68 - .../github.com/prometheus/procfs/proc_stat.go | 188 - vendor/github.com/prometheus/procfs/stat.go | 232 - vendor/github.com/prometheus/procfs/ttar | 389 -- vendor/github.com/prometheus/procfs/xfrm.go | 187 - .../github.com/prometheus/procfs/xfs/parse.go | 330 -- .../github.com/prometheus/procfs/xfs/xfs.go | 163 - vendor/github.com/rs/cors/.travis.yml | 4 - vendor/github.com/rs/cors/LICENSE | 19 - vendor/github.com/rs/cors/README.md | 99 - vendor/github.com/rs/cors/cors.go | 412 -- vendor/github.com/rs/cors/utils.go | 70 - vendor/github.com/rs/xhandler/.travis.yml | 7 - vendor/github.com/rs/xhandler/LICENSE | 19 - vendor/github.com/rs/xhandler/README.md | 134 - vendor/github.com/rs/xhandler/chain.go | 121 - vendor/github.com/rs/xhandler/middleware.go | 59 - vendor/github.com/rs/xhandler/xhandler.go | 42 - .../steakknife/bloomfilter/.travis.yml | 14 - .../steakknife/bloomfilter/MIT-LICENSE.txt | 8 - .../steakknife/bloomfilter/README.md | 123 - 
.../steakknife/bloomfilter/binarymarshaler.go | 87 - .../bloomfilter/binaryunmarshaler.go | 111 - .../steakknife/bloomfilter/bloomfilter.go | 123 - .../steakknife/bloomfilter/conformance.go | 29 - .../steakknife/bloomfilter/debug.go | 37 - .../steakknife/bloomfilter/errors.go | 34 - .../steakknife/bloomfilter/fileio.go | 105 - .../github.com/steakknife/bloomfilter/gob.go | 23 - .../steakknife/bloomfilter/iscompatible.go | 41 - .../github.com/steakknife/bloomfilter/new.go | 134 - .../steakknife/bloomfilter/optimal.go | 28 - .../steakknife/bloomfilter/statistics.go | 43 - .../steakknife/bloomfilter/textmarshaler.go | 49 - .../steakknife/bloomfilter/textunmarshaler.go | 150 - .../github.com/steakknife/hamming/.gitignore | 2 - .../github.com/steakknife/hamming/.travis.yml | 14 - .../steakknife/hamming/MIT-LICENSE.txt | 8 - .../github.com/steakknife/hamming/README.md | 82 - vendor/github.com/steakknife/hamming/doc.go | 35 - .../github.com/steakknife/hamming/hamming.go | 70 - .../steakknife/hamming/popcnt_amd64.go | 65 - .../steakknife/hamming/popcnt_amd64.s | 64 - .../github.com/steakknife/hamming/popcount.go | 134 - .../steakknife/hamming/popcount_slices.go | 123 - .../hamming/popcount_slices_amd64.go | 72 - .../hamming/popcount_slices_amd64.s | 370 -- .../steakknife/hamming/slices_of_hamming.go | 144 - vendor/golang.org/x/net/AUTHORS | 3 - vendor/golang.org/x/net/CONTRIBUTORS | 3 - vendor/golang.org/x/net/LICENSE | 27 - vendor/golang.org/x/net/PATENTS | 22 - vendor/golang.org/x/net/context/context.go | 56 - vendor/golang.org/x/net/context/go17.go | 72 - vendor/golang.org/x/net/context/go19.go | 20 - vendor/golang.org/x/net/context/pre_go17.go | 300 -- vendor/golang.org/x/net/context/pre_go19.go | 109 - vendor/modules.txt | 89 +- 611 files changed, 563 insertions(+), 116942 deletions(-) create mode 100644 cmd/proxy/proxy.go create mode 100644 pkg/coin/btc/rpc/client.go delete mode 100644 vendor/github.com/SkycoinProject/skycoin/src/api/README.md delete mode 100644 
vendor/github.com/SkycoinProject/skycoin/src/api/address.go delete mode 100644 vendor/github.com/SkycoinProject/skycoin/src/api/blockchain.go delete mode 100644 vendor/github.com/SkycoinProject/skycoin/src/api/client.go delete mode 100644 vendor/github.com/SkycoinProject/skycoin/src/api/csrf.go delete mode 100644 vendor/github.com/SkycoinProject/skycoin/src/api/explorer.go delete mode 100644 vendor/github.com/SkycoinProject/skycoin/src/api/gateway.go delete mode 100644 vendor/github.com/SkycoinProject/skycoin/src/api/health.go delete mode 100644 vendor/github.com/SkycoinProject/skycoin/src/api/http.go delete mode 100644 vendor/github.com/SkycoinProject/skycoin/src/api/metrics.go delete mode 100644 vendor/github.com/SkycoinProject/skycoin/src/api/middleware.go delete mode 100644 vendor/github.com/SkycoinProject/skycoin/src/api/network.go delete mode 100644 vendor/github.com/SkycoinProject/skycoin/src/api/outputs.go delete mode 100644 vendor/github.com/SkycoinProject/skycoin/src/api/spend.go delete mode 100644 vendor/github.com/SkycoinProject/skycoin/src/api/storage.go delete mode 100644 vendor/github.com/SkycoinProject/skycoin/src/api/transaction.go delete mode 100644 vendor/github.com/SkycoinProject/skycoin/src/api/uxout.go delete mode 100644 vendor/github.com/SkycoinProject/skycoin/src/api/version.go delete mode 100644 vendor/github.com/SkycoinProject/skycoin/src/api/wallet.go delete mode 100644 vendor/github.com/SkycoinProject/skycoin/src/cipher/encoder/README.md delete mode 100644 vendor/github.com/SkycoinProject/skycoin/src/cipher/encoder/encoder.go delete mode 100644 vendor/github.com/SkycoinProject/skycoin/src/coin/block.go delete mode 100644 vendor/github.com/SkycoinProject/skycoin/src/coin/block_body_skyencoder.go delete mode 100644 vendor/github.com/SkycoinProject/skycoin/src/coin/block_header_skyencoder.go delete mode 100644 vendor/github.com/SkycoinProject/skycoin/src/coin/outputs.go delete mode 100644 
vendor/github.com/SkycoinProject/skycoin/src/coin/transaction_inputs_skyencoder.go delete mode 100644 vendor/github.com/SkycoinProject/skycoin/src/coin/transaction_outputs_skyencoder.go delete mode 100644 vendor/github.com/SkycoinProject/skycoin/src/coin/transaction_skyencoder.go delete mode 100644 vendor/github.com/SkycoinProject/skycoin/src/coin/transactions.go delete mode 100644 vendor/github.com/SkycoinProject/skycoin/src/coin/ux_body_skyencoder.go delete mode 100644 vendor/github.com/SkycoinProject/skycoin/src/coin/ux_head_skyencoder.go delete mode 100644 vendor/github.com/SkycoinProject/skycoin/src/daemon/announce_blocks_message_skyencoder.go delete mode 100644 vendor/github.com/SkycoinProject/skycoin/src/daemon/announce_txns_message_skyencoder.go delete mode 100644 vendor/github.com/SkycoinProject/skycoin/src/daemon/announced_txns.go delete mode 100644 vendor/github.com/SkycoinProject/skycoin/src/daemon/connections.go delete mode 100644 vendor/github.com/SkycoinProject/skycoin/src/daemon/daemon.go delete mode 100644 vendor/github.com/SkycoinProject/skycoin/src/daemon/disconnect_message_skyencoder.go delete mode 100644 vendor/github.com/SkycoinProject/skycoin/src/daemon/errors.go delete mode 100644 vendor/github.com/SkycoinProject/skycoin/src/daemon/get_blocks_message_skyencoder.go delete mode 100644 vendor/github.com/SkycoinProject/skycoin/src/daemon/get_txns_message_skyencoder.go delete mode 100644 vendor/github.com/SkycoinProject/skycoin/src/daemon/give_blocks_message_skyencoder.go delete mode 100644 vendor/github.com/SkycoinProject/skycoin/src/daemon/give_peers_message_skyencoder.go delete mode 100644 vendor/github.com/SkycoinProject/skycoin/src/daemon/give_txns_message_skyencoder.go delete mode 100644 vendor/github.com/SkycoinProject/skycoin/src/daemon/gnet/README.md delete mode 100644 vendor/github.com/SkycoinProject/skycoin/src/daemon/gnet/dispatcher.go delete mode 100644 vendor/github.com/SkycoinProject/skycoin/src/daemon/gnet/message.go delete mode 
100644 vendor/github.com/SkycoinProject/skycoin/src/daemon/gnet/pool.go delete mode 100644 vendor/github.com/SkycoinProject/skycoin/src/daemon/introduction_message_skyencoder.go delete mode 100644 vendor/github.com/SkycoinProject/skycoin/src/daemon/ip_addr_skyencoder.go delete mode 100644 vendor/github.com/SkycoinProject/skycoin/src/daemon/messages.go delete mode 100644 vendor/github.com/SkycoinProject/skycoin/src/daemon/pex/README.md delete mode 100644 vendor/github.com/SkycoinProject/skycoin/src/daemon/pex/peerlist.go delete mode 100644 vendor/github.com/SkycoinProject/skycoin/src/daemon/pex/pex.go delete mode 100644 vendor/github.com/SkycoinProject/skycoin/src/daemon/pool.go delete mode 100644 vendor/github.com/SkycoinProject/skycoin/src/daemon/signed_block_skyencoder.go delete mode 100644 vendor/github.com/SkycoinProject/skycoin/src/daemon/strand/strand.go delete mode 100644 vendor/github.com/SkycoinProject/skycoin/src/daemon/transaction_skyencoder.go delete mode 100644 vendor/github.com/SkycoinProject/skycoin/src/kvstorage/empty.go delete mode 100644 vendor/github.com/SkycoinProject/skycoin/src/kvstorage/error.go delete mode 100644 vendor/github.com/SkycoinProject/skycoin/src/kvstorage/kvstorage.go delete mode 100644 vendor/github.com/SkycoinProject/skycoin/src/kvstorage/manager.go delete mode 100644 vendor/github.com/SkycoinProject/skycoin/src/kvstorage/manager_config.go delete mode 100644 vendor/github.com/SkycoinProject/skycoin/src/kvstorage/map.go delete mode 100644 vendor/github.com/SkycoinProject/skycoin/src/params/distribution.go delete mode 100644 vendor/github.com/SkycoinProject/skycoin/src/params/droplet.go delete mode 100644 vendor/github.com/SkycoinProject/skycoin/src/params/init.go delete mode 100644 vendor/github.com/SkycoinProject/skycoin/src/params/params.go delete mode 100644 vendor/github.com/SkycoinProject/skycoin/src/params/verify_txn.go delete mode 100644 vendor/github.com/SkycoinProject/skycoin/src/readable/block.go delete mode 100644 
vendor/github.com/SkycoinProject/skycoin/src/readable/blockchain.go delete mode 100644 vendor/github.com/SkycoinProject/skycoin/src/readable/fiber.go delete mode 100644 vendor/github.com/SkycoinProject/skycoin/src/readable/network.go delete mode 100644 vendor/github.com/SkycoinProject/skycoin/src/readable/output.go delete mode 100644 vendor/github.com/SkycoinProject/skycoin/src/readable/richlist.go delete mode 100644 vendor/github.com/SkycoinProject/skycoin/src/readable/transaction.go delete mode 100644 vendor/github.com/SkycoinProject/skycoin/src/readable/verbose.go delete mode 100644 vendor/github.com/SkycoinProject/skycoin/src/readable/version.go delete mode 100644 vendor/github.com/SkycoinProject/skycoin/src/readable/wallet.go delete mode 100644 vendor/github.com/SkycoinProject/skycoin/src/testutil/testutil.go delete mode 100644 vendor/github.com/SkycoinProject/skycoin/src/transaction/choose.go delete mode 100644 vendor/github.com/SkycoinProject/skycoin/src/transaction/create.go delete mode 100644 vendor/github.com/SkycoinProject/skycoin/src/transaction/hours.go delete mode 100644 vendor/github.com/SkycoinProject/skycoin/src/transaction/params.go create mode 100644 vendor/github.com/SkycoinProject/skycoin/src/util/apputil/apputil.go delete mode 100644 vendor/github.com/SkycoinProject/skycoin/src/util/elapse/elapser.go delete mode 100644 vendor/github.com/SkycoinProject/skycoin/src/util/fee/fee.go delete mode 100644 vendor/github.com/SkycoinProject/skycoin/src/util/iputil/iputil.go delete mode 100644 vendor/github.com/SkycoinProject/skycoin/src/util/timeutil/timeutil.go delete mode 100644 vendor/github.com/SkycoinProject/skycoin/src/util/useragent/useragent.go delete mode 100644 vendor/github.com/SkycoinProject/skycoin/src/visor/blockchain.go delete mode 100644 vendor/github.com/SkycoinProject/skycoin/src/visor/blockdb/block_skyencoder.go delete mode 100644 vendor/github.com/SkycoinProject/skycoin/src/visor/blockdb/block_tree.go delete mode 100644 
vendor/github.com/SkycoinProject/skycoin/src/visor/blockdb/blockchain.go delete mode 100644 vendor/github.com/SkycoinProject/skycoin/src/visor/blockdb/blocksigs.go delete mode 100644 vendor/github.com/SkycoinProject/skycoin/src/visor/blockdb/chain_meta.go delete mode 100644 vendor/github.com/SkycoinProject/skycoin/src/visor/blockdb/hash_pairs_wrapper_skyencoder.go delete mode 100644 vendor/github.com/SkycoinProject/skycoin/src/visor/blockdb/hashes_wrapper_skyencoder.go delete mode 100644 vendor/github.com/SkycoinProject/skycoin/src/visor/blockdb/sig_wrapper_skyencoder.go delete mode 100644 vendor/github.com/SkycoinProject/skycoin/src/visor/blockdb/unspent.go delete mode 100644 vendor/github.com/SkycoinProject/skycoin/src/visor/blockdb/ux_out_skyencoder.go delete mode 100644 vendor/github.com/SkycoinProject/skycoin/src/visor/blockdb/verify.go delete mode 100644 vendor/github.com/SkycoinProject/skycoin/src/visor/config.go delete mode 100644 vendor/github.com/SkycoinProject/skycoin/src/visor/db.go delete mode 100644 vendor/github.com/SkycoinProject/skycoin/src/visor/dbutil/dbutil.go delete mode 100644 vendor/github.com/SkycoinProject/skycoin/src/visor/distribution.go delete mode 100644 vendor/github.com/SkycoinProject/skycoin/src/visor/historydb/address_txn.go delete mode 100644 vendor/github.com/SkycoinProject/skycoin/src/visor/historydb/address_uxout.go delete mode 100644 vendor/github.com/SkycoinProject/skycoin/src/visor/historydb/hashes_wrapper_skyencoder.go delete mode 100644 vendor/github.com/SkycoinProject/skycoin/src/visor/historydb/history_meta.go delete mode 100644 vendor/github.com/SkycoinProject/skycoin/src/visor/historydb/historydb.go delete mode 100644 vendor/github.com/SkycoinProject/skycoin/src/visor/historydb/output.go delete mode 100644 vendor/github.com/SkycoinProject/skycoin/src/visor/historydb/transaction.go delete mode 100644 vendor/github.com/SkycoinProject/skycoin/src/visor/historydb/transaction_skyencoder.go delete mode 100644 
vendor/github.com/SkycoinProject/skycoin/src/visor/historydb/ux_out_skyencoder.go delete mode 100644 vendor/github.com/SkycoinProject/skycoin/src/visor/historydb/verify.go delete mode 100644 vendor/github.com/SkycoinProject/skycoin/src/visor/interfaces.go delete mode 100644 vendor/github.com/SkycoinProject/skycoin/src/visor/meta.go delete mode 100644 vendor/github.com/SkycoinProject/skycoin/src/visor/objects.go delete mode 100644 vendor/github.com/SkycoinProject/skycoin/src/visor/richlist.go delete mode 100644 vendor/github.com/SkycoinProject/skycoin/src/visor/unconfirmed.go delete mode 100644 vendor/github.com/SkycoinProject/skycoin/src/visor/unconfirmed_transaction_skyencoder.go delete mode 100644 vendor/github.com/SkycoinProject/skycoin/src/visor/ux_array_skyencoder.go delete mode 100644 vendor/github.com/SkycoinProject/skycoin/src/visor/verify.go delete mode 100644 vendor/github.com/SkycoinProject/skycoin/src/visor/visor.go delete mode 100644 vendor/github.com/SkycoinProject/skycoin/src/visor/visor_wallet.go delete mode 100644 vendor/github.com/SkycoinProject/skycoin/src/wallet/balance.go delete mode 100644 vendor/github.com/SkycoinProject/skycoin/src/wallet/bip44_wallet.go delete mode 100644 vendor/github.com/SkycoinProject/skycoin/src/wallet/collection_wallet.go delete mode 100644 vendor/github.com/SkycoinProject/skycoin/src/wallet/crypto.go delete mode 100644 vendor/github.com/SkycoinProject/skycoin/src/wallet/deterministic_wallet.go delete mode 100644 vendor/github.com/SkycoinProject/skycoin/src/wallet/entry.go delete mode 100644 vendor/github.com/SkycoinProject/skycoin/src/wallet/meta.go delete mode 100644 vendor/github.com/SkycoinProject/skycoin/src/wallet/readable.go delete mode 100644 vendor/github.com/SkycoinProject/skycoin/src/wallet/secrets.go delete mode 100644 vendor/github.com/SkycoinProject/skycoin/src/wallet/service.go delete mode 100644 vendor/github.com/SkycoinProject/skycoin/src/wallet/transaction.go delete mode 100644 
vendor/github.com/SkycoinProject/skycoin/src/wallet/wallet.go delete mode 100644 vendor/github.com/SkycoinProject/skycoin/src/wallet/wallets.go delete mode 100644 vendor/github.com/SkycoinProject/skycoin/src/wallet/xpub_wallet.go delete mode 100644 vendor/github.com/StackExchange/wmi/LICENSE delete mode 100644 vendor/github.com/StackExchange/wmi/README.md delete mode 100644 vendor/github.com/StackExchange/wmi/swbemservices.go delete mode 100644 vendor/github.com/StackExchange/wmi/wmi.go delete mode 100644 vendor/github.com/VictoriaMetrics/fastcache/LICENSE delete mode 100644 vendor/github.com/VictoriaMetrics/fastcache/README.md delete mode 100644 vendor/github.com/VictoriaMetrics/fastcache/bigcache.go delete mode 100644 vendor/github.com/VictoriaMetrics/fastcache/fastcache.go delete mode 100644 vendor/github.com/VictoriaMetrics/fastcache/file.go delete mode 100644 vendor/github.com/VictoriaMetrics/fastcache/go.mod delete mode 100644 vendor/github.com/VictoriaMetrics/fastcache/go.sum delete mode 100644 vendor/github.com/VictoriaMetrics/fastcache/malloc_heap.go delete mode 100644 vendor/github.com/VictoriaMetrics/fastcache/malloc_mmap.go delete mode 100644 vendor/github.com/aristanetworks/goarista/AUTHORS delete mode 100644 vendor/github.com/aristanetworks/goarista/COPYING delete mode 100644 vendor/github.com/aristanetworks/goarista/monotime/issue15006.s delete mode 100644 vendor/github.com/aristanetworks/goarista/monotime/nanotime.go delete mode 100644 vendor/github.com/beorn7/perks/LICENSE delete mode 100644 vendor/github.com/beorn7/perks/quantile/exampledata.txt delete mode 100644 vendor/github.com/beorn7/perks/quantile/stream.go delete mode 100644 vendor/github.com/blang/semver/.travis.yml delete mode 100644 vendor/github.com/blang/semver/LICENSE delete mode 100644 vendor/github.com/blang/semver/README.md delete mode 100644 vendor/github.com/blang/semver/json.go delete mode 100644 vendor/github.com/blang/semver/package.json delete mode 100644 
vendor/github.com/blang/semver/range.go delete mode 100644 vendor/github.com/blang/semver/semver.go delete mode 100644 vendor/github.com/blang/semver/sort.go delete mode 100644 vendor/github.com/blang/semver/sql.go delete mode 100644 vendor/github.com/boltdb/bolt/.gitignore delete mode 100644 vendor/github.com/boltdb/bolt/LICENSE delete mode 100644 vendor/github.com/boltdb/bolt/Makefile delete mode 100644 vendor/github.com/boltdb/bolt/README.md delete mode 100644 vendor/github.com/boltdb/bolt/appveyor.yml delete mode 100644 vendor/github.com/boltdb/bolt/bolt_386.go delete mode 100644 vendor/github.com/boltdb/bolt/bolt_amd64.go delete mode 100644 vendor/github.com/boltdb/bolt/bolt_arm.go delete mode 100644 vendor/github.com/boltdb/bolt/bolt_arm64.go delete mode 100644 vendor/github.com/boltdb/bolt/bolt_linux.go delete mode 100644 vendor/github.com/boltdb/bolt/bolt_openbsd.go delete mode 100644 vendor/github.com/boltdb/bolt/bolt_ppc.go delete mode 100644 vendor/github.com/boltdb/bolt/bolt_ppc64.go delete mode 100644 vendor/github.com/boltdb/bolt/bolt_ppc64le.go delete mode 100644 vendor/github.com/boltdb/bolt/bolt_s390x.go delete mode 100644 vendor/github.com/boltdb/bolt/bolt_unix.go delete mode 100644 vendor/github.com/boltdb/bolt/bolt_unix_solaris.go delete mode 100644 vendor/github.com/boltdb/bolt/bolt_windows.go delete mode 100644 vendor/github.com/boltdb/bolt/boltsync_unix.go delete mode 100644 vendor/github.com/boltdb/bolt/bucket.go delete mode 100644 vendor/github.com/boltdb/bolt/cursor.go delete mode 100644 vendor/github.com/boltdb/bolt/db.go delete mode 100644 vendor/github.com/boltdb/bolt/doc.go delete mode 100644 vendor/github.com/boltdb/bolt/errors.go delete mode 100644 vendor/github.com/boltdb/bolt/freelist.go delete mode 100644 vendor/github.com/boltdb/bolt/node.go delete mode 100644 vendor/github.com/boltdb/bolt/page.go delete mode 100644 vendor/github.com/boltdb/bolt/tx.go delete mode 100644 vendor/github.com/cenkalti/backoff/.gitignore delete mode 
100644 vendor/github.com/cenkalti/backoff/.travis.yml delete mode 100644 vendor/github.com/cenkalti/backoff/LICENSE delete mode 100644 vendor/github.com/cenkalti/backoff/README.md delete mode 100644 vendor/github.com/cenkalti/backoff/backoff.go delete mode 100644 vendor/github.com/cenkalti/backoff/context.go delete mode 100644 vendor/github.com/cenkalti/backoff/exponential.go delete mode 100644 vendor/github.com/cenkalti/backoff/retry.go delete mode 100644 vendor/github.com/cenkalti/backoff/ticker.go delete mode 100644 vendor/github.com/cenkalti/backoff/tries.go delete mode 100644 vendor/github.com/cespare/xxhash/v2/.travis.yml delete mode 100644 vendor/github.com/cespare/xxhash/v2/LICENSE.txt delete mode 100644 vendor/github.com/cespare/xxhash/v2/README.md delete mode 100644 vendor/github.com/cespare/xxhash/v2/go.mod delete mode 100644 vendor/github.com/cespare/xxhash/v2/go.sum delete mode 100644 vendor/github.com/cespare/xxhash/v2/xxhash.go delete mode 100644 vendor/github.com/cespare/xxhash/v2/xxhash_amd64.go delete mode 100644 vendor/github.com/cespare/xxhash/v2/xxhash_amd64.s delete mode 100644 vendor/github.com/cespare/xxhash/v2/xxhash_other.go delete mode 100644 vendor/github.com/cespare/xxhash/v2/xxhash_safe.go delete mode 100644 vendor/github.com/cespare/xxhash/v2/xxhash_unsafe.go delete mode 100644 vendor/github.com/elastic/gosigar/.appveyor.yml delete mode 100644 vendor/github.com/elastic/gosigar/.gitignore delete mode 100644 vendor/github.com/elastic/gosigar/.travis.yml delete mode 100644 vendor/github.com/elastic/gosigar/CHANGELOG.md delete mode 100644 vendor/github.com/elastic/gosigar/LICENSE delete mode 100644 vendor/github.com/elastic/gosigar/NOTICE delete mode 100644 vendor/github.com/elastic/gosigar/README.md delete mode 100644 vendor/github.com/elastic/gosigar/Vagrantfile delete mode 100644 vendor/github.com/elastic/gosigar/codecov.yml delete mode 100644 vendor/github.com/elastic/gosigar/concrete_sigar.go delete mode 100644 
vendor/github.com/elastic/gosigar/sigar_darwin.go delete mode 100644 vendor/github.com/elastic/gosigar/sigar_format.go delete mode 100644 vendor/github.com/elastic/gosigar/sigar_freebsd.go delete mode 100644 vendor/github.com/elastic/gosigar/sigar_interface.go delete mode 100644 vendor/github.com/elastic/gosigar/sigar_linux.go delete mode 100644 vendor/github.com/elastic/gosigar/sigar_linux_common.go delete mode 100644 vendor/github.com/elastic/gosigar/sigar_openbsd.go delete mode 100644 vendor/github.com/elastic/gosigar/sigar_stub.go delete mode 100644 vendor/github.com/elastic/gosigar/sigar_unix.go delete mode 100644 vendor/github.com/elastic/gosigar/sigar_util.go delete mode 100644 vendor/github.com/elastic/gosigar/sigar_windows.go delete mode 100644 vendor/github.com/elastic/gosigar/sys/windows/doc.go delete mode 100644 vendor/github.com/elastic/gosigar/sys/windows/ntquery.go delete mode 100644 vendor/github.com/elastic/gosigar/sys/windows/privileges.go delete mode 100644 vendor/github.com/elastic/gosigar/sys/windows/syscall_windows.go delete mode 100644 vendor/github.com/elastic/gosigar/sys/windows/version.go delete mode 100644 vendor/github.com/elastic/gosigar/sys/windows/zsyscall_windows.go delete mode 100644 vendor/github.com/ethereum/go-ethereum/.dockerignore delete mode 100644 vendor/github.com/ethereum/go-ethereum/.gitattributes delete mode 100644 vendor/github.com/ethereum/go-ethereum/.gitignore delete mode 100644 vendor/github.com/ethereum/go-ethereum/.gitmodules delete mode 100644 vendor/github.com/ethereum/go-ethereum/.golangci.yml delete mode 100644 vendor/github.com/ethereum/go-ethereum/.mailmap delete mode 100644 vendor/github.com/ethereum/go-ethereum/.travis.yml delete mode 100644 vendor/github.com/ethereum/go-ethereum/Dockerfile delete mode 100644 vendor/github.com/ethereum/go-ethereum/Dockerfile.alltools delete mode 100644 vendor/github.com/ethereum/go-ethereum/Makefile delete mode 100644 vendor/github.com/ethereum/go-ethereum/README.md delete 
mode 100644 vendor/github.com/ethereum/go-ethereum/SECURITY.md delete mode 100644 vendor/github.com/ethereum/go-ethereum/appveyor.yml delete mode 100644 vendor/github.com/ethereum/go-ethereum/circle.yml delete mode 100644 vendor/github.com/ethereum/go-ethereum/common/mclock/mclock.go delete mode 100644 vendor/github.com/ethereum/go-ethereum/common/mclock/simclock.go delete mode 100644 vendor/github.com/ethereum/go-ethereum/common/prque/lazyqueue.go delete mode 100644 vendor/github.com/ethereum/go-ethereum/common/prque/prque.go delete mode 100644 vendor/github.com/ethereum/go-ethereum/common/prque/sstack.go delete mode 100644 vendor/github.com/ethereum/go-ethereum/core/types/block.go delete mode 100644 vendor/github.com/ethereum/go-ethereum/core/types/bloom9.go delete mode 100644 vendor/github.com/ethereum/go-ethereum/core/types/derive_sha.go delete mode 100644 vendor/github.com/ethereum/go-ethereum/core/types/gen_header_json.go delete mode 100644 vendor/github.com/ethereum/go-ethereum/core/types/gen_log_json.go delete mode 100644 vendor/github.com/ethereum/go-ethereum/core/types/gen_receipt_json.go delete mode 100644 vendor/github.com/ethereum/go-ethereum/core/types/gen_tx_json.go delete mode 100644 vendor/github.com/ethereum/go-ethereum/core/types/log.go delete mode 100644 vendor/github.com/ethereum/go-ethereum/core/types/receipt.go delete mode 100644 vendor/github.com/ethereum/go-ethereum/core/types/transaction.go delete mode 100644 vendor/github.com/ethereum/go-ethereum/core/types/transaction_signing.go delete mode 100644 vendor/github.com/ethereum/go-ethereum/ethdb/batch.go delete mode 100644 vendor/github.com/ethereum/go-ethereum/ethdb/database.go delete mode 100644 vendor/github.com/ethereum/go-ethereum/ethdb/iterator.go delete mode 100644 vendor/github.com/ethereum/go-ethereum/fuzzbuzz.yaml delete mode 100644 vendor/github.com/ethereum/go-ethereum/go.mod delete mode 100644 vendor/github.com/ethereum/go-ethereum/go.sum delete mode 100644 
vendor/github.com/ethereum/go-ethereum/interfaces.go delete mode 100644 vendor/github.com/ethereum/go-ethereum/log/CONTRIBUTORS delete mode 100644 vendor/github.com/ethereum/go-ethereum/log/LICENSE delete mode 100644 vendor/github.com/ethereum/go-ethereum/log/README.md delete mode 100644 vendor/github.com/ethereum/go-ethereum/log/README_ETHEREUM.md delete mode 100644 vendor/github.com/ethereum/go-ethereum/log/doc.go delete mode 100644 vendor/github.com/ethereum/go-ethereum/log/format.go delete mode 100644 vendor/github.com/ethereum/go-ethereum/log/handler.go delete mode 100644 vendor/github.com/ethereum/go-ethereum/log/handler_glog.go delete mode 100644 vendor/github.com/ethereum/go-ethereum/log/handler_go13.go delete mode 100644 vendor/github.com/ethereum/go-ethereum/log/handler_go14.go delete mode 100644 vendor/github.com/ethereum/go-ethereum/log/logger.go delete mode 100644 vendor/github.com/ethereum/go-ethereum/log/root.go delete mode 100644 vendor/github.com/ethereum/go-ethereum/log/syslog.go delete mode 100644 vendor/github.com/ethereum/go-ethereum/metrics/FORK.md delete mode 100644 vendor/github.com/ethereum/go-ethereum/metrics/LICENSE delete mode 100644 vendor/github.com/ethereum/go-ethereum/metrics/README.md delete mode 100644 vendor/github.com/ethereum/go-ethereum/metrics/counter.go delete mode 100644 vendor/github.com/ethereum/go-ethereum/metrics/cpu.go delete mode 100644 vendor/github.com/ethereum/go-ethereum/metrics/cpu_syscall.go delete mode 100644 vendor/github.com/ethereum/go-ethereum/metrics/cpu_windows.go delete mode 100644 vendor/github.com/ethereum/go-ethereum/metrics/debug.go delete mode 100644 vendor/github.com/ethereum/go-ethereum/metrics/disk.go delete mode 100644 vendor/github.com/ethereum/go-ethereum/metrics/disk_linux.go delete mode 100644 vendor/github.com/ethereum/go-ethereum/metrics/disk_nop.go delete mode 100644 vendor/github.com/ethereum/go-ethereum/metrics/doc.go delete mode 100644 
vendor/github.com/ethereum/go-ethereum/metrics/ewma.go delete mode 100644 vendor/github.com/ethereum/go-ethereum/metrics/gauge.go delete mode 100644 vendor/github.com/ethereum/go-ethereum/metrics/gauge_float64.go delete mode 100644 vendor/github.com/ethereum/go-ethereum/metrics/graphite.go delete mode 100644 vendor/github.com/ethereum/go-ethereum/metrics/healthcheck.go delete mode 100644 vendor/github.com/ethereum/go-ethereum/metrics/histogram.go delete mode 100644 vendor/github.com/ethereum/go-ethereum/metrics/json.go delete mode 100644 vendor/github.com/ethereum/go-ethereum/metrics/log.go delete mode 100644 vendor/github.com/ethereum/go-ethereum/metrics/memory.md delete mode 100644 vendor/github.com/ethereum/go-ethereum/metrics/meter.go delete mode 100644 vendor/github.com/ethereum/go-ethereum/metrics/metrics.go delete mode 100644 vendor/github.com/ethereum/go-ethereum/metrics/opentsdb.go delete mode 100644 vendor/github.com/ethereum/go-ethereum/metrics/registry.go delete mode 100644 vendor/github.com/ethereum/go-ethereum/metrics/resetting_timer.go delete mode 100644 vendor/github.com/ethereum/go-ethereum/metrics/runtime.go delete mode 100644 vendor/github.com/ethereum/go-ethereum/metrics/runtime_cgo.go delete mode 100644 vendor/github.com/ethereum/go-ethereum/metrics/runtime_gccpufraction.go delete mode 100644 vendor/github.com/ethereum/go-ethereum/metrics/runtime_no_cgo.go delete mode 100644 vendor/github.com/ethereum/go-ethereum/metrics/runtime_no_gccpufraction.go delete mode 100644 vendor/github.com/ethereum/go-ethereum/metrics/sample.go delete mode 100644 vendor/github.com/ethereum/go-ethereum/metrics/syslog.go delete mode 100644 vendor/github.com/ethereum/go-ethereum/metrics/timer.go delete mode 100644 vendor/github.com/ethereum/go-ethereum/metrics/validate.sh delete mode 100644 vendor/github.com/ethereum/go-ethereum/metrics/writer.go delete mode 100644 vendor/github.com/ethereum/go-ethereum/params/bootnodes.go delete mode 100644 
vendor/github.com/ethereum/go-ethereum/params/config.go delete mode 100644 vendor/github.com/ethereum/go-ethereum/params/dao.go delete mode 100644 vendor/github.com/ethereum/go-ethereum/params/denomination.go delete mode 100644 vendor/github.com/ethereum/go-ethereum/params/network_params.go delete mode 100644 vendor/github.com/ethereum/go-ethereum/params/protocol_params.go delete mode 100644 vendor/github.com/ethereum/go-ethereum/params/version.go delete mode 100644 vendor/github.com/ethereum/go-ethereum/trie/committer.go delete mode 100644 vendor/github.com/ethereum/go-ethereum/trie/database.go delete mode 100644 vendor/github.com/ethereum/go-ethereum/trie/encoding.go delete mode 100644 vendor/github.com/ethereum/go-ethereum/trie/errors.go delete mode 100644 vendor/github.com/ethereum/go-ethereum/trie/hasher.go delete mode 100644 vendor/github.com/ethereum/go-ethereum/trie/iterator.go delete mode 100644 vendor/github.com/ethereum/go-ethereum/trie/node.go delete mode 100644 vendor/github.com/ethereum/go-ethereum/trie/proof.go delete mode 100644 vendor/github.com/ethereum/go-ethereum/trie/secure_trie.go delete mode 100644 vendor/github.com/ethereum/go-ethereum/trie/sync.go delete mode 100644 vendor/github.com/ethereum/go-ethereum/trie/sync_bloom.go delete mode 100644 vendor/github.com/ethereum/go-ethereum/trie/trie.go delete mode 100644 vendor/github.com/go-ole/go-ole/.travis.yml delete mode 100644 vendor/github.com/go-ole/go-ole/ChangeLog.md delete mode 100644 vendor/github.com/go-ole/go-ole/LICENSE delete mode 100644 vendor/github.com/go-ole/go-ole/README.md delete mode 100644 vendor/github.com/go-ole/go-ole/appveyor.yml delete mode 100644 vendor/github.com/go-ole/go-ole/com.go delete mode 100644 vendor/github.com/go-ole/go-ole/com_func.go delete mode 100644 vendor/github.com/go-ole/go-ole/connect.go delete mode 100644 vendor/github.com/go-ole/go-ole/constants.go delete mode 100644 vendor/github.com/go-ole/go-ole/error.go delete mode 100644 
vendor/github.com/go-ole/go-ole/error_func.go delete mode 100644 vendor/github.com/go-ole/go-ole/error_windows.go delete mode 100644 vendor/github.com/go-ole/go-ole/guid.go delete mode 100644 vendor/github.com/go-ole/go-ole/iconnectionpoint.go delete mode 100644 vendor/github.com/go-ole/go-ole/iconnectionpoint_func.go delete mode 100644 vendor/github.com/go-ole/go-ole/iconnectionpoint_windows.go delete mode 100644 vendor/github.com/go-ole/go-ole/iconnectionpointcontainer.go delete mode 100644 vendor/github.com/go-ole/go-ole/iconnectionpointcontainer_func.go delete mode 100644 vendor/github.com/go-ole/go-ole/iconnectionpointcontainer_windows.go delete mode 100644 vendor/github.com/go-ole/go-ole/idispatch.go delete mode 100644 vendor/github.com/go-ole/go-ole/idispatch_func.go delete mode 100644 vendor/github.com/go-ole/go-ole/idispatch_windows.go delete mode 100644 vendor/github.com/go-ole/go-ole/ienumvariant.go delete mode 100644 vendor/github.com/go-ole/go-ole/ienumvariant_func.go delete mode 100644 vendor/github.com/go-ole/go-ole/ienumvariant_windows.go delete mode 100644 vendor/github.com/go-ole/go-ole/iinspectable.go delete mode 100644 vendor/github.com/go-ole/go-ole/iinspectable_func.go delete mode 100644 vendor/github.com/go-ole/go-ole/iinspectable_windows.go delete mode 100644 vendor/github.com/go-ole/go-ole/iprovideclassinfo.go delete mode 100644 vendor/github.com/go-ole/go-ole/iprovideclassinfo_func.go delete mode 100644 vendor/github.com/go-ole/go-ole/iprovideclassinfo_windows.go delete mode 100644 vendor/github.com/go-ole/go-ole/itypeinfo.go delete mode 100644 vendor/github.com/go-ole/go-ole/itypeinfo_func.go delete mode 100644 vendor/github.com/go-ole/go-ole/itypeinfo_windows.go delete mode 100644 vendor/github.com/go-ole/go-ole/iunknown.go delete mode 100644 vendor/github.com/go-ole/go-ole/iunknown_func.go delete mode 100644 vendor/github.com/go-ole/go-ole/iunknown_windows.go delete mode 100644 vendor/github.com/go-ole/go-ole/ole.go delete mode 100644 
vendor/github.com/go-ole/go-ole/oleutil/connection.go delete mode 100644 vendor/github.com/go-ole/go-ole/oleutil/connection_func.go delete mode 100644 vendor/github.com/go-ole/go-ole/oleutil/connection_windows.go delete mode 100644 vendor/github.com/go-ole/go-ole/oleutil/go-get.go delete mode 100644 vendor/github.com/go-ole/go-ole/oleutil/oleutil.go delete mode 100644 vendor/github.com/go-ole/go-ole/safearray.go delete mode 100644 vendor/github.com/go-ole/go-ole/safearray_func.go delete mode 100644 vendor/github.com/go-ole/go-ole/safearray_windows.go delete mode 100644 vendor/github.com/go-ole/go-ole/safearrayconversion.go delete mode 100644 vendor/github.com/go-ole/go-ole/safearrayslices.go delete mode 100644 vendor/github.com/go-ole/go-ole/utility.go delete mode 100644 vendor/github.com/go-ole/go-ole/variables.go delete mode 100644 vendor/github.com/go-ole/go-ole/variant.go delete mode 100644 vendor/github.com/go-ole/go-ole/variant_386.go delete mode 100644 vendor/github.com/go-ole/go-ole/variant_amd64.go delete mode 100644 vendor/github.com/go-ole/go-ole/variant_s390x.go delete mode 100644 vendor/github.com/go-ole/go-ole/vt_string.go delete mode 100644 vendor/github.com/go-ole/go-ole/winrt.go delete mode 100644 vendor/github.com/go-ole/go-ole/winrt_doc.go delete mode 100644 vendor/github.com/go-stack/stack/.travis.yml delete mode 100644 vendor/github.com/go-stack/stack/LICENSE.md delete mode 100644 vendor/github.com/go-stack/stack/README.md delete mode 100644 vendor/github.com/go-stack/stack/go.mod delete mode 100644 vendor/github.com/go-stack/stack/stack.go delete mode 100644 vendor/github.com/golang/protobuf/AUTHORS delete mode 100644 vendor/github.com/golang/protobuf/CONTRIBUTORS delete mode 100644 vendor/github.com/golang/protobuf/LICENSE delete mode 100644 vendor/github.com/golang/protobuf/proto/clone.go delete mode 100644 vendor/github.com/golang/protobuf/proto/decode.go delete mode 100644 vendor/github.com/golang/protobuf/proto/deprecated.go delete mode 
100644 vendor/github.com/golang/protobuf/proto/discard.go delete mode 100644 vendor/github.com/golang/protobuf/proto/encode.go delete mode 100644 vendor/github.com/golang/protobuf/proto/equal.go delete mode 100644 vendor/github.com/golang/protobuf/proto/extensions.go delete mode 100644 vendor/github.com/golang/protobuf/proto/lib.go delete mode 100644 vendor/github.com/golang/protobuf/proto/message_set.go delete mode 100644 vendor/github.com/golang/protobuf/proto/pointer_reflect.go delete mode 100644 vendor/github.com/golang/protobuf/proto/pointer_unsafe.go delete mode 100644 vendor/github.com/golang/protobuf/proto/properties.go delete mode 100644 vendor/github.com/golang/protobuf/proto/table_marshal.go delete mode 100644 vendor/github.com/golang/protobuf/proto/table_merge.go delete mode 100644 vendor/github.com/golang/protobuf/proto/table_unmarshal.go delete mode 100644 vendor/github.com/golang/protobuf/proto/text.go delete mode 100644 vendor/github.com/golang/protobuf/proto/text_parser.go delete mode 100644 vendor/github.com/golang/snappy/.gitignore delete mode 100644 vendor/github.com/golang/snappy/AUTHORS delete mode 100644 vendor/github.com/golang/snappy/CONTRIBUTORS delete mode 100644 vendor/github.com/golang/snappy/LICENSE delete mode 100644 vendor/github.com/golang/snappy/README delete mode 100644 vendor/github.com/golang/snappy/decode.go delete mode 100644 vendor/github.com/golang/snappy/decode_amd64.go delete mode 100644 vendor/github.com/golang/snappy/decode_amd64.s delete mode 100644 vendor/github.com/golang/snappy/decode_other.go delete mode 100644 vendor/github.com/golang/snappy/encode.go delete mode 100644 vendor/github.com/golang/snappy/encode_amd64.go delete mode 100644 vendor/github.com/golang/snappy/encode_amd64.s delete mode 100644 vendor/github.com/golang/snappy/encode_other.go delete mode 100644 vendor/github.com/golang/snappy/go.mod delete mode 100644 vendor/github.com/golang/snappy/snappy.go delete mode 100644 
vendor/github.com/matttproud/golang_protobuf_extensions/LICENSE delete mode 100644 vendor/github.com/matttproud/golang_protobuf_extensions/NOTICE delete mode 100644 vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/.gitignore delete mode 100644 vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/Makefile delete mode 100644 vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/decode.go delete mode 100644 vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/doc.go delete mode 100644 vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/encode.go delete mode 100644 vendor/github.com/pkg/errors/.gitignore delete mode 100644 vendor/github.com/pkg/errors/.travis.yml delete mode 100644 vendor/github.com/pkg/errors/LICENSE delete mode 100644 vendor/github.com/pkg/errors/README.md delete mode 100644 vendor/github.com/pkg/errors/appveyor.yml delete mode 100644 vendor/github.com/pkg/errors/errors.go delete mode 100644 vendor/github.com/pkg/errors/stack.go delete mode 100644 vendor/github.com/prometheus/client_golang/LICENSE delete mode 100644 vendor/github.com/prometheus/client_golang/NOTICE delete mode 100644 vendor/github.com/prometheus/client_golang/prometheus/.gitignore delete mode 100644 vendor/github.com/prometheus/client_golang/prometheus/README.md delete mode 100644 vendor/github.com/prometheus/client_golang/prometheus/collector.go delete mode 100644 vendor/github.com/prometheus/client_golang/prometheus/counter.go delete mode 100644 vendor/github.com/prometheus/client_golang/prometheus/desc.go delete mode 100644 vendor/github.com/prometheus/client_golang/prometheus/doc.go delete mode 100644 vendor/github.com/prometheus/client_golang/prometheus/expvar_collector.go delete mode 100644 vendor/github.com/prometheus/client_golang/prometheus/fnv.go delete mode 100644 vendor/github.com/prometheus/client_golang/prometheus/gauge.go delete mode 100644 vendor/github.com/prometheus/client_golang/prometheus/go_collector.go delete 
mode 100644 vendor/github.com/prometheus/client_golang/prometheus/histogram.go delete mode 100644 vendor/github.com/prometheus/client_golang/prometheus/http.go delete mode 100644 vendor/github.com/prometheus/client_golang/prometheus/internal/metric.go delete mode 100644 vendor/github.com/prometheus/client_golang/prometheus/labels.go delete mode 100644 vendor/github.com/prometheus/client_golang/prometheus/metric.go delete mode 100644 vendor/github.com/prometheus/client_golang/prometheus/observer.go delete mode 100644 vendor/github.com/prometheus/client_golang/prometheus/process_collector.go delete mode 100644 vendor/github.com/prometheus/client_golang/prometheus/promhttp/delegator.go delete mode 100644 vendor/github.com/prometheus/client_golang/prometheus/promhttp/delegator_1_8.go delete mode 100644 vendor/github.com/prometheus/client_golang/prometheus/promhttp/delegator_pre_1_8.go delete mode 100644 vendor/github.com/prometheus/client_golang/prometheus/promhttp/http.go delete mode 100644 vendor/github.com/prometheus/client_golang/prometheus/promhttp/instrument_client.go delete mode 100644 vendor/github.com/prometheus/client_golang/prometheus/promhttp/instrument_client_1_8.go delete mode 100644 vendor/github.com/prometheus/client_golang/prometheus/promhttp/instrument_server.go delete mode 100644 vendor/github.com/prometheus/client_golang/prometheus/registry.go delete mode 100644 vendor/github.com/prometheus/client_golang/prometheus/summary.go delete mode 100644 vendor/github.com/prometheus/client_golang/prometheus/timer.go delete mode 100644 vendor/github.com/prometheus/client_golang/prometheus/untyped.go delete mode 100644 vendor/github.com/prometheus/client_golang/prometheus/value.go delete mode 100644 vendor/github.com/prometheus/client_golang/prometheus/vec.go delete mode 100644 vendor/github.com/prometheus/client_golang/prometheus/wrap.go delete mode 100644 vendor/github.com/prometheus/client_model/LICENSE delete mode 100644 
vendor/github.com/prometheus/client_model/NOTICE delete mode 100644 vendor/github.com/prometheus/client_model/go/metrics.pb.go delete mode 100644 vendor/github.com/prometheus/common/LICENSE delete mode 100644 vendor/github.com/prometheus/common/NOTICE delete mode 100644 vendor/github.com/prometheus/common/expfmt/decode.go delete mode 100644 vendor/github.com/prometheus/common/expfmt/encode.go delete mode 100644 vendor/github.com/prometheus/common/expfmt/expfmt.go delete mode 100644 vendor/github.com/prometheus/common/expfmt/fuzz.go delete mode 100644 vendor/github.com/prometheus/common/expfmt/text_create.go delete mode 100644 vendor/github.com/prometheus/common/expfmt/text_parse.go delete mode 100644 vendor/github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg/README.txt delete mode 100644 vendor/github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg/autoneg.go delete mode 100644 vendor/github.com/prometheus/common/model/alert.go delete mode 100644 vendor/github.com/prometheus/common/model/fingerprinting.go delete mode 100644 vendor/github.com/prometheus/common/model/fnv.go delete mode 100644 vendor/github.com/prometheus/common/model/labels.go delete mode 100644 vendor/github.com/prometheus/common/model/labelset.go delete mode 100644 vendor/github.com/prometheus/common/model/metric.go delete mode 100644 vendor/github.com/prometheus/common/model/model.go delete mode 100644 vendor/github.com/prometheus/common/model/signature.go delete mode 100644 vendor/github.com/prometheus/common/model/silence.go delete mode 100644 vendor/github.com/prometheus/common/model/time.go delete mode 100644 vendor/github.com/prometheus/common/model/value.go delete mode 100644 vendor/github.com/prometheus/procfs/.gitignore delete mode 100644 vendor/github.com/prometheus/procfs/CONTRIBUTING.md delete mode 100644 vendor/github.com/prometheus/procfs/LICENSE delete mode 100644 vendor/github.com/prometheus/procfs/MAINTAINERS.md delete mode 100644 
vendor/github.com/prometheus/procfs/Makefile delete mode 100644 vendor/github.com/prometheus/procfs/NOTICE delete mode 100644 vendor/github.com/prometheus/procfs/README.md delete mode 100644 vendor/github.com/prometheus/procfs/buddyinfo.go delete mode 100644 vendor/github.com/prometheus/procfs/doc.go delete mode 100644 vendor/github.com/prometheus/procfs/fixtures.ttar delete mode 100644 vendor/github.com/prometheus/procfs/fs.go delete mode 100644 vendor/github.com/prometheus/procfs/internal/util/parse.go delete mode 100644 vendor/github.com/prometheus/procfs/internal/util/sysreadfile_linux.go delete mode 100644 vendor/github.com/prometheus/procfs/ipvs.go delete mode 100644 vendor/github.com/prometheus/procfs/mdstat.go delete mode 100644 vendor/github.com/prometheus/procfs/mountstats.go delete mode 100644 vendor/github.com/prometheus/procfs/net_dev.go delete mode 100644 vendor/github.com/prometheus/procfs/nfs/nfs.go delete mode 100644 vendor/github.com/prometheus/procfs/nfs/parse.go delete mode 100644 vendor/github.com/prometheus/procfs/nfs/parse_nfs.go delete mode 100644 vendor/github.com/prometheus/procfs/nfs/parse_nfsd.go delete mode 100644 vendor/github.com/prometheus/procfs/proc.go delete mode 100644 vendor/github.com/prometheus/procfs/proc_io.go delete mode 100644 vendor/github.com/prometheus/procfs/proc_limits.go delete mode 100644 vendor/github.com/prometheus/procfs/proc_ns.go delete mode 100644 vendor/github.com/prometheus/procfs/proc_stat.go delete mode 100644 vendor/github.com/prometheus/procfs/stat.go delete mode 100644 vendor/github.com/prometheus/procfs/ttar delete mode 100644 vendor/github.com/prometheus/procfs/xfrm.go delete mode 100644 vendor/github.com/prometheus/procfs/xfs/parse.go delete mode 100644 vendor/github.com/prometheus/procfs/xfs/xfs.go delete mode 100644 vendor/github.com/rs/cors/.travis.yml delete mode 100644 vendor/github.com/rs/cors/LICENSE delete mode 100644 vendor/github.com/rs/cors/README.md delete mode 100644 
vendor/github.com/rs/cors/cors.go delete mode 100644 vendor/github.com/rs/cors/utils.go delete mode 100644 vendor/github.com/rs/xhandler/.travis.yml delete mode 100644 vendor/github.com/rs/xhandler/LICENSE delete mode 100644 vendor/github.com/rs/xhandler/README.md delete mode 100644 vendor/github.com/rs/xhandler/chain.go delete mode 100644 vendor/github.com/rs/xhandler/middleware.go delete mode 100644 vendor/github.com/rs/xhandler/xhandler.go delete mode 100644 vendor/github.com/steakknife/bloomfilter/.travis.yml delete mode 100644 vendor/github.com/steakknife/bloomfilter/MIT-LICENSE.txt delete mode 100644 vendor/github.com/steakknife/bloomfilter/README.md delete mode 100644 vendor/github.com/steakknife/bloomfilter/binarymarshaler.go delete mode 100644 vendor/github.com/steakknife/bloomfilter/binaryunmarshaler.go delete mode 100644 vendor/github.com/steakknife/bloomfilter/bloomfilter.go delete mode 100644 vendor/github.com/steakknife/bloomfilter/conformance.go delete mode 100644 vendor/github.com/steakknife/bloomfilter/debug.go delete mode 100644 vendor/github.com/steakknife/bloomfilter/errors.go delete mode 100644 vendor/github.com/steakknife/bloomfilter/fileio.go delete mode 100644 vendor/github.com/steakknife/bloomfilter/gob.go delete mode 100644 vendor/github.com/steakknife/bloomfilter/iscompatible.go delete mode 100644 vendor/github.com/steakknife/bloomfilter/new.go delete mode 100644 vendor/github.com/steakknife/bloomfilter/optimal.go delete mode 100644 vendor/github.com/steakknife/bloomfilter/statistics.go delete mode 100644 vendor/github.com/steakknife/bloomfilter/textmarshaler.go delete mode 100644 vendor/github.com/steakknife/bloomfilter/textunmarshaler.go delete mode 100644 vendor/github.com/steakknife/hamming/.gitignore delete mode 100644 vendor/github.com/steakknife/hamming/.travis.yml delete mode 100644 vendor/github.com/steakknife/hamming/MIT-LICENSE.txt delete mode 100644 vendor/github.com/steakknife/hamming/README.md delete mode 100644 
vendor/github.com/steakknife/hamming/doc.go delete mode 100644 vendor/github.com/steakknife/hamming/hamming.go delete mode 100644 vendor/github.com/steakknife/hamming/popcnt_amd64.go delete mode 100644 vendor/github.com/steakknife/hamming/popcnt_amd64.s delete mode 100644 vendor/github.com/steakknife/hamming/popcount.go delete mode 100644 vendor/github.com/steakknife/hamming/popcount_slices.go delete mode 100644 vendor/github.com/steakknife/hamming/popcount_slices_amd64.go delete mode 100644 vendor/github.com/steakknife/hamming/popcount_slices_amd64.s delete mode 100644 vendor/github.com/steakknife/hamming/slices_of_hamming.go delete mode 100644 vendor/golang.org/x/net/AUTHORS delete mode 100644 vendor/golang.org/x/net/CONTRIBUTORS delete mode 100644 vendor/golang.org/x/net/LICENSE delete mode 100644 vendor/golang.org/x/net/PATENTS delete mode 100644 vendor/golang.org/x/net/context/context.go delete mode 100644 vendor/golang.org/x/net/context/go17.go delete mode 100644 vendor/golang.org/x/net/context/go19.go delete mode 100644 vendor/golang.org/x/net/context/pre_go17.go delete mode 100644 vendor/golang.org/x/net/context/pre_go19.go diff --git a/cmd/multicoin/multicoin.go b/cmd/multicoin/multicoin.go index e794e56..ba4b3ea 100644 --- a/cmd/multicoin/multicoin.go +++ b/cmd/multicoin/multicoin.go @@ -1,4 +1,4 @@ -package multicoin +package main import ( "flag" @@ -21,10 +21,6 @@ var ( parseFlags = true ) -func init() { - -} - func main() { if parseFlags { flag.Parse() diff --git a/cmd/proxy/proxy.go b/cmd/proxy/proxy.go new file mode 100644 index 0000000..fbf856e --- /dev/null +++ b/cmd/proxy/proxy.go @@ -0,0 +1,74 @@ +package main + +import ( + "fmt" + "log" + "net/http" + "net/http/httputil" + "net/url" + "time" + + "github.com/SkycoinProject/dmsg" + "github.com/SkycoinProject/dmsg/cipher" + "github.com/SkycoinProject/dmsg/disc" +) + +var ( + PK = "0311607e59d1d0dc07fa33c641d31af10b8081de57b1c3c0d732804099f6a64dcb" + SK = 
"f36543b56f5bd8b93cac088c1550c9081e9bd8302b18f09fc7e42ed9270aae65" +) + +func main() { + var sPK cipher.PubKey + var sSK cipher.SecKey + _ = sPK.Set(PK) + _ = sSK.Set(SK) + + dmsgClient := dmsg.NewClient(sPK, sSK, disc.NewHTTP("http://dmsg.discovery.skywire.cc"), dmsg.DefaultConfig()) + go dmsgClient.Serve() + + time.Sleep(time.Second) // wait for dmsg client to be ready + + // port where server will listen + serverPort := uint16(8080) + + btcdrpcurl, err := url.Parse("http://127.0.0.1:18554") + if err != nil { + panic(err) + } + + proxy := httputil.NewSingleHostReverseProxy(btcdrpcurl) + + // prepare server route handling + mux := http.NewServeMux() + mux.HandleFunc("/", handler(proxy)) + + // run the server + srv := &http.Server{ + Handler: mux, + } + + list, err := dmsgClient.Listen(serverPort) + if err != nil { + panic(err) + } + + sErr := make(chan error, 1) + go func() { + sErr <- srv.Serve(list) + close(sErr) + }() + + var retErr error + select { + case retErr = <-sErr: + fmt.Println(retErr) + } +} + +func handler(p *httputil.ReverseProxy) func(http.ResponseWriter, *http.Request) { + return func(w http.ResponseWriter, r *http.Request) { + log.Println(r.URL) + p.ServeHTTP(w, r) + } +} diff --git a/go.mod b/go.mod index 698f43e..42429ca 100644 --- a/go.mod +++ b/go.mod @@ -3,15 +3,17 @@ module github.com/SkycoinProject/multicoin-wallet go 1.13 require ( - github.com/NYTimes/gziphandler v1.1.1 // indirect + github.com/NYTimes/gziphandler v1.1.1 + github.com/SkycoinProject/dmsg v0.1.1-0.20200323190518-7370d0e02392 + github.com/SkycoinProject/dmsg-http v0.0.0-20200318122149-9977e5986a7d github.com/SkycoinProject/skycoin v0.27.0 - github.com/blang/semver v3.5.1+incompatible // indirect github.com/boltdb/bolt v1.3.1 // indirect - github.com/btcsuite/btcd v0.20.1-beta - github.com/cenkalti/backoff v2.2.1+incompatible // indirect + github.com/btcsuite/btcd v0.20.1-beta.0.20200325095142-cfcf4fb7625a + github.com/btcsuite/btcutil v1.0.1 github.com/ethereum/go-ethereum 
v1.9.12 - github.com/mgutz/ansi v0.0.0-20170206155736-9520e82c474b // indirect github.com/shopspring/decimal v0.0.0-20200227202807-02e2044944cc // indirect - github.com/sirupsen/logrus v1.4.2 + github.com/sirupsen/logrus v1.5.0 github.com/stretchr/testify v1.5.1 + golang.org/x/crypto v0.0.0-20200323165209-0ec3e9974c59 // indirect + golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd // indirect ) diff --git a/go.sum b/go.sum index 8b9dcb0..4650dc5 100644 --- a/go.sum +++ b/go.sum @@ -1,3 +1,4 @@ +cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= github.com/Azure/azure-pipeline-go v0.2.1/go.mod h1:UGSo8XybXnIGZ3epmeBw7Jdz+HiUVpqIlpz/HKHylF4= github.com/Azure/azure-pipeline-go v0.2.2/go.mod h1:4rQ/NZncSvGqNkkOsNpOU1tgoNuIlp9AfUH5G1tvCHc= github.com/Azure/azure-storage-blob-go v0.7.0/go.mod h1:f9YQKtsG1nMisotuTPpO0tjNuEjKRYAcJU8/ydDI++4= @@ -16,49 +17,79 @@ github.com/NYTimes/gziphandler v1.1.1 h1:ZUDjpQae29j0ryrS0u/B8HZfJBtBQHjqw2rQ2cq github.com/NYTimes/gziphandler v1.1.1/go.mod h1:n/CVRwUEOgIxrgPvAQhUUr9oeUtvrhMomdKFjzJNB0c= github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU= github.com/OneOfOne/xxhash v1.2.5/go.mod h1:eZbhyaAYD41SGSSsnmcpxVoRiQ/MPUTjUdIIOT9Um7Q= +github.com/SkycoinProject/dmsg v0.0.0-20200306152741-acee74fa4514/go.mod h1:DzykXMLlx6Fx0fGjZsCIRas/MIvxW8DZpmDA6f2nCRk= +github.com/SkycoinProject/dmsg v0.1.0/go.mod h1:MiX+UG/6fl3g+9rS13/fq7BwUQ2eOlg1yOBOnNf6J6A= +github.com/SkycoinProject/dmsg v0.1.1-0.20200323190518-7370d0e02392 h1:XWHWCgjKwqwADkpkQLTV9XByqN0GJtrromb/tIfV5ac= +github.com/SkycoinProject/dmsg v0.1.1-0.20200323190518-7370d0e02392/go.mod h1:MiX+UG/6fl3g+9rS13/fq7BwUQ2eOlg1yOBOnNf6J6A= +github.com/SkycoinProject/dmsg-http v0.0.0-20200318122149-9977e5986a7d h1:gNOs5Io4cgvVKVFIaLdsR9hhjGyn/fn8yinTD07rrEM= +github.com/SkycoinProject/dmsg-http v0.0.0-20200318122149-9977e5986a7d/go.mod h1:ATW5sulr7oe1GIvPmQLZhJeOtEKT/lCzkm5iwTr2Ocg= +github.com/SkycoinProject/skycoin 
v0.26.0/go.mod h1:xqPLOKh5B6GBZlGA7B5IJfQmCy7mwimD9NlqxR3gMXo= github.com/SkycoinProject/skycoin v0.27.0 h1:N3IHxj8ossHOcsxLYOYugT+OaELLncYHJHxbbYLPPmY= github.com/SkycoinProject/skycoin v0.27.0/go.mod h1:xqPLOKh5B6GBZlGA7B5IJfQmCy7mwimD9NlqxR3gMXo= +github.com/SkycoinProject/skywire-mainnet v0.0.0-20200309204032-14af5342da86/go.mod h1:xuOpE5ZZU2kR39u0tJWtOpak/sJpnEFj1HpTxtyPU/A= +github.com/SkycoinProject/yamux v0.0.0-20191213015001-a36efeefbf6a h1:6nHCJqh7trsuRcpMC5JmtDukUndn2VC9sY64K6xQ7hQ= +github.com/SkycoinProject/yamux v0.0.0-20191213015001-a36efeefbf6a/go.mod h1:IaE1dxncLQs4RJcQTZPikJfAZY4szH87u2h0lT0SDuM= github.com/StackExchange/wmi v0.0.0-20180116203802-5d049714c4a6 h1:fLjPD/aNc3UIOA6tDi6QXUemppXK3P9BI7mr2hd6gx8= github.com/StackExchange/wmi v0.0.0-20180116203802-5d049714c4a6/go.mod h1:3eOhrUMpNV+6aFIbp5/iudMxNCF27Vw2OZgy4xEx0Fg= github.com/VictoriaMetrics/fastcache v1.5.3 h1:2odJnXLbFZcoV9KYtQ+7TH1UOq3dn3AssMgieaezkR4= github.com/VictoriaMetrics/fastcache v1.5.3/go.mod h1:+jv9Ckb+za/P1ZRg/sulP5Ni1v49daAVERr0H3CuscE= +github.com/aead/siphash v1.0.1 h1:FwHfE/T45KPKYuuSAKyyvE+oPWcaQ+CUmFW0bPlM+kg= github.com/aead/siphash v1.0.1/go.mod h1:Nywa3cDsYNNK3gaciGTWPwHt0wlpNV15vwmswBAUSII= github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= +github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= +github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= github.com/allegro/bigcache v1.2.1-0.20190218064605-e24eb225f156/go.mod h1:Cb/ax3seSYIx7SuZdm2G2xzfwmv3TPSk2ucNfQESPXM= +github.com/andybalholm/brotli v0.0.0-20190621154722-5f990b63d2d6/go.mod h1:+lx6/Aqd1kLJ1GQfkvOnaZ1WGmLpMpbprPuIOOZX30U= github.com/aristanetworks/goarista 
v0.0.0-20170210015632-ea17b1a17847 h1:rtI0fD4oG/8eVokGVPYJEW1F88p1ZNgXiEIs9thEE4A= github.com/aristanetworks/goarista v0.0.0-20170210015632-ea17b1a17847/go.mod h1:D/tb0zPVXnP7fmsLZjtdUhSsumbK/ij54UXjjVgMGxQ= +github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8= +github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5/go.mod h1:wHh0iHkYZB8zMSxRWpUBQtwG5a7fFgvEO+odwuTv2gs= github.com/aws/aws-sdk-go v1.25.48/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973 h1:xJ4a3vCFaGF/jqvzLMYoU8P317H5OQ+Via4RmuPwCS0= github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= -github.com/blang/semver v3.5.1+incompatible h1:cQNTCjp13qL8KC3Nbxr/y2Bqb63oX6wdnnjpJbkM4JQ= -github.com/blang/semver v3.5.1+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnwebNt5EWlYSAyrTnjyyk= +github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= +github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= github.com/boltdb/bolt v1.3.1 h1:JQmyP4ZBrce+ZQu0dY660FMfatumYDLun9hBCUVIkF4= github.com/boltdb/bolt v1.3.1/go.mod h1:clJnj/oiGkjum5o1McbSZDSLxVThjynRyGBgiAx27Ps= github.com/btcsuite/btcd v0.0.0-20171128150713-2e60448ffcc6/go.mod h1:Dmm/EzmjnCiweXmzRIAiUWCInVmPgjkzgv5k4tVyXiQ= github.com/btcsuite/btcd v0.20.1-beta h1:Ik4hyJqN8Jfyv3S4AGBOmyouMsYE3EdYODkMbQjwPGw= github.com/btcsuite/btcd v0.20.1-beta/go.mod h1:wVuoA8VJLEcwgqHBwHmzLRazpKxTv13Px/pDuV7OomQ= +github.com/btcsuite/btcd v0.20.1-beta.0.20200325095142-cfcf4fb7625a h1:rIjpCsfzsmQuSUNs8gpLAPtTKJM61aST+Yh4DZTjVJs= +github.com/btcsuite/btcd v0.20.1-beta.0.20200325095142-cfcf4fb7625a/go.mod h1:wVuoA8VJLEcwgqHBwHmzLRazpKxTv13Px/pDuV7OomQ= github.com/btcsuite/btclog v0.0.0-20170628155309-84c8d2346e9f h1:bAs4lUbRJpnnkd9VhRV3jjAVU7DJVjMaK+IsvSeZvFo= github.com/btcsuite/btclog v0.0.0-20170628155309-84c8d2346e9f/go.mod 
h1:TdznJufoqS23FtqVCzL0ZqgP5MqXbb4fg/WgDys70nA= github.com/btcsuite/btcutil v0.0.0-20190425235716-9e5f4b9a998d h1:yJzD/yFppdVCf6ApMkVy8cUxV0XrxdP9rVf6D87/Mng= github.com/btcsuite/btcutil v0.0.0-20190425235716-9e5f4b9a998d/go.mod h1:+5NJ2+qvTyV9exUAL/rxXi3DcLg2Ts+ymUAY5y4NvMg= +github.com/btcsuite/btcutil v1.0.1 h1:GKOz8BnRjYrb/JTKgaOk+zh26NWNdSNvdvv0xoAZMSA= +github.com/btcsuite/btcutil v1.0.1/go.mod h1:j9HUFwoQRsZL3V4n+qG+CUnEGHOarIxfC3Le2Yhbcts= github.com/btcsuite/go-socks v0.0.0-20170105172521-4720035b7bfd h1:R/opQEbFEy9JGkIguV40SvRY1uliPX8ifOvi6ICsFCw= github.com/btcsuite/go-socks v0.0.0-20170105172521-4720035b7bfd/go.mod h1:HHNXQzUsZCxOoE+CPiyCTO6x34Zs86zZUiwtpXoGdtg= +github.com/btcsuite/goleveldb v0.0.0-20160330041536-7834afc9e8cd h1:qdGvebPBDuYDPGi1WCPjy1tGyMpmDK8IEapSsszn7HE= github.com/btcsuite/goleveldb v0.0.0-20160330041536-7834afc9e8cd/go.mod h1:F+uVaaLLH7j4eDXPRvw78tMflu7Ie2bzYOH4Y8rRKBY= +github.com/btcsuite/snappy-go v0.0.0-20151229074030-0bdef8d06723 h1:ZA/jbKoGcVAnER6pCHPEkGdZOV7U1oLUedErBHCUMs0= github.com/btcsuite/snappy-go v0.0.0-20151229074030-0bdef8d06723/go.mod h1:8woku9dyThutzjeg+3xrA5iCpBRH8XEEg3lh6TiUghc= github.com/btcsuite/websocket v0.0.0-20150119174127-31079b680792 h1:R8vQdOQdZ9Y3SkEwmHoWBmX1DNXhXZqlTpq6s4tyJGc= github.com/btcsuite/websocket v0.0.0-20150119174127-31079b680792/go.mod h1:ghJtEyQwv5/p4Mg4C0fgbePVuGr935/5ddU9Z3TmDRY= +github.com/btcsuite/winsvc v1.0.0 h1:J9B4L7e3oqhXOcm+2IuNApwzQec85lE+QaikUcCs+dk= github.com/btcsuite/winsvc v1.0.0/go.mod h1:jsenWakMcC0zFBFurPLEAyrnc/teJEM1O46fmI40EZs= -github.com/cenkalti/backoff v2.2.1+incompatible h1:tNowT99t7UNflLxfYYSlKYsBpXdEet03Pg2g16Swow4= -github.com/cenkalti/backoff v2.2.1+incompatible/go.mod h1:90ReRw6GdpyfrHakVjL/QHaoyV4aDUVVkXQJJJ3NXXM= github.com/cespare/cp v0.1.0/go.mod h1:SOGHArjBr4JWaSDEVpWpo/hNg6RoKrls6Oh40hiwW+s= github.com/cespare/xxhash v1.1.0 h1:a6HrQnmkObjyL+Gs60czilIUGqrzKutQD6XZog3p+ko= github.com/cespare/xxhash v1.1.0/go.mod 
h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= github.com/cespare/xxhash/v2 v2.0.1-0.20190104013014-3767db7a7e18/go.mod h1:HD5P3vAIAh+Y2GAxg0PrPN1P8WkepXGpjbUPDHJqqKM= github.com/cespare/xxhash/v2 v2.1.1 h1:6MnRN8NT7+YBpUIWxHtefFZOKTAPgGjpQSxqLNn0+qY= github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= github.com/cloudflare/cloudflare-go v0.10.2-0.20190916151808-a80f83b9add9/go.mod h1:1MxXX1Ux4x6mqPmjkUgTP1CdXIBXKX7T+Jk9Gxrmx+U= +github.com/coreos/bbolt v1.3.2/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk= +github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE= +github.com/coreos/go-etcd v2.0.0+incompatible/go.mod h1:Jez6KQU2B/sWsbdaef3ED8NzMklzPG4d5KIOhIy30Tk= +github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= +github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= +github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA= +github.com/cpuguy83/go-md2man v1.0.10/go.mod h1:SmD6nW6nTyfqj6ABTjUi3V3JVMnlJmwcJI5acqYI6dE= github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= +github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= github.com/davecgh/go-spew v0.0.0-20171005155431-ecdeabc65495/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= @@ -69,6 +100,8 @@ github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954/go.mod h1:vAd38F8 github.com/dlclark/regexp2 v1.2.0/go.mod h1:2pZnwuY/m+8K6iRw6wQdMtk+rH5tNGR1i55kozfMjCc= github.com/docker/docker 
v1.4.2-0.20180625184442-8e610b2b55bf/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= github.com/dop251/goja v0.0.0-20200219165308-d1232e640a87/go.mod h1:Mw6PkjjMXWbTj+nnj4s3QPXq1jaT0s5pC0iFD4+BOAA= +github.com/dsnet/compress v0.0.1/go.mod h1:Aw8dCMJ7RioblQeTqt88akK31OvO8Dhf5JflhBbQEHo= +github.com/dsnet/golib v0.0.0-20171103203638-1ea166775780/go.mod h1:Lj+Z9rebOhdfkVLjJ8T6VcRQv3SXugXy999NBtR9aFY= github.com/edsrzf/mmap-go v0.0.0-20160512033002-935e0e8a636c/go.mod h1:YO35OhQPt3KJa3ryjFM5Bs14WD66h8eGKpfaBNrHW5M= github.com/elastic/gosigar v0.8.1-0.20180330100440-37f05ff46ffa h1:XKAhUk/dtp+CV0VO6mhG2V7jA9vbcGcnYF/Ay9NjZrY= github.com/elastic/gosigar v0.8.1-0.20180330100440-37f05ff46ffa/go.mod h1:cdorVVzy1fhmEqmtgqkoE3bYtCfSCkVyjTyCIo22xvs= @@ -76,92 +109,200 @@ github.com/ethereum/go-ethereum v1.9.12 h1:EPtimwsp/KGDSiXcNunzsI4kefdsMHZGJntKx github.com/ethereum/go-ethereum v1.9.12/go.mod h1:PvsVkQmhZFx92Y+h2ylythYlheEDt/uBgFbl61Js/jo= github.com/fatih/color v1.3.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= github.com/fjl/memsize v0.0.0-20180418122429-ca190fb6ffbc/go.mod h1:VvhXpOYNQvB+uIk2RvXzuaQtkQJzzIx6lSBe1xv7hi0= +github.com/flynn/noise v0.0.0-20180327030543-2492fe189ae6 h1:u/UEqS66A5ckRmS4yNpjmVH56sVtS/RfclBAYocb4as= +github.com/flynn/noise v0.0.0-20180327030543-2492fe189ae6/go.mod h1:1i71OnUq3iUe1ma7Lr6yG6/rjvM3emb6yoL7xLFzcVQ= github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= github.com/gballet/go-libpcsclite v0.0.0-20190607065134-2772fd86a8ff/go.mod h1:x7DCsMOv1taUwEWCzT4cmDeAkigA5/QCwUodaVOe8Ww= +github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= +github.com/go-chi/chi v4.0.2+incompatible/go.mod h1:eB3wogJHnLi3x/kFX2A+IbTBlXxmMeXJVKy9tTv1XzQ= github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= +github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= github.com/go-logfmt/logfmt v0.3.0/go.mod 
h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE= +github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk= github.com/go-ole/go-ole v1.2.1 h1:2lOsA72HgjxAuMlKpFiCbHTvu44PIVkZ5hqm3RSdI/E= github.com/go-ole/go-ole v1.2.1/go.mod h1:7FAglXiTm7HKlQRDeOQ6ZNUHidzCWXuZWq/1dTyBNF8= +github.com/go-redis/redis v6.15.6+incompatible/go.mod h1:NAIEuMOZ/fxfXJIrKDQDz8wamY7mA7PouImQ2Jvg6kA= github.com/go-sourcemap/sourcemap v2.1.2+incompatible/go.mod h1:F8jJfvm2KbVjc5NqelyYJmf/v5J0dwNLS2mL4sNA1Jg= github.com/go-stack/stack v1.8.0 h1:5SgMzNM5HxrEjV0ww2lTmX6E2Izsfxas4+YHWRs3Lsk= github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= +github.com/gobwas/httphead v0.0.0-20180130184737-2c6c146eadee/go.mod h1:L0fX3K22YWvt/FAX9NnzrNzcI4wNYi9Yku4O0LKYflo= +github.com/gobwas/pool v0.2.0/go.mod h1:q8bcK0KcYlCgd9e7WYLm9LpyS+YeLd8JVDW6WezmKEw= +github.com/gobwas/ws v1.0.2/go.mod h1:szmBTxLgaFppYjEmNtny/v3w89xOydFnnZMcgRRu/EM= github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= +github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zVXpSg4= +github.com/golang/gddo v0.0.0-20190419222130-af0f2af80721/go.mod h1:xEhNfoBDX1hzLm2Nf80qUvZ2sVwoMZ8d6IE2SrsQfh4= +github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= +github.com/golang/groupcache v0.0.0-20190129154638-5b532d6fd5ef/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.2-0.20190517061210-b285ee9cfc6c h1:zqAKixg3cTcIasAMJV+EcfVbWwLpOZ7LeoWJvcuD/5Q= github.com/golang/protobuf v1.3.2-0.20190517061210-b285ee9cfc6c/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= 
+github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= github.com/golang/snappy v0.0.1 h1:Qgr9rKW7uDUkrbSmQeiDsGa8SjGyCOGtuasMWwvp2P4= github.com/golang/snappy v0.0.1/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= +github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= +github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= +github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= +github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= +github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= +github.com/gorilla/handlers v1.4.2/go.mod h1:Qkdc/uu4tH4g6mTK6auzZ766c4CA0Ng8+o/OAirnOIQ= +github.com/gorilla/securecookie v1.1.1/go.mod h1:ra0sb63/xPlUeL+yeDciTfxMRAA+MP+HVt/4epWDjd4= +github.com/gorilla/websocket v1.4.0/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ= github.com/gorilla/websocket v1.4.1-0.20190629185528-ae1634f6a989/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ= +github.com/gorilla/websocket v1.4.1/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= github.com/graph-gophers/graphql-go v0.0.0-20191115155744-f33e81362277/go.mod h1:9CQHMSxwO4MprSdzoIEobiHpoLtHm77vfxsvsIN5Vuc= +github.com/grpc-ecosystem/go-grpc-middleware v1.0.0/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs= +github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk= +github.com/grpc-ecosystem/grpc-gateway v1.9.0/go.mod 
h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= github.com/hashicorp/golang-lru v0.0.0-20160813221303-0a025b7e63ad/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= +github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ= github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= github.com/huin/goupnp v0.0.0-20161224104101-679507af18f3/go.mod h1:MZ2ZmwcBpvOoJ22IJsc7va19ZwoheaBk43rKg12SKag= +github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8= github.com/influxdata/influxdb v1.2.3-0.20180221223340-01288bdb0883/go.mod h1:qZna6X/4elxqT3yI9iZYdZrWWdeFOOprn86kgg4+IzY= github.com/jackpal/go-nat-pmp v1.0.2-0.20160603034137-1fa385a6f458/go.mod h1:QPH045xvCAeXUZOxsnwmrtiCoxIr9eob+4orBN1SBKc= +github.com/jessevdk/go-flags v0.0.0-20141203071132-1679536dcc89 h1:12K8AlpT0/6QUXSfV0yi4Q0jkbq8NDtIKFtF61AoqV0= github.com/jessevdk/go-flags v0.0.0-20141203071132-1679536dcc89/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI= github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k= +github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo= +github.com/jrick/logrotate v1.0.0 h1:lQ1bL/n9mBNeIXoTUoYRlK4dHuNJVofX9oWqBtPnSzI= github.com/jrick/logrotate v1.0.0/go.mod h1:LNinyqDIJnpAur+b8yyulnQw/wDuN1+BYKlTRt3OuAQ= +github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= +github.com/json-iterator/go v1.1.8/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= +github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU= github.com/julienschmidt/httprouter v1.1.1-0.20170430222011-975b5c4c7c21/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w= +github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w= github.com/karalabe/usb v0.0.0-20190919080040-51dc0efba356/go.mod 
h1:Od972xHfMJowv7NGVDiWVxk2zxnWgjLlJzE+F4F7AGU= +github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q= +github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= +github.com/kkdai/bstream v0.0.0-20161212061736-f391b8402d23 h1:FOOIBWrEkLgmlgGfMuZT83xIwfPDxEI2OHu6xUmJMFE= github.com/kkdai/bstream v0.0.0-20161212061736-f391b8402d23/go.mod h1:J+Gs4SYgM6CZQHDETBtE9HaSEkGmuNXF86RwHhHUvq4= +github.com/klauspost/compress v1.4.1/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A= +github.com/klauspost/compress v1.9.2/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A= +github.com/klauspost/compress v1.10.0/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs= +github.com/klauspost/cpuid v1.2.0/go.mod h1:Pj4uuM528wm8OyEC2QMXAi2YiTZ96dNQPGgoMS4s3ek= +github.com/klauspost/pgzip v1.2.1/go.mod h1:Ch1tH69qFZu15pkjo5kYi6mth2Zzwzt50oCQKQE9RUs= github.com/konsorten/go-windows-terminal-sequences v1.0.1 h1:mweAR1A6xJ3oS2pRaGiHgQ4OO8tzTaLawm8vnODuwDk= github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= +github.com/konsorten/go-windows-terminal-sequences v1.0.2 h1:DB17ag19krx9CFsz4o3enTrPXyIXCl+2iCXH/aMAp9s= +github.com/konsorten/go-windows-terminal-sequences v1.0.2/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= +github.com/kr/pretty v0.1.0 h1:L/CwN0zerZDmRFUapSPitk6f+Q3+0za1rQkzVuMiMFI= github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= +github.com/kr/pretty v0.2.0 h1:s5hAObm+yFO5uHYt5dYjxi2rXrsnmRpJx4OYvIWUaQs= +github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= +github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE= github.com/kr/text v0.1.0/go.mod 
h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw= +github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= +github.com/magiconair/properties v1.8.1/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= github.com/mattn/go-colorable v0.1.0 h1:v2XXALHHh6zHfYTJ+cSkwtyffnaOyR1MXaA91mTrb8o= github.com/mattn/go-colorable v0.1.0/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU= +github.com/mattn/go-colorable v0.1.4/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE= +github.com/mattn/go-colorable v0.1.6 h1:6Su7aK7lXmJ/U79bYtBjLNaha4Fs1Rg9plHpcH+vvnE= +github.com/mattn/go-colorable v0.1.6/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc= github.com/mattn/go-ieproxy v0.0.0-20190610004146-91bb50d98149/go.mod h1:31jz6HNzdxOmlERGGEc4v/dMssOfmp2p5bT/okiKFFc= github.com/mattn/go-ieproxy v0.0.0-20190702010315-6dee0af9227d/go.mod h1:31jz6HNzdxOmlERGGEc4v/dMssOfmp2p5bT/okiKFFc= github.com/mattn/go-isatty v0.0.5-0.20180830101745-3fb116b82035 h1:USWjF42jDCSEeikX/G1g40ZWnsPXN5WkZ4jMHZWyBK4= github.com/mattn/go-isatty v0.0.5-0.20180830101745-3fb116b82035/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= +github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= +github.com/mattn/go-isatty v0.0.12 h1:wuysRhFDzyxgEmMf5xjvJ2M9dZoWAXNNr5LSBS7uHXY= +github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU= github.com/mattn/go-runewidth v0.0.3/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU= github.com/mattn/go-runewidth v0.0.4/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU= github.com/matttproud/golang_protobuf_extensions v1.0.1 h1:4hp9jkHxhMHkqkrB3Ix0jegS5sx/RkqARlsWZ6pIwiU= github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= github.com/mgutz/ansi v0.0.0-20170206155736-9520e82c474b 
h1:j7+1HpAFS1zy5+Q4qx1fWh90gTKwiN4QCGoY9TWyyO4= github.com/mgutz/ansi v0.0.0-20170206155736-9520e82c474b/go.mod h1:01TrycV0kFyexm33Z7vhZRXopbI8J3TDReVlkTgMUxE= +github.com/mholt/archiver/v3 v3.3.0/go.mod h1:YnQtqsp+94Rwd0D/rk5cnLrxusUBUXg+08Ebtr1Mqao= +github.com/mitchellh/colorstring v0.0.0-20190213212951-d06e56a500db/go.mod h1:l0dey0ia/Uv7NcFFVbCLtqEBQbrT4OCwCSKTEv6enCw= +github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= +github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= +github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= +github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= +github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= +github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= +github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= github.com/naoina/go-stringutil v0.1.0/go.mod h1:XJ2SJL9jCtBh+P9q5btrd/Ylo8XwT/h1USek5+NqSA0= github.com/naoina/toml v0.1.2-0.20170918210437-9fafd6967416/go.mod h1:NBIhNtsFMo3G2szEBne+bO4gS192HuIYRqfvOWb4i1E= +github.com/nwaples/rardecode v1.0.0/go.mod h1:5DzqNKiOdpKKBH87u8VlvAnPZMXcGRhxWkRpHbbfGS0= github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U= github.com/olekukonko/tablewriter v0.0.1/go.mod h1:vsDQFd/mU46D+Z4whnwzcISnGGzXWMclvtLoiIKAKIo= github.com/olekukonko/tablewriter v0.0.2-0.20190409134802-7e037d187b0c/go.mod h1:vsDQFd/mU46D+Z4whnwzcISnGGzXWMclvtLoiIKAKIo= github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.7.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/ginkgo v1.12.0/go.mod 
h1:oUhWkIvk5aDxtKvDDuw8gItl8pKl42LzjC9KZE0HfGg= github.com/onsi/gomega v1.4.3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= +github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY= +github.com/onsi/gomega v1.9.0/go.mod h1:Ho0h+IUsWyvy1OpqCwxlQ/21gkhVunqlU8fDGcoTdcA= github.com/opentracing/opentracing-go v1.1.0/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o= github.com/pborman/uuid v0.0.0-20170112150404-1b00554d8222/go.mod h1:VyrYX9gd7irzKovcSS6BIIEwPRkP2Wm2m9ufcdFSJ34= +github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic= github.com/peterh/liner v1.1.1-0.20190123174540-a2c9a5303de7/go.mod h1:CRroGNssyjTd/qIG2FyxByd2S8JEAZXBl4qUrZf8GS0= +github.com/pierrec/lz4 v2.0.5+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY= github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.8.1 h1:iURUrRGxPUNPdy5/HRSm+Yj6okJ6UtLINN0Q9M4+h3I= github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pkg/profile v1.3.0/go.mod h1:hJw3o1OdXxsrSjjVksARp5W95eeEaEfptyVZyv6JUPA= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/prometheus/client_golang v0.9.1 h1:K47Rk0v/fkEfwfQet2KWhscE0cJzjgCCDBG2KHZoVno= github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= +github.com/prometheus/client_golang v0.9.3/go.mod h1:/TN21ttK/J9q6uSwhBd54HahCDft0ttaMvbicHlPoso= +github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo= +github.com/prometheus/client_golang v1.3.0/go.mod h1:hJaj2vgQTGQmVCsAACORcieXFeDPbaTKGT+JTgUa3og= github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910 h1:idejC8f05m9MGOsuEi1ATq9shN03HrxNkD/luQvxCv8= github.com/prometheus/client_model 
v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= +github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/prometheus/client_model v0.1.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/common v0.0.0-20181113130724-41aa239b4cce h1:X0jFYGnHemYDIW6jlc+fSI8f9Cg+jqCnClYP2WgZT/A= github.com/prometheus/common v0.0.0-20181113130724-41aa239b4cce/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro= +github.com/prometheus/common v0.4.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= +github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= +github.com/prometheus/common v0.7.0/go.mod h1:DjGbpBbp5NYNiECxcL/VnbXCCaQpKd3tt26CguLLsqA= github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d h1:GoAlyOgbOEIFdaDqxJVlbOQ1DtGmZWs/Qau0hIlk+WQ= github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= +github.com/prometheus/procfs v0.0.0-20190507164030-5867b95ac084/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= +github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= +github.com/prometheus/procfs v0.0.8/go.mod h1:7Qr8sr6344vo1JqZ6HhLceV9o3AJ1Ff+GxbHq6oeK9A= github.com/prometheus/tsdb v0.6.2-0.20190402121629-4f204dcbc150/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40TwIPHuXU= +github.com/prometheus/tsdb v0.7.1/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40TwIPHuXU= github.com/rjeczalik/notify v0.9.1/go.mod h1:rKwnCoCGeuQnwBtTSPL9Dad03Vh2n40ePRrjvIXnJho= +github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg= github.com/rs/cors v0.0.0-20160617231935-a62a804a8a00 h1:8DPul/X0IT/1TNMIxoKLwdemEOBBHDC/K4EB16Cw5WE= github.com/rs/cors v0.0.0-20160617231935-a62a804a8a00/go.mod h1:gFx+x8UowdsKA9AchylcLynDq+nNFfI8FkUZdN/jGCU= 
github.com/rs/xhandler v0.0.0-20160618193221-ed27b6fd6521 h1:3hxavr+IHMsQBrYUPQM5v0CgENFktkkbg1sfpgM3h20= github.com/rs/xhandler v0.0.0-20160618193221-ed27b6fd6521/go.mod h1:RvLn4FgxWubrpZHtQLnOf6EwhN2hEMusxZOhcW9H3UQ= +github.com/russross/blackfriday v1.5.2/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g= github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= +github.com/schollz/progressbar/v2 v2.15.0/go.mod h1:UdPq3prGkfQ7MOzZKlDRpYKcFqEMczbD7YmbPgpzKMI= github.com/shopspring/decimal v0.0.0-20200227202807-02e2044944cc h1:jUIKcSPO9MoMJBbEoyE/RJoE8vz7Mb8AjvifMMwSyvY= github.com/shopspring/decimal v0.0.0-20200227202807-02e2044944cc/go.mod h1:DKyhrW/HYNuLGql+MJL6WCR6knT2jwCFRcu2hWCYk4o= github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc= +github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= github.com/sirupsen/logrus v1.4.2 h1:SPIRibHv4MatM3XXNO2BJeFLZwZ2LvZgfQ5+UNI2im4= github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= +github.com/sirupsen/logrus v1.5.0 h1:1N5EYkVAPEywqZRJd7cwnRtCb6xJx7NH3T3WUTF980Q= +github.com/sirupsen/logrus v1.5.0/go.mod h1:+F7Ogzej0PZc/94MaYx/nvG9jOFMD2osvC3s+Squfpo= +github.com/skycoin/skycoin v0.26.0/go.mod h1:78nHjQzd8KG0jJJVL/j0xMmrihXi70ti63fh8vXScJw= +github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc= +github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA= +github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM= github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= github.com/spaolacci/murmur3 v1.0.1-0.20190317074736-539464a789e9/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= +github.com/spf13/afero v1.1.2/go.mod 
h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ= +github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= +github.com/spf13/cobra v0.0.5/go.mod h1:3K3wKZymM7VvHMDS9+Akkh4K60UwM26emMESw8tLCHU= +github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo= +github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= +github.com/spf13/viper v1.3.2/go.mod h1:ZiWeW+zYFKm7srdB9IoDzzZXaJaI5eL9QjNiN/DMA2s= +github.com/spf13/viper v1.6.2/go.mod h1:t3iDnF5Jlj76alVNuyFBk5oUMCvsrkbvZK0WQdfDi5k= github.com/status-im/keycard-go v0.0.0-20190316090335-8537d3370df4/go.mod h1:RZLeN1LMWmRsyYjvAu+I6Dm9QmlDaIIt+Y+4Kd7Tp+Q= github.com/steakknife/bloomfilter v0.0.0-20180922174646-6819c0d2a570 h1:gIlAHnH1vJb5vwEjIp5kBj/eu99p/bl0Ay2goiPe5xE= github.com/steakknife/bloomfilter v0.0.0-20180922174646-6819c0d2a570/go.mod h1:8OR4w3TdeIHIh1g6EMY5p0gVNOovcWC+1vpc7naMuAw= @@ -174,40 +315,105 @@ github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UV github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= github.com/stretchr/testify v1.5.1 h1:nOGnQDM7FYENwehXlg/kFVnos3rEvtKTjRvOWSzb6H4= github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= +github.com/subosito/gotenv v1.2.0/go.mod h1:N0PQaV/YGNqwC0u51sEeR/aUtSLEXKX9iv69rRypqCw= github.com/syndtr/goleveldb v1.0.1-0.20190923125748-758128399b1d/go.mod h1:9OrXJhf154huy1nPWmuSrkgjPUtUNhA+Zmy+6AESzuA= +github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= github.com/tyler-smith/go-bip39 v1.0.1-0.20181017060643-dbb3b84ba2ef/go.mod h1:sJ5fKU0s6JVwZjjcUEX2zFOnvq0ASQ2K9Zr6cf67kNs= +github.com/ugorji/go v1.1.4/go.mod h1:uQMGLiO92mf5W77hV/PUCpI3pbzQx3CRekS0kk+RGrc= +github.com/ugorji/go/codec v0.0.0-20181204163529-d75b2dcb6bc8/go.mod h1:VFNgLljTbGfSG7qAOspJ7OScBnGdDN/yBr0sguwnwf0= +github.com/ulikunitz/xz 
v0.5.6/go.mod h1:2bypXElzHzzJZwzH67Y6wb67pO62Rzfn7BSiF4ABRW8= github.com/urfave/cli v1.22.1/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0= github.com/wsddn/go-ecdh v0.0.0-20161211032359-48726bab9208/go.mod h1:IotVbo4F+mw0EzQ08zFqg7pK3FebNXpaMsRy2RT+Ees= +github.com/xi2/xz v0.0.0-20171230120015-48954b6210f8/go.mod h1:HUYIGzjTL3rfEspMxjDjgmT5uz5wzYJKVo23qUhYTos= +github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU= +github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q= +go.etcd.io/bbolt v1.3.2/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= +go.etcd.io/bbolt v1.3.3/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= +go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= +go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0= +go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= golang.org/x/crypto v0.0.0-20170930174604-9419663f5a44/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20181203042331-505ab145d0a9/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20200115085410-6d4e4cb37c7d/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20200302210943-78000ba7a073/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200311171314-f7b00557c8c4 h1:QmwruyY+bKbDDL0BaglrbZABEali68eoMFhTZpCjYVA= golang.org/x/crypto v0.0.0-20200311171314-f7b00557c8c4/go.mod 
h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20200323165209-0ec3e9974c59 h1:3zb4D3T4G8jdExgVU/95+vQXfpEPiMdCaZgmGVxjNHM= +golang.org/x/crypto v0.0.0-20200323165209-0ec3e9974c59/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= +golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181220203305-927f97764cc3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190522155817-f3200d17e092/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= +golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20191204025024-5ee1b9f4859a/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200301022130-244492dfa37a h1:GuSPYbZzB5/dcLNCwLQLsg3obCJtX9IJhpXkvY7kzk0= golang.org/x/net v0.0.0-20200301022130-244492dfa37a/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= 
golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20181107165924-66b7b1311ac8/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181205085412-a5c9d58dba9a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191220142924-d4481acd189f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200302150141-5c8b2ff67527 h1:uYVVQ9WP/Ds2ROhcaGPeIdVq0RIXVLwsHlnvJ+cT1So= golang.org/x/sys v0.0.0-20200302150141-5c8b2ff67527/go.mod 
h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd h1:xhmwyvizuTgC2qz7ZlMluP20uW+C3Rm0FD/WLDX8884= +golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190328211700-ab21143f2384/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= +google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= +google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= +google.golang.org/grpc v1.21.0/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= 
+gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 h1:qIbj1fsPNlZgppZ+VLlY7N33q108Sa+fhmuc+sWQYwY= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15 h1:YR8cESwS4TdDjEe65xsg0ogRM/Nc3DYOhEAlW+xobZo= +gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= +gopkg.in/ini.v1 v1.51.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= gopkg.in/natefinch/npipe.v2 v2.0.0-20160621034901-c1b8fa8bdcce/go.mod h1:5AcXVHNjg+BDxry382+8OKon8SEWiKktQR07RKPsv1c= gopkg.in/olebedev/go-duktape.v3 v3.0.0-20190213234257-ec84240a7772/go.mod h1:uAJfkITjFhyEEuUfm7bsmCZRbW5WRq8s9EY8HZ6hCns= +gopkg.in/resty.v1 v1.12.0/go.mod h1:mDo4pnntr5jdWRML875a/NmxYqAlA73dVijT2AXvQQo= gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= gopkg.in/urfave/cli.v1 v1.20.0/go.mod h1:vuBzUtMdQeixQj8LVd+/98pzhxNGQoyuPBlsXHOQNO0= +gopkg.in/yaml.v2 v2.0.0-20170812160011-eb3733d160e7/go.mod h1:JAlM8MvJe8wmxCU4Bli9HhUf9+ttbYbLASfIpnQbh74= gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.2 h1:ZCJp+EgiOT7lHqUV2J862kp8Qj64Jo6az82+3Td9dZw= gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.4 h1:/eiJrUcujPVeJ3xlSWaiNi3uSVmDGBK1pDHUHAnao1I= +gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gotest.tools v2.2.0+incompatible/go.mod h1:DsYFclhRJ6vuDpmuTbkuFWG+y2sxOXAzmJt81HFBacw= +honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +nhooyr.io/websocket v1.8.2/go.mod h1:LiqdCg1Cu7TPWxEvPjPa0TGYxCsy4pHNTN9gGluwBpQ= diff --git a/pkg/api/gateway.go b/pkg/api/gateway.go index f52e6dc..25679af 100644 --- a/pkg/api/gateway.go +++ 
b/pkg/api/gateway.go @@ -6,8 +6,6 @@ import ( "github.com/SkycoinProject/multicoin-wallet/pkg/coin" ) -//go:generate mockery -name Gatewayer -case underscore -inpkg -testonly - // Gateway is the api gateway type Gateway struct { *coin.CoinManager @@ -22,5 +20,5 @@ func NewGateway(cm *coin.CoinManager) *Gateway { // Gatewayer interface for Gateway methods type Gatewayer interface { - SetupMultiCoinRoutes(prefix string, handler func(endpoint string, handler http.Handler)) + SetupCoinRoutes(prefix string, webhandler func(string, http.Handler)) } diff --git a/pkg/api/http.go b/pkg/api/http.go index c042682..b132945 100644 --- a/pkg/api/http.go +++ b/pkg/api/http.go @@ -26,6 +26,12 @@ type muxConfig struct { host string } +// HTTPError is included in an HTTPResponse +type HTTPError struct { + Message string `json:"message"` + Code int `json:"code"` +} + // Server exposes an HTTP API type Server struct { server *http.Server @@ -113,7 +119,7 @@ func newServerMux(c muxConfig, gateway Gatewayer) *http.ServeMux { webHandler("/api/"+apiVersion1+endpoint, handler) } - gateway.SetupMultiCoinRoutes("/multicoin", webHandlerV1) + gateway.SetupCoinRoutes("/multicoin", webHandlerV1) return mux } diff --git a/pkg/coin/btc/btc.go b/pkg/coin/btc/btc.go index 45c302a..635deec 100644 --- a/pkg/coin/btc/btc.go +++ b/pkg/coin/btc/btc.go @@ -1,21 +1,64 @@ package btc import ( + "encoding/json" + "fmt" "net/http" - "github.com/btcsuite/btcd/rpcclient" + "github.com/SkycoinProject/multicoin-wallet/pkg/coin/btc/rpc" ) type BTC struct { - rpc *rpcclient.Client + rpc *rpc.Client } func New() *BTC { + // run btcd node with --notls flag + client := rpc.NewClient("dmsg://0311607e59d1d0dc07fa33c641d31af10b8081de57b1c3c0d732804099f6a64dcb:8080/") return &BTC{ - rpc: &rpcclient.Client{}, + rpc: client, } } -func (btc *BTC) SetupRoutes(prefix string, handler func(endpoint string, handler http.Handler)) { +func (btc *BTC) SetupRoutes(prefix string, webhandler func(string, http.Handler)) { + 
webhandler(fmt.Sprintf("%s/balance", prefix), BalanceHandler(btc.rpc)) +} + +func BalanceHandler(rpc *rpc.Client) http.HandlerFunc { + return func(w http.ResponseWriter, r *http.Request) { + amt, err := rpc.GetBalance("account1") + if err != nil { + Error500(w, err.Error()) + } + + SendJSONOr500(w, amt) + } +} + +// SendJSONOr500 writes an object as JSON, writing a 500 error if it fails +func SendJSONOr500(w http.ResponseWriter, m interface{}) { + out, err := json.MarshalIndent(m, "", " ") + if err != nil { + Error500(w, err.Error()) + return + } + + w.Header().Add("Content-Type", "application/json") + + if _, err := w.Write(out); err != nil { + Error500(w, err.Error()) + } +} + +func Error500(w http.ResponseWriter, msg string) { + ErrorXXX(w, http.StatusInternalServerError, msg) +} + +func ErrorXXX(w http.ResponseWriter, status int, msg string) { + httpMsg := fmt.Sprintf("%d %s", status, http.StatusText(status)) + if msg != "" { + httpMsg = fmt.Sprintf("%s - %s", httpMsg, msg) + } + http.Error(w, httpMsg, status) } diff --git a/pkg/coin/btc/rpc/client.go b/pkg/coin/btc/rpc/client.go new file mode 100644 index 0000000..314bb5a --- /dev/null +++ b/pkg/coin/btc/rpc/client.go @@ -0,0 +1,132 @@ +package rpc + +import ( + "bytes" + "encoding/json" + "fmt" + "io/ioutil" + "net/http" + "strings" + "time" + + "github.com/SkycoinProject/dmsg" + dmsghttp "github.com/SkycoinProject/dmsg-http" + "github.com/SkycoinProject/dmsg/cipher" + "github.com/SkycoinProject/dmsg/disc" + "github.com/btcsuite/btcd/btcjson" + "github.com/btcsuite/btcutil" +) + +const ( + dialTimeout = 60 * time.Second + httpClientTimeout = 120 * time.Second + tlsHandshakeTimeout = 60 * time.Second + + // ContentTypeJSON json content type header + ContentTypeJSON = "application/json" + // ContentTypeForm form data content type header + ContentTypeForm = "application/x-www-form-urlencoded" +) + +type Client struct { + httpClient *http.Client + addr string +} + +// rawResponse is a partially-unmarshaled 
JSON-RPC response. For this +// to be valid (according to JSON-RPC 1.0 spec), ID may not be nil. +type rawResponse struct { + Result json.RawMessage `json:"result"` + Error *btcjson.RPCError `json:"error"` +} + +func NewClient(addr string) *Client { + discovery := disc.NewHTTP("http://dmsg.discovery.skywire.cc") + cPK, cSK := cipher.GenerateKeyPair() + dmsgClient := dmsg.NewClient(cPK, cSK, discovery, dmsg.DefaultConfig()) + go dmsgClient.Serve() + + time.Sleep(time.Second) // wait for dmsg client to be ready + + dmsgTransport := dmsghttp.Transport{ + DmsgClient: dmsgClient, + } + + httpClient := &http.Client{ + Transport: dmsgTransport, + Timeout: httpClientTimeout, + } + addr = strings.TrimRight(addr, "/") + addr += "/" + return NewClientWithHTTPClient(addr, httpClient) +} + +func NewClientWithHTTPClient(addr string, httpClient *http.Client) *Client { + return &Client{ + httpClient: httpClient, + addr: addr, + } +} + +func (c *Client) GetBalance(account string) (*btcutil.Amount, error) { + cmd := btcjson.NewGetBalanceCmd(&account, nil) + + resp, err := c.sendCmd(cmd) + if err != nil { + return nil, err + } + + fmt.Println(resp) + + // Unmarshal result as a floating point number. + var balance float64 + err = json.Unmarshal(resp.Result, &balance) + if err != nil { + return nil, err + } + + amount, err := btcutil.NewAmount(balance) + if err != nil { + return nil, err + } + + return &amount, nil + +} + +func (c *Client) sendCmd(cmd interface{}) (*rawResponse, error) { + // Marshal the command. 
+ marshalledJSON, err := btcjson.MarshalCmd(1, cmd) + if err != nil { + return nil, err + } + + bodyReader := bytes.NewReader(marshalledJSON) + httpReq, err := http.NewRequest("POST", c.addr, bodyReader) + if err != nil { + return nil, err + } + + httpReq.Close = true + httpReq.Header.Set("Content-Type", "application/json") + httpReq.SetBasicAuth("user", "password") + httpResponse, err := c.httpClient.Do(httpReq) + if err != nil { + return nil, err + } + + // Read the raw bytes and close the response. + respBytes, err := ioutil.ReadAll(httpResponse.Body) + _ = httpResponse.Body.Close() + if err != nil { + return nil, err + } + + var resp rawResponse + err = json.Unmarshal(respBytes, &resp) + if err != nil { + return nil, err + } + + return &resp, nil +} diff --git a/pkg/coin/coin.go b/pkg/coin/coin.go index 7ff42f8..237198c 100644 --- a/pkg/coin/coin.go +++ b/pkg/coin/coin.go @@ -5,5 +5,5 @@ import ( ) type Coin interface { - SetupRoutes(prefix string, handler func(endpoint string, handler http.Handler)) + SetupRoutes(prefix string, webhandler func(string, http.Handler)) } diff --git a/pkg/multicoin/multicoin.go b/pkg/multicoin/multicoin.go index 2b5dbc4..2514ee6 100644 --- a/pkg/multicoin/multicoin.go +++ b/pkg/multicoin/multicoin.go @@ -9,6 +9,9 @@ import ( "sync" "time" + "github.com/SkycoinProject/multicoin-wallet/pkg/coin" + "github.com/SkycoinProject/multicoin-wallet/pkg/coin/btc" + "github.com/SkycoinProject/skycoin/src/util/apputil" "github.com/SkycoinProject/skycoin/src/util/logging" @@ -94,7 +97,18 @@ func (m *MultiCoin) Run() error { // Catch SIGUSR1 (prints runtime stack to stdout) go apputil.CatchDebug() - apiServer, err = m.createServer(host, api.NewGateway()) + btcInstance := btc.New() + coins := map[coin.Ticker]coin.Coin{ + "btc": btcInstance, + } + manager, err := coin.NewCoinManager(coins) + if err != nil { + m.logger.Error(err) + retErr = err + goto earlyShutdown + } + + apiServer, err = m.createServer(host, api.NewGateway(manager)) if err != nil 
{ m.logger.Error(err) retErr = err diff --git a/pkg/wallet/deterministic_wallet.go b/pkg/wallet/deterministic_wallet.go index 64e8422..06ee31f 100644 --- a/pkg/wallet/deterministic_wallet.go +++ b/pkg/wallet/deterministic_wallet.go @@ -146,6 +146,7 @@ func (w *DeterministicWallet) GenerateAddresses(num uint64) ([]cipher.Addresser, makeAddress := w.Meta.AddressConstructor() for i, s := range seckeys { p := cipher.MustPubKeyFromSecKey(s) + a := makeAddress(p) addrs[i] = a w.Entries = append(w.Entries, Entry{ diff --git a/pkg/wallet/readable.go b/pkg/wallet/readable.go index 47d327b..4d1fe69 100644 --- a/pkg/wallet/readable.go +++ b/pkg/wallet/readable.go @@ -1,9 +1,12 @@ package wallet import ( + "encoding/hex" "errors" "fmt" + "github.com/SkycoinProject/skycoin/src/cipher/secp256k1-go" + "github.com/SkycoinProject/multicoin-wallet/pkg/coin/eth" "github.com/SkycoinProject/skycoin/src/cipher" @@ -27,7 +30,12 @@ func NewReadableEntry(coinType CoinType, walletType string, e Entry) ReadableEnt } if !e.Public.Null() { - re.Public = e.Public.Hex() + switch coinType { + case CoinTypeEthereum: + re.Public = hex.EncodeToString(secp256k1.UncompressPubkey(e.Public[:])) + default: + re.Public = e.Public.Hex() + } } if !e.Secret.Null() { diff --git a/vendor/github.com/SkycoinProject/skycoin/src/api/README.md b/vendor/github.com/SkycoinProject/skycoin/src/api/README.md deleted file mode 100644 index 1999f12..0000000 --- a/vendor/github.com/SkycoinProject/skycoin/src/api/README.md +++ /dev/null @@ -1,4528 +0,0 @@ -# REST API Documentation - -API default service port is `6420`. However, if running the desktop or standalone releases from the website, the port is randomized by default. - -A REST API implemented in Go is available, -see [Skycoin REST API Client Godoc](https://godoc.org/github.com/SkycoinProject/skycoin/src/api#Client). - -The API has two versions, `/api/v1` and `/api/v2`. 
- - - -- [API Version 1](#api-version-1) -- [API Version 2](#api-version-2) -- [API Sets](#api-sets) -- [Authentication](#authentication) -- [CSRF](#csrf) - - [Get current csrf token](#get-current-csrf-token) -- [General system checks](#general-system-checks) - - [Health check](#health-check) - - [Version info](#version-info) - - [Prometheus metrics](#prometheus-metrics) -- [Simple query APIs](#simple-query-apis) - - [Get balance of addresses](#get-balance-of-addresses) - - [Get unspent output set of address or hash](#get-unspent-output-set-of-address-or-hash) - - [Verify an address](#verify-an-address) -- [Wallet APIs](#wallet-apis) - - [Get wallet](#get-wallet) - - [Get unconfirmed transactions of a wallet](#get-unconfirmed-transactions-of-a-wallet) - - [Get wallets](#get-wallets) - - [Get wallet folder name](#get-wallet-folder-name) - - [Generate wallet seed](#generate-wallet-seed) - - [Verify wallet Seed](#verify-wallet-seed) - - [Create wallet](#create-wallet) - - [Generate new address in wallet](#generate-new-address-in-wallet) - - [Change wallet label](#change-wallet-label) - - [Get wallet balance](#get-wallet-balance) - - [Create transaction](#create-transaction) - - [Sign transaction](#sign-transaction) - - [Unload wallet](#unload-wallet) - - [Encrypt wallet](#encrypt-wallet) - - [Decrypt wallet](#decrypt-wallet) - - [Get wallet seed](#get-wallet-seed) - - [Recover encrypted wallet by seed](#recover-encrypted-wallet-by-seed) -- [Key-value storage APIs](#key-value-storage-apis) - - [Get all storage values](#get-all-storage-values) - - [Add value to storage](#add-value-to-storage) - - [Remove value from storage](#remove-value-from-storage) -- [Transaction APIs](#transaction-apis) - - [Get unconfirmed transactions](#get-unconfirmed-transactions) - - [Create transaction from unspent outputs or addresses](#create-transaction-from-unspent-outputs-or-addresses) - - [Get transaction info by id](#get-transaction-info-by-id) - - [Get raw transaction by 
id](#get-raw-transaction-by-id) - - [Inject raw transaction](#inject-raw-transaction) - - [Get transactions for addresses](#get-transactions-for-addresses) - - [Resend unconfirmed transactions](#resend-unconfirmed-transactions) - - [Verify encoded transaction](#verify-encoded-transaction) -- [Block APIs](#block-apis) - - [Get blockchain metadata](#get-blockchain-metadata) - - [Get blockchain progress](#get-blockchain-progress) - - [Get block by hash or seq](#get-block-by-hash-or-seq) - - [Get blocks in specific range](#get-blocks-in-specific-range) - - [Get last N blocks](#get-last-n-blocks) -- [Uxout APIs](#uxout-apis) - - [Get uxout](#get-uxout) - - [Get historical unspent outputs for an address](#get-historical-unspent-outputs-for-an-address) -- [Coin supply related information](#coin-supply-related-information) - - [Coin supply](#coin-supply) - - [Richlist show top N addresses by uxouts](#richlist-show-top-n-addresses-by-uxouts) - - [Count unique addresses](#count-unique-addresses) -- [Network status](#network-status) - - [Get information for a specific connection](#get-information-for-a-specific-connection) - - [Get a list of all connections](#get-a-list-of-all-connections) - - [Get a list of all default connections](#get-a-list-of-all-default-connections) - - [Get a list of all trusted connections](#get-a-list-of-all-trusted-connections) - - [Get a list of all connections discovered through peer exchange](#get-a-list-of-all-connections-discovered-through-peer-exchange) - - [Disconnect a peer](#disconnect-a-peer) -- [Migrating from the unversioned API](#migrating-from-the-unversioned-api) -- [Migrating from the JSONRPC API](#migrating-from-the-jsonrpc-api) -- [Migrating from /api/v1/spend](#migrating-from-apiv1spend) -- [Migration from /api/v1/explorer/address](#migration-from-apiv1exploreraddress) - - - -## API Version 1 - -`/api/v1` endpoints have no standard format. Most of them accept formdata in POST requests, -but a few accept `application/json` instead. 
Most of them return JSON but one or two -return a plaintext string. - -All endpoints will set an appropriate HTTP status code, using `200` for success and codes greater than or equal to `400` for error. - -`/api/v1` endpoints guarantee backwards compatibility. - -## API Version 2 - -*Note: API Version 2 is under development, and not stable. The guidelines here are subject to change.* - -`/api/v2` endpoints have a standard format. - -All `/api/v2` `POST` endpoints accept only `application/json` and return `application/json`. - -All `/api/v2` `GET` requires accept data in the query string. -In the future we may have choose to have `GET` requests also accept `POST` with a JSON body, -to support requests with a large query body, such as when requesting data for a large number -of addresses or transactions. - -`/api/v2` responses are always JSON. If there is an error, the JSON object will -look like this: - -```json -{ - "error": { - "code": 400, - "message": "bad arguments", - } -} -``` - -Response data will be included in a `"data"` field, which will always be a JSON object (not an array). - -Some endpoints may return both `"error"` and `"data"`. This will be noted in the documentation for that endpoint. - -All responses will set an appropriate HTTP status code indicating an error, and it will be equal to the value of `response["error"]["code"]`. - -Since `/api/v2` is still under development, there are no guarantees for backwards compatibility. -However, any changes to the API will be recorded in the [changelog](../../CHANGELOG.md). - -Under some circumstances an error response body may not be valid JSON. -Any client consuming the API should accomodate this and conditionally parse JSON for non-`200` responses. - -## API Sets - -API endpoints are grouped into "sets" which can be toggled with the command line parameters -`-enable-api-sets`, `-disable-api-sets` and `-enable-all-api-sets`. 
- -These API sets are: - -* `READ` - All query-related endpoints, they do not modify the state of the program -* `STATUS` - A subset of `READ`, these endpoints report the application, network or blockchain status -* `TXN` - Enables `/api/v1/injectTransaction` and `/api/v1/resendUnconfirmedTxns` without enabling wallet endpoints -* `WALLET` - These endpoints operate on local wallet files -* `PROMETHEUS` - This is the `/api/v2/metrics` method exposing in Prometheus text format the default metrics for Skycoin node application -* `NET_CTRL` - The `/api/v1/network/connection/disconnect` method, intended for network administration endpoints -* `INSECURE_WALLET_SEED` - This is the `/api/v1/wallet/seed` endpoint, used to decrypt and return the seed from an encrypted wallet. It is only intended for use by the desktop client. -* `STORAGE` - This is the `/api/v2/data` endpoint, used to interact with the key-value storage. - -## Authentication - -Authentication can be enabled with the `-web-interface-username` and `-web-interface-password` options. -The username and password should be provided in an `Authorization: Basic` header. - -Authentication can only be enabled when using HTTPS with `-web-interface-https`, unless `-web-interface-plaintext-auth` is enabled. - -## CSRF - -All `POST`, `PUT` and `DELETE` requests require a CSRF token, obtained with a `GET /api/v1/csrf` call. -The token must be placed in the `X-CSRF-Token` header. A token is only valid -for 30 seconds and it is expected that the client obtains a new CSRF token -for each request. Requesting a CSRF token invalidates any previous CSRF token. - -A request rejected for invalid or expired CSRF will respond with `403 Forbidden - invalid CSRF token` -as the response body. 
- -### Get current csrf token - -API sets: any - -``` -URI: /api/v1/csrf -Method: GET -``` - -Example: - -```sh -curl http://127.0.0.1:6420/api/v1/csrf -``` - -Result: - -```json -{ - "csrf_token": "klSgXoMOFTvEnt8KptBvHjhlFnW0OIkzyFVn4i8frDvIus9iLsFukqA9sM9Rxf3pLZHRLr82vBQxTq50vbYA8g" -} -``` - -## General system checks - -### Health check - -API sets: `STATUS`, `READ` - -``` -URI: /api/v1/health -Method: GET -``` - -Example: - -```sh -curl http://127.0.0.1:6420/api/v1/health -``` - -Response: - -```json -{ - "blockchain": { - "head": { - "seq": 58894, - "block_hash": "3961bea8c4ab45d658ae42effd4caf36b81709dc52a5708fdd4c8eb1b199a1f6", - "previous_block_hash": "8eca94e7597b87c8587286b66a6b409f6b4bf288a381a56d7fde3594e319c38a", - "timestamp": 1537581604, - "fee": 485194, - "version": 0, - "tx_body_hash": "c03c0dd28841d5aa87ce4e692ec8adde923799146ec5504e17ac0c95036362dd", - "ux_hash": "f7d30ecb49f132283862ad58f691e8747894c9fc241cb3a864fc15bd3e2c83d3" - }, - "unspents": 38171, - "unconfirmed": 1, - "time_since_last_block": "4m46s" - }, - "version": { - "version": "0.25.0", - "commit": "8798b5ee43c7ce43b9b75d57a1a6cd2c1295cd1e", - "branch": "develop" - }, - "coin": "skycoin", - "user_agent": "skycoin:0.25.0", - "open_connections": 8, - "outgoing_connections": 5, - "incoming_connections": 3, - "uptime": "6m30.629057248s", - "csrf_enabled": true, - "csp_enabled": true, - "wallet_api_enabled": true, - "gui_enabled": true, - "user_verify_transaction": { - "burn_factor": 10, - "max_transaction_size": 32768, - "max_decimals": 3 - }, - "unconfirmed_verify_transaction": { - "burn_factor": 10, - "max_transaction_size": 32768, - "max_decimals": 3 - }, - "started_at": 1542443907, - "fiber": { - "name": "skycoin", - "display_name": "Skycoin", - "ticker": "SKY", - "coin_hours_display_name": "Coin Hours", - "coin_hours_display_name_singular": "Coin Hour", - "coin_hours_ticker": "SCH", - "explorer_url": "https://explorer.skycoin.com", - "bip44_coin": 8000 - } -} -``` - -### Version 
info - -API sets: any - -``` -URI: /api/v1/version -Method: GET -``` - -Example: - -```sh -curl http://127.0.0.1:6420/api/v1/version -``` - -Result: - -```json -{ - "version": "0.20.0", - "commit": "cc733e9922d85c359f5f183d3a3a6e42c73ccb16", - "branch": "develop" -} -``` - -### Prometheus metrics - -API sets: `PROMETHEUS` - -``` -URI: /api/v2/metrics -Method: GET -``` - -Example: - -```sh -curl http://127.0.0.1:6420/api/v2/metrics -``` - -Result: - -``` -# HELP go_gc_duration_seconds A summary of the GC invocation durations. -# TYPE go_gc_duration_seconds summary -go_gc_duration_seconds{quantile="0"} 5.31e-05 -go_gc_duration_seconds{quantile="0.25"} 0.000158 -go_gc_duration_seconds{quantile="0.5"} 0.0001789 -go_gc_duration_seconds{quantile="0.75"} 0.0002216 -go_gc_duration_seconds{quantile="1"} 0.0005878 -go_gc_duration_seconds_sum 0.3881053 -go_gc_duration_seconds_count 1959 -# HELP go_goroutines Number of goroutines that currently exist. -# TYPE go_goroutines gauge -go_goroutines 30 -# HELP go_memstats_alloc_bytes Number of bytes allocated and still in use. -# TYPE go_memstats_alloc_bytes gauge -go_memstats_alloc_bytes 2.862168e+06 -# HELP go_memstats_alloc_bytes_total Total number of bytes allocated, even if freed. -# TYPE go_memstats_alloc_bytes_total counter -go_memstats_alloc_bytes_total 4.462792584e+09 -# HELP go_memstats_buck_hash_sys_bytes Number of bytes used by the profiling bucket hash table. -# TYPE go_memstats_buck_hash_sys_bytes gauge -go_memstats_buck_hash_sys_bytes 1.794588e+06 -# HELP go_memstats_frees_total Total number of frees. -# TYPE go_memstats_frees_total counter -go_memstats_frees_total 4.7917586e+07 -# HELP go_memstats_gc_sys_bytes Number of bytes used for garbage collection system metadata. -# TYPE go_memstats_gc_sys_bytes gauge -go_memstats_gc_sys_bytes 2.392064e+06 -# HELP go_memstats_heap_alloc_bytes Number of heap bytes allocated and still in use. 
-# TYPE go_memstats_heap_alloc_bytes gauge -go_memstats_heap_alloc_bytes 2.862168e+06 -# HELP go_memstats_heap_idle_bytes Number of heap bytes waiting to be used. -# TYPE go_memstats_heap_idle_bytes gauge -go_memstats_heap_idle_bytes 6.0973056e+07 -# HELP go_memstats_heap_inuse_bytes Number of heap bytes that are in use. -# TYPE go_memstats_heap_inuse_bytes gauge -go_memstats_heap_inuse_bytes 5.087232e+06 -# HELP go_memstats_heap_objects Number of allocated objects. -# TYPE go_memstats_heap_objects gauge -go_memstats_heap_objects 16326 -# HELP go_memstats_heap_released_bytes_total Total number of heap bytes released to OS. -# TYPE go_memstats_heap_released_bytes_total counter -go_memstats_heap_released_bytes_total 0 -# HELP go_memstats_heap_sys_bytes Number of heap bytes obtained from system. -# TYPE go_memstats_heap_sys_bytes gauge -go_memstats_heap_sys_bytes 6.6060288e+07 -# HELP go_memstats_last_gc_time_seconds Number of seconds since 1970 of last garbage collection. -# TYPE go_memstats_last_gc_time_seconds gauge -go_memstats_last_gc_time_seconds 1.5366276699863462e+09 -# HELP go_memstats_lookups_total Total number of pointer lookups. -# TYPE go_memstats_lookups_total counter -go_memstats_lookups_total 0 -# HELP go_memstats_mallocs_total Total number of mallocs. -# TYPE go_memstats_mallocs_total counter -go_memstats_mallocs_total 4.7933912e+07 -# HELP go_memstats_mcache_inuse_bytes Number of bytes in use by mcache structures. -# TYPE go_memstats_mcache_inuse_bytes gauge -go_memstats_mcache_inuse_bytes 6912 -# HELP go_memstats_mcache_sys_bytes Number of bytes used for mcache structures obtained from system. -# TYPE go_memstats_mcache_sys_bytes gauge -go_memstats_mcache_sys_bytes 16384 -# HELP go_memstats_mspan_inuse_bytes Number of bytes in use by mspan structures. -# TYPE go_memstats_mspan_inuse_bytes gauge -go_memstats_mspan_inuse_bytes 76000 -# HELP go_memstats_mspan_sys_bytes Number of bytes used for mspan structures obtained from system. 
-# TYPE go_memstats_mspan_sys_bytes gauge -go_memstats_mspan_sys_bytes 180224 -# HELP go_memstats_next_gc_bytes Number of heap bytes when next garbage collection will take place. -# TYPE go_memstats_next_gc_bytes gauge -go_memstats_next_gc_bytes 5.576912e+06 -# HELP go_memstats_other_sys_bytes Number of bytes used for other system allocations. -# TYPE go_memstats_other_sys_bytes gauge -go_memstats_other_sys_bytes 792284 -# HELP go_memstats_stack_inuse_bytes Number of bytes in use by the stack allocator. -# TYPE go_memstats_stack_inuse_bytes gauge -go_memstats_stack_inuse_bytes 1.048576e+06 -# HELP go_memstats_stack_sys_bytes Number of bytes obtained from system for stack allocator. -# TYPE go_memstats_stack_sys_bytes gauge -go_memstats_stack_sys_bytes 1.048576e+06 -# HELP go_memstats_sys_bytes Number of bytes obtained by system. Sum of all system allocations. -# TYPE go_memstats_sys_bytes gauge -go_memstats_sys_bytes 7.2284408e+07 -# HELP process_cpu_seconds_total Total user and system CPU time spent in seconds. -# TYPE process_cpu_seconds_total counter -process_cpu_seconds_total 36.04 -# HELP process_max_fds Maximum number of open file descriptors. -# TYPE process_max_fds gauge -process_max_fds 1.048576e+06 -# HELP process_open_fds Number of open file descriptors. -# TYPE process_open_fds gauge -process_open_fds 15 -# HELP process_resident_memory_bytes Resident memory size in bytes. -# TYPE process_resident_memory_bytes gauge -process_resident_memory_bytes 4.9025024e+07 -# HELP process_start_time_seconds Start time of the process since unix epoch in seconds. -# TYPE process_start_time_seconds gauge -process_start_time_seconds 1.53662761869e+09 -# HELP process_virtual_memory_bytes Virtual memory size in bytes. 
-# TYPE process_virtual_memory_bytes gauge -process_virtual_memory_bytes 8.22317056e+08 -``` - - -## Simple query APIs - -### Get balance of addresses - -API sets: `READ` - -``` -URI: /api/v1/balance -Method: GET, POST -Args: - addrs: comma-separated list of addresses. must contain at least one address -``` - -Returns the cumulative and individual balances of one or more addresses. -The `POST` method can be used if many addresses need to be queried. - -Example: - -```sh -curl http://127.0.0.1:6420/api/v1/balance?addrs=7cpQ7t3PZZXvjTst8G7Uvs7XH4LeM8fBPD,nu7eSpT6hr5P21uzw7bnbxm83B6ywSjHdq,2jBbGxZRGoQG1mqhPBnXnLTxK6oxsTf8os6 -``` - -Result: - -```json -{ - "confirmed": { - "coins": 21000000, - "hours": 142744 - }, - "predicted": { - "coins": 21000000, - "hours": 142744 - }, - "addresses": { - "2jBbGxZRGoQG1mqhPBnXnLTxK6oxsTf8os6": { - "confirmed": { - "coins": 0, - "hours": 0 - }, - "predicted": { - "coins": 0, - "hours": 0 - } - }, - "7cpQ7t3PZZXvjTst8G7Uvs7XH4LeM8fBPD": { - "confirmed": { - "coins": 9000000, - "hours": 88075 - }, - "predicted": { - "coins": 9000000, - "hours": 88075 - } - }, - "nu7eSpT6hr5P21uzw7bnbxm83B6ywSjHdq": { - "confirmed": { - "coins": 12000000, - "hours": 54669 - }, - "predicted": { - "coins": 12000000, - "hours": 54669 - } - } - } -} -``` - -### Get unspent output set of address or hash - -API sets: `READ` - -``` -URI: /api/v1/outputs -Method: GET, POST -Args: - addrs: address list, joined with "," - hashes: hash list, joined with "," -``` - -Addrs and hashes cannot be combined. - -In the response, `"head_outputs"` are outputs in the current unspent output set, -`"outgoing_outputs"` are head outputs that are being spent by an unconfirmed transaction, -and `"incoming_outputs"` are outputs that will be created by an unconfirmed transaction. - -The current head block header is returned as `"head"`. - -The `POST` method can be used if many addresses or hashes need to be queried. 
- -Example: - -```sh -curl http://127.0.0.1:6420/api/v1/outputs?addrs=6dkVxyKFbFKg9Vdg6HPg1UANLByYRqkrdY -``` - -or - -```sh -curl http://127.0.0.1:6420/api/v1/outputs?hashes=7669ff7350d2c70a88093431a7b30d3e69dda2319dcb048aa80fa0d19e12ebe0 -``` - -Result: - -```json -{ - "head": { - "seq": 58891, - "block_hash": "d9ca9442febd8788de0a3093158943beca228017bf8c9c9b8529a382fad8d991", - "previous_block_hash": "098ea5c6e12370c38529ef7c7c38779f83d05f707affb747022eee77332ba510", - "timestamp": 1537580414, - "fee": 2165, - "version": 0, - "tx_body_hash": "c488835c85ccb153a6d42b39aaae01c3e30d16de33de282f4b3f6fa1ccf6f7eb", - "ux_hash": "f7d30ecb49f132283862ad58f691e8747894c9fc241cb3a864fc15bd3e2c83d3" - }, - "head_outputs": [ - { - "hash": "7669ff7350d2c70a88093431a7b30d3e69dda2319dcb048aa80fa0d19e12ebe0", - "block_seq": 22, - "time": 1494275011, - "src_tx": "b51e1933f286c4f03d73e8966186bafb25f64053db8514327291e690ae8aafa5", - "address": "6dkVxyKFbFKg9Vdg6HPg1UANLByYRqkrdY", - "coins": "2.000000", - "hours": 633, - "calculated_hours": 10023 - }, - ], - "outgoing_outputs": [], - "incoming_outputs": [] -} -``` - -### Verify an address - -API sets: `READ` - -``` -URI: /api/v2/address/verify -Method: POST -Content-Type: application/json -Args: {"address": "
"} -``` - -Parses and validates a Skycoin address. Returns the address version in the response. - -Error responses: - -* `400 Bad Request`: The request body is not valid JSON or the address is missing from the request body -* `422 Unprocessable Entity`: The address is invalid - -Example for a valid address: - -```sh -curl -X POST http://127.0.0.1:6420/api/v2/address/verify \ - -H 'Content-Type: application/json' \ - -d '{"address":"2HTnQe3ZupkG6k8S81brNC3JycGV2Em71F2"}' -``` - -Result: - -```json -{ - "data": { - "version": 0, - } -} -``` - -Example for an invalid address: - -```sh -curl -X POST http://127.0.0.1:6420/api/v2/address/verify \ - -H 'Content-Type: application/json' \ - -d '{"address":"2aTnQe3ZupkG6k8S81brNC3JycGV2Em71F2"}' -``` - -Result: - -```json -{ - "error": { - "message": "Invalid checksum", - "code": 422 - } -} -``` - -## Wallet APIs - -### Get wallet - -API sets: `WALLET` - -``` -URI: /api/v1/wallet -Method: GET -Args: - id: Wallet ID [required] -``` - -Example ("deterministic" wallet): - -```sh -curl http://127.0.0.1:6420/api/v1/wallet?id=2017_11_25_e5fb.wlt -``` - -Result: - -```json -{ - "meta": { - "coin": "skycoin", - "filename": "2017_11_25_e5fb.wlt", - "label": "test", - "type": "deterministic", - "version": "0.2", - "crypto_type": "", - "timestamp": 1511640884, - "encrypted": false - }, - "entries": [ - { - "address": "2HTnQe3ZupkG6k8S81brNC3JycGV2Em71F2", - "public_key": "0316ff74a8004adf9c71fa99808ee34c3505ee73c5cf82aa301d17817da3ca33b1" - }, - { - "address": "SMnCGfpt7zVXm8BkRSFMLeMRA6LUu3Ewne", - "public_key": "02539528248a1a2c4f0b73233491103ca83b40249dac3ae9eee9a10b9f9debd9a3" - } - ] -} -``` - -Example ("bip44" wallet): - -```sh -curl http://127.0.0.1:6420/api/v1/wallet?id=2017_11_25_e5fb.wlt -``` - -Result: - -```json -{ - "meta": { - "coin": "skycoin", - "filename": "2017_11_25_e5fb.wlt", - "label": "test", - "type": "bip44", - "version": "0.3", - "crypto_type": "", - "timestamp": 1511640884, - "encrypted": false, - 
"bip44_coin": 8000 - },
- - -Example: - -```sh -curl http://127.0.0.1:6420/api/v1/wallet/transactions?id=2017_11_25_e5fb.wlt -``` - -Result: - -```json -{ - "transactions": [ - { - "transaction": { - "length": 317, - "type": 0, - "txid": "76ecbabc53ea2a3be46983058433dda6a3cf7ea0b86ba14d90b932fa97385de7", - "inner_hash": "5d55837bb0cbda9c9323ff9aafd7c3d31d0d38638346172fbe2d9078ebaa892a", - "sigs": [ - "464b7724302178c1cfeacadaaf3556a3b7e5259adf51919476c3acc695747ed244b5ce2187ce7bedb6ad65c71f7f7ff3fa6805e64fe5da3aaa00ad563c7424f600", - "1155537b0391d4a6ee5eac07dee5798e953dca3a7c30643403dd2d326582c7d35080a16dc22644782ce1087bfc3bd06c2bf68e9a98e3989d90831646a9be2c9101" - ], - "inputs": [ - "782a8662efb0e933cab7d3ae9429ab53c4208cf44d8cdc07c2fbd7204b6b5cad", - "2f6b61a44086588c4eaa56a5dd9f1e0be2528861a6731608fcec38891b95db91" - ], - "outputs": [ - { - "uxid": "bd302ef776efa8548183b89f21e90649f21b90fe2d2e90ecc1b880f2d995f226", - "dst": "2UXZTg4ZHF6715b6tRhtaqceuQQ3G79GiZg", - "coins": "998.000000", - "hours": 247538 - }, - { - "uxid": "31058b6bfb30bfd441aec00929e75782bce47c8a75787ba519dbb268f89d2c4b", - "dst": "2awsJ2CR5H6QXCF2hwDjcvcAH9SgyfxCxgz", - "coins": "1.000000", - "hours": 247538 - } - ] - }, - "received": "2018-03-16T18:03:57.139109904+05:30", - "checked": "2018-03-16T18:03:57.139109904+05:30", - "announced": "0001-01-01T00:00:00Z", - "is_valid": true - } - ] -} -``` - -Example (verbose): - -```sh -curl http://127.0.0.1:6420/api/v1/wallet/transactions?id=2017_11_25_e5fb.wlt&verbose=1 -``` - -Result: - -```json -{ - "transactions": [ - { - "transaction": { - "length": 317, - "type": 0, - "txid": "76ecbabc53ea2a3be46983058433dda6a3cf7ea0b86ba14d90b932fa97385de7", - "inner_hash": "5d55837bb0cbda9c9323ff9aafd7c3d31d0d38638346172fbe2d9078ebaa892a", - "sigs": [ - "464b7724302178c1cfeacadaaf3556a3b7e5259adf51919476c3acc695747ed244b5ce2187ce7bedb6ad65c71f7f7ff3fa6805e64fe5da3aaa00ad563c7424f600", - 
"1155537b0391d4a6ee5eac07dee5798e953dca3a7c30643403dd2d326582c7d35080a16dc22644782ce1087bfc3bd06c2bf68e9a98e3989d90831646a9be2c9101" - ], - "fee": 495076, - "inputs": [ - { - "uxid": "782a8662efb0e933cab7d3ae9429ab53c4208cf44d8cdc07c2fbd7204b6b5cad", - "owner": "8C5icxR9zdkYTZZTVV3cCX7QoK4EkLuK4p", - "coins": "997.000000", - "hours": 880000, - "calculated_hours": 990000 - }, - { - "uxid": "2f6b61a44086588c4eaa56a5dd9f1e0be2528861a6731608fcec38891b95db91", - "owner": "23A1EWMZopUFLCwtXMe2CU9xTCbi5Gth643", - "coins": "2.000000", - "hours": 10, - "calculated_hours": 152 - } - ], - "outputs": [ - { - "uxid": "bd302ef776efa8548183b89f21e90649f21b90fe2d2e90ecc1b880f2d995f226", - "dst": "2UXZTg4ZHF6715b6tRhtaqceuQQ3G79GiZg", - "coins": "998.000000", - "hours": 247538 - }, - { - "uxid": "31058b6bfb30bfd441aec00929e75782bce47c8a75787ba519dbb268f89d2c4b", - "dst": "2awsJ2CR5H6QXCF2hwDjcvcAH9SgyfxCxgz", - "coins": "1.000000", - "hours": 247538 - } - ] - }, - "received": "2018-03-16T18:03:57.139109904+05:30", - "checked": "2018-03-16T18:03:57.139109904+05:30", - "announced": "0001-01-01T00:00:00Z", - "is_valid": true - } - ] -} -``` - -### Get wallets - -API sets: `WALLET` - -``` -URI: /api/v1/wallets -Method: GET -``` - -Example: - -```sh -curl http://127.0.0.1:6420/api/v1/wallets -``` - -Result: - -```json -[ - { - "meta": { - "coin": "skycoin", - "filename": "2017_11_25_e5fb.wlt", - "label": "test", - "type": "deterministic", - "version": "0.2", - "crypto_type": "", - "timestamp": 1511640884, - "encrypted": false - }, - "entries": [ - { - "address": "8C5icxR9zdkYTZZTVV3cCX7QoK4EkLuK4p", - "public_key": "0316ff74a8004adf9c71fa99808ee34c3505ee73c5cf82aa301d17817da3ca33b1" - }, - { - "address": "23A1EWMZopUFLCwtXMe2CU9xTCbi5Gth643", - "public_key": "02539528248a1a2c4f0b73233491103ca83b40249dac3ae9eee9a10b9f9debd9a3" - } - ] - } -] -``` - -### Get wallet folder name - -API sets: `WALLET` - -``` -URI: /api/v1/wallets/folderName -Method: GET -``` - -Example: - -```sh -curl 
http://127.0.0.1:6420/api/v1/wallets/folderName -``` - -Result: - -```json -{ - "address": "/Users/user/.skycoin/wallets" -} -``` - -### Generate wallet seed - -API sets: `WALLET` - -``` -URI: /api/v1/wallet/newSeed -Method: GET -Args: - entropy: seed entropy [optional] - can either be 128 or 256; 128 = 12 word seed, 256 = 24 word seed - default: 128 -``` - -Example: - -```sh -curl http://127.0.0.1:6420/api/v1/wallet/newSeed -``` - -Result: - -```json -{ - "seed": "helmet van actor peanut differ icon trial glare member cancel marble rack" -} -``` - -### Verify wallet Seed - -API sets: `WALLET` - -``` -URI: /api/v2/wallet/seed/verify -Method: POST -Args: - seed: seed to be verified -``` - -Example: - -```sh -curl -X POST http://127.0.0.1:6420/api/v2/wallet/seed/verify \ - -H 'Content-type: application/json' \ - -d '{ "seed": "nut wife logic sample addict shop before tobacco crisp bleak lawsuit affair" }' -``` - -Result: - -```json -{ - "data": {} -} -``` - -Example (wrong bip39 seed): - -```sh -curl -X POST http://127.0.0.1:6420/api/v2/wallet/seed/verify \ - -H 'Content-type: application/json' \ - -d '{ "seed": "wrong seed" }' -``` - -Result: - -```json -{ - "error": { - "message": "Mnemonic must have 12, 15, 18, 21 or 24 words", - "code": 422 - } -} -``` - -### Create wallet - -API sets: `WALLET` - -``` -URI: /api/v1/wallet/create -Method: POST -Args: - seed: wallet seed [required] - seed-passphrase: wallet seed passphrase [optional, bip44 type wallet only] - type: wallet type [required, one of "deterministic", "bip44" or "xpub"] - bip44-coin: BIP44 coin type [optional, defaults to 8000 (skycoin's coin type), only valid if type is "bip44"] - xpub: xpub key [required for xpub wallets] - label: wallet label [required] - scan: the number of addresses to scan ahead for balances [optional, must be > 0] - encrypt: encrypt wallet [optional, bool value] - password: wallet password [optional, must be provided if encrypt is true] -``` - -Example (deterministic): - -```sh 
-curl -X POST http://127.0.0.1:6420/api/v1/wallet/create \ - -H 'Content-Type: application/x-www-form-urlencoded' \ - -d 'seed=$seed' \ - -d 'type=deterministic' \ - -d 'label=$label' \ - -d 'scan=5' \ - -d 'password=$password' -``` - -Result: - -```json -{ - "meta": { - "coin": "skycoin", - "filename": "2017_05_09_d554.wlt", - "label": "test", - "type": "deterministic", - "version": "0.3", - "crypto_type": "", - "timestamp": 1511640884, - "encrypted": false - }, - "entries": [ - { - "address": "y2JeYS4RS8L9GYM7UKdjLRyZanKHXumFoH", - "public_key": "0316ff74a8004adf9c71fa99808ee34c3505ee73c5cf82aa301d17817da3ca33b1" - } - ] -} -``` - -Example (bip44): - -```sh -curl -X POST http://127.0.0.1:6420/api/v1/wallet/create \ - -H 'Content-Type: application/x-www-form-urlencoded' \ - -d 'seed=$seed' \ - -d 'seed-passphrase=$seed' \ - -d 'type=bip44' \ - -d 'label=$label' \ - -d 'scan=5' \ - -d 'password=$password' -``` - -Result: - -```json -{ - "meta": { - "coin": "skycoin", - "filename": "2017_05_09_d554.wlt", - "label": "test", - "type": "bip44", - "version": "0.3", - "crypto_type": "scrypt-chacha20poly1305", - "timestamp": 1511640884, - "encrypted": true, - "bip44_coin": 8000, - }, - "entries": [ - { - "address": "y2JeYS4RS8L9GYM7UKdjLRyZanKHXumFoH", - "public_key": "0316ff74a8004adf9c71fa99808ee34c3505ee73c5cf82aa301d17817da3ca33b1", - "child_number": 0, - "change": 0 - } - ] -} -``` - -Example (xpub): - -```sh -curl -X POST http://127.0.0.1:6420/api/v1/wallet/create \ - -H 'Content-Type: application/x-www-form-urlencoded' \ - -d 'type=xpub' \ - -d 'xpub=xpub661MyMwAqRbcFtXgS5sYJABqqG9YLmC4Q1Rdap9gSE8NqtwybGhePY2gZ29ESFjqJoCu1Rupje8YtGqsefD265TMg7usUDFdp6W1EGMcet8' \ - -d 'label=$label' \ - -d 'scan=5' -``` - -Result: - -```json -{ - "meta": { - "coin": "skycoin", - "filename": "2017_05_09_d554.wlt", - "label": "test", - "type": "bip44", - "version": "0.4", - "crypto_type": "", - "timestamp": 1511640884, - "encrypted": false, - "xpub": 
"xpub661MyMwAqRbcFtXgS5sYJABqqG9YLmC4Q1Rdap9gSE8NqtwybGhePY2gZ29ESFjqJoCu1Rupje8YtGqsefD265TMg7usUDFdp6W1EGMcet8" - }, - "entries": [ - { - "address": "y2JeYS4RS8L9GYM7UKdjLRyZanKHXumFoH", - "public_key": "0316ff74a8004adf9c71fa99808ee34c3505ee73c5cf82aa301d17817da3ca33b1", - "child_number": 0 - } - ] -} -``` - -### Generate new address in wallet - -API sets: `WALLET` - -``` -URI: /api/v1/wallet/newAddress -Method: POST -Args: - id: wallet file name - num: the number you want to generate - password: wallet password -``` - -For `bip44` type wallets, the new addresses will be generated on the `external` chain (`change=0`). - -Example: - -```sh -curl -X POST http://127.0.0.1:6420/api/v1/wallet/newAddress \ - -H 'Content-Type: x-www-form-urlencoded' \ - -d 'id=2017_05_09_d554.wlt' \ - -d 'num=2' \ - -d 'password=$password' -``` - -Result: - -```json -{ - "addresses": [ - "TDdQmMgbEVTwLe8EAiH2AoRc4SjoEFKrHB" - ] -} -``` - -### Change wallet label - -API sets: `WALLET` - -``` -URI: /api/v1/wallet/update -Method: POST -Args: - id: wallet file name - label: wallet label -``` - -Example: - -```sh -curl -X POST http://127.0.0.1:6420/api/v1/wallet/update \ - -H 'Content-Type: application/x-www-form-urlencoded' \ - -d 'id=$id' \ - -d 'label=$label' -``` - -Result: - -```json -"success" -``` - -### Get wallet balance - -API sets: `WALLET` - -``` -URI: /api/v1/wallet/balance -Method: GET -Args: - id: wallet file name -``` - -Example: - -```sh -curl http://127.0.0.1:6420/api/v1/wallet/balance?id=2018_03_07_3088.wlt -``` - -Result: - -```json -{ - "confirmed": { - "coins": 210400000, - "hours": 1873147 - }, - "predicted": { - "coins": 210400000, - "hours": 1873147 - }, - "addresses": { - "AXrFisGovRhRHipsbGahs4u2hXX7pDRT5p": { - "confirmed": { - "coins": 1250000, - "hours": 941185 - }, - "predicted": { - "coins": 1250000, - "hours": 941185 - } - }, - "AtNorKBpCgkSRL7zES7aAQyNjqjqPp2QJU": { - "confirmed": { - "coins": 1150000, - "hours": 61534 - }, - "predicted": { - "coins": 
1150000, - "hours": 61534 - } - }, - "VUv9ehMZWmDvwWV36BQ3eL1ujb4MQ5TGyK": { - "confirmed": { - "coins": 208000000, - "hours": 870428 - }, - "predicted": { - "coins": 208000000, - "hours": 870428 - } - }, - "j4mbF1fTe8jgXbrRARZSBjDpD1hMGSe1E4": { - "confirmed": { - "coins": 0, - "hours": 0 - }, - "predicted": { - "coins": 0, - "hours": 0 - } - }, - "uyqBPcRCWucHXs18e9VZyNEeuNsD5tFDhy": { - "confirmed": { - "coins": 0, - "hours": 0 - }, - "predicted": { - "coins": 0, - "hours": 0 - } - } - } -} -``` - -### Create transaction - -API sets: `WALLET` - -``` -URI: /api/v1/wallet/transaction -Method: POST -Content-Type: application/json -Args: JSON body, see examples -``` - -Creates a transaction, returning the transaction preview and the encoded, serialized transaction. -The `encoded_transaction` can be provided to `POST /api/v1/injectTransaction` to broadcast it to the network -if the transaction is fully signed. - -The request body includes: - -* An optional change address -* A wallet to spend from with the optional ability to restrict which addresses or which unspent outputs in the wallet to use -* A list of destinations with address and coins specified, as well as optionally specifying hours -* A configuration for how destination hours are distributed, either manual or automatic -* Additional options - -`change_address` is optional. If not provided and the wallet is a `deterministic` type -wallet, then the change address will default to an address from one of the -unspent outputs being spent as a transaction input. If the wallet is a `bip44` type -wallet, then a new, unused change address will be created. 
- -Example request body with manual hours selection type, unencrypted wallet and all wallet addresses may spend: - -```json -{ - "hours_selection": { - "type": "manual" - }, - "wallet_id": "foo.wlt", - "change_address": "nu7eSpT6hr5P21uzw7bnbxm83B6ywSjHdq", - "to": [{ - "address": "fznGedkc87a8SsW94dBowEv6J7zLGAjT17", - "coins": "1.032", - "hours": "7" - }, { - "address": "7cpQ7t3PZZXvjTst8G7Uvs7XH4LeM8fBPD", - "coins": "99.2", - "hours": "0" - }], - "unsigned": false, - "ignore_unconfirmed": false -} -``` - -Example request body with auto hours selection type, encrypted wallet, specified spending addresses: - -```json -{ - "hours_selection": { - "type": "auto", - "mode": "share", - "share_factor": "0.5" - }, - "wallet_id": "foo.wlt", - "password": "foobar", - "addresses": ["2iVtHS5ye99Km5PonsB42No3pQRGEURmxyc"], - "change_address": "nu7eSpT6hr5P21uzw7bnbxm83B6ywSjHdq", - "to": [{ - "address": "fznGedkc87a8SsW94dBowEv6J7zLGAjT17", - "coins": "1.032" - }, { - "address": "7cpQ7t3PZZXvjTst8G7Uvs7XH4LeM8fBPD", - "coins": "99.2" - }], - "unsigned": false, - "ignore_unconfirmed": false -} -``` - -Example request body with manual hours selection type, unencrypted wallet and spending specific unspent outputs: - -```json -{ - "hours_selection": { - "type": "manual" - }, - "wallet_id": "foo.wlt", - "unspents": ["519c069a0593e179f226e87b528f60aea72826ec7f99d51279dd8854889ed7e2", "4e4e41996297511a40e2ef0046bd6b7118a8362c1f4f09a288c5c3ea2f4dfb85"], - "change_address": "nu7eSpT6hr5P21uzw7bnbxm83B6ywSjHdq", - "to": [{ - "address": "fznGedkc87a8SsW94dBowEv6J7zLGAjT17", - "coins": "1.032", - "hours": "7" - }, { - "address": "7cpQ7t3PZZXvjTst8G7Uvs7XH4LeM8fBPD", - "coins": "99.2", - "hours": "0" - }], - "unsigned": false, - "ignore_unconfirmed": false -} -``` - - -The `hours_selection` field has two types: `manual` or `auto`. - -If `manual`, all destination hours must be specified. - -If `auto`, the `mode` field must be set. The only valid value for `mode` is `"share"`. 
-For the `"share"` mode, `share_factor` must also be set. This must be a decimal value greater than or equal to 0 and less than or equal to 1. -In the auto share mode, the remaining hours after the fee are shared between the destination addresses as a whole, -and the change address. Amongst the destination addresses, the shared hours are distributed proportionally. - -When using the `auto` `"share"` `mode`, if there are remaining coin hours as change, -but no coins are available as change from the wallet (which are needed to retain the coin hours as change), -the `share_factor` will switch to `1.0` so that extra coin hours are distributed to the outputs -instead of being burned as an additional fee. -For the `manual` mode, if there are leftover coin hours but no coins to make change with, -the leftover coin hours will be burned in addition to the required fee. - -All objects in `to` must be unique; a single transaction cannot create multiple outputs with the same `address`, `coins` and `hours`. 
- -For example, this is a valid value for `to`, if `hours_selection.type` is `"manual"`: - -```json -[{ - "address": "fznGedkc87a8SsW94dBowEv6J7zLGAjT17", - "coins": "1.2", - "hours": "1" -}, { - "address": "fznGedkc87a8SsW94dBowEv6J7zLGAjT17", - "coins": "1.2", - "hours": "2" -}] -``` - -But this is an invalid value for `to`, if `hours_selection.type` is `"manual"`: - -```json -[{ - "address": "fznGedkc87a8SsW94dBowEv6J7zLGAjT17", - "coins": "1.2", - "hours": "1" -}, { - "address": "fznGedkc87a8SsW94dBowEv6J7zLGAjT17", - "coins": "1.2", - "hours": "1" -}] -``` - -And this is a valid value for `to`, if `hours_selection.type` is `"auto"`: - -```json -[{ - "address": "fznGedkc87a8SsW94dBowEv6J7zLGAjT17", - "coins": "1.2" -}, { - "address": "fznGedkc87a8SsW94dBowEv6J7zLGAjT17", - "coins": "1.201" -}] -``` - -But this is an invalid value for `to`, if `hours_selection.type` is `"auto"`: - -```json -[{ - "address": "fznGedkc87a8SsW94dBowEv6J7zLGAjT17", - "coins": "1.2" -}, { - "address": "fznGedkc87a8SsW94dBowEv6J7zLGAjT17", - "coins": "1.2" -}] -``` - -To control which addresses to spend from, specify `addresses`. -A subset of the unspent outputs associated with these addresses will be chosen for spending, -based upon an internal selection algorithm. - -To control which unspent outputs to spend from, specify `unspents`. -A subset of these unspent outputs will be chosen for spending, -based upon an internal selection algorithm. - -`addresses` and `unspents` cannot be combined. - -If neither `addresses` nor `unspents` are specified, -then all outputs associated with all addresses in the wallet may be chosen from to spend with. - -`change_address` is optional. -If set, it is not required to be an address in the wallet. -If not set, it will default to one of the addresses associated with the unspent outputs being spent in the transaction. - -`ignore_unconfirmed` is optional and defaults to `false`. 
-When `false`, the API will return an error if any of the unspent outputs -associated with the wallet addresses or the wallet outputs appear as spent in -a transaction in the unconfirmed transaction pool. -When `true`, the API will ignore unspent outputs that appear as spent in -a transaction in the unconfirmed transaction pool when building the transaction, -but not return an error. - -`unsigned` is optional and defaults to `false`. -When `true`, the transaction will not be signed by the wallet. -An unsigned transaction will be returned. -The `"txid"` value of the `"transaction"` object will need to be updated -after signing the transaction. -The unsigned `encoded_transaction` can be sent to `POST /api/v2/wallet/transaction/sign` for signing. - -Example: - -```sh -curl -X POST http://127.0.0.1:6420/api/v1/wallet/transaction -H 'content-type: application/json' -d '{ - "hours_selection": { - "type": "auto", - "mode": "share", - "share_factor": "0.5" - }, - "wallet_id": "foo.wlt", - "change_address": "uvcDrKc8rHTjxLrU4mPN56Hyh2tR6RvCvw", - "to": [{ - "address": "2Huip6Eizrq1uWYqfQEh4ymibLysJmXnWXS", - "coins": "1" - }, { - "address": "2Huip6Eizrq1uWYqfQEh4ymibLysJmXnWXS", - "coins": "8.99" - }] -}' -``` - -Result: - -```json -{ - "transaction": { - "length": 257, - "type": 0, - "txid": "5f060918d2da468a784ff440fbba80674c829caca355a27ae067f465d0a5e43e", - "inner_hash": "97dd062820314c46da0fc18c8c6c10bfab1d5da80c30adc79bbe72e90bfab11d", - "fee": "437691", - "sigs": [ - "6120acebfa61ba4d3970dec5665c3c952374f5d9bbf327674a0b240de62b202b319f61182e2a262b2ca5ef5a592084299504689db5448cd64c04b1f26eb01d9100" - ], - "inputs": [ - { - "uxid": "7068bfd0f0f914ea3682d0e5cb3231b75cb9f0776bf9013d79b998d96c93ce2b", - "address": "g4XmbmVyDnkswsQTSqYRsyoh1YqydDX1wp", - "coins": "10.000000", - "hours": "853667", - "calculated_hours": "862290", - "timestamp": 1524242826, - "block": 23575, - "txid": "ccfbb51e94cb58a619a82502bc986fb028f632df299ce189c2ff2932574a03e7" - } - ], - "outputs": [ - 
{ - "uxid": "519c069a0593e179f226e87b528f60aea72826ec7f99d51279dd8854889ed7e2", - "address": "2Huip6Eizrq1uWYqfQEh4ymibLysJmXnWXS", - "coins": "1.000000", - "hours": "22253" - }, - { - "uxid": "4e4e41996297511a40e2ef0046bd6b7118a8362c1f4f09a288c5c3ea2f4dfb85", - "address": "2Huip6Eizrq1uWYqfQEh4ymibLysJmXnWXS", - "coins": "8.990000", - "hours": "200046" - }, - { - "uxid": "fdeb3f77408f39e50a8e3b6803ce2347aac2eba8118c494424f9fa4959bab507", - "address": "uvcDrKc8rHTjxLrU4mPN56Hyh2tR6RvCvw", - "coins": "0.010000", - "hours": "222300" - } - ] - }, - "encoded_transaction": "010100000097dd062820314c46da0fc18c8c6c10bfab1d5da80c30adc79bbe72e90bfab11d010000006120acebfa61ba4d3970dec5665c3c952374f5d9bbf327674a0b240de62b202b319f61182e2a262b2ca5ef5a592084299504689db5448cd64c04b1f26eb01d9100010000007068bfd0f0f914ea3682d0e5cb3231b75cb9f0776bf9013d79b998d96c93ce2b0300000000ba2a4ac4a5ce4e03a82d2240ae3661419f7081b140420f0000000000ed5600000000000000ba2a4ac4a5ce4e03a82d2240ae3661419f7081b1302d8900000000006e0d0300000000000083874350e65e84aa6e06192408951d7aaac7809e10270000000000005c64030000000000" -} -``` - - -### Sign transaction - -API sets: `WALLET` - -``` -URI: /api/v2/wallet/transaction/sign -Method: POST -Content-Type: application/json -Args: JSON body, see examples -``` - -Signs an unsigned transaction, returning the transaction with updated signatures and the encoded, serialized transaction. -The transaction must spendable to be signed. If the inputs of the transaction are not in the unspent pool, signing fails. - -Specific transaction inputs may be signed by specifying `sign_indexes`, otherwise all transaction inputs will be signed. -`sign_indexes` is an array of positional indexes for the transaction's signature array. Indexes start at 0. - -Signing an input that is already signed in the transaction is an error. - -The `encoded_transaction` can be provided to `POST /api/v1/injectTransaction` to broadcast it to the network, if the transaction is fully signed. 
- -Example: - -```sh -curl -X POST http://127.0.0.1:6420/api/v2/wallet/transaction/sign -H 'content-type: application/json' -d '{ - "wallet_id": "foo.wlt", - "password": "password", - "encoded_transaction": "010100000097dd062820314c46da0fc18c8c6c10bfab1d5da80c30adc79bbe72e90bfab11d010000006120acebfa61ba4d3970dec5665c3c952374f5d9bbf327674a0b240de62b202b319f61182e2a262b2ca5ef5a592084299504689db5448cd64c04b1f26eb01d9100010000007068bfd0f0f914ea3682d0e5cb3231b75cb9f0776bf9013d79b998d96c93ce2b0300000000ba2a4ac4a5ce4e03a82d2240ae3661419f7081b140420f0000000000ed5600000000000000ba2a4ac4a5ce4e03a82d2240ae3661419f7081b1302d8900000000006e0d0300000000000083874350e65e84aa6e06192408951d7aaac7809e10270000000000005c64030000000000" -}' -``` - -Example with `sign_indexes`: - -```sh -curl -X POST http://127.0.0.1:6420/api/v2/wallet/transaction/sign -H 'content-type: application/json' -d '{ - "wallet_id": "foo.wlt", - "password": "password", - "sign_indexes": [1, 2], - "encoded_transaction": "010100000097dd062820314c46da0fc18c8c6c10bfab1d5da80c30adc79bbe72e90bfab11d010000006120acebfa61ba4d3970dec5665c3c952374f5d9bbf327674a0b240de62b202b319f61182e2a262b2ca5ef5a592084299504689db5448cd64c04b1f26eb01d9100010000007068bfd0f0f914ea3682d0e5cb3231b75cb9f0776bf9013d79b998d96c93ce2b0300000000ba2a4ac4a5ce4e03a82d2240ae3661419f7081b140420f0000000000ed5600000000000000ba2a4ac4a5ce4e03a82d2240ae3661419f7081b1302d8900000000006e0d0300000000000083874350e65e84aa6e06192408951d7aaac7809e10270000000000005c64030000000000" -}' -``` - -Result: - -```json -{ - "data": { - "transaction": { - "length": 257, - "type": 0, - "txid": "5f060918d2da468a784ff440fbba80674c829caca355a27ae067f465d0a5e43e", - "inner_hash": "97dd062820314c46da0fc18c8c6c10bfab1d5da80c30adc79bbe72e90bfab11d", - "fee": "437691", - "sigs": [ - "6120acebfa61ba4d3970dec5665c3c952374f5d9bbf327674a0b240de62b202b319f61182e2a262b2ca5ef5a592084299504689db5448cd64c04b1f26eb01d9100" - ], - "inputs": [ - { - "uxid": 
"7068bfd0f0f914ea3682d0e5cb3231b75cb9f0776bf9013d79b998d96c93ce2b", - "address": "g4XmbmVyDnkswsQTSqYRsyoh1YqydDX1wp", - "coins": "10.000000", - "hours": "853667", - "calculated_hours": "862290", - "timestamp": 1524242826, - "block": 23575, - "txid": "ccfbb51e94cb58a619a82502bc986fb028f632df299ce189c2ff2932574a03e7" - } - ], - "outputs": [ - { - "uxid": "519c069a0593e179f226e87b528f60aea72826ec7f99d51279dd8854889ed7e2", - "address": "2Huip6Eizrq1uWYqfQEh4ymibLysJmXnWXS", - "coins": "1.000000", - "hours": "22253" - }, - { - "uxid": "4e4e41996297511a40e2ef0046bd6b7118a8362c1f4f09a288c5c3ea2f4dfb85", - "address": "2Huip6Eizrq1uWYqfQEh4ymibLysJmXnWXS", - "coins": "8.990000", - "hours": "200046" - }, - { - "uxid": "fdeb3f77408f39e50a8e3b6803ce2347aac2eba8118c494424f9fa4959bab507", - "address": "uvcDrKc8rHTjxLrU4mPN56Hyh2tR6RvCvw", - "coins": "0.010000", - "hours": "222300" - } - ] - }, - "encoded_transaction": "010100000097dd062820314c46da0fc18c8c6c10bfab1d5da80c30adc79bbe72e90bfab11d010000006120acebfa61ba4d3970dec5665c3c952374f5d9bbf327674a0b240de62b202b319f61182e2a262b2ca5ef5a592084299504689db5448cd64c04b1f26eb01d9100010000007068bfd0f0f914ea3682d0e5cb3231b75cb9f0776bf9013d79b998d96c93ce2b0300000000ba2a4ac4a5ce4e03a82d2240ae3661419f7081b140420f0000000000ed5600000000000000ba2a4ac4a5ce4e03a82d2240ae3661419f7081b1302d8900000000006e0d0300000000000083874350e65e84aa6e06192408951d7aaac7809e10270000000000005c64030000000000" - } -} -``` - - -### Unload wallet - -API sets: `WALLET` - -``` -URI: /api/v1/wallet/unload -Method: POST -Args: - id: wallet file name -``` - -Example: - -```sh -curl -X POST http://127.0.0.1:6420/api/v1/wallet/unload \ - -H 'Content-Type: x-www-form-urlencoded' \ - -d 'id=2017_05_09_d554.wlt' -``` - -### Encrypt wallet - -API sets: `WALLET` - -``` -URI: /api/v1/wallet/encrypt -Method: POST -Args: - id: wallet id - password: wallet password -``` - -Example: - -```sh -curl -X POST http://127.0.0.1:6420/api/v1/wallet/encrypt \ - -H 'Content-Type: 
application/x-www-form-urlencoded' \ - -d 'id=test.wlt' \ - -d 'password=$password' -``` - -Result: - -```json -{ - "meta": { - "coin": "skycoin", - "filename": "test.wlt", - "label": "test", - "type": "deterministic", - "version": "0.2", - "crypto_type": "scrypt-chacha20poly1305", - "timestamp": 1521083044, - "encrypted": true - }, - "entries": [ - { - "address": "fznGedkc87a8SsW94dBowEv6J7zLGAjT17", - "public_key": "0316ff74a8004adf9c71fa99808ee34c3505ee73c5cf82aa301d17817da3ca33b1" - } - ] -} -``` - -### Decrypt wallet - -API sets: `WALLET` - -``` -URI: /api/v1/wallet/decrypt -Method: POST -Args: - id: wallet id - password: wallet password -``` - -Example: - -```sh -curl -X POST http://127.0.0.1:6420/api/v1/wallet/decrypt \ - -H 'Content-Type: application/x-www-form-urlencoded' \ - -d 'id=test.wlt' \ - -d 'password=$password' -``` - -Result: - -```json -{ - "meta": { - "coin": "skycoin", - "filename": "test.wlt", - "label": "test", - "type": "deterministic", - "version": "0.2", - "crypto_type": "", - "timestamp": 1521083044, - "encrypted": false - }, - "entries": [ - { - "address": "fznGedkc87a8SsW94dBowEv6J7zLGAjT17", - "public_key": "032a1218cbafc8a93233f363c19c667cf02d42fa5a8a07c0d6feca79e82d72753d" - } - ] -} -``` - -### Get wallet seed - -API sets: `INSECURE_WALLET_SEED` - -``` -URI: /api/v1/wallet/seed -Method: POST -Args: - id: wallet id - password: wallet password -``` - -This endpoint only works for encrypted wallets. -If the wallet is unencrypted, the seed will not be returned. - -If the wallet is of type `bip44` and has a seed passphrase, it will be included -in the response. Otherwise, the seed passphrase will be missing. 
- -Example: - -```sh -curl -X POST http://127.0.0.1:6420/api/v1/wallet/seed \ - -H 'Content-type: application/x-www-form-urlencoded' \ - -d 'id=test.wlt' \ - -d 'password=$password' -``` - -Result: - -```json -{ - "seed": "your wallet seed", - "seed_passphrase": "your optional wallet seed-passphrase" -} -``` - -### Recover encrypted wallet by seed - -API sets: `INSECURE_WALLET_SEED` - -``` -URI: /api/v2/wallet/recover -Method: POST -Args: - id: wallet id - seed: wallet seed - seed passphrase: wallet seed passphrase (bip44 wallets only) - password: [optional] password to encrypt the recovered wallet with -``` - -Recovers an encrypted wallet by providing the wallet seed and optional seed passphrase. - -Example: - -```sh -curl -X POST http://127.0.0.1/api/v2/wallet/recover - -H 'Content-Type: application/json' \ - -d '{"id":"2017_11_25_e5fb.wlt","seed":"your wallet seed","seed_passphrase":"your seed passphrase"}' -``` - -Result: - -```json -{ - "data": { - "meta": { - "coin": "skycoin", - "filename": "2017_11_25_e5fb.wlt", - "label": "test", - "type": "deterministic", - "version": "0.2", - "crypto_type": "", - "timestamp": 1511640884, - "encrypted": false - }, - "entries": [ - { - "address": "2HTnQe3ZupkG6k8S81brNC3JycGV2Em71F2", - "public_key": "0316ff74a8004adf9c71fa99808ee34c3505ee73c5cf82aa301d17817da3ca33b1" - }, - { - "address": "SMnCGfpt7zVXm8BkRSFMLeMRA6LUu3Ewne", - "public_key": "02539528248a1a2c4f0b73233491103ca83b40249dac3ae9eee9a10b9f9debd9a3" - } - ] - } -} -``` - -## Key-value storage APIs - -Endpoints interact with the key-value storage. Each request require the `type` argument to -be passed. - -Currently allowed types: - -* `txid`: used for transaction notes -* `client`: used for generic client data, instead of using e.g. 
LocalStorage in the browser - -### Get all storage values - -API sets: `STORAGE` - -``` -Method: GET -URI: /api/v2/data -Args: - type: storage type - key [string]: key of the specific value to get -``` - -If key is passed, only the specific value will be returned from the storage. -Otherwise the whole dataset will be returned. - -If the key does not exist, a 404 error is returned. - -Example: - -```sh -curl http://127.0.0.1:6420/api/v2/data?type=txid -``` - -Result: - -```json -{ - "data": { - "key1": "value", - "key2": "{\"key\":\"value\"}", - } -} -``` - -Example (key): - -```sh -curl http://127.0.0.1:6420/api/v2/data?type=txid&key=key1 -``` - -Result: - -```json -{ - "data": "value" -} -``` - -### Add value to storage - -API sets: `STORAGE` - -``` -Method: POST -URI: /api/v2/data -Args: JSON Body, see examples -``` - -Sets one or more values by key. Existing values will be overwritten. - -Example request body: - -```json -{ - "type": "txid", - "key": "key1", - "val": "val1" -} -``` - -Example: - -```sh -curl -X POST http://127.0.0.1:6420/api/v2/data -H 'Content-Type: application/json' -d '{ - "type": "txid", - "key": "key1", - "val": "val1" -}' -``` - -Result: - -```json -{} -``` - -### Remove value from storage - -API sets: `STORAGE` - -``` -Method: DELETE -URI: /api/v2/data -Args: - type: storage type - key: key of the specific value to get -``` - -Deletes a value by key. Returns a 404 error if the key does not exist. - -Example: - -```sh -curl http://127.0.0.1:6420/api/v2/data?type=txid&key=key1 -``` - -Result: - -```json -{} -``` - -## Transaction APIs - -### Get unconfirmed transactions - -API sets: `READ` - -``` -URI: /api/v1/pendingTxs -Method: GET -Args: - verbose [bool] include verbose transaction input data -``` - -If verbose, the transaction inputs include the owner address, coins, hours and calculated hours. -The hours are the original hours the output was created with. 
The calculated hours are based upon
- "uxid": "27e7bc48ceca4d47e806a87100a8a98592b7618702e1cd479bf4c190462a6d09", - "owner": "23MjQipM9YsPKkYiuaBmf6m7fD54wrzHxpd", - "coins": "7815.000000", - "hours": 279089, - "calculated_hours": 13101146 - } - ], - "outputs": [ - { - "uxid": "4b4ebf62acbaece798d0dfc92fcea85768a2874dad8a9b8eb5454288deae468c", - "dst": "23MjQipM9YsPKkYiuaBmf6m7fD54wrzHxpd", - "coins": "586.000000", - "hours": 122591 - }, - { - "uxid": "781cfb134d5fdad48f3c937dfcfc66b169a305adc8abdfe92a0ec94c564913f2", - "dst": "2ehrG4VKLRuvBNWYz3U7tS75QWvzyWR89Dg", - "coins": "7229.000000", - "hours": 122591 - } - ] - }, - "received": "2018-06-20T14:14:52.415702671+08:00", - "checked": "2018-08-26T19:47:45.328131142+08:00", - "announced": "2018-08-26T19:51:47.356083569+08:00", - "is_valid": true - } -] -``` - -### Create transaction from unspent outputs or addresses - -API sets: `TXN` - -``` -URI: /api/v2/transaction -Method: POST -Args: JSON Body, see examples -``` - -Creates an unsigned transaction from a pool of unspent outputs or addresses. -`addresses` and `unspents` cannot be combined, and at least one must have elements in their array. - -The transaction will choose unspent outputs from the provided pool to construct a transaction -that satisfies the requested outputs in the `to` field. Not all unspent outputs will necessarily be used -in the transaction. - -If `ignore_unconfirmed` is true, the transaction will not use any outputs which are being spent by an unconfirmed transaction. -If `ignore_unconfirmed` is false, the endpoint returns an error if any unspent output is spent by an unconfirmed transaction. - -`change_address` is optional. If not provided then the change address will -default to an address from one of the -unspent outputs being spent as a transaction input. - -Refer to `POST /api/v1/wallet/transaction` for creating a transaction from a specific wallet. 
- -`POST /api/v2/wallet/transaction/sign` can be used to sign the transaction with a wallet, -but `POST /api/v1/wallet/transaction` can create and sign a transaction with a wallet in one operation instead. -Otherwise, sign the transaction separately from the API. - -The transaction must be fully valid and spendable (except for the lack of signatures) or else an error is returned. - -Example request body with manual hours selection type, spending from specific addresses, ignoring unconfirmed unspent outputs: - -```json -{ - "hours_selection": { - "type": "manual" - }, - "addresses": ["g4XmbmVyDnkswsQTSqYRsyoh1YqydDX1wp", "2Huip6Eizrq1uWYqfQEh4ymibLysJmXnWXS"], - "change_address": "nu7eSpT6hr5P21uzw7bnbxm83B6ywSjHdq", - "to": [{ - "address": "fznGedkc87a8SsW94dBowEv6J7zLGAjT17", - "coins": "1.032", - "hours": "7" - }, { - "address": "7cpQ7t3PZZXvjTst8G7Uvs7XH4LeM8fBPD", - "coins": "99.2", - "hours": "0" - }], - "ignore_unconfirmed": false -} -``` - -Example request body with auto hours selection type, spending specific uxouts: - -```json -{ - "hours_selection": { - "type": "auto", - "mode": "share", - "share_factor": "0.5" - }, - "unspents": ["519c069a0593e179f226e87b528f60aea72826ec7f99d51279dd8854889ed7e2", "4e4e41996297511a40e2ef0046bd6b7118a8362c1f4f09a288c5c3ea2f4dfb85"], - "change_address": "uvcDrKc8rHTjxLrU4mPN56Hyh2tR6RvCvw", - "to": [{ - "address": "2Huip6Eizrq1uWYqfQEh4ymibLysJmXnWXS", - "coins": "1" - }, { - "address": "2Huip6Eizrq1uWYqfQEh4ymibLysJmXnWXS", - "coins": "8.99" - }] -} -``` - -Example: - -```sh -curl -X POST http://127.0.0.1:6420/api/v2/transaction -H 'Content-Type: application/json' -d '{ - "hours_selection": { - "type": "auto", - "mode": "share", - "share_factor": "0.5" - }, - "addresses": ["g4XmbmVyDnkswsQTSqYRsyoh1YqydDX1wp"], - "change_address": "uvcDrKc8rHTjxLrU4mPN56Hyh2tR6RvCvw", - "to": [{ - "address": "2Huip6Eizrq1uWYqfQEh4ymibLysJmXnWXS", - "coins": "1" - }, { - "address": "2Huip6Eizrq1uWYqfQEh4ymibLysJmXnWXS", - "coins": "8.99" - 
}] -}' -``` - -Result: - -```json -{ - "data": { - "transaction": { - "length": 257, - "type": 0, - "txid": "5f060918d2da468a784ff440fbba80674c829caca355a27ae067f465d0a5e43e", - "inner_hash": "97dd062820314c46da0fc18c8c6c10bfab1d5da80c30adc79bbe72e90bfab11d", - "fee": "437691", - "sigs": [ - "6120acebfa61ba4d3970dec5665c3c952374f5d9bbf327674a0b240de62b202b319f61182e2a262b2ca5ef5a592084299504689db5448cd64c04b1f26eb01d9100" - ], - "inputs": [ - { - "uxid": "7068bfd0f0f914ea3682d0e5cb3231b75cb9f0776bf9013d79b998d96c93ce2b", - "address": "g4XmbmVyDnkswsQTSqYRsyoh1YqydDX1wp", - "coins": "10.000000", - "hours": "853667", - "calculated_hours": "862290", - "timestamp": 1524242826, - "block": 23575, - "txid": "ccfbb51e94cb58a619a82502bc986fb028f632df299ce189c2ff2932574a03e7" - } - ], - "outputs": [ - { - "uxid": "519c069a0593e179f226e87b528f60aea72826ec7f99d51279dd8854889ed7e2", - "address": "2Huip6Eizrq1uWYqfQEh4ymibLysJmXnWXS", - "coins": "1.000000", - "hours": "22253" - }, - { - "uxid": "4e4e41996297511a40e2ef0046bd6b7118a8362c1f4f09a288c5c3ea2f4dfb85", - "address": "2Huip6Eizrq1uWYqfQEh4ymibLysJmXnWXS", - "coins": "8.990000", - "hours": "200046" - }, - { - "uxid": "fdeb3f77408f39e50a8e3b6803ce2347aac2eba8118c494424f9fa4959bab507", - "address": "uvcDrKc8rHTjxLrU4mPN56Hyh2tR6RvCvw", - "coins": "0.010000", - "hours": "222300" - } - ] - }, - "encoded_transaction": "010100000097dd062820314c46da0fc18c8c6c10bfab1d5da80c30adc79bbe72e90bfab11d010000006120acebfa61ba4d3970dec5665c3c952374f5d9bbf327674a0b240de62b202b319f61182e2a262b2ca5ef5a592084299504689db5448cd64c04b1f26eb01d9100010000007068bfd0f0f914ea3682d0e5cb3231b75cb9f0776bf9013d79b998d96c93ce2b0300000000ba2a4ac4a5ce4e03a82d2240ae3661419f7081b140420f0000000000ed5600000000000000ba2a4ac4a5ce4e03a82d2240ae3661419f7081b1302d8900000000006e0d0300000000000083874350e65e84aa6e06192408951d7aaac7809e10270000000000005c64030000000000" - } -} -``` - -### Get transaction info by id - -API sets: `READ` - -``` -URI: /api/v1/transaction 
-Method: GET -Args: - txid: transaction id - verbose: [bool] include verbose transaction input data - encoded: [bool] return the transaction as hex-encoded serialized bytes -``` - -If verbose, the transaction inputs include the owner address, coins, hours and calculated hours. -The hours are the original hours the output was created with. -If the transaction is confirmed, the calculated hours are the hours the transaction had in the block in which it was executed.. -If the transaction is unconfirmed, the calculated hours are based upon the current system time, and are approximately -equal to the hours the output would have if it become confirmed immediately. - -Example: - -```sh -curl http://127.0.0.1:6420/api/v1/transaction?txid=a6446654829a4a844add9f181949d12f8291fdd2c0fcb22200361e90e814e2d3 -``` - -Result: - -```json -{ - "status": { - "confirmed": true, - "unconfirmed": false, - "height": 1, - "block_seq": 1178 - }, - "txn": { - "length": 183, - "type": 0, - "txid": "a6446654829a4a844add9f181949d12f8291fdd2c0fcb22200361e90e814e2d3", - "inner_hash": "075f255d42ddd2fb228fe488b8b468526810db7a144aeed1fd091e3fd404626e", - "timestamp": 1494275231, - "sigs": [ - "9b6fae9a70a42464dda089c943fafbf7bae8b8402e6bf4e4077553206eebc2ed4f7630bb1bd92505131cca5bf8bd82a44477ef53058e1995411bdbf1f5dfad1f00" - ], - "inputs": [ - "5287f390628909dd8c25fad0feb37859c0c1ddcf90da0c040c837c89fefd9191" - ], - "outputs": [ - { - "uxid": "70fa9dfb887f9ef55beb4e960f60e4703c56f98201acecf2cad729f5d7e84690", - "dst": "7cpQ7t3PZZXvjTst8G7Uvs7XH4LeM8fBPD", - "coins": "8.000000", - "hours": 931 - } - ] - } -} -``` - -Example (verbose): - -```sh -curl http://127.0.0.1:6420/api/v1/transaction?txid=a6446654829a4a844add9f181949d12f8291fdd2c0fcb22200361e90e814e2d3&verbose=1 -``` - -Result: - -```json -{ - "status": { - "confirmed": true, - "unconfirmed": false, - "height": 53107, - "block_seq": 1178 - }, - "time": 1494275231, - "txn": { - "status": { - "confirmed": true, - "unconfirmed": false, - 
"height": 53107, - "block_seq": 1178 - }, - "timestamp": 1494275231, - "length": 183, - "type": 0, - "txid": "a6446654829a4a844add9f181949d12f8291fdd2c0fcb22200361e90e814e2d3", - "inner_hash": "075f255d42ddd2fb228fe488b8b468526810db7a144aeed1fd091e3fd404626e", - "fee": 6523, - "sigs": [ - "9b6fae9a70a42464dda089c943fafbf7bae8b8402e6bf4e4077553206eebc2ed4f7630bb1bd92505131cca5bf8bd82a44477ef53058e1995411bdbf1f5dfad1f00" - ], - "inputs": [ - { - "uxid": "5287f390628909dd8c25fad0feb37859c0c1ddcf90da0c040c837c89fefd9191", - "owner": "2K6NuLBBapWndAssUtkxKfCtyjDQDHrEhhT", - "coins": "8.000000", - "hours": 7454, - "calculated_hours": 7454 - } - ], - "outputs": [ - { - "uxid": "70fa9dfb887f9ef55beb4e960f60e4703c56f98201acecf2cad729f5d7e84690", - "dst": "7cpQ7t3PZZXvjTst8G7Uvs7XH4LeM8fBPD", - "coins": "8.000000", - "hours": 931 - } - ] - } -} -``` - -Example (encoded): - -```sh -curl http://127.0.0.1:6420/api/v1/transaction?txid=a6446654829a4a844add9f181949d12f8291fdd2c0fcb22200361e90e814e2d3&encoded=1 -``` - -Result: - -```json -{ - "status": { - "confirmed": true, - "unconfirmed": false, - "height": 53267, - "block_seq": 1178 - }, - "time": 1494275231, - "encoded_transaction": "b700000000075f255d42ddd2fb228fe488b8b468526810db7a144aeed1fd091e3fd404626e010000009b6fae9a70a42464dda089c943fafbf7bae8b8402e6bf4e4077553206eebc2ed4f7630bb1bd92505131cca5bf8bd82a44477ef53058e1995411bdbf1f5dfad1f00010000005287f390628909dd8c25fad0feb37859c0c1ddcf90da0c040c837c89fefd9191010000000010722f061aa262381dce35193d43eceb112373c300127a0000000000a303000000000000" -} -``` - -### Get raw transaction by id - -API sets: `READ` - -``` -URI: /api/v1/rawtx -Method: GET -``` - -Example: - -```sh -curl http://127.0.0.1:6420/api/v1/rawtx?txid=a6446654829a4a844add9f181949d12f8291fdd2c0fcb22200361e90e814e2d3 -``` - -Result: - -```json 
-"b700000000075f255d42ddd2fb228fe488b8b468526810db7a144aeed1fd091e3fd404626e010000009b6fae9a70a42464dda089c943fafbf7bae8b8402e6bf4e4077553206eebc2ed4f7630bb1bd92505131cca5bf8bd82a44477ef53058e1995411bdbf1f5dfad1f00010000005287f390628909dd8c25fad0feb37859c0c1ddcf90da0c040c837c89fefd9191010000000010722f061aa262381dce35193d43eceb112373c300127a0000000000a303000000000000" -``` - -### Inject raw transaction - -API sets: `TXN`, `WALLET` - -``` -URI: /api/v1/injectTransaction -Method: POST -Content-Type: application/json -Body: {"rawtx": "hex-encoded serialized transaction string"} -Errors: - 400 - Bad input - 500 - Other - 503 - Network unavailable (transaction failed to broadcast) -``` - -Broadcasts a hex-encoded, serialized transaction to the network. -Transactions are serialized with the `encoder` package. -See [`coin.Transaction.Serialize`](https://godoc.org/github.com/SkycoinProject/skycoin/src/coin#Transaction.Serialize). - -If there are no available connections, the API responds with a `503 Service Unavailable` error. - -Note that in some circumstances the transaction can fail to broadcast but this endpoint will still return successfully. -This can happen if the node's network has recently become unavailable but its connections have not timed out yet. - -The recommended way to handle transaction injections from your system is to inject the transaction then wait -for the transaction to be confirmed. Transactions typically confirm quickly, so if it is not confirmed after some -timeout such as 1 minute, the application can continue to retry the broadcast with `/api/v1/resendUnconfirmedTxns`. -Broadcast only fails without an error if the node's peers disconnect or timeout after the broadcast was initiated, -which is a network problem that may recover, so rebroadcasting with `/api/v1/resendUnconfirmedTxns` will resolve it, -or else the network is unavailable. 
- -`POST /api/v1/transaction` accepts an `ignore_unconfirmed` option to allow transactions to be created without waiting -for unconfirmed transactions to confirm. - -Any unconfirmed transactions found in the database at startup are resent. So, if the network broadcast failed but -the transaction was saved to the database, when you restart the client, it will resend. - -It is safe to retry the injection after a `503` failure. - -To disable the network broadcast, add `"no_broadcast": true` to the JSON request body. -The transaction will be added to the local transaction pool but not be broadcast at the same time. -Note that transactions from the pool are periodically announced, so this transaction will still -be announced eventually if the daemon continues running with connectivity for enough time. - -Example: - -```sh -curl -X POST http://127.0.0.1:6420/api/v1/injectTransaction -H 'content-type: application/json' -d '{ - "rawtx":"dc0000000008b507528697b11340f5a3fcccbff031c487bad59d26c2bdaea0cd8a0199a1720100000017f36c9d8bce784df96a2d6848f1b7a8f5c890986846b7c53489eb310090b91143c98fd233830055b5959f60030b3ca08d95f22f6b96ba8c20e548d62b342b5e0001000000ec9cf2f6052bab24ec57847c72cfb377c06958a9e04a077d07b6dd5bf23ec106020000000072116096fe2207d857d18565e848b403807cd825c044840300000000330100000000000000575e472f8c5295e8fa644e9bc5e06ec10351c65f40420f000000000066020000000000000" -}' -``` - -Result: - -```json -"3615fc23cc12a5cb9190878a2151d1cf54129ff0cd90e5fc4f4e7debebad6868" -``` - -Example, without broadcasting the transaction: - -```sh -curl -X POST http://127.0.0.1:6420/api/v1/injectTransaction -H 'content-type: application/json' -d '{ - 
"rawtx":"dc0000000008b507528697b11340f5a3fcccbff031c487bad59d26c2bdaea0cd8a0199a1720100000017f36c9d8bce784df96a2d6848f1b7a8f5c890986846b7c53489eb310090b91143c98fd233830055b5959f60030b3ca08d95f22f6b96ba8c20e548d62b342b5e0001000000ec9cf2f6052bab24ec57847c72cfb377c06958a9e04a077d07b6dd5bf23ec106020000000072116096fe2207d857d18565e848b403807cd825c044840300000000330100000000000000575e472f8c5295e8fa644e9bc5e06ec10351c65f40420f000000000066020000000000000", - "no_broadcast": true -}' -``` - -Result: - -```json -"3615fc23cc12a5cb9190878a2151d1cf54129ff0cd90e5fc4f4e7debebad6868" -``` - - -### Get transactions for addresses - -API sets: `READ` - -``` -URI: /api/v1/transactions -Method: GET, POST -Args: - addrs: Comma separated addresses [optional, returns all transactions if no address is provided] - confirmed: Whether the transactions should be confirmed [optional, must be 0 or 1; if not provided, returns all] - verbose: [bool] include verbose transaction input data -``` - -If verbose, the transaction inputs include the owner address, coins, hours and calculated hours. -The hours are the original hours the output was created with. -If the transaction is confirmed, the calculated hours are the hours the transaction had in the block in which it was executed. -If the transaction is unconfirmed, the calculated hours are based upon the current system time, and are approximately -equal to the hours the output would have if it become confirmed immediately. - -The `"time"` field at the top level of each object in the response array indicates either the confirmed timestamp of a confirmed -transaction or the last received timestamp of an unconfirmed transaction. - -The `POST` method can be used if many addresses need to be queried. 
- -To get confirmed transactions for one or more addresses: - -```sh -curl http://127.0.0.1:6420/api/v1/transactions?addrs=7cpQ7t3PZZXvjTst8G7Uvs7XH4LeM8fBPD,6dkVxyKFbFKg9Vdg6HPg1UANLByYRqkrdY&confirmed=1 -``` - -To get unconfirmed transactions for one or more addresses: - -```sh -curl http://127.0.0.1:6420/api/v1/transactions?addrs=7cpQ7t3PZZXvjTst8G7Uvs7XH4LeM8fBPD,6dkVxyKFbFKg9Vdg6HPg1UANLByYRqkrdY&confirmed=0 -``` - -To get both confirmed and unconfirmed transactions for one or more addresses: - -```sh -curl http://127.0.0.1:6420/api/v1/transactions?addrs=7cpQ7t3PZZXvjTst8G7Uvs7XH4LeM8fBPD,6dkVxyKFbFKg9Vdg6HPg1UANLByYRqkrdY -``` - -Result: - -```json -[ - { - "status": { - "confirmed": true, - "unconfirmed": false, - "height": 10492, - "block_seq": 1177 - }, - "time": 1494275011, - "txn": { - "length": 317, - "type": 0, - "txid": "b09cd3a8baef6a449848f50a1b97943006ca92747d4e485d0647a3ea74550eca", - "inner_hash": "2cb370051c92521a04ba5357e229d8ffa90d9d1741ea223b44dd60a1483ee0e5", - "timestamp": 1494275011, - "sigs": [ - "a55155ca15f73f0762f79c15917949a936658cff668647daf82a174eed95703a02622881f9cf6c7495536676f931b2d91d389a9e7b034232b3a1519c8da6fb8800", - "cc7d7cbd6f31adabd9bde2c0deaa9277c0f3cf807a4ec97e11872817091dc3705841a6adb74acb625ee20ab6d3525350b8663566003276073d94c3bfe22fe48e01" - ], - "inputs": [ - "4f4b0078a9cd19b3395e54b3f42af6adc997f77f04e0ca54016c67c4f2384e3c", - "36f4871646b6564b2f1ab72bd768a67579a1e0242bc68bcbcf1779bc75b3dddd" - ], - "outputs": [ - { - "uxid": "5287f390628909dd8c25fad0feb37859c0c1ddcf90da0c040c837c89fefd9191", - "dst": "2K6NuLBBapWndAssUtkxKfCtyjDQDHrEhhT", - "coins": "8.000000", - "hours": 7454 - }, - { - "uxid": "a1268e9bd2033b49b44afa765d20876467254f51e5515626780467267a65c563", - "dst": "7cpQ7t3PZZXvjTst8G7Uvs7XH4LeM8fBPD", - "coins": "1.000000", - "hours": 7454 - } - ] - } - }, - { - "status": { - "confirmed": true, - "unconfirmed": false, - "height": 10491, - "block_seq": 1178 - }, - "time": 1494275231, - "txn": { - "length": 
183, - "type": 0, - "txid": "a6446654829a4a844add9f181949d12f8291fdd2c0fcb22200361e90e814e2d3", - "inner_hash": "075f255d42ddd2fb228fe488b8b468526810db7a144aeed1fd091e3fd404626e", - "timestamp": 1494275231, - "sigs": [ - "9b6fae9a70a42464dda089c943fafbf7bae8b8402e6bf4e4077553206eebc2ed4f7630bb1bd92505131cca5bf8bd82a44477ef53058e1995411bdbf1f5dfad1f00" - ], - "inputs": [ - "5287f390628909dd8c25fad0feb37859c0c1ddcf90da0c040c837c89fefd9191" - ], - "outputs": [ - { - "uxid": "70fa9dfb887f9ef55beb4e960f60e4703c56f98201acecf2cad729f5d7e84690", - "dst": "7cpQ7t3PZZXvjTst8G7Uvs7XH4LeM8fBPD", - "coins": "8.000000", - "hours": 931 - } - ] - } - }, - { - "status": { - "confirmed": true, - "unconfirmed": false, - "height": 8730, - "block_seq": 2939 - }, - "time": 1505205561, - "txn": { - "length": 474, - "type": 0, - "txid": "b45e571988bc07bd0b623c999655fa878fb9bdd24c8cd24fde179bf4b26ae7b7", - "inner_hash": "393804eca6afadc05db80cfb9e1024ef5761231c70705c406301bad33161f8bf", - "timestamp": 1505205561, - "sigs": [ - "fb9dd021cdff51ab56891cca0fd1600877f6e0691136dbe3f8324c3f4f7ee5bc624ded4954c1d70d8cb776ce3454d8f195bbb252e48b0f2cd388f5a733697d9301", - "0639e61ba87a61f10b0e0114008ddd4e7090d9397370de28da27b7852b231b8e66c36d10fe3424c9b23a41266fd2c50f169233009713b332d6a48ce9c128ccef01", - "055afe17222aab66c48c8e08e03a406bf2b8719f5221ec54c8e678078033bcd56b66bbc46a866f2be5e3f9ca454e3fbc2021630d0430b72e18c24d02df03c03100", - "8cf56fb96e11d49bea728cb35ba5953fbc640817fac01b82e62a959ef8d4c3105298f2a6ea127bb07552abd905a667b58f6c79717e9f05258079de08d91f10a500" - ], - "inputs": [ - "dea9266aa7b687f4391e92f04436407c51a834274a5a33bc8bcf3189732e82e3", - "e811bdce52ddac0d952d2546fdca8d1ac4e0ad32f170d3d73b724fb37c802652", - "e94ccdbc07cc62fb41140b4daa7969438c749837c0808acf20dde113bdf1876b", - "534afc496a7aee2ec55c71d85abfc27f35d16c56506f663b24d8ee4815583b6e" - ], - "outputs": [ - { - "uxid": "732e129fc1630aba3f06d833ce0a7a25f05dae5df3e7a135b5f82e99222e8c28", - "dst": 
"2hAjmdPP9R3um9JhKczeVdJUVugY6SPJBDm", - "coins": "6.000000", - "hours": 204 - } - ] - } - } -] -``` - -Example (verbose): - -```sh -curl http://127.0.0.1:6420/api/v1/transactions?addrs=7cpQ7t3PZZXvjTst8G7Uvs7XH4LeM8fBPD,2K6NuLBBapWndAssUtkxKfCtyjDQDHrEhhT&verbose=1 -``` - -Result: - -```json -[ - { - "status": { - "confirmed": true, - "unconfirmed": false, - "height": 53207, - "block_seq": 1131 - }, - "time": 1494192581, - "txn": { - "timestamp": 1494192581, - "length": 220, - "type": 0, - "txid": "b785dc57a9b53dbf0390213480dd9dffc32356fb79b82fa622a2607894bfab98", - "inner_hash": "5279e944502d6bdaff25af7b7fb7c6e503c62ae70a01084031e1cb563afe2e2c", - "fee": 317021, - "sigs": [ - "f8cd208acc6674de79fa1192e5177325cda871c26707242dbd6fb9df245bf34b2fbc3dfe32e61eefa0543934556cb073bdeab6e555d7bfe6b7220f1ae575613d01" - ], - "inputs": [ - { - "uxid": "004d3ef83af64c542701b923ec5c727734de9d88837bcea37a2927a569dd3f0d", - "owner": "MbZvwdXHnMUZ1eUFxNDqxPEEHkkffKgq2F", - "coins": "904.000000", - "hours": 14, - "calculated_hours": 422693 - } - ], - "outputs": [ - { - "uxid": "4047c5cbbaf0ed927caa1391d5456d58e0857ef188f2eec8ee987a30b3f53aed", - "dst": "MbZvwdXHnMUZ1eUFxNDqxPEEHkkffKgq2F", - "coins": "903.000000", - "hours": 52836 - }, - { - "uxid": "4f4b0078a9cd19b3395e54b3f42af6adc997f77f04e0ca54016c67c4f2384e3c", - "dst": "2K6NuLBBapWndAssUtkxKfCtyjDQDHrEhhT", - "coins": "1.000000", - "hours": 52836 - } - ] - } - }, - { - "status": { - "confirmed": true, - "unconfirmed": false, - "height": 53206, - "block_seq": 1132 - }, - "time": 1494192731, - "txn": { - "timestamp": 1494192731, - "length": 220, - "type": 0, - "txid": "dc39c39bea82e5b56a1a77ce8485d9b06fda694e04ddf63af1273351c87dd077", - "inner_hash": "b8f36a57212a68f4b3ecf9d699f286dafcdb624551e07c35a983832ffd37326c", - "fee": 39628, - "sigs": [ - "1005adda19efe31b5cd85caa85b4a42599263f649103fd26761f2261f3ee00460d9693c45406d782b0e04613aa412a5ef6b275c2a665a9f13167912da91777a700" - ], - "inputs": [ - { - "uxid": 
"4047c5cbbaf0ed927caa1391d5456d58e0857ef188f2eec8ee987a30b3f53aed", - "owner": "MbZvwdXHnMUZ1eUFxNDqxPEEHkkffKgq2F", - "coins": "903.000000", - "hours": 52836, - "calculated_hours": 52836 - } - ], - "outputs": [ - { - "uxid": "a6662ea872dabee2fae96a4561d67728d16cb3da372d4b7bbc74a18f2bc3fecf", - "dst": "MbZvwdXHnMUZ1eUFxNDqxPEEHkkffKgq2F", - "coins": "895.000000", - "hours": 6604 - }, - { - "uxid": "36f4871646b6564b2f1ab72bd768a67579a1e0242bc68bcbcf1779bc75b3dddd", - "dst": "2K6NuLBBapWndAssUtkxKfCtyjDQDHrEhhT", - "coins": "8.000000", - "hours": 6604 - } - ] - } - }, - { - "status": { - "confirmed": true, - "unconfirmed": false, - "height": 53161, - "block_seq": 1177 - }, - "time": 1494275011, - "txn": { - "timestamp": 1494275011, - "length": 317, - "type": 0, - "txid": "b09cd3a8baef6a449848f50a1b97943006ca92747d4e485d0647a3ea74550eca", - "inner_hash": "2cb370051c92521a04ba5357e229d8ffa90d9d1741ea223b44dd60a1483ee0e5", - "fee": 44726, - "sigs": [ - "a55155ca15f73f0762f79c15917949a936658cff668647daf82a174eed95703a02622881f9cf6c7495536676f931b2d91d389a9e7b034232b3a1519c8da6fb8800", - "cc7d7cbd6f31adabd9bde2c0deaa9277c0f3cf807a4ec97e11872817091dc3705841a6adb74acb625ee20ab6d3525350b8663566003276073d94c3bfe22fe48e01" - ], - "inputs": [ - { - "uxid": "4f4b0078a9cd19b3395e54b3f42af6adc997f77f04e0ca54016c67c4f2384e3c", - "owner": "2K6NuLBBapWndAssUtkxKfCtyjDQDHrEhhT", - "coins": "1.000000", - "hours": 52836, - "calculated_hours": 52857 - }, - { - "uxid": "36f4871646b6564b2f1ab72bd768a67579a1e0242bc68bcbcf1779bc75b3dddd", - "owner": "2K6NuLBBapWndAssUtkxKfCtyjDQDHrEhhT", - "coins": "8.000000", - "hours": 6604, - "calculated_hours": 6777 - } - ], - "outputs": [ - { - "uxid": "5287f390628909dd8c25fad0feb37859c0c1ddcf90da0c040c837c89fefd9191", - "dst": "2K6NuLBBapWndAssUtkxKfCtyjDQDHrEhhT", - "coins": "8.000000", - "hours": 7454 - }, - { - "uxid": "a1268e9bd2033b49b44afa765d20876467254f51e5515626780467267a65c563", - "dst": "7cpQ7t3PZZXvjTst8G7Uvs7XH4LeM8fBPD", - "coins": 
"1.000000", - "hours": 7454 - } - ] - } - }, - { - "status": { - "confirmed": true, - "unconfirmed": false, - "height": 53160, - "block_seq": 1178 - }, - "time": 1494275231, - "txn": { - "timestamp": 1494275231, - "length": 183, - "type": 0, - "txid": "a6446654829a4a844add9f181949d12f8291fdd2c0fcb22200361e90e814e2d3", - "inner_hash": "075f255d42ddd2fb228fe488b8b468526810db7a144aeed1fd091e3fd404626e", - "fee": 6523, - "sigs": [ - "9b6fae9a70a42464dda089c943fafbf7bae8b8402e6bf4e4077553206eebc2ed4f7630bb1bd92505131cca5bf8bd82a44477ef53058e1995411bdbf1f5dfad1f00" - ], - "inputs": [ - { - "uxid": "5287f390628909dd8c25fad0feb37859c0c1ddcf90da0c040c837c89fefd9191", - "owner": "2K6NuLBBapWndAssUtkxKfCtyjDQDHrEhhT", - "coins": "8.000000", - "hours": 7454, - "calculated_hours": 7454 - } - ], - "outputs": [ - { - "uxid": "70fa9dfb887f9ef55beb4e960f60e4703c56f98201acecf2cad729f5d7e84690", - "dst": "7cpQ7t3PZZXvjTst8G7Uvs7XH4LeM8fBPD", - "coins": "8.000000", - "hours": 931 - } - ] - } - } -] -``` - -### Resend unconfirmed transactions - -API sets: `TXN`, `WALLET` - -``` -URI: /api/v1/resendUnconfirmedTxns -Method: POST -``` - -Example: - -```sh -curl -X POST 'http://127.0.0.1:6420/api/v1/resendUnconfirmedTxns' -``` - -Result: - -```json -{ - "txids":[ - "b45e571988bc07bd0b623c999655fa878fb9bdd24c8cd24fde179bf4b26ae7b7", - "a6446654829a4a844add9f181949d12f8291fdd2c0fcb22200361e90e814e2d3" - ] -} -``` - -### Verify encoded transaction - -API sets: `READ` - -``` -URI: /api/v2/transaction/verify -Method: POST -Content-Type: application/json -Args: {"unsigned": false, "encoded_transaction": ""} -``` - -If the transaction can be parsed, passes validation and has not been spent, returns `200 OK` with the decoded transaction data, -and the `"confirmed"` field will be `false`. - -If the transaction is structurally valid, passes validation but has been spent, returns `422 Unprocessable Entity` with the decoded transaction data, -and the `"confirmed"` field will be `true`. 
The `"error"` `"message"` will be `"transaction has been spent"`. - -`"unsigned"` may be specified in the request. If `true`, the transaction will report an error if it is fully signed. -It will not report an error if the transaction is missing at least one signature, and the remainder of the transaction is valid. -In the response, if the transaction has any unsigned inputs, the `"unsigned"` field will be `true`. -If the request did not specify `"unsigned"` or specified it as `false`, the response will return an error for an unsigned transaction. - -If the transaction can be parsed but does not pass validation, returns `422 Unprocessable Entity` with the decoded transaction data. -The `"error"` object will be included in the response with the reason why. -If the transaction's inputs cannot be found in the unspent pool nor in the historical archive of unspents, -the transaction `"inputs"` metadata will be absent and only `"uxid"` will be present. - -If the transaction cannot be parsed, returns `400 Bad Request` and the `"error"` object will be included in the response with the reason why. 
- -Example of valid transaction that has not been spent: - -```sh -curl -X POST -H 'Content-Type: application/json' http://127.0.0.1:6420/api/v2/transaction/verify \ --d '{"encoded_transaction": "dc000000004fd024d60939fede67065b36adcaaeaf70fc009e3a5bbb8358940ccc8bbb2074010000007635ce932158ec06d94138adc9c9b19113fa4c2279002e6b13dcd0b65e0359f247e8666aa64d7a55378b9cc9983e252f5877a7cb2671c3568ec36579f8df1581000100000019ad5059a7fffc0369fc24b31db7e92e12a4ee2c134fb00d336d7495dec7354d02000000003f0555073e17ea6e45283f0f1115b520d0698d03a086010000000000010000000000000000b90dc595d102c48d3281b47428670210415f585200f22b0000000000ff01000000000000"}' -``` - -Result: - -```json -{ - "data": { - "unsigned": false, - "confirmed": false, - "transaction": { - "length": 220, - "type": 0, - "txid": "82b5fcb182e3d70c285e59332af6b02bf11d8acc0b1407d7d82b82e9eeed94c0", - "inner_hash": "4fd024d60939fede67065b36adcaaeaf70fc009e3a5bbb8358940ccc8bbb2074", - "fee": "1042", - "sigs": [ - "7635ce932158ec06d94138adc9c9b19113fa4c2279002e6b13dcd0b65e0359f247e8666aa64d7a55378b9cc9983e252f5877a7cb2671c3568ec36579f8df158100" - ], - "inputs": [ - { - "uxid": "19ad5059a7fffc0369fc24b31db7e92e12a4ee2c134fb00d336d7495dec7354d", - "address": "2HTnQe3ZupkG6k8S81brNC3JycGV2Em71F2", - "coins": "2.980000", - "hours": "985", - "calculated_hours": "1554", - "timestamp": 1527080354, - "block": 30074, - "txid": "94204347ef52d90b3c5d6c31a3fced56ae3f74fd8f1f5576931aeb60847f0e59" - } - ], - "outputs": [ - { - "uxid": "b0911a5fc4dfe4524cdb82f6db9c705f4849af42fcd487a3c4abb2d17573d234", - "address": "SMnCGfpt7zVXm8BkRSFMLeMRA6LUu3Ewne", - "coins": "0.100000", - "hours": "1" - }, - { - "uxid": "a492e6b85a434866be40da7e287bfcf14efce9803ff2fcd9d865c4046e81712a", - "address": "2HTnQe3ZupkG6k8S81brNC3JycGV2Em71F2", - "coins": "2.880000", - "hours": "511" - } - ] - } - } -} -``` - -Example of valid transaction that *has* been spent: - -```sh -curl -X POST -H 'Content-Type: application/json' 
http://127.0.0.1:6420/api/v2/transaction/verify \ --d '{"encoded_transaction": "dc000000004fd024d60939fede67065b36adcaaeaf70fc009e3a5bbb8358940ccc8bbb2074010000007635ce932158ec06d94138adc9c9b19113fa4c2279002e6b13dcd0b65e0359f247e8666aa64d7a55378b9cc9983e252f5877a7cb2671c3568ec36579f8df1581000100000019ad5059a7fffc0369fc24b31db7e92e12a4ee2c134fb00d336d7495dec7354d02000000003f0555073e17ea6e45283f0f1115b520d0698d03a086010000000000010000000000000000b90dc595d102c48d3281b47428670210415f585200f22b0000000000ff01000000000000"}' -``` - -Result: - -```json -{ - "error": { - "message": "transaction has been spent", - "code": 422 - }, - "data": { - "unsigned": false, - "confirmed": true, - "transaction": { - "length": 220, - "type": 0, - "txid": "82b5fcb182e3d70c285e59332af6b02bf11d8acc0b1407d7d82b82e9eeed94c0", - "inner_hash": "4fd024d60939fede67065b36adcaaeaf70fc009e3a5bbb8358940ccc8bbb2074", - "fee": "1042", - "sigs": [ - "7635ce932158ec06d94138adc9c9b19113fa4c2279002e6b13dcd0b65e0359f247e8666aa64d7a55378b9cc9983e252f5877a7cb2671c3568ec36579f8df158100" - ], - "inputs": [ - { - "uxid": "19ad5059a7fffc0369fc24b31db7e92e12a4ee2c134fb00d336d7495dec7354d", - "address": "2HTnQe3ZupkG6k8S81brNC3JycGV2Em71F2", - "coins": "2.980000", - "hours": "985", - "calculated_hours": "1554", - "timestamp": 1527080354, - "block": 30074, - "txid": "94204347ef52d90b3c5d6c31a3fced56ae3f74fd8f1f5576931aeb60847f0e59" - } - ], - "outputs": [ - { - "uxid": "b0911a5fc4dfe4524cdb82f6db9c705f4849af42fcd487a3c4abb2d17573d234", - "address": "SMnCGfpt7zVXm8BkRSFMLeMRA6LUu3Ewne", - "coins": "0.100000", - "hours": "1" - }, - { - "uxid": "a492e6b85a434866be40da7e287bfcf14efce9803ff2fcd9d865c4046e81712a", - "address": "2HTnQe3ZupkG6k8S81brNC3JycGV2Em71F2", - "coins": "2.880000", - "hours": "511" - } - ] - } - } -} -``` - -Example of valid, unsigned transaction that has not been spent, with the unsigned parameter set to true in the request: - -```sh -curl -X POST -H 'Content-Type: application/json' 
http://127.0.0.1:6420/api/v2/transaction/verify \ --d '{"unsigned": true, "encoded_transaction": "dc000000004fd024d60939fede67065b36adcaaeaf70fc009e3a5bbb8358940ccc8bbb2074010000007635ce932158ec06d94138adc9c9b19113fa4c2279002e6b13dcd0b65e0359f247e8666aa64d7a55378b9cc9983e252f5877a7cb2671c3568ec36579f8df1581000100000019ad5059a7fffc0369fc24b31db7e92e12a4ee2c134fb00d336d7495dec7354d02000000003f0555073e17ea6e45283f0f1115b520d0698d03a086010000000000010000000000000000b90dc595d102c48d3281b47428670210415f585200f22b0000000000ff01000000000000"}' -``` - -Result: - -```json -{ - "data": { - "unsigned": true, - "confirmed": false, - "transaction": { - "length": 220, - "type": 0, - "txid": "82b5fcb182e3d70c285e59332af6b02bf11d8acc0b1407d7d82b82e9eeed94c0", - "inner_hash": "4fd024d60939fede67065b36adcaaeaf70fc009e3a5bbb8358940ccc8bbb2074", - "fee": "1042", - "sigs": [ - "0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000" - ], - "inputs": [ - { - "uxid": "19ad5059a7fffc0369fc24b31db7e92e12a4ee2c134fb00d336d7495dec7354d", - "address": "2HTnQe3ZupkG6k8S81brNC3JycGV2Em71F2", - "coins": "2.980000", - "hours": "985", - "calculated_hours": "1554", - "timestamp": 1527080354, - "block": 30074, - "txid": "94204347ef52d90b3c5d6c31a3fced56ae3f74fd8f1f5576931aeb60847f0e59" - } - ], - "outputs": [ - { - "uxid": "b0911a5fc4dfe4524cdb82f6db9c705f4849af42fcd487a3c4abb2d17573d234", - "address": "SMnCGfpt7zVXm8BkRSFMLeMRA6LUu3Ewne", - "coins": "0.100000", - "hours": "1" - }, - { - "uxid": "a492e6b85a434866be40da7e287bfcf14efce9803ff2fcd9d865c4046e81712a", - "address": "2HTnQe3ZupkG6k8S81brNC3JycGV2Em71F2", - "coins": "2.880000", - "hours": "511" - } - ] - } - } -} -``` - - -## Block APIs - -### Get blockchain metadata - -API sets: `STATUS`, `READ` - -``` -URI: /api/v1/blockchain/metadata -Method: GET -``` - -Example: - -```sh -curl http://127.0.0.1:6420/api/v1/blockchain/metadata -``` - -Result: - -```json -{ - "head": 
{ - "seq": 58894, - "block_hash": "3961bea8c4ab45d658ae42effd4caf36b81709dc52a5708fdd4c8eb1b199a1f6", - "previous_block_hash": "8eca94e7597b87c8587286b66a6b409f6b4bf288a381a56d7fde3594e319c38a", - "timestamp": 1537581604, - "fee": 485194, - "version": 0, - "tx_body_hash": "c03c0dd28841d5aa87ce4e692ec8adde923799146ec5504e17ac0c95036362dd", - "ux_hash": "f7d30ecb49f132283862ad58f691e8747894c9fc241cb3a864fc15bd3e2c83d3" - }, - "unspents": 38171, - "unconfirmed": 1 -} -``` - -### Get blockchain progress - -API sets: `STATUS`, `READ` - -``` -URI: /api/v1/blockchain/progress -Method: GET -``` - -Example: - -```sh -curl http://127.0.0.1:6420/api/v1/blockchain/progress -``` - -Result: - -```json -{ - "current": 2760, - "highest": 2760, - "peers": [ - { - "address": "35.157.164.126:6000", - "height": 2760 - }, - { - "address": "63.142.253.76:6000", - "height": 2760 - } - ] -} -``` - -### Get block by hash or seq - -API sets: `READ` - -``` -URI: /api/v1/block -Method: GET -Args: - hash: get block by hash - seq: get block by sequence number - verbose: [bool] return verbose transaction input data -``` - -If verbose, the transaction inputs include the owner address, coins, hours and calculated hours. -The hours are the original hours the output was created with. -The calculated hours are the hours the transaction had in the block in which it was executed. 
- -Example: - -```sh -curl http://127.0.0.1:6420/api/v1/block?hash=6eafd13ab6823223b714246b32c984b56e0043412950faf17defdbb2cbf3fe30 -``` - -or - -```sh -curl http://127.0.0.1:6420/api/v1/block?seq=2760 -``` - -Result: - -```json -{ - "header": { - "seq": 2760, - "block_hash": "6eafd13ab6823223b714246b32c984b56e0043412950faf17defdbb2cbf3fe30", - "previous_block_hash": "eaccd527ef263573c29000dbfb3c782ee175153c63f42abb671588b7071e877f", - "timestamp": 1504220821, - "fee": 196130, - "version": 0, - "tx_body_hash": "825ae95b81ae0ce037cdf9f1cda138bac3f3ed41c51b09e0befb71848e0f3bfd", - "ux_hash": "366af6bd80cfce79ce1ef63b45fb3ae8d9a6afc92a8590f14e18220884bd9d22" - }, - "body": { - "txns": [ - { - "length": 220, - "type": 0, - "txid": "825ae95b81ae0ce037cdf9f1cda138bac3f3ed41c51b09e0befb71848e0f3bfd", - "inner_hash": "312e5dd55e06be5f9a0ee43a00d447f2fea47a7f1fb9669ecb477d2768ab04fd", - "sigs": [ - "f0d0eb337e3440af6e8f0c105037ec205f36c83770d26a9e3a0fb4b7ec1a2be64764f4e31cbaf6629933c971613d10d58e6acb592704a7d511f19836441f09fb00" - ], - "inputs": [ - "e7594379c9a6bb111205cbfa6fac908cac1d136e207960eb0429f15fde09ac8c" - ], - "outputs": [ - { - "uxid": "840d0ee483c1dc085e6518e1928c68979af61188b809fc74da9fca982e6a61ba", - "dst": "2GgFvqoyk9RjwVzj8tqfcXVXB4orBwoc9qv", - "coins": "998.000000", - "hours": 35390 - }, - { - "uxid": "38177c437ff42f29dc8d682e2f7c278f2203b6b02f42b1a88f9eb6c2392a7f70", - "dst": "2YHKP9yH7baLvkum3U6HCBiJjnAUCLS5Z9U", - "coins": "2.000000", - "hours": 70780 - } - ] - } - ] - }, - "size": 220 -} -``` - -Example (verbose): - -```sh -curl http://127.0.0.1:6420/api/v1/block?hash=6eafd13ab6823223b714246b32c984b56e0043412950faf17defdbb2cbf3fe30&verbose=1 -``` - -or - -```sh -curl http://127.0.0.1:6420/api/v1/block?seq=2760&verbose=1 -``` - -Result: - -```json -{ - "header": { - "seq": 2760, - "block_hash": "6eafd13ab6823223b714246b32c984b56e0043412950faf17defdbb2cbf3fe30", - "previous_block_hash": 
"eaccd527ef263573c29000dbfb3c782ee175153c63f42abb671588b7071e877f", - "timestamp": 1504220821, - "fee": 196130, - "version": 0, - "tx_body_hash": "825ae95b81ae0ce037cdf9f1cda138bac3f3ed41c51b09e0befb71848e0f3bfd", - "ux_hash": "366af6bd80cfce79ce1ef63b45fb3ae8d9a6afc92a8590f14e18220884bd9d22" - }, - "body": { - "txns": [ - { - "length": 220, - "type": 0, - "txid": "825ae95b81ae0ce037cdf9f1cda138bac3f3ed41c51b09e0befb71848e0f3bfd", - "inner_hash": "312e5dd55e06be5f9a0ee43a00d447f2fea47a7f1fb9669ecb477d2768ab04fd", - "fee": 196130, - "sigs": [ - "f0d0eb337e3440af6e8f0c105037ec205f36c83770d26a9e3a0fb4b7ec1a2be64764f4e31cbaf6629933c971613d10d58e6acb592704a7d511f19836441f09fb00" - ], - "inputs": [ - { - "uxid": "e7594379c9a6bb111205cbfa6fac908cac1d136e207960eb0429f15fde09ac8c", - "owner": "kbbzyrUKNVJsJDGFLAjVT5neVcx5SQjFx5", - "coins": "1000.000000", - "hours": 283123, - "calculated_hours": 302300 - } - ], - "outputs": [ - { - "uxid": "840d0ee483c1dc085e6518e1928c68979af61188b809fc74da9fca982e6a61ba", - "dst": "2GgFvqoyk9RjwVzj8tqfcXVXB4orBwoc9qv", - "coins": "998.000000", - "hours": 35390 - }, - { - "uxid": "38177c437ff42f29dc8d682e2f7c278f2203b6b02f42b1a88f9eb6c2392a7f70", - "dst": "2YHKP9yH7baLvkum3U6HCBiJjnAUCLS5Z9U", - "coins": "2.000000", - "hours": 70780 - } - ] - } - ] - }, - "size": 220 -} -``` - - -### Get blocks in specific range - -API sets: `READ` - -``` -URI: /api/v1/blocks -Method: GET, POST -Args: - start: start seq - end: end seq - seqs: comma-separated list of block seqs - verbose: [bool] return verbose transaction input data -``` - -This endpoint has two modes: range and seqs. -The `seqs` parameter cannot be combined with `start`, `end`. - -If `start` and/or `end` are provided, returns blocks in the range [`start`, `end`]. -Both start and end sequences are included in the returned array of blocks. - -If `seqs` is provided, returns blocks matching the specified sequences. -`seqs` must not contain any duplicate values. 
-If a block does not exist for any of the given sequence numbers, a `404` error is returned. - -If verbose, the transaction inputs include the owner address, coins, hours and calculated hours. -The hours are the original hours the output was created with. -The calculated hours are the hours the transaction had in the block in which it was executed. - -Example: - -```sh -curl http://127.0.0.1:6420/api/v1/blocks?start=101&end=102 -``` - -Result: - -```json -{ - "blocks": [ - { - "header": { - "seq": 101, - "block_hash": "8156057fc823589288f66c91edb60c11ff004465bcbe3a402b1328be7f0d6ce0", - "previous_block_hash": "725e76907998485d367a847b0fb49f08536c592247762279fcdbd9907fee5607", - "timestamp": 1429274666, - "fee": 720335, - "version": 0, - "tx_body_hash": "e8fe5290afba3933389fd5860dca2cbcc81821028be9c65d0bb7cf4e8d2c4c18", - "ux_hash": "348989599d30d3adfaaea98577963caa419ab0276279296e7d194a9cbb8cad04" - }, - "body": { - "txns": [ - { - "length": 183, - "type": 0, - "txid": "e8fe5290afba3933389fd5860dca2cbcc81821028be9c65d0bb7cf4e8d2c4c18", - "inner_hash": "45da31b68748eafdb08ef8bf1ebd1c07c0f14fcb0d66759d6cf4642adc956d06", - "sigs": [ - "09bce2c888ceceeb19999005cceb1efdee254cacb60edee118b51ffd740ff6503a8f9cbd60a16c7581bfd64f7529b649d0ecc8adbe913686da97fe8c6543189001" - ], - "inputs": [ - "6002f3afc7054c0e1161bcf2b4c1d4d1009440751bc1fe806e0eae33291399f4" - ], - "outputs": [ - { - "uxid": "f9bffdcbe252acb1c3a8a1e8c99829342ba1963860d5692eebaeb9bcfbcaf274", - "dst": "R6aHqKWSQfvpdo2fGSrq4F1RYXkBWR9HHJ", - "coins": "27000.000000", - "hours": 102905 - } - ] - } - ] - }, - "size": 183 - }, - { - "header": { - "seq": 102, - "block_hash": "311f4b83b4fdb9fd1d45648115969cf4b3aab2d1acad9e2aa735829245c525f3", - "previous_block_hash": "8156057fc823589288f66c91edb60c11ff004465bcbe3a402b1328be7f0d6ce0", - "timestamp": 1429274686, - "fee": 710046, - "version": 0, - "tx_body_hash": "7b13cab45b52dd2df291ec97cf000bf6ea1b647d6fdf0261a7527578d8b71b9d", - "ux_hash": 
"f7512b0718f392c7503f86e69175efd7835ea4c3dd3f71ff65c7ad8873a6a9e8" - }, - "body": { - "txns": [ - { - "length": 183, - "type": 0, - "txid": "7b13cab45b52dd2df291ec97cf000bf6ea1b647d6fdf0261a7527578d8b71b9d", - "inner_hash": "73bfee3a7c8d4f8a68657ebcaf69a59639f762bfc1a6f4468f3ca4724bc5b9f8", - "sigs": [ - "c4bcada17604a4a62baf50f929655027f2913639c27b773871f2135b72553c1959737e39d50e8349ffa5a7679de845aa6370999dbaaff4c7f9fd01260818683901" - ], - "inputs": [ - "4e75b4bced3404590d38ca06440c275d7fd86618a84966a0a1053fb18164e898" - ], - "outputs": [ - { - "uxid": "0a5603a1a5aeda575aa498cdaec5a4c893a28669dba84163eba2e90db3d9f39d", - "dst": "2JJ8pgq8EDAnrzf9xxBJapE2qkYLefW4uF8", - "coins": "26700.000000", - "hours": 101435 - } - ] - } - ] - }, - "size": 183 - } - ] -} -``` - -Example (verbose): - -```sh -curl http://127.0.0.1:6420/api/v1/blocks?start=101&end=102&verbose=1 -``` - -Result: - -```json -{ - "blocks": [ - { - "header": { - "seq": 101, - "block_hash": "8156057fc823589288f66c91edb60c11ff004465bcbe3a402b1328be7f0d6ce0", - "previous_block_hash": "725e76907998485d367a847b0fb49f08536c592247762279fcdbd9907fee5607", - "timestamp": 1429274666, - "fee": 720335, - "version": 0, - "tx_body_hash": "e8fe5290afba3933389fd5860dca2cbcc81821028be9c65d0bb7cf4e8d2c4c18", - "ux_hash": "348989599d30d3adfaaea98577963caa419ab0276279296e7d194a9cbb8cad04" - }, - "body": { - "txns": [ - { - "length": 183, - "type": 0, - "txid": "e8fe5290afba3933389fd5860dca2cbcc81821028be9c65d0bb7cf4e8d2c4c18", - "inner_hash": "45da31b68748eafdb08ef8bf1ebd1c07c0f14fcb0d66759d6cf4642adc956d06", - "fee": 720335, - "sigs": [ - "09bce2c888ceceeb19999005cceb1efdee254cacb60edee118b51ffd740ff6503a8f9cbd60a16c7581bfd64f7529b649d0ecc8adbe913686da97fe8c6543189001" - ], - "inputs": [ - { - "uxid": "6002f3afc7054c0e1161bcf2b4c1d4d1009440751bc1fe806e0eae33291399f4", - "owner": "2M1C5LSZ4Pvu5RWS44bCdY6or3R8grQw7ez", - "coins": "27000.000000", - "hours": 220, - "calculated_hours": 823240 - } - ], - "outputs": [ - { - 
"uxid": "f9bffdcbe252acb1c3a8a1e8c99829342ba1963860d5692eebaeb9bcfbcaf274", - "dst": "R6aHqKWSQfvpdo2fGSrq4F1RYXkBWR9HHJ", - "coins": "27000.000000", - "hours": 102905 - } - ] - } - ] - }, - "size": 183 - }, - { - "header": { - "seq": 102, - "block_hash": "311f4b83b4fdb9fd1d45648115969cf4b3aab2d1acad9e2aa735829245c525f3", - "previous_block_hash": "8156057fc823589288f66c91edb60c11ff004465bcbe3a402b1328be7f0d6ce0", - "timestamp": 1429274686, - "fee": 710046, - "version": 0, - "tx_body_hash": "7b13cab45b52dd2df291ec97cf000bf6ea1b647d6fdf0261a7527578d8b71b9d", - "ux_hash": "f7512b0718f392c7503f86e69175efd7835ea4c3dd3f71ff65c7ad8873a6a9e8" - }, - "body": { - "txns": [ - { - "length": 183, - "type": 0, - "txid": "7b13cab45b52dd2df291ec97cf000bf6ea1b647d6fdf0261a7527578d8b71b9d", - "inner_hash": "73bfee3a7c8d4f8a68657ebcaf69a59639f762bfc1a6f4468f3ca4724bc5b9f8", - "fee": 710046, - "sigs": [ - "c4bcada17604a4a62baf50f929655027f2913639c27b773871f2135b72553c1959737e39d50e8349ffa5a7679de845aa6370999dbaaff4c7f9fd01260818683901" - ], - "inputs": [ - { - "uxid": "4e75b4bced3404590d38ca06440c275d7fd86618a84966a0a1053fb18164e898", - "owner": "2JJ8pgq8EDAnrzf9xxBJapE2qkYLefW4uF8", - "coins": "26700.000000", - "hours": 54, - "calculated_hours": 811481 - } - ], - "outputs": [ - { - "uxid": "0a5603a1a5aeda575aa498cdaec5a4c893a28669dba84163eba2e90db3d9f39d", - "dst": "2JJ8pgq8EDAnrzf9xxBJapE2qkYLefW4uF8", - "coins": "26700.000000", - "hours": 101435 - } - ] - } - ] - }, - "size": 183 - } - ] -} -``` - -Example (seqs): - -```sh -curl http://127.0.0.1:6420/api/v1/blocks?seqs=3,5,7 -``` - -```json -{ - "blocks": [ - { - "header": { - "seq": 3, - "block_hash": "35c3ebbe6feaeeab27ac77c1712051787bdd4bbfb5cdcdebc81f8aac98a2f3f3", - "previous_block_hash": "01723bc4dc90f1cb857a94fe5e3bb50c02e6689fd998f8147c9cae07fbfa63af", - "timestamp": 1427927671, - "fee": 0, - "version": 0, - "tx_body_hash": "a6a709e9388a4d67a47d262b11da5f804eddd9d67acc4a3e450f7a567bdc1619" - }, - "body": { - "txns": [ - { - 
"length": 183, - "type": 0, - "txid": "a6a709e9388a4d67a47d262b11da5f804eddd9d67acc4a3e450f7a567bdc1619", - "inner_hash": "ea6adee3180c7f9d73d1e693822d5d1c2bba85067f89a873355bc771a078faa1", - "sigs": [ - "ce8fd47e2044ed17998f92621e90329f673a746c802d67f639ca083705dd199f6ee346781497b44132434922879244d819694b5903093f784570c55d293ab4af01" - ], - "inputs": [ - "af0b2c1cc882a56b6c0c06e99e7d2731413b988329a2c47a5c2aa8be589b707a" - ], - "outputs": [ - { - "uxid": "9eb7954461ba0256c9054fe38c00c66e60428dccf900a62e74b9fe39310aea13", - "dst": "R6aHqKWSQfvpdo2fGSrq4F1RYXkBWR9HHJ", - "coins": "10.000000", - "hours": 0 - } - ] - } - ] - }, - "size": 183 - }, - { - "header": { - "seq": 5, - "block_hash": "114fe60587a158428a47e0f9571d764f495912c299aa4e67fc88004cf21b0c24", - "previous_block_hash": "415e47348a1e642cb2e31d00ee500747d3aed0336aabfff7d783ed21465251c7", - "timestamp": 1428798821, - "fee": 2036, - "version": 0, - "tx_body_hash": "0579e7727627cd9815a8a8b5e1df86124f45a4132cc0dbd00d2f110e4f409b69" - }, - "body": { - "txns": [ - { - "length": 317, - "type": 0, - "txid": "0579e7727627cd9815a8a8b5e1df86124f45a4132cc0dbd00d2f110e4f409b69", - "inner_hash": "fe123ca954a82bb1ce2cc9ef9c56d6b649a4cbaf5b17394b0ffda651ed32327e", - "sigs": [ - "056ed0f74367fb1370d7e98689953983d9cf34eb6669854f1645c8a16c93d85075661e7d4f6df0ce5ca8eb9852eff6a12fbac2caafee03bb8c616f847c61416800", - "8aaa7f320a7b01169d3217a600100cb27c55e4ce56cd3455814f56d8e4e65be746e0e20e776087af6f19361f0b898edc2123a5f9bd35d24ef8b8669ca85b142601" - ], - "inputs": [ - "9eb7954461ba0256c9054fe38c00c66e60428dccf900a62e74b9fe39310aea13", - "706f82c481906108880d79372ab5c126d32ecc98cf3f7c74cf33f5fda49dcf70" - ], - "outputs": [ - { - "uxid": "fa2b598d233fe434f907f858d5de812eacf50c7b3fd152c77cd6e246fe356a9e", - "dst": "R6aHqKWSQfvpdo2fGSrq4F1RYXkBWR9HHJ", - "coins": "999890.000000", - "hours": 4073 - }, - { - "uxid": "dc63c680f408c4e646037966189383a5d50eda34e666c2a0c75c0c6bf13b71a1", - "dst": "2fGC7kwAM9yZyEF1QqBqp8uo9RUsF6ENGJF", - 
"coins": "100.000000", - "hours": 0 - } - ] - } - ] - }, - "size": 317 - }, - { - "header": { - "seq": 7, - "block_hash": "6cb71b57c998a5367101e01d48c097eccd4f5abf311c89bcca8ee213581f355f", - "previous_block_hash": "103949030e90fcebc5d8ca1c9c59f30a31aa71911401d22a2422e4571b035701", - "timestamp": 1428807671, - "fee": 0, - "version": 0, - "tx_body_hash": "f832428481690fa918d6d29946e191f2c8c89b2388a906e0c53dceee6070a24b" - }, - "body": { - "txns": [ - { - "length": 220, - "type": 0, - "txid": "f832428481690fa918d6d29946e191f2c8c89b2388a906e0c53dceee6070a24b", - "inner_hash": "f440c514779522a6387edda9b9d9835f00680fb314546efb7bc9762a17884156", - "sigs": [ - "8fe96f5502270e4efa962b2aef2b81795fe26a8f0c9a494e2ae9c7e624af455c49396270ae7a25b41d439fd56dea9d556a135129122de1b1274b1e2a5d75f2ea01" - ], - "inputs": [ - "8ff8a647e4542fab01e078ac467b2c9f2e5f7de55d77ec2711f8abc718e2c91b" - ], - "outputs": [ - { - "uxid": "17090c40091d009d6a684043d3be2e9cb1dc60a664a9c2e388af1f3a7345724b", - "dst": "2fGC7kwAM9yZyEF1QqBqp8uo9RUsF6ENGJF", - "coins": "90.000000", - "hours": 0 - }, - { - "uxid": "f9e7a412cdff80e95ddbe1d76fcc73f967cb99d383b0659e1355c8e623f02b62", - "dst": "WADSeEwEQVbtUy8CfcVimyxX1KjTRkvfoK", - "coins": "5.000000", - "hours": 0 - } - ] - } - ] - }, - "size": 220 - } - ] -} -``` - - -### Get last N blocks - -API sets: `READ` - -``` -URI: /api/v1/last_blocks -Method: GET -Args: - num: number of most recent blocks to return - verbose: [bool] return verbose transaction input data -``` - -If verbose, the transaction inputs include the owner address, coins, hours and calculated hours. -The hours are the original hours the output was created with. -The calculated hours are the hours the transaction had in the block in which it was executed. 
- -Example: - -```sh -curl http://127.0.0.1:6420/api/v1/last_blocks?num=2 -``` - -Result: - -```json -{ - "blocks": [ - { - "header": { - "seq": 58893, - "block_hash": "8eca94e7597b87c8587286b66a6b409f6b4bf288a381a56d7fde3594e319c38a", - "previous_block_hash": "1f042ed976c0cb150ea6b71c9608d65b519e4bc1c507eba9f1146e443a856c2d", - "timestamp": 1537581594, - "fee": 970389, - "version": 0, - "tx_body_hash": "1bea5cf1279693a0da24828c37b267c702007842b16ca5557ae497574d15aab7", - "ux_hash": "bf35652af199779bc40cbeb339e8a782ff70673b07779e5c5621d37dfe13b42b" - }, - "body": { - "txns": [ - { - "length": 377, - "type": 0, - "txid": "1bea5cf1279693a0da24828c37b267c702007842b16ca5557ae497574d15aab7", - "inner_hash": "a25232405bcef0c007bb2d7d3520f2a389e17e11125c252ab6c00168ec52c08d", - "sigs": [ - "2ff7390c3b66c6b0fbb2b4c59c8e218291d4cbb82a836bb577c7264677f4a8320f6f3ad72d804e3014728baa214c223ecced8725b64be96fe3b51332ad1eda4201", - "9e7c715f897b3c987c00ee8c6b14e4b90bb3e4e11d003b481f82042b1795b3c75eaa3d563cd0358cdabdab77cfdbead7323323cf73e781f9c1a8cf6d9b4f8ac100", - "5c9748314f2fe0cd442df5ebb8f211087111d22e9463355bf9eee583d44df1bd36addb510eb470cb5dafba0732615f8533072f80ae05fc728c91ce373ada1e7b00" - ], - "inputs": [ - "5f634c825b2a53103758024b3cb8578b17d56d422539e23c26b91ea397161703", - "16ac52084ffdac2e9169b9e057d44630dec23d18cfb90b9437d28220a3dc585d", - "8d3263890d32382e182b86f8772c7685a8f253ed475c05f7d530e9296f692bc9" - ], - "outputs": [ - { - "uxid": "fb8db3f78928aee3f5cbda8db7fc290df9e64414e8107872a1c5cf83e08e4df7", - "dst": "uvcDrKc8rHTjxLrU4mPN56Hyh2tR6RvCvw", - "coins": "26.913000", - "hours": 970388 - } - ] - } - ] - }, - "size": 377 - }, - { - "header": { - "seq": 58894, - "block_hash": "3961bea8c4ab45d658ae42effd4caf36b81709dc52a5708fdd4c8eb1b199a1f6", - "previous_block_hash": "8eca94e7597b87c8587286b66a6b409f6b4bf288a381a56d7fde3594e319c38a", - "timestamp": 1537581604, - "fee": 485194, - "version": 0, - "tx_body_hash": 
"c03c0dd28841d5aa87ce4e692ec8adde923799146ec5504e17ac0c95036362dd", - "ux_hash": "f7d30ecb49f132283862ad58f691e8747894c9fc241cb3a864fc15bd3e2c83d3" - }, - "body": { - "txns": [ - { - "length": 257, - "type": 0, - "txid": "c03c0dd28841d5aa87ce4e692ec8adde923799146ec5504e17ac0c95036362dd", - "inner_hash": "f7dbd09f7e9f65d87003984640f1977fb9eec95b07ef6275a1ec6261065e68d7", - "sigs": [ - "af5329e77213f34446a0ff41d249fd25bc1dae913390871df359b9bd587c95a10b625a74a3477a05cc7537cb532253b12c03349ead5be066b8e0009e79462b9501" - ], - "inputs": [ - "fb8db3f78928aee3f5cbda8db7fc290df9e64414e8107872a1c5cf83e08e4df7" - ], - "outputs": [ - { - "uxid": "235811602fc96cf8b5b031edb88ee1606830aa641c06e0986681552d8728ec07", - "dst": "2Huip6Eizrq1uWYqfQEh4ymibLysJmXnWXS", - "coins": "0.500000", - "hours": 1 - }, - { - "uxid": "873da4edc01c0b5184e1f26c4c3471dd407d08e9ab36b018ab93874e7392320b", - "dst": "2XBMMDMqTTYmqs2rfjEwYDz8ABd38y9B8r7", - "coins": "0.500000", - "hours": 1 - }, - { - "uxid": "42a6f0127f61e1d7bca8e9680027eddcecad772250c5634a03e56a8b1cf5a816", - "dst": "uvcDrKc8rHTjxLrU4mPN56Hyh2tR6RvCvw", - "coins": "25.913000", - "hours": 485192 - } - ] - } - ] - }, - "size": 257 - } - ] -} -``` - -Example (verbose): - -```sh -curl http://127.0.0.1:6420/api/v1/last_blocks?num=2&verbose=1 -``` - -Result: - -```json -{ - "blocks": [ - { - "header": { - "seq": 58893, - "block_hash": "8eca94e7597b87c8587286b66a6b409f6b4bf288a381a56d7fde3594e319c38a", - "previous_block_hash": "1f042ed976c0cb150ea6b71c9608d65b519e4bc1c507eba9f1146e443a856c2d", - "timestamp": 1537581594, - "fee": 970389, - "version": 0, - "tx_body_hash": "1bea5cf1279693a0da24828c37b267c702007842b16ca5557ae497574d15aab7", - "ux_hash": "bf35652af199779bc40cbeb339e8a782ff70673b07779e5c5621d37dfe13b42b" - }, - "body": { - "txns": [ - { - "length": 377, - "type": 0, - "txid": "1bea5cf1279693a0da24828c37b267c702007842b16ca5557ae497574d15aab7", - "inner_hash": "a25232405bcef0c007bb2d7d3520f2a389e17e11125c252ab6c00168ec52c08d", - 
"fee": 970389, - "sigs": [ - "2ff7390c3b66c6b0fbb2b4c59c8e218291d4cbb82a836bb577c7264677f4a8320f6f3ad72d804e3014728baa214c223ecced8725b64be96fe3b51332ad1eda4201", - "9e7c715f897b3c987c00ee8c6b14e4b90bb3e4e11d003b481f82042b1795b3c75eaa3d563cd0358cdabdab77cfdbead7323323cf73e781f9c1a8cf6d9b4f8ac100", - "5c9748314f2fe0cd442df5ebb8f211087111d22e9463355bf9eee583d44df1bd36addb510eb470cb5dafba0732615f8533072f80ae05fc728c91ce373ada1e7b00" - ], - "inputs": [ - { - "uxid": "5f634c825b2a53103758024b3cb8578b17d56d422539e23c26b91ea397161703", - "owner": "uvcDrKc8rHTjxLrU4mPN56Hyh2tR6RvCvw", - "coins": "25.910000", - "hours": 7745, - "calculated_hours": 17458 - }, - { - "uxid": "16ac52084ffdac2e9169b9e057d44630dec23d18cfb90b9437d28220a3dc585d", - "owner": "uvcDrKc8rHTjxLrU4mPN56Hyh2tR6RvCvw", - "coins": "1.000000", - "hours": 1915246, - "calculated_hours": 1915573 - }, - { - "uxid": "8d3263890d32382e182b86f8772c7685a8f253ed475c05f7d530e9296f692bc9", - "owner": "2Huip6Eizrq1uWYqfQEh4ymibLysJmXnWXS", - "coins": "0.003000", - "hours": 7745, - "calculated_hours": 7746 - } - ], - "outputs": [ - { - "uxid": "fb8db3f78928aee3f5cbda8db7fc290df9e64414e8107872a1c5cf83e08e4df7", - "dst": "uvcDrKc8rHTjxLrU4mPN56Hyh2tR6RvCvw", - "coins": "26.913000", - "hours": 970388 - } - ] - } - ] - }, - "size": 377 - }, - { - "header": { - "seq": 58894, - "block_hash": "3961bea8c4ab45d658ae42effd4caf36b81709dc52a5708fdd4c8eb1b199a1f6", - "previous_block_hash": "8eca94e7597b87c8587286b66a6b409f6b4bf288a381a56d7fde3594e319c38a", - "timestamp": 1537581604, - "fee": 485194, - "version": 0, - "tx_body_hash": "c03c0dd28841d5aa87ce4e692ec8adde923799146ec5504e17ac0c95036362dd", - "ux_hash": "f7d30ecb49f132283862ad58f691e8747894c9fc241cb3a864fc15bd3e2c83d3" - }, - "body": { - "txns": [ - { - "length": 257, - "type": 0, - "txid": "c03c0dd28841d5aa87ce4e692ec8adde923799146ec5504e17ac0c95036362dd", - "inner_hash": "f7dbd09f7e9f65d87003984640f1977fb9eec95b07ef6275a1ec6261065e68d7", - "fee": 485194, - "sigs": [ - 
"af5329e77213f34446a0ff41d249fd25bc1dae913390871df359b9bd587c95a10b625a74a3477a05cc7537cb532253b12c03349ead5be066b8e0009e79462b9501" - ], - "inputs": [ - { - "uxid": "fb8db3f78928aee3f5cbda8db7fc290df9e64414e8107872a1c5cf83e08e4df7", - "owner": "uvcDrKc8rHTjxLrU4mPN56Hyh2tR6RvCvw", - "coins": "26.913000", - "hours": 970388, - "calculated_hours": 970388 - } - ], - "outputs": [ - { - "uxid": "235811602fc96cf8b5b031edb88ee1606830aa641c06e0986681552d8728ec07", - "dst": "2Huip6Eizrq1uWYqfQEh4ymibLysJmXnWXS", - "coins": "0.500000", - "hours": 1 - }, - { - "uxid": "873da4edc01c0b5184e1f26c4c3471dd407d08e9ab36b018ab93874e7392320b", - "dst": "2XBMMDMqTTYmqs2rfjEwYDz8ABd38y9B8r7", - "coins": "0.500000", - "hours": 1 - }, - { - "uxid": "42a6f0127f61e1d7bca8e9680027eddcecad772250c5634a03e56a8b1cf5a816", - "dst": "uvcDrKc8rHTjxLrU4mPN56Hyh2tR6RvCvw", - "coins": "25.913000", - "hours": 485192 - } - ] - } - ] - }, - "size": 257 - } - ] -} -``` - -## Uxout APIs - -### Get uxout - -API sets: `READ` - -``` -URI: /api/v1/uxout -Method: GET -Args: - uxid -``` - -Example: - -```sh -curl http://127.0.0.1:6420/api/v1/uxout?uxid=8b64d9b058e10472b9457fd2d05a1d89cbbbd78ce1d97b16587d43379271bed1 -``` - -Result: - -```json -{ - "uxid": "8b64d9b058e10472b9457fd2d05a1d89cbbbd78ce1d97b16587d43379271bed1", - "time": 1502870712, - "src_block_seq": 2545, - "src_tx": "ded9e671510ab300a4ea3ee126fe8e2d50b995021e2db4589c6fb4ac000fe7bb", - "owner_address": "c9zyTYwgR4n89KyzknpmGaaDarUCPEs9mV", - "coins": 2000000, - "hours": 5039, - "spent_block_seq": 2556, - "spent_tx": "b51e1933f286c4f03d73e8966186bafb25f64053db8514327291e690ae8aafa5" -} -``` - -### Get historical unspent outputs for an address - -API sets: `READ` - -``` -URI: /api/v1/address_uxouts -Method: GET -Args: - address -``` - -Returns the historical, spent outputs of a given address. 
- -Example: - -```sh -curl http://127.0.0.1:6420/api/v1/address_uxouts?address=6dkVxyKFbFKg9Vdg6HPg1UANLByYRqkrdY -``` - -Result: - -```json -[ - { - "uxid": "7669ff7350d2c70a88093431a7b30d3e69dda2319dcb048aa80fa0d19e12ebe0", - "time": 1502936862, - "src_block_seq": 2556, - "src_tx": "b51e1933f286c4f03d73e8966186bafb25f64053db8514327291e690ae8aafa5", - "owner_address": "6dkVxyKFbFKg9Vdg6HPg1UANLByYRqkrdY", - "coins": 2000000, - "hours": 633, - "spent_block_seq": 0, - "spent_tx": "0000000000000000000000000000000000000000000000000000000000000000" - } -] -``` - -## Coin supply related information - -### Coin supply - -API sets: `READ` - -``` -URI: /api/v1/coinSupply -Method: GET -``` - -Example: - -```sh -curl http://127.0.0.1:6420/api/v1/coinSupply -``` - -Result: - -```json -{ - "current_supply": "7187500.000000", - "total_supply": "25000000.000000", - "max_supply": "100000000.000000", - "current_coinhour_supply": "23499025077", - "total_coinhour_supply": "93679828577", - "unlocked_distribution_addresses": [ - "R6aHqKWSQfvpdo2fGSrq4F1RYXkBWR9HHJ", - "2EYM4WFHe4Dgz6kjAdUkM6Etep7ruz2ia6h", - "25aGyzypSA3T9K6rgPUv1ouR13efNPtWP5m", - "ix44h3cojvN6nqGcdpy62X7Rw6Ahnr3Thk", - "AYV8KEBEAPCg8a59cHgqHMqYHP9nVgQDyW", - "2Nu5Jv5Wp3RYGJU1EkjWFFHnebxMx1GjfkF", - "2THDupTBEo7UqB6dsVizkYUvkKq82Qn4gjf", - "tWZ11Nvor9parjg4FkwxNVcby59WVTw2iL", - "m2joQiJRZnj3jN6NsoKNxaxzUTijkdRoSR", - "8yf8PAQqU2cDj8Yzgz3LgBEyDqjvCh2xR7", - "sgB3n11ZPUYHToju6TWMpUZTUcKvQnoFMJ", - "2UYPbDBnHUEc67e7qD4eXtQQ6zfU2cyvAvk", - "wybwGC9rhm8ZssBuzpy5goXrAdE31MPdsj", - "JbM25o7kY7hqJZt3WGYu9pHZFCpA9TCR6t", - "2efrft5Lnwjtk7F1p9d7BnPd72zko2hQWNi", - "Syzmb3MiMoiNVpqFdQ38hWgffHg86D2J4e", - "2g3GUmTQooLrNHaRDhKtLU8rWLz36Beow7F", - "D3phtGr9iv6238b3zYXq6VgwrzwvfRzWZQ", - "gpqsFSuMCZmsjPc6Rtgy1FmLx424tH86My", - "2EUF3GPEUmfocnUc1w6YPtqXVCy3UZA4rAq", - "TtAaxB3qGz5zEAhhiGkBY9VPV7cekhvRYS", - "2fM5gVpi7XaiMPm4i29zddTNkmrKe6TzhVZ", - "ix3NDKgxfYYANKAb5kbmwBYXPrkAsha7uG", - "2RkPshpFFrkuaP98GprLtgHFTGvPY5e6wCK", - 
"Ak1qCDNudRxZVvcW6YDAdD9jpYNNStAVqm" - ], - "locked_distribution_addresses": [ - "2eZYSbzBKJ7QCL4kd5LSqV478rJQGb4UNkf", - "KPfqM6S96WtRLMuSy4XLfVwymVqivdcDoM", - "5B98bU1nsedGJBdRD5wLtq7Z8t8ZXio8u5", - "2iZWk5tmBynWxj2PpAFyiZzEws9qSnG3a6n", - "XUGdPaVnMh7jtzPe3zkrf9FKh5nztFnQU5", - "hSNgHgewJme8uaHrEuKubHYtYSDckD6hpf", - "2DeK765jLgnMweYrMp1NaYHfzxumfR1PaQN", - "orrAssY5V2HuQAbW9K6WktFrGieq2m23pr", - "4Ebf4PkG9QEnQTm4MVvaZvJV6Y9av3jhgb", - "7Uf5xJ3GkiEKaLxC2WmJ1t6SeekJeBdJfu", - "oz4ytDKbCqpgjW3LPc52pW2CaK2gxCcWmL", - "2ex5Z7TufQ5Z8xv5mXe53fSQRfUr35SSo7Q", - "WV2ap7ZubTxeDdmEZ1Xo7ufGMkekLWikJu", - "ckCTV4r1pNuz6j2VBRHhaJN9HsCLY7muLV", - "MXJx96ZJVSjktgeYZpVK8vn1H3xWP8ooq5", - "wyQVmno9aBJZmQ99nDSLoYWwp7YDJCWsrH", - "2cc9wKxCsFNRkoAQDAoHke3ZoyL1mSV14cj", - "29k9g3F5AYfVaa1joE1PpZjBED6hQXes8Mm", - "2XPLzz4ZLf1A9ykyTCjW5gEmVjnWa8CuatH", - "iH7DqqojTgUn2JxmY9hgFp165Nk7wKfan9", - "RJzzwUs3c9C8Y7NFYzNfFoqiUKeBhBfPki", - "2W2cGyiCRM4nwmmiGPgMuGaPGeBzEm7VZPn", - "ALJVNKYL7WGxFBSriiZuwZKWD4b7fbV1od", - "tBaeg9zE2sgmw5ZQENaPPYd6jfwpVpGTzS", - "2hdTw5Hk3rsgpZjvk8TyKcCZoRVXU5QVrUt", - "A1QU6jKq8YgTP79M8fwZNHUZc7hConFKmy", - "q9RkXoty3X1fuaypDDRUi78rWgJWYJMmpJ", - "2Xvm6is5cAPA85xnSYXDuAqiRyoXiky5RaD", - "4CW2CPJEzxhn2PS4JoSLoWGL5QQ7dL2eji", - "24EG6uTzL7DHNzcwsygYGRR1nfu5kco7AZ1", - "KghGnWw5fppTrqHSERXZf61yf7GkuQdCnV", - "2WojewRA3LbpyXTP9ANy8CZqJMgmyNm3MDr", - "2BsMfywmGV3M2CoDA112Rs7ZBkiMHfy9X11", - "kK1Q4gPyYfVVMzQtAPRzL8qXMqJ67Y7tKs", - "28J4mx8xfUtM92DbQ6i2Jmqw5J7dNivfroN", - "gQvgyG1djgtftoCVrSZmsRxr7okD4LheKw", - "3iFGBKapAWWzbiGFSr5ScbhrEPm6Esyvia", - "NFW2akQH2vu7AqkQXxFz2P5vkXTWkSqrSm", - "2MQJjLnWRp9eHh6MpCwpiUeshhtmri12mci", - "2QjRQUMyL6iodtHP9zKmxCNYZ7k3jxtk49C", - "USdfKy7B6oFNoauHWMmoCA7ND9rHqYw2Mf", - "cA49et9WtptYHf6wA1F8qqVgH3kS5jJ9vK", - "qaJT9TjcMi46sTKcgwRQU8o5Lw2Ea1gC4N", - "22pyn5RyhqtTQu4obYjuWYRNNw4i54L8xVr", - "22dkmukC6iH4FFLBmHne6modJZZQ3MC9BAT", - "z6CJZfYLvmd41GRVE8HASjRcy5hqbpHZvE", - "GEBWJ2KpRQDBTCCtvnaAJV2cYurgXS8pta", - 
"oS8fbEm82cprmAeineBeDkaKd7QownDZQh", - "rQpAs1LVQdphyj9ipEAuukAoj9kNpSP8cM", - "6NSJKsPxmqipGAfFFhUKbkopjrvEESTX3j", - "cuC68ycVXmD2EBzYFNYQ6akhKGrh3FGjSf", - "bw4wtYU8toepomrhWP2p8UFYfHBbvEV425", - "HvgNmDz5jD39Gwmi9VfDY1iYMhZUpZ8GKz", - "SbApuZAYquWP3Q6iD51BcMBQjuApYEkRVf", - "2Ugii5yxJgLzC59jV1vF8GK7UBZdvxwobeJ", - "21N2iJ1qnQRiJWcEqNRxXwfNp8QcmiyhtPy", - "9TC4RGs6AtFUsbcVWnSoCdoCpSfM66ALAc", - "oQzn55UWG4iMcY9bTNb27aTnRdfiGHAwbD", - "2GCdwsRpQhcf8SQcynFrMVDM26Bbj6sgv9M", - "2NRFe7REtSmaM2qAgZeG45hC8EtVGV2QjeB", - "25RGnhN7VojHUTvQBJA9nBT5y1qTQGULMzR", - "26uCBDfF8E2PJU2Dzz2ysgKwv9m4BhodTz9", - "Wkvima5cF7DDFdmJQqcdq8Syaq9DuAJJRD", - "286hSoJYxvENFSHwG51ZbmKaochLJyq4ERQ", - "FEGxF3HPoM2HCWHn82tyeh9o7vEQq5ySGE", - "h38DxNxGhWGTq9p5tJnN5r4Fwnn85Krrb6", - "2c1UU8J6Y3kL4cmQh21Tj8wkzidCiZxwdwd", - "2bJ32KuGmjmwKyAtzWdLFpXNM6t83CCPLq5", - "2fi8oLC9zfVVGnzzQtu3Y3rffS65Hiz6QHo", - "TKD93RxFr2Am44TntLiJQus4qcEwTtvEEQ", - "zMDywYdGEDtTSvWnCyc3qsYHWwj9ogws74", - "25NbotTka7TwtbXUpSCQD8RMgHKspyDubXJ", - "2ayCELBERubQWH5QxUr3cTxrYpidvUAzsSw", - "RMTCwLiYDKEAiJu5ekHL1NQ8UKHi5ozCPg", - "ejJjiCwp86ykmFr5iTJ8LxQXJ2wJPTYmkm" - ] -} -``` - -### Richlist show top N addresses by uxouts - -API sets: `READ` - -``` -URI: /api/v1/richlist -Method: GET -Args: - n: top N addresses, [default 20, returns all if <= 0]. - include-distribution: include distribution addresses or not, default false. 
-``` - -Example: - -```sh -curl "http://127.0.0.1:6420/api/v1/richlist?n=4&include-distribution=true" -``` - -Result: - -```json -{ - "richlist": [ - { - "address": "zMDywYdGEDtTSvWnCyc3qsYHWwj9ogws74", - "coins": "1000000.000000", - "locked": true - }, - { - "address": "z6CJZfYLvmd41GRVE8HASjRcy5hqbpHZvE", - "coins": "1000000.000000", - "locked": true - }, - { - "address": "wyQVmno9aBJZmQ99nDSLoYWwp7YDJCWsrH", - "coins": "1000000.000000", - "locked": true - }, - { - "address": "tBaeg9zE2sgmw5ZQENaPPYd6jfwpVpGTzS", - "coins": "1000000.000000", - "locked": true - } - ] -} -``` - -### Count the addresses that currently have unspent outputs (coins) - -API sets: `READ` - -``` -URI: /api/v1/addresscount -Method: GET -``` - -Example: - -```sh -curl "http://127.0.0.1:6420/api/v1/addresscount" -``` - -Result: - -```json -{ - "count": 10103 -} -``` - -## Network status - -### Get information for a specific connection - -API sets: `STATUS`, `READ` - -``` -URI: /api/v1/network/connection -Method: GET -Args: - addr: ip:port address of a known connection -``` - -Connection `"state"` value can be `"pending"`, `"connected"` or `"introduced"`. - -* The `"pending"` state is prior to connection establishment. -* The `"connected"` state is after connection establishment, but before the introduction handshake has completed. -* The `"introduced"` state is after the introduction handshake has completed. 
- -Example: - -```sh -curl 'http://127.0.0.1:6420/api/v1/network/connection?addr=176.9.84.75:6000' -``` - -Result: - -```json -{ - "id": 109548, - "address": "176.9.84.75:6000", - "last_sent": 1520675817, - "last_received": 1520675817, - "connected_at": 1520675700, - "outgoing": false, - "state": "introduced", - "mirror": 719118746, - "height": 181, - "listen_port": 6000, - "user_agent": "skycoin:0.25.0", - "is_trusted_peer": true, - "unconfirmed_verify_transaction": { - "burn_factor": 10, - "max_transaction_size": 32768, - "max_decimals": 3 - } -} -``` - -### Get a list of all connections - -API sets: `STATUS`, `READ` - -``` -URI: /api/v1/network/connections -Method: GET -Args: - states: [optional] comma-separated list of connection states ("pending", "connected" or "introduced"). Defaults to "connected,introduced" - direction: [optional] "outgoing" or "incoming". If not provided, both are included. -``` - -Connection `"state"` value can be `"pending"`, `"connected"` or `"introduced"`. - -* The `"pending"` state is prior to connection establishment. -* The `"connected"` state is after connection establishment, but before the introduction handshake has completed. -* The `"introduced"` state is after the introduction handshake has completed. - -By default, both incoming and outgoing connections in the `"connected"` or `"introduced"` state are returned. 
- -Example: - -```sh -curl 'http://127.0.0.1:6420/api/v1/network/connections' -``` - -Result: - -```json -{ - "connections": [ - { - "id": 99107, - "address": "139.162.161.41:20002", - "last_sent": 1520675750, - "last_received": 1520675750, - "connected_at": 1520675500, - "outgoing": false, - "state": "introduced", - "mirror": 1338939619, - "listen_port": 20002, - "height": 180, - "user_agent": "skycoin:0.25.0", - "is_trusted_peer": true, - "unconfirmed_verify_transaction": { - "burn_factor": 10, - "max_transaction_size": 32768, - "max_decimals": 3 - } - }, - { - "id": 109548, - "address": "176.9.84.75:6000", - "last_sent": 1520675751, - "last_received": 1520675751, - "connected_at": 1520675751, - "state": "connected", - "outgoing": true, - "mirror": 0, - "listen_port": 6000, - "height": 0, - "user_agent": "", - "is_trusted_peer": true, - "unconfirmed_verify_transaction": { - "burn_factor": 0, - "max_transaction_size": 0, - "max_decimals": 0 - } - }, - { - "id": 99115, - "address": "185.120.34.60:6000", - "last_sent": 1520675754, - "last_received": 1520675754, - "connected_at": 1520673013, - "outgoing": false, - "state": "introduced", - "mirror": 1931713869, - "listen_port": 6000, - "height": 180, - "user_agent": "", - "is_trusted_peer": true, - "unconfirmed_verify_transaction": { - "burn_factor": 0, - "max_transaction_size": 0, - "max_decimals": 0 - } - } - ] -} -``` - - -### Get a list of all default connections - -API sets: `STATUS`, `READ` - -``` -URI: /api/v1/network/defaultConnections -Method: GET -``` - -Returns addresses in the default hardcoded list of peers. 
- -Example: - -```sh -curl 'http://127.0.0.1:6420/api/v1/network/defaultConnections' -``` - -Result: - -```json -[ - "104.237.142.206:6000", - "118.178.135.93:6000", - "139.162.7.132:6000", - "172.104.85.6:6000", - "176.58.126.224:6000", - "47.88.33.156:6000" -] -``` - -### Get a list of all trusted connections - -API sets: `STATUS`, `READ` - -``` -URI: /api/v1/network/connections/trust -Method: GET -``` - -Returns addresses marked as trusted in the peerlist. -This is typically equal to the list of addresses in the default hardcoded list of peers. - -Example: - -```sh -curl 'http://127.0.0.1:6420/api/v1/network/connections/trust' -``` - -Result: - -```json -[ - "104.237.142.206:6000", - "118.178.135.93:6000", - "139.162.7.132:6000", - "172.104.85.6:6000", - "176.58.126.224:6000", - "47.88.33.156:6000" -] -``` - -### Get a list of all connections discovered through peer exchange - -API sets: `STATUS`, `READ` - -``` -URI: /api/v1/network/connections/exchange -Method: GET -``` - -Returns addresses from the peerlist that are known to have an open port. - -Example: - -```sh -curl 'http://127.0.0.1:6420/api/v1/network/connections/exchange' -``` - -Result: - -```json -[ - "104.237.142.206:6000", - "116.62.220.158:7200", - "118.237.210.163:6000", - "139.162.161.41:20000", - "139.162.161.41:20001", - "139.162.161.41:20002", - "139.162.33.154:6000", - "139.162.7.132:6000", - "155.94.137.34:6000", - "164.132.108.92:6000", - "165.227.199.63:6000", - "172.104.145.6:6000", - "172.104.52.230:7200", - "172.104.85.6:6000", - "173.212.205.184:6000", - "173.249.30.221:6000", - "176.58.126.224:6000", - "176.9.84.75:6000", - "185.120.34.60:6000", - "35.201.160.163:6000", - "47.88.33.156:6000" -] -``` - -### Disconnect a peer - -API sets: `NET_CTRL` - -``` -URI: /api/v1/network/connection/disconnect -Method: POST -Args: - id: ID of the connection - -Returns 404 if the connection is not found. -``` - -Disconnects a peer by ID. 
- -Example: - -```sh -curl -X POST 'http://127.0.0.1:6420/api/v1/network/connection/disconnect?id=999' -``` - -Result: - -```json -{} -``` - -## Migrating from the unversioned API - -The unversioned API are the API endpoints without an `/api` prefix. -These endpoints are all prefixed with `/api/v1` now. - -`-enable-unversioned-api` was added as an option to assist migration to `/api/v1` -but this option was removed in v0.26.0. - -To migrate from the unversioned API, add `/api/v1` to all endpoints that you call -that do not have an `/api` prefix already. - -For example, `/block` would become `/api/v1/block`. - -## Migrating from the JSONRPC API - -The JSONRPC-2.0 RPC API was deprecated in v0.25.0 and removed in v0.26.0. - -Anyone still using this can follow this guide to migrate to the REST API: - -* `get_status` is replaced by `/api/v1/blockchain/metadata` and `/api/v1/health` -* `get_lastblocks` is replaced by `/api/v1/last_blocks` -* `get_blocks` is replaced by `/api/v1/blocks` -* `get_outputs` is replaced by `/api/v1/outputs` -* `inject_transaction` is replaced by `/api/v1/injectTransaction` -* `get_transaction` is replaced by `/api/v1/transaction` - -## Migrating from /api/v1/spend - -The `POST /api/v1/spend` endpoint is deprecated and will be removed in v0.26.0. - -To migrate from it, use [`POST /api/v1/wallet/transaction`](#create-transaction) followed by [`POST /api/v1/injectTransaction`](#inject-raw-transaction). -Do not create another transaction before injecting the created transaction, otherwise you might create two conflicting transactions. - -`POST /api/v1/wallet/transaction` has more options for creating the transaction than the `/api/v1/spend` endpoint. 
-To replicate the same behavior as `/api/v1/spend`, use the following request body template: - -```json -{ - "hours_selection": { - "type": "auto", - "mode": "share", - "share_factor": "0.5", - }, - "wallet": { - "id": "$wallet_id", - "password": "$password" - }, - "to": [{ - "address": "$dst", - "coins": "$coins" - }] -} -``` - -You must use a string for `"coins"` instead of an integer measured in "droplets" (the smallest unit of currency in Skycoin, 1/1000000 of a skycoin). -For example, if you sent 1 Skycoin with `/api/v1/spend` you would have specified the `coins` field as `1000000`. -Now, you would specify it as `"1"`. - -Some examples: - -* 123.456 coins: before `123456000`, now `"123.456"` -* 0.1 coins: before `100000`, now `"0.1"` -* 1 coin: before `1000000`, now `"1"` - -Extra zeros on the `"coins"` string are ok, for example `"1"` is the same as `"1.0"` or `"1.000000"`. - -Only provide `"password"` if the wallet is encrypted. Note that decryption can take a few seconds, and this can impact -throughput. - -The request header `Content-Type` must be `application/json`. - -The response to `POST /api/v1/wallet/transaction` will include a verbose decoded transaction with details -and the hex-encoded binary transaction in the `"encoded_transaction"` field. -Use the value of `"encoded_transaction"` as the `"rawtx"` value in the request to `/api/v1/injectTransaction`. - -## Migration from /api/v1/explorer/address - -The `GET /api/v1/explorer/address` was deprecated in v0.25.0 and removed in v0.26.0. - -To migrate from it, use [`GET /api/v1/transactions?verbose=1`](#get-transactions-for-addresses). - -`/api/v1/explorer/address` accepted a single `address` query parameter. `/api/v1/transactions` uses an `addrs` query parameter and -accepts multiple addresses at once. - -The response data is the same but the structure is slightly different. 
Compare the follow two example responses: - -`/api/v1/explorer/address?address=WzPDgdfL1NzSbX96tscUNXUqtCRLjaBugC`: - -```json -[ - { - "status": { - "confirmed": true, - "unconfirmed": false, - "height": 38076, - "block_seq": 15493 - }, - "timestamp": 1518878675, - "length": 183, - "type": 0, - "txid": "6d8e2f8b436a2f38d604b3aa1196ef2176779c5e11e33fbdd09f993fe659c39f", - "inner_hash": "8da7c64dcedeeb6aa1e0d21fb84a0028dcd68e6801f1a3cc0224fdd50682046f", - "fee": 126249, - "sigs": [ - "c60e43980497daad59b4c72a2eac053b1584f960c57a5e6ac8337118dccfcee4045da3f60d9be674867862a13fdd87af90f4b85cbf39913bde13674e0a039b7800" - ], - "inputs": [ - { - "uxid": "349b06e5707f633fd2d8f048b687b40462d875d968b246831434fb5ab5dcac38", - "owner": "WzPDgdfL1NzSbX96tscUNXUqtCRLjaBugC", - "coins": "125.000000", - "hours": 34596, - "calculated_hours": 178174 - } - ], - "outputs": [ - { - "uxid": "5b4a79c7de2e9099e083bbc8096619ae76ba6fbe34875c61bbe2d3bfa6b18b99", - "dst": "2NfNKsaGJEndpSajJ6TsKJfsdDjW2gFsjXg", - "coins": "125.000000", - "hours": 51925 - } - ] - } -] -``` - -`/api/v1/transactions?verbose=1&addrs=WzPDgdfL1NzSbX96tscUNXUqtCRLjaBugC`: - -```json -[ - { - "status": { - "confirmed": true, - "unconfirmed": false, - "height": 57564, - "block_seq": 7498 - }, - "time": 1514743602, - "txn": { - "timestamp": 1514743602, - "length": 220, - "type": 0, - "txid": "df5bcef198fe6e96d496c30482730f895cabc1d55b338afe5633b0c2889d02f9", - "inner_hash": "4677ff9b9b56485495a45693cc09f8496199929fccb52091d32f2d3cf2ee8a41", - "fee": 69193, - "sigs": [ - "8e1f6f621a11f737ac2031be975d4b2fc17bf9f17a0da0a2fe219ee018011ab506e2ad0367be302a8d859cc355c552313389cd0aa9fa98dc7d2085a52f11ef5a00" - ], - "inputs": [ - { - "uxid": "2374201ff29f1c024ccfc6c53160e741d06720562853ad3613c121acd8389031", - "owner": "2GgFvqoyk9RjwVzj8tqfcXVXB4orBwoc9qv", - "coins": "162768.000000", - "hours": 485, - "calculated_hours": 138385 - } - ], - "outputs": [ - { - "uxid": 
"63f299fc85fe6fc34d392718eee55909837c7231b6ffd93e5a9a844c4375b313", - "dst": "2GgFvqoyk9RjwVzj8tqfcXVXB4orBwoc9qv", - "coins": "162643.000000", - "hours": 34596 - }, - { - "uxid": "349b06e5707f633fd2d8f048b687b40462d875d968b246831434fb5ab5dcac38", - "dst": "WzPDgdfL1NzSbX96tscUNXUqtCRLjaBugC", - "coins": "125.000000", - "hours": 34596 - } - ] - } - } -] -``` - -The transaction data is wrapped in a `"txn"` field. A `"time"` field is present at the top level. This `"time"` field -is either the confirmation timestamp of a confirmed transaction or the last received time of an unconfirmed transaction. diff --git a/vendor/github.com/SkycoinProject/skycoin/src/api/address.go b/vendor/github.com/SkycoinProject/skycoin/src/api/address.go deleted file mode 100644 index 93f2e83..0000000 --- a/vendor/github.com/SkycoinProject/skycoin/src/api/address.go +++ /dev/null @@ -1,56 +0,0 @@ -package api - -import ( - "encoding/json" - "net/http" - - "github.com/SkycoinProject/skycoin/src/cipher" -) - -// VerifyAddressRequest is the request data for POST /api/v2/address/verify -type VerifyAddressRequest struct { - Address string `json:"address"` -} - -// VerifyAddressResponse is returned by POST /api/v2/address/verify -type VerifyAddressResponse struct { - Version byte `json:"version"` -} - -// addressVerifyHandler verifies a Skycoin address -// Method: POST -// URI: /api/v2/address/verify -func addressVerifyHandler(w http.ResponseWriter, r *http.Request) { - if r.Method != http.MethodPost { - resp := NewHTTPErrorResponse(http.StatusMethodNotAllowed, "") - writeHTTPResponse(w, resp) - return - } - - var req VerifyAddressRequest - if err := json.NewDecoder(r.Body).Decode(&req); err != nil { - resp := NewHTTPErrorResponse(http.StatusBadRequest, err.Error()) - writeHTTPResponse(w, resp) - return - } - - if req.Address == "" { - resp := NewHTTPErrorResponse(http.StatusBadRequest, "address is required") - writeHTTPResponse(w, resp) - return - } - - addr, err := 
cipher.DecodeBase58Address(req.Address) - - if err != nil { - resp := NewHTTPErrorResponse(http.StatusUnprocessableEntity, err.Error()) - writeHTTPResponse(w, resp) - return - } - - writeHTTPResponse(w, HTTPResponse{ - Data: VerifyAddressResponse{ - Version: addr.Version, - }, - }) -} diff --git a/vendor/github.com/SkycoinProject/skycoin/src/api/blockchain.go b/vendor/github.com/SkycoinProject/skycoin/src/api/blockchain.go deleted file mode 100644 index a986ca3..0000000 --- a/vendor/github.com/SkycoinProject/skycoin/src/api/blockchain.go +++ /dev/null @@ -1,395 +0,0 @@ -package api - -// APIs for blockchain related information - -import ( - "errors" - "fmt" - "net/http" - "strconv" - "strings" - - "github.com/SkycoinProject/skycoin/src/cipher" - "github.com/SkycoinProject/skycoin/src/coin" - "github.com/SkycoinProject/skycoin/src/readable" - wh "github.com/SkycoinProject/skycoin/src/util/http" - "github.com/SkycoinProject/skycoin/src/visor" -) - -// blockchainMetadataHandler returns the blockchain metadata -// Method: GET -// URI: /api/v1/blockchain/metadata -func blockchainMetadataHandler(gateway Gatewayer) http.HandlerFunc { - return func(w http.ResponseWriter, r *http.Request) { - if r.Method != http.MethodGet { - wh.Error405(w) - return - } - - visorMetadata, err := gateway.GetBlockchainMetadata() - if err != nil { - err = fmt.Errorf("gateway.GetBlockchainMetadata failed: %v", err) - wh.Error500(w, err.Error()) - return - } - - // This can happen if the node is shut down at the right moment, guard against a panic - if visorMetadata == nil { - err = errors.New("gateway.GetBlockchainMetadata metadata is nil") - wh.Error500(w, err.Error()) - return - } - - metadata := readable.NewBlockchainMetadata(*visorMetadata) - - wh.SendJSONOr500(logger, w, metadata) - } -} - -// blockchainProgressHandler returns the blockchain sync progress -// Method: GET -// URI: /api/v1/blockchain/progress -func blockchainProgressHandler(gateway Gatewayer) http.HandlerFunc { - return 
func(w http.ResponseWriter, r *http.Request) { - if r.Method != http.MethodGet { - wh.Error405(w) - return - } - - headSeq, _, err := gateway.HeadBkSeq() - if err != nil { - err = fmt.Errorf("gateway.HeadBkSeq failed: %v", err) - wh.Error500(w, err.Error()) - return - } - - progress := gateway.GetBlockchainProgress(headSeq) - - // This can happen if the node is shut down at the right moment, guard against a panic - if progress == nil { - err = errors.New("gateway.GetBlockchainProgress progress is nil") - wh.Error500(w, err.Error()) - return - } - - wh.SendJSONOr500(logger, w, readable.NewBlockchainProgress(progress)) - } -} - -func parseBoolFlag(v string) (bool, error) { - if v == "" { - return false, nil - } - - return strconv.ParseBool(v) -} - -// blockHandler returns a block by hash or seq -// Method: GET -// URI: /api/v1/block -// Args: -// hash [transaction hash string] -// seq [int] -// Note: only one of hash or seq is allowed -func blockHandler(gateway Gatewayer) http.HandlerFunc { - return func(w http.ResponseWriter, r *http.Request) { - if r.Method != http.MethodGet { - wh.Error405(w) - return - } - - hash := r.FormValue("hash") - seq := r.FormValue("seq") - - verbose, err := parseBoolFlag(r.FormValue("verbose")) - if err != nil { - wh.Error400(w, "Invalid value for verbose") - return - } - - switch { - case hash == "" && seq == "": - wh.Error400(w, "should specify one filter, hash or seq") - return - case hash != "" && seq != "": - wh.Error400(w, "should only specify one filter, hash or seq") - return - } - - var h cipher.SHA256 - if hash != "" { - var err error - h, err = cipher.SHA256FromHex(hash) - if err != nil { - wh.Error400(w, err.Error()) - return - } - } - - var uSeq uint64 - if seq != "" { - var err error - uSeq, err = strconv.ParseUint(seq, 10, 64) - if err != nil { - wh.Error400(w, fmt.Sprintf("Invalid seq value %q", seq)) - return - } - } - - if verbose { - var b *coin.SignedBlock - var inputs [][]visor.TransactionInput - - switch { - case 
hash != "": - b, inputs, err = gateway.GetSignedBlockByHashVerbose(h) - case seq != "": - b, inputs, err = gateway.GetSignedBlockBySeqVerbose(uSeq) - } - - if err != nil { - wh.Error500(w, err.Error()) - return - } - - if b == nil { - wh.Error404(w, "") - return - } - - rb, err := readable.NewBlockVerbose(b.Block, inputs) - if err != nil { - wh.Error500(w, err.Error()) - return - } - - wh.SendJSONOr500(logger, w, rb) - return - } - - var b *coin.SignedBlock - switch { - case hash != "": - b, err = gateway.GetSignedBlockByHash(h) - case seq != "": - b, err = gateway.GetSignedBlockBySeq(uSeq) - } - - if err != nil { - wh.Error500(w, err.Error()) - return - } - - if b == nil { - wh.Error404(w, "") - return - } - - rb, err := readable.NewBlock(b.Block) - if err != nil { - wh.Error500(w, err.Error()) - return - } - - wh.SendJSONOr500(logger, w, rb) - } -} - -// blocksHandler returns blocks between a start and end point, -// or an explicit list of sequences. -// If using start and end, the block sequences include both the start and end point. -// Explicit sequences cannot be combined with start and end. 
-// Method: GET, POST -// URI: /api/v1/blocks -// Args: -// start [int] -// end [int] -// seqs [comma separated list of ints] -// verbose [bool] -func blocksHandler(gateway Gatewayer) http.HandlerFunc { - return func(w http.ResponseWriter, r *http.Request) { - if r.Method != http.MethodGet && r.Method != http.MethodPost { - wh.Error405(w) - return - } - - verbose, err := parseBoolFlag(r.FormValue("verbose")) - if err != nil { - wh.Error400(w, "Invalid value for verbose") - return - } - - sStart := r.FormValue("start") - sEnd := r.FormValue("end") - sSeqs := r.FormValue("seqs") - - if sSeqs != "" && (sStart != "" || sEnd != "") { - wh.Error400(w, "seqs cannot be used with start or end") - return - } - - if sSeqs == "" && sStart == "" && sEnd == "" { - wh.Error400(w, "At least one of seqs or start or end are required") - return - } - - var start uint64 - var end uint64 - var seqs []uint64 - - if sStart != "" { - var err error - start, err = strconv.ParseUint(sStart, 10, 64) - if err != nil { - wh.Error400(w, fmt.Sprintf("Invalid start value %q", sStart)) - return - } - } - - if sEnd != "" { - var err error - end, err = strconv.ParseUint(sEnd, 10, 64) - if err != nil { - wh.Error400(w, fmt.Sprintf("Invalid end value %q", sEnd)) - return - } - } - - if sSeqs != "" { - ssSeqs := strings.Split(sSeqs, ",") - seqs = make([]uint64, len(ssSeqs)) - seqsMap := make(map[uint64]struct{}, len(ssSeqs)) - for i, s := range ssSeqs { - x, err := strconv.ParseUint(s, 10, 64) - if err != nil { - wh.Error400(w, fmt.Sprintf("Invalid sequence %q at seqs[%d]", s, i)) - return - } - - if _, ok := seqsMap[x]; ok { - wh.Error400(w, fmt.Sprintf("Duplicate sequence %d at seqs[%d]", x, i)) - return - } - seqsMap[x] = struct{}{} - - seqs[i] = x - } - } - - if verbose { - var blocks []coin.SignedBlock - var inputs [][][]visor.TransactionInput - var err error - - if len(seqs) > 0 { - blocks, inputs, err = gateway.GetBlocksVerbose(seqs) - } else { - blocks, inputs, err = 
gateway.GetBlocksInRangeVerbose(start, end) - } - - if err != nil { - switch err.(type) { - case visor.ErrBlockNotExist: - wh.Error404(w, err.Error()) - default: - wh.Error500(w, err.Error()) - } - return - } - - rb, err := readable.NewBlocksVerbose(blocks, inputs) - if err != nil { - wh.Error500(w, err.Error()) - return - } - - wh.SendJSONOr500(logger, w, rb) - } else { - var blocks []coin.SignedBlock - var err error - - if len(seqs) > 0 { - blocks, err = gateway.GetBlocks(seqs) - } else { - blocks, err = gateway.GetBlocksInRange(start, end) - } - - if err != nil { - switch err.(type) { - case visor.ErrBlockNotExist: - wh.Error404(w, err.Error()) - default: - wh.Error500(w, err.Error()) - } - return - } - - rb, err := readable.NewBlocks(blocks) - if err != nil { - wh.Error500(w, err.Error()) - return - } - - wh.SendJSONOr500(logger, w, rb) - } - } -} - -// lastBlocksHandler returns the most recent N blocks on the blockchain -// Method: GET -// URI: /api/v1/last_blocks -// Args: -// num [int] -// verbose [bool] -func lastBlocksHandler(gateway Gatewayer) http.HandlerFunc { - return func(w http.ResponseWriter, r *http.Request) { - if r.Method != http.MethodGet { - wh.Error405(w) - return - } - - verbose, err := parseBoolFlag(r.FormValue("verbose")) - if err != nil { - wh.Error400(w, "Invalid value for verbose") - return - } - - num := r.FormValue("num") - n, err := strconv.ParseUint(num, 10, 64) - if err != nil { - wh.Error400(w, fmt.Sprintf("Invalid num value %q", num)) - return - } - - if verbose { - blocks, inputs, err := gateway.GetLastBlocksVerbose(n) - if err != nil { - wh.Error500(w, err.Error()) - return - } - - rb, err := readable.NewBlocksVerbose(blocks, inputs) - if err != nil { - wh.Error500(w, err.Error()) - return - } - - wh.SendJSONOr500(logger, w, rb) - return - } - - blocks, err := gateway.GetLastBlocks(n) - if err != nil { - wh.Error500(w, err.Error()) - return - } - - rb, err := readable.NewBlocks(blocks) - if err != nil { - wh.Error500(w, 
err.Error()) - return - } - - wh.SendJSONOr500(logger, w, rb) - } -} diff --git a/vendor/github.com/SkycoinProject/skycoin/src/api/client.go b/vendor/github.com/SkycoinProject/skycoin/src/api/client.go deleted file mode 100644 index 3038adc..0000000 --- a/vendor/github.com/SkycoinProject/skycoin/src/api/client.go +++ /dev/null @@ -1,1314 +0,0 @@ -package api - -import ( - "bytes" - "encoding/json" - "errors" - "fmt" - "io" - "io/ioutil" - "net" - "net/http" - "net/url" - "strings" - "time" - - "github.com/SkycoinProject/skycoin/src/coin" - "github.com/SkycoinProject/skycoin/src/daemon" - "github.com/SkycoinProject/skycoin/src/kvstorage" - "github.com/SkycoinProject/skycoin/src/readable" -) - -const ( - dialTimeout = 60 * time.Second - httpClientTimeout = 120 * time.Second - tlsHandshakeTimeout = 60 * time.Second - - // ContentTypeJSON json content type header - ContentTypeJSON = "application/json" - // ContentTypeForm form data content type header - ContentTypeForm = "application/x-www-form-urlencoded" -) - -// ClientError is used for non-200 API responses -type ClientError struct { - Status string - StatusCode int - Message string -} - -// NewClientError creates a ClientError -func NewClientError(status string, statusCode int, message string) ClientError { - return ClientError{ - Status: status, - StatusCode: statusCode, - Message: strings.TrimRight(message, "\n"), - } -} - -func (e ClientError) Error() string { - return e.Message -} - -// ReceivedHTTPResponse parsed a HTTPResponse received by the Client, for the V2 API -type ReceivedHTTPResponse struct { - Error *HTTPError `json:"error,omitempty"` - Data json.RawMessage `json:"data"` -} - -// Client provides an interface to a remote node's HTTP API -type Client struct { - HTTPClient *http.Client - Addr string - Username string - Password string -} - -// NewClient creates a Client -func NewClient(addr string) *Client { - transport := &http.Transport{ - Dial: (&net.Dialer{ - Timeout: dialTimeout, - }).Dial, - 
TLSHandshakeTimeout: tlsHandshakeTimeout, - } - httpClient := &http.Client{ - Transport: transport, - Timeout: httpClientTimeout, - } - addr = strings.TrimRight(addr, "/") - addr += "/" - - return &Client{ - Addr: addr, - HTTPClient: httpClient, - } -} - -// SetAuth configures the Client's request authentication -func (c *Client) SetAuth(username, password string) { - c.Username = username - c.Password = password -} - -func (c *Client) applyAuth(req *http.Request) { - if c.Username == "" && c.Password == "" { - return - } - - req.SetBasicAuth(c.Username, c.Password) -} - -// GetV2 makes a GET request to an endpoint and unmarshals the response to respObj. -// If the response is not 200 OK, returns an error -func (c *Client) GetV2(endpoint string, respObj interface{}) (bool, error) { - return c.requestV2(http.MethodGet, endpoint, nil, respObj) -} - -// Get makes a GET request to an endpoint and unmarshals the response to obj. -// If the response is not 200 OK, returns an error -func (c *Client) Get(endpoint string, obj interface{}) error { - resp, err := c.get(endpoint) - if err != nil { - return err - } - - defer resp.Body.Close() - - if resp.StatusCode != http.StatusOK { - body, err := ioutil.ReadAll(resp.Body) - if err != nil { - return err - } - - return NewClientError(resp.Status, resp.StatusCode, string(body)) - } - - if obj == nil { - return nil - } - - d := json.NewDecoder(resp.Body) - d.DisallowUnknownFields() - return d.Decode(obj) -} - -// get makes a GET request to an endpoint. Caller must close response body. -func (c *Client) get(endpoint string) (*http.Response, error) { - return c.makeRequestWithoutBody(endpoint, http.MethodGet) -} - -// makeRequestWithoutBody makes a `method` request to an endpoint. Caller must close response body. 
-func (c *Client) makeRequestWithoutBody(endpoint, method string) (*http.Response, error) { - endpoint = strings.TrimLeft(endpoint, "/") - endpoint = c.Addr + endpoint - - req, err := http.NewRequest(method, endpoint, nil) - if err != nil { - return nil, err - } - - c.applyAuth(req) - - return c.HTTPClient.Do(req) -} - -// DeleteV2 makes a DELETE request to an endpoint with body of json data, -// and parses the standard JSON response. -func (c *Client) DeleteV2(endpoint string, respObj interface{}) (bool, error) { - return c.requestV2(http.MethodDelete, endpoint, nil, respObj) -} - -// PostForm makes a POST request to an endpoint with body of ContentTypeForm formated data. -func (c *Client) PostForm(endpoint string, body io.Reader, obj interface{}) error { - return c.Post(endpoint, ContentTypeForm, body, obj) -} - -// PostJSON makes a POST request to an endpoint with body of json data. -func (c *Client) PostJSON(endpoint string, reqObj, respObj interface{}) error { - body, err := json.Marshal(reqObj) - if err != nil { - return err - } - - return c.Post(endpoint, ContentTypeJSON, bytes.NewReader(body), respObj) -} - -// Post makes a POST request to an endpoint. 
-func (c *Client) Post(endpoint string, contentType string, body io.Reader, obj interface{}) error { - csrf, err := c.CSRF() - if err != nil { - return err - } - - endpoint = strings.TrimLeft(endpoint, "/") - endpoint = c.Addr + endpoint - - req, err := http.NewRequest(http.MethodPost, endpoint, body) - if err != nil { - return err - } - - c.applyAuth(req) - - if csrf != "" { - req.Header.Set(CSRFHeaderName, csrf) - } - - req.Header.Set("Content-Type", contentType) - - resp, err := c.HTTPClient.Do(req) - if err != nil { - return err - } - - defer resp.Body.Close() - - if resp.StatusCode != http.StatusOK { - body, err := ioutil.ReadAll(resp.Body) - if err != nil { - return err - } - - return NewClientError(resp.Status, resp.StatusCode, string(body)) - } - - if obj == nil { - return nil - } - - decoder := json.NewDecoder(resp.Body) - decoder.DisallowUnknownFields() - return decoder.Decode(obj) -} - -// PostJSONV2 makes a POST request to an endpoint with body of json data, -// and parses the standard JSON response. 
-func (c *Client) PostJSONV2(endpoint string, reqObj, respObj interface{}) (bool, error) { - body, err := json.Marshal(reqObj) - if err != nil { - return false, err - } - - return c.requestV2(http.MethodPost, endpoint, bytes.NewReader(body), respObj) -} - -func (c *Client) requestV2(method, endpoint string, body io.Reader, respObj interface{}) (bool, error) { - csrf, err := c.CSRF() - if err != nil { - return false, err - } - - endpoint = strings.TrimLeft(endpoint, "/") - endpoint = c.Addr + endpoint - - req, err := http.NewRequest(method, endpoint, body) - if err != nil { - return false, err - } - - c.applyAuth(req) - - if csrf != "" { - req.Header.Set(CSRFHeaderName, csrf) - } - - switch method { - case http.MethodPost: - req.Header.Set("Content-Type", ContentTypeJSON) - } - - req.Header.Set("Accept", ContentTypeJSON) - - resp, err := c.HTTPClient.Do(req) - if err != nil { - return false, err - } - - defer resp.Body.Close() - - respBody, err := ioutil.ReadAll(resp.Body) - if err != nil { - return false, err - } - - decoder := json.NewDecoder(bytes.NewReader(respBody)) - decoder.DisallowUnknownFields() - - var wrapObj ReceivedHTTPResponse - if err := decoder.Decode(&wrapObj); err != nil { - // In some cases, the server can send an error response in a non-JSON format, - // such as a 404 when the endpoint is not registered, or if a 500 error - // occurs in the go HTTP stack, outside of the application's control. - // If this happens, treat the entire response body as the error message. - if resp.StatusCode != http.StatusOK { - return false, NewClientError(resp.Status, resp.StatusCode, string(respBody)) - } - - return false, err - } - - // The JSON decoder stops at the end of the first valid JSON object. - // Check that there is no trailing data after the end of the first valid JSON object. - // This could occur if an endpoint mistakenly wrote an object twice, for example. - // This line returns the decoder's underlying read buffer. 
Read(nil) will return io.EOF - // if the buffer was completely consumed. - if _, err := decoder.Buffered().Read(nil); err != io.EOF { - return false, NewClientError(resp.Status, resp.StatusCode, "Response has additional bytes after the first JSON object: "+string(respBody)) - } - - var rspErr error - if resp.StatusCode != http.StatusOK { - rspErr = NewClientError(resp.Status, resp.StatusCode, wrapObj.Error.Message) - } - - if wrapObj.Data == nil { - return false, rspErr - } - - decoder = json.NewDecoder(bytes.NewReader(wrapObj.Data)) - decoder.DisallowUnknownFields() - - if err := decoder.Decode(respObj); err != nil { - return false, err - } - - return true, rspErr -} - -// CSRF returns a CSRF token. If CSRF is disabled on the node, returns an empty string and nil error. -func (c *Client) CSRF() (string, error) { - resp, err := c.get("/api/v1/csrf") - if err != nil { - return "", err - } - - defer resp.Body.Close() - - switch resp.StatusCode { - case http.StatusOK: - case http.StatusNotFound: - // CSRF is disabled on the node - return "", nil - default: - body, err := ioutil.ReadAll(resp.Body) - if err != nil { - return "", err - } - - return "", NewClientError(resp.Status, resp.StatusCode, string(body)) - } - - d := json.NewDecoder(resp.Body) - d.DisallowUnknownFields() - - var m map[string]string - if err := d.Decode(&m); err != nil { - return "", err - } - - token, ok := m["csrf_token"] - if !ok { - return "", errors.New("csrf_token not found in response") - } - - return token, nil -} - -// Version makes a request to GET /api/v1/version -func (c *Client) Version() (*readable.BuildInfo, error) { - var bi readable.BuildInfo - if err := c.Get("/api/v1/version", &bi); err != nil { - return nil, err - } - return &bi, nil -} - -// Outputs makes a request to GET /api/v1/outputs -func (c *Client) Outputs() (*readable.UnspentOutputsSummary, error) { - var o readable.UnspentOutputsSummary - if err := c.Get("/api/v1/outputs", &o); err != nil { - return nil, err - } - 
return &o, nil -} - -// OutputsForAddresses makes a request to POST /api/v1/outputs?addrs=xxx -func (c *Client) OutputsForAddresses(addrs []string) (*readable.UnspentOutputsSummary, error) { - v := url.Values{} - v.Add("addrs", strings.Join(addrs, ",")) - - endpoint := "/api/v1/outputs" - - var o readable.UnspentOutputsSummary - if err := c.PostForm(endpoint, strings.NewReader(v.Encode()), &o); err != nil { - return nil, err - } - return &o, nil -} - -// OutputsForHashes makes a request to POST /api/v1/outputs?hashes=zzz -func (c *Client) OutputsForHashes(hashes []string) (*readable.UnspentOutputsSummary, error) { - v := url.Values{} - v.Add("hashes", strings.Join(hashes, ",")) - endpoint := "/api/v1/outputs" - - var o readable.UnspentOutputsSummary - if err := c.PostForm(endpoint, strings.NewReader(v.Encode()), &o); err != nil { - return nil, err - } - return &o, nil -} - -// CoinSupply makes a request to GET /api/v1/coinSupply -func (c *Client) CoinSupply() (*CoinSupply, error) { - var cs CoinSupply - if err := c.Get("/api/v1/coinSupply", &cs); err != nil { - return nil, err - } - return &cs, nil -} - -// BlockByHash makes a request to GET /api/v1/block?hash=xxx -func (c *Client) BlockByHash(hash string) (*readable.Block, error) { - v := url.Values{} - v.Add("hash", hash) - endpoint := "/api/v1/block?" + v.Encode() - - var b readable.Block - if err := c.Get(endpoint, &b); err != nil { - return nil, err - } - return &b, nil -} - -// BlockByHashVerbose makes a request to GET /api/v1/block?hash=xxx&verbose=1 -func (c *Client) BlockByHashVerbose(hash string) (*readable.BlockVerbose, error) { - v := url.Values{} - v.Add("hash", hash) - v.Add("verbose", "1") - endpoint := "/api/v1/block?" 
+ v.Encode() - - var b readable.BlockVerbose - if err := c.Get(endpoint, &b); err != nil { - return nil, err - } - return &b, nil -} - -// BlockBySeq makes a request to GET /api/v1/block?seq=xxx -func (c *Client) BlockBySeq(seq uint64) (*readable.Block, error) { - v := url.Values{} - v.Add("seq", fmt.Sprint(seq)) - endpoint := "/api/v1/block?" + v.Encode() - - var b readable.Block - if err := c.Get(endpoint, &b); err != nil { - return nil, err - } - return &b, nil -} - -// BlockBySeqVerbose makes a request to GET /api/v1/block?seq=xxx&verbose=1 -func (c *Client) BlockBySeqVerbose(seq uint64) (*readable.BlockVerbose, error) { - v := url.Values{} - v.Add("seq", fmt.Sprint(seq)) - v.Add("verbose", "1") - endpoint := "/api/v1/block?" + v.Encode() - - var b readable.BlockVerbose - if err := c.Get(endpoint, &b); err != nil { - return nil, err - } - return &b, nil -} - -// Blocks makes a request to POST /api/v1/blocks?seqs= -func (c *Client) Blocks(seqs []uint64) (*readable.Blocks, error) { - sSeqs := make([]string, len(seqs)) - for i, x := range seqs { - sSeqs[i] = fmt.Sprint(x) - } - - v := url.Values{} - v.Add("seqs", strings.Join(sSeqs, ",")) - endpoint := "/api/v1/blocks" - - var b readable.Blocks - if err := c.PostForm(endpoint, strings.NewReader(v.Encode()), &b); err != nil { - return nil, err - } - return &b, nil -} - -// BlocksVerbose makes a request to POST /api/v1/blocks?verbose=1&seqs= -func (c *Client) BlocksVerbose(seqs []uint64) (*readable.BlocksVerbose, error) { - sSeqs := make([]string, len(seqs)) - for i, x := range seqs { - sSeqs[i] = fmt.Sprint(x) - } - - v := url.Values{} - v.Add("seqs", strings.Join(sSeqs, ",")) - v.Add("verbose", "1") - endpoint := "/api/v1/blocks" - - var b readable.BlocksVerbose - if err := c.PostForm(endpoint, strings.NewReader(v.Encode()), &b); err != nil { - return nil, err - } - return &b, nil -} - -// BlocksInRange makes a request to GET /api/v1/blocks?start=&end= -func (c *Client) BlocksInRange(start, end uint64) 
(*readable.Blocks, error) { - v := url.Values{} - v.Add("start", fmt.Sprint(start)) - v.Add("end", fmt.Sprint(end)) - endpoint := "/api/v1/blocks?" + v.Encode() - - var b readable.Blocks - if err := c.Get(endpoint, &b); err != nil { - return nil, err - } - return &b, nil -} - -// BlocksInRangeVerbose makes a request to GET /api/v1/blocks?verbose=1&start=&end= -func (c *Client) BlocksInRangeVerbose(start, end uint64) (*readable.BlocksVerbose, error) { - v := url.Values{} - v.Add("start", fmt.Sprint(start)) - v.Add("end", fmt.Sprint(end)) - v.Add("verbose", "1") - endpoint := "/api/v1/blocks?" + v.Encode() - - var b readable.BlocksVerbose - if err := c.Get(endpoint, &b); err != nil { - return nil, err - } - return &b, nil -} - -// LastBlocks makes a request to GET /api/v1/last_blocks -func (c *Client) LastBlocks(n uint64) (*readable.Blocks, error) { - v := url.Values{} - v.Add("num", fmt.Sprint(n)) - endpoint := "/api/v1/last_blocks?" + v.Encode() - - var b readable.Blocks - if err := c.Get(endpoint, &b); err != nil { - return nil, err - } - return &b, nil -} - -// LastBlocksVerbose makes a request to GET /api/v1/last_blocks?verbose=1 -func (c *Client) LastBlocksVerbose(n uint64) (*readable.BlocksVerbose, error) { - v := url.Values{} - v.Add("num", fmt.Sprint(n)) - v.Add("verbose", "1") - endpoint := "/api/v1/last_blocks?" 
+ v.Encode() - - var b readable.BlocksVerbose - if err := c.Get(endpoint, &b); err != nil { - return nil, err - } - return &b, nil -} - -// BlockchainMetadata makes a request to GET /api/v1/blockchain/metadata -func (c *Client) BlockchainMetadata() (*readable.BlockchainMetadata, error) { - var b readable.BlockchainMetadata - if err := c.Get("/api/v1/blockchain/metadata", &b); err != nil { - return nil, err - } - return &b, nil -} - -// BlockchainProgress makes a request to GET /api/v1/blockchain/progress -func (c *Client) BlockchainProgress() (*readable.BlockchainProgress, error) { - var b readable.BlockchainProgress - if err := c.Get("/api/v1/blockchain/progress", &b); err != nil { - return nil, err - } - return &b, nil -} - -// Balance makes a request to POST /api/v1/balance?addrs=xxx -func (c *Client) Balance(addrs []string) (*BalanceResponse, error) { - v := url.Values{} - v.Add("addrs", strings.Join(addrs, ",")) - endpoint := "/api/v1/balance" - - var b BalanceResponse - if err := c.PostForm(endpoint, strings.NewReader(v.Encode()), &b); err != nil { - return nil, err - } - return &b, nil -} - -// UxOut makes a request to GET /api/v1/uxout?uxid=xxx -func (c *Client) UxOut(uxID string) (*readable.SpentOutput, error) { - v := url.Values{} - v.Add("uxid", uxID) - endpoint := "/api/v1/uxout?" + v.Encode() - - var b readable.SpentOutput - if err := c.Get(endpoint, &b); err != nil { - return nil, err - } - return &b, nil -} - -// AddressUxOuts makes a request to GET /api/v1/address_uxouts -func (c *Client) AddressUxOuts(addr string) ([]readable.SpentOutput, error) { - v := url.Values{} - v.Add("address", addr) - endpoint := "/api/v1/address_uxouts?" + v.Encode() - - var b []readable.SpentOutput - if err := c.Get(endpoint, &b); err != nil { - return nil, err - } - return b, nil -} - -// Wallet makes a request to GET /api/v1/wallet -func (c *Client) Wallet(id string) (*WalletResponse, error) { - v := url.Values{} - v.Add("id", id) - endpoint := "/api/v1/wallet?" 
+ v.Encode() - - var wr WalletResponse - if err := c.Get(endpoint, &wr); err != nil { - return nil, err - } - - return &wr, nil -} - -// Wallets makes a request to GET /api/v1/wallets -func (c *Client) Wallets() ([]WalletResponse, error) { - var wrs []WalletResponse - if err := c.Get("/api/v1/wallets", &wrs); err != nil { - return nil, err - } - - return wrs, nil -} - -// CreateWalletOptions are the options for creating a wallet -type CreateWalletOptions struct { - Type string - Seed string - SeedPassphrase string - Label string - Password string - ScanN int - XPub string - Encrypt bool -} - -// CreateWallet makes a request to POST /api/v1/wallet/create and creates a wallet. -// If scanN is <= 0, the scan number defaults to 1 -func (c *Client) CreateWallet(o CreateWalletOptions) (*WalletResponse, error) { - v := url.Values{} - v.Add("type", o.Type) - v.Add("seed", o.Seed) - v.Add("seed-passphrase", o.SeedPassphrase) - v.Add("label", o.Label) - v.Add("password", o.Password) - v.Add("encrypt", fmt.Sprint(o.Encrypt)) - v.Add("xpub", o.XPub) - - if o.ScanN > 0 { - v.Add("scan", fmt.Sprint(o.ScanN)) - } - - var w WalletResponse - if err := c.PostForm("/api/v1/wallet/create", strings.NewReader(v.Encode()), &w); err != nil { - return nil, err - } - return &w, nil -} - -// NewWalletAddress makes a request to POST /api/v1/wallet/newAddress -// if n is <= 0, defaults to 1 -func (c *Client) NewWalletAddress(id string, n int, password string) ([]string, error) { - v := url.Values{} - v.Add("id", id) - if n > 0 { - v.Add("num", fmt.Sprint(n)) - } - - v.Add("password", password) - - var obj struct { - Addresses []string `json:"addresses"` - } - if err := c.PostForm("/api/v1/wallet/newAddress", strings.NewReader(v.Encode()), &obj); err != nil { - return nil, err - } - return obj.Addresses, nil -} - -// WalletBalance makes a request to GET /api/v1/wallet/balance -func (c *Client) WalletBalance(id string) (*BalanceResponse, error) { - v := url.Values{} - v.Add("id", id) - endpoint 
:= "/api/v1/wallet/balance?" + v.Encode() - - var b BalanceResponse - if err := c.Get(endpoint, &b); err != nil { - return nil, err - } - return &b, nil -} - -// CreateTransactionRequest is sent to /api/v2/transaction -type CreateTransactionRequest struct { - IgnoreUnconfirmed bool `json:"ignore_unconfirmed"` - HoursSelection HoursSelection `json:"hours_selection"` - ChangeAddress *string `json:"change_address,omitempty"` - To []Receiver `json:"to"` - UxOuts []string `json:"unspents,omitempty"` - Addresses []string `json:"addresses,omitempty"` -} - -// HoursSelection defines options for hours distribution -type HoursSelection struct { - Type string `json:"type"` - Mode string `json:"mode"` - ShareFactor string `json:"share_factor,omitempty"` -} - -// Receiver specifies a spend destination -type Receiver struct { - Address string `json:"address"` - Coins string `json:"coins"` - Hours string `json:"hours,omitempty"` -} - -// WalletCreateTransactionRequest is sent to /api/v1/wallet/transaction -type WalletCreateTransactionRequest struct { - Unsigned bool `json:"unsigned"` - WalletID string `json:"wallet_id"` - Password string `json:"password"` - CreateTransactionRequest -} - -// WalletCreateTransaction makes a request to POST /api/v1/wallet/transaction -func (c *Client) WalletCreateTransaction(req WalletCreateTransactionRequest) (*CreateTransactionResponse, error) { - var r CreateTransactionResponse - endpoint := "/api/v1/wallet/transaction" - if err := c.PostJSON(endpoint, req, &r); err != nil { - return nil, err - } - - return &r, nil -} - -// WalletSignTransaction makes a request to POST /api/v2/wallet/transaction/sign -func (c *Client) WalletSignTransaction(req WalletSignTransactionRequest) (*CreateTransactionResponse, error) { - var r CreateTransactionResponse - endpoint := "/api/v2/wallet/transaction/sign" - ok, err := c.PostJSONV2(endpoint, req, &r) - if ok { - return &r, err - } - return nil, err -} - -// CreateTransaction makes a request to POST 
/api/v2/transaction -func (c *Client) CreateTransaction(req CreateTransactionRequest) (*CreateTransactionResponse, error) { - var r CreateTransactionResponse - endpoint := "/api/v2/transaction" - ok, err := c.PostJSONV2(endpoint, req, &r) - if ok { - return &r, err - } - return nil, err -} - -// WalletUnconfirmedTransactions makes a request to GET /api/v1/wallet/transactions -func (c *Client) WalletUnconfirmedTransactions(id string) (*UnconfirmedTxnsResponse, error) { - v := url.Values{} - v.Add("id", id) - endpoint := "/api/v1/wallet/transactions?" + v.Encode() - - var utx *UnconfirmedTxnsResponse - if err := c.Get(endpoint, &utx); err != nil { - return nil, err - } - return utx, nil -} - -// WalletUnconfirmedTransactionsVerbose makes a request to GET /api/v1/wallet/transactions&verbose=1 -func (c *Client) WalletUnconfirmedTransactionsVerbose(id string) (*UnconfirmedTxnsVerboseResponse, error) { - v := url.Values{} - v.Add("id", id) - v.Add("verbose", "1") - endpoint := "/api/v1/wallet/transactions?" + v.Encode() - - var utx *UnconfirmedTxnsVerboseResponse - if err := c.Get(endpoint, &utx); err != nil { - return nil, err - } - return utx, nil -} - -// UpdateWallet makes a request to POST /api/v1/wallet/update -func (c *Client) UpdateWallet(id, label string) error { - v := url.Values{} - v.Add("id", id) - v.Add("label", label) - - return c.PostForm("/api/v1/wallet/update", strings.NewReader(v.Encode()), nil) -} - -// WalletFolderName makes a request to GET /api/v1/wallets/folderName -func (c *Client) WalletFolderName() (*WalletFolder, error) { - var w WalletFolder - if err := c.Get("/api/v1/wallets/folderName", &w); err != nil { - return nil, err - } - return &w, nil -} - -// NewSeed makes a request to GET /api/v1/wallet/newSeed -// entropy must be 128 or 256 -func (c *Client) NewSeed(entropy int) (string, error) { - v := url.Values{} - v.Add("entropy", fmt.Sprint(entropy)) - endpoint := "/api/v1/wallet/newSeed?" 
+ v.Encode() - - var r struct { - Seed string `json:"seed"` - } - if err := c.Get(endpoint, &r); err != nil { - return "", err - } - return r.Seed, nil -} - -// VerifySeed verifies whether the given seed is a valid bip39 mnemonic or not -func (c *Client) VerifySeed(seed string) (bool, error) { - ok, err := c.PostJSONV2("/api/v2/wallet/seed/verify", VerifySeedRequest{ - Seed: seed, - }, &struct{}{}) - if err != nil { - return false, err - } - return ok, nil -} - -// WalletSeed makes a request to POST /api/v1/wallet/seed -func (c *Client) WalletSeed(id string, password string) (*WalletSeedResponse, error) { - v := url.Values{} - v.Add("id", id) - v.Add("password", password) - - var r WalletSeedResponse - if err := c.PostForm("/api/v1/wallet/seed", strings.NewReader(v.Encode()), &r); err != nil { - return nil, err - } - - return &r, nil -} - -// NetworkConnection makes a request to GET /api/v1/network/connection -func (c *Client) NetworkConnection(addr string) (*readable.Connection, error) { - v := url.Values{} - v.Add("addr", addr) - endpoint := "/api/v1/network/connection?" + v.Encode() - - var dc readable.Connection - if err := c.Get(endpoint, &dc); err != nil { - return nil, err - } - return &dc, nil -} - -// NetworkConnectionsFilter filters for network connections -type NetworkConnectionsFilter struct { - States []daemon.ConnectionState // "pending", "connected" and "introduced" - Direction string // "incoming" or "outgoing" -} - -// NetworkConnections makes a request to GET /api/v1/network/connections. -// Connections can be filtered by state and direction. By default, "connected" and "introduced" connections -// of both directions are returned. 
-func (c *Client) NetworkConnections(filters *NetworkConnectionsFilter) (*Connections, error) { - v := url.Values{} - if filters != nil { - if len(filters.States) != 0 { - states := make([]string, len(filters.States)) - for i, s := range filters.States { - states[i] = string(s) - } - v.Add("states", strings.Join(states, ",")) - } - if filters.Direction != "" { - v.Add("direction", filters.Direction) - } - } - endpoint := "/api/v1/network/connections?" + v.Encode() - - var dc Connections - if err := c.Get(endpoint, &dc); err != nil { - return nil, err - } - return &dc, nil -} - -// NetworkDefaultPeers makes a request to GET /api/v1/network/defaultConnections -func (c *Client) NetworkDefaultPeers() ([]string, error) { - var dc []string - if err := c.Get("/api/v1/network/defaultConnections", &dc); err != nil { - return nil, err - } - return dc, nil -} - -// NetworkTrustedPeers makes a request to GET /api/v1/network/connections/trust -func (c *Client) NetworkTrustedPeers() ([]string, error) { - var dc []string - if err := c.Get("/api/v1/network/connections/trust", &dc); err != nil { - return nil, err - } - return dc, nil -} - -// NetworkExchangedPeers makes a request to GET /api/v1/network/connections/exchange -func (c *Client) NetworkExchangedPeers() ([]string, error) { - var dc []string - if err := c.Get("/api/v1/network/connections/exchange", &dc); err != nil { - return nil, err - } - return dc, nil -} - -// PendingTransactions makes a request to GET /api/v1/pendingTxs -func (c *Client) PendingTransactions() ([]readable.UnconfirmedTransactions, error) { - var v []readable.UnconfirmedTransactions - if err := c.Get("/api/v1/pendingTxs", &v); err != nil { - return nil, err - } - return v, nil -} - -// PendingTransactionsVerbose makes a request to GET /api/v1/pendingTxs?verbose=1 -func (c *Client) PendingTransactionsVerbose() ([]readable.UnconfirmedTransactionVerbose, error) { - var v []readable.UnconfirmedTransactionVerbose - if err := 
c.Get("/api/v1/pendingTxs?verbose=1", &v); err != nil { - return nil, err - } - return v, nil -} - -// Transaction makes a request to GET /api/v1/transaction -func (c *Client) Transaction(txid string) (*readable.TransactionWithStatus, error) { - v := url.Values{} - v.Add("txid", txid) - endpoint := "/api/v1/transaction?" + v.Encode() - - var r readable.TransactionWithStatus - if err := c.Get(endpoint, &r); err != nil { - return nil, err - } - return &r, nil -} - -// TransactionVerbose makes a request to GET /api/v1/transaction?verbose=1 -func (c *Client) TransactionVerbose(txid string) (*readable.TransactionWithStatusVerbose, error) { - v := url.Values{} - v.Add("txid", txid) - v.Add("verbose", "1") - endpoint := "/api/v1/transaction?" + v.Encode() - - var r readable.TransactionWithStatusVerbose - if err := c.Get(endpoint, &r); err != nil { - return nil, err - } - return &r, nil -} - -// TransactionEncoded makes a request to GET /api/v1/transaction?encoded=1 -func (c *Client) TransactionEncoded(txid string) (*TransactionEncodedResponse, error) { - v := url.Values{} - v.Add("txid", txid) - v.Add("encoded", "1") - endpoint := "/api/v1/transaction?" 
+ v.Encode() - - var r TransactionEncodedResponse - if err := c.Get(endpoint, &r); err != nil { - return nil, err - } - return &r, nil -} - -// Transactions makes a request to POST /api/v1/transactions -func (c *Client) Transactions(addrs []string) ([]readable.TransactionWithStatus, error) { - v := url.Values{} - v.Add("addrs", strings.Join(addrs, ",")) - endpoint := "/api/v1/transactions" - - var r []readable.TransactionWithStatus - if err := c.PostForm(endpoint, strings.NewReader(v.Encode()), &r); err != nil { - return nil, err - } - return r, nil -} - -// ConfirmedTransactions makes a request to POST /api/v1/transactions?confirmed=true -func (c *Client) ConfirmedTransactions(addrs []string) ([]readable.TransactionWithStatus, error) { - v := url.Values{} - v.Add("addrs", strings.Join(addrs, ",")) - v.Add("confirmed", "true") - endpoint := "/api/v1/transactions" - - var r []readable.TransactionWithStatus - if err := c.PostForm(endpoint, strings.NewReader(v.Encode()), &r); err != nil { - return nil, err - } - return r, nil -} - -// UnconfirmedTransactions makes a request to POST /api/v1/transactions?confirmed=false -func (c *Client) UnconfirmedTransactions(addrs []string) ([]readable.TransactionWithStatus, error) { - v := url.Values{} - v.Add("addrs", strings.Join(addrs, ",")) - v.Add("confirmed", "false") - endpoint := "/api/v1/transactions" - - var r []readable.TransactionWithStatus - if err := c.PostForm(endpoint, strings.NewReader(v.Encode()), &r); err != nil { - return nil, err - } - return r, nil -} - -// TransactionsVerbose makes a request to POST /api/v1/transactions?verbose=1 -func (c *Client) TransactionsVerbose(addrs []string) ([]readable.TransactionWithStatusVerbose, error) { - v := url.Values{} - v.Add("addrs", strings.Join(addrs, ",")) - v.Add("verbose", "1") - endpoint := "/api/v1/transactions" - - var r []readable.TransactionWithStatusVerbose - if err := c.PostForm(endpoint, strings.NewReader(v.Encode()), &r); err != nil { - return nil, err - } - 
return r, nil -} - -// ConfirmedTransactionsVerbose makes a request to POST /api/v1/transactions?confirmed=true&verbose=1 -func (c *Client) ConfirmedTransactionsVerbose(addrs []string) ([]readable.TransactionWithStatusVerbose, error) { - v := url.Values{} - v.Add("addrs", strings.Join(addrs, ",")) - v.Add("confirmed", "true") - v.Add("verbose", "1") - endpoint := "/api/v1/transactions" - - var r []readable.TransactionWithStatusVerbose - if err := c.PostForm(endpoint, strings.NewReader(v.Encode()), &r); err != nil { - return nil, err - } - return r, nil -} - -// UnconfirmedTransactionsVerbose makes a request to POST /api/v1/transactions?confirmed=false&verbose=1 -func (c *Client) UnconfirmedTransactionsVerbose(addrs []string) ([]readable.TransactionWithStatusVerbose, error) { - v := url.Values{} - v.Add("addrs", strings.Join(addrs, ",")) - v.Add("confirmed", "false") - v.Add("verbose", "1") - endpoint := "/api/v1/transactions" - - var r []readable.TransactionWithStatusVerbose - if err := c.PostForm(endpoint, strings.NewReader(v.Encode()), &r); err != nil { - return nil, err - } - return r, nil -} - -// InjectTransaction makes a request to POST /api/v1/injectTransaction. -func (c *Client) InjectTransaction(txn *coin.Transaction) (string, error) { - rawTxn, err := txn.SerializeHex() - if err != nil { - return "", err - } - return c.InjectEncodedTransaction(rawTxn) -} - -// InjectTransactionNoBroadcast makes a request to POST /api/v1/injectTransaction -// but does not broadcast the transaction. -func (c *Client) InjectTransactionNoBroadcast(txn *coin.Transaction) (string, error) { - rawTxn, err := txn.SerializeHex() - if err != nil { - return "", err - } - return c.InjectEncodedTransactionNoBroadcast(rawTxn) -} - -// InjectEncodedTransaction makes a request to POST /api/v1/injectTransaction. 
-// rawTxn is a hex-encoded, serialized transaction -func (c *Client) InjectEncodedTransaction(rawTxn string) (string, error) { - return c.injectEncodedTransaction(rawTxn, false) -} - -// InjectEncodedTransactionNoBroadcast makes a request to POST /api/v1/injectTransaction -// but does not broadcast the transaction. -// rawTxn is a hex-encoded, serialized transaction -func (c *Client) InjectEncodedTransactionNoBroadcast(rawTxn string) (string, error) { - return c.injectEncodedTransaction(rawTxn, true) -} - -func (c *Client) injectEncodedTransaction(rawTxn string, noBroadcast bool) (string, error) { - v := InjectTransactionRequest{ - RawTxn: rawTxn, - NoBroadcast: noBroadcast, - } - - var txid string - if err := c.PostJSON("/api/v1/injectTransaction", v, &txid); err != nil { - return "", err - } - return txid, nil -} - -// ResendUnconfirmedTransactions makes a request to POST /api/v1/resendUnconfirmedTxns -func (c *Client) ResendUnconfirmedTransactions() (*ResendResult, error) { - endpoint := "/api/v1/resendUnconfirmedTxns" - var r ResendResult - if err := c.PostForm(endpoint, strings.NewReader(""), &r); err != nil { - return nil, err - } - return &r, nil -} - -// RawTransaction makes a request to GET /api/v1/rawtx -func (c *Client) RawTransaction(txid string) (string, error) { - v := url.Values{} - v.Add("txid", txid) - endpoint := "/api/v1/rawtx?" + v.Encode() - - var rawTxn string - if err := c.Get(endpoint, &rawTxn); err != nil { - return "", err - } - return rawTxn, nil -} - -// VerifyTransaction makes a request to POST /api/v2/transaction/verify. 
-func (c *Client) VerifyTransaction(req VerifyTransactionRequest) (*VerifyTransactionResponse, error) { - var rsp VerifyTransactionResponse - ok, err := c.PostJSONV2("/api/v2/transaction/verify", req, &rsp) - if ok { - return &rsp, err - } - - return nil, err -} - -// VerifyAddress makes a request to POST /api/v2/address/verify -// The API may respond with an error but include data useful for processing, -// so both return values may be non-nil. -func (c *Client) VerifyAddress(addr string) (*VerifyAddressResponse, error) { - req := VerifyAddressRequest{ - Address: addr, - } - - var rsp VerifyAddressResponse - ok, err := c.PostJSONV2("/api/v2/address/verify", req, &rsp) - if ok { - return &rsp, err - } - - return nil, err -} - -// RichlistParams are arguments to the /richlist endpoint -type RichlistParams struct { - N int - IncludeDistribution bool -} - -// Richlist makes a request to GET /api/v1/richlist -func (c *Client) Richlist(params *RichlistParams) (*Richlist, error) { - endpoint := "/api/v1/richlist" - - if params != nil { - v := url.Values{} - v.Add("n", fmt.Sprint(params.N)) - v.Add("include-distribution", fmt.Sprint(params.IncludeDistribution)) - endpoint = "/api/v1/richlist?" 
+ v.Encode() - } - - var r Richlist - if err := c.Get(endpoint, &r); err != nil { - return nil, err - } - return &r, nil -} - -// AddressCount makes a request to GET /api/v1/addresscount -func (c *Client) AddressCount() (uint64, error) { - var r struct { - Count uint64 `json:"count"` - } - if err := c.Get("/api/v1/addresscount", &r); err != nil { - return 0, err - } - return r.Count, nil - -} - -// UnloadWallet makes a request to POST /api/v1/wallet/unload -func (c *Client) UnloadWallet(id string) error { - v := url.Values{} - v.Add("id", id) - return c.PostForm("/api/v1/wallet/unload", strings.NewReader(v.Encode()), nil) -} - -// Health makes a request to GET /api/v1/health -func (c *Client) Health() (*HealthResponse, error) { - var r HealthResponse - if err := c.Get("/api/v1/health", &r); err != nil { - return nil, err - } - - return &r, nil -} - -// EncryptWallet makes a request to POST /api/v1/wallet/encrypt to encrypt a specific wallet with the given password -func (c *Client) EncryptWallet(id, password string) (*WalletResponse, error) { - v := url.Values{} - v.Add("id", id) - v.Add("password", password) - var wlt WalletResponse - if err := c.PostForm("/api/v1/wallet/encrypt", strings.NewReader(v.Encode()), &wlt); err != nil { - return nil, err - } - - return &wlt, nil -} - -// DecryptWallet makes a request to POST /api/v1/wallet/decrypt to decrypt a wallet -func (c *Client) DecryptWallet(id, password string) (*WalletResponse, error) { - v := url.Values{} - v.Add("id", id) - v.Add("password", password) - var wlt WalletResponse - if err := c.PostForm("/api/v1/wallet/decrypt", strings.NewReader(v.Encode()), &wlt); err != nil { - return nil, err - } - - return &wlt, nil -} - -// RecoverWallet makes a request to POST /api/v2/wallet/recover to recover an encrypted wallet by seed. -// The password argument is optional, if provided, the recovered wallet will be encrypted with this password, -// otherwise the recovered wallet will be unencrypted. 
-func (c *Client) RecoverWallet(req WalletRecoverRequest) (*WalletResponse, error) { - var rsp WalletResponse - ok, err := c.PostJSONV2("/api/v2/wallet/recover", req, &rsp) - if ok { - return &rsp, err - } - - return nil, err -} - -// Disconnect disconnect a connections by ID -func (c *Client) Disconnect(id uint64) error { - v := url.Values{} - v.Add("id", fmt.Sprint(id)) - - var obj struct{} - return c.PostForm("/api/v1/network/connection/disconnect", strings.NewReader(v.Encode()), &obj) -} - -// GetAllStorageValues makes a GET request to /api/v2/data to get all the values from the storage of -// `storageType` type -func (c *Client) GetAllStorageValues(storageType kvstorage.Type) (map[string]string, error) { - var values map[string]string - ok, err := c.GetV2(fmt.Sprintf("/api/v2/data?type=%s", storageType), &values) - if !ok { - return nil, err - } - - return values, err -} - -// GetStorageValue makes a GET request to /api/v2/data to get the value associated with `key` from storage -// of `storageType` type -func (c *Client) GetStorageValue(storageType kvstorage.Type, key string) (string, error) { - var value string - ok, err := c.GetV2(fmt.Sprintf("/api/v2/data?type=%s&key=%s", storageType, key), &value) - if !ok { - return "", err - } - - return value, err -} - -// AddStorageValue make a POST request to /api/v2/data to add a value with the key to the storage -// of `storageType` type -func (c *Client) AddStorageValue(storageType kvstorage.Type, key, val string) error { - _, err := c.PostJSONV2("/api/v2/data", StorageRequest{ - StorageType: storageType, - Key: key, - Val: val, - }, nil) - - return err -} - -// RemoveStorageValue makes a DELETE request to /api/v2/data to remove a value associated with the `key` -// from the storage of `storageType` type -func (c *Client) RemoveStorageValue(storageType kvstorage.Type, key string) error { - _, err := c.DeleteV2(fmt.Sprintf("/api/v2/data?type=%s&key=%s", storageType, key), nil) - - return err -} diff --git 
a/vendor/github.com/SkycoinProject/skycoin/src/api/csrf.go b/vendor/github.com/SkycoinProject/skycoin/src/api/csrf.go deleted file mode 100644 index 48024a0..0000000 --- a/vendor/github.com/SkycoinProject/skycoin/src/api/csrf.go +++ /dev/null @@ -1,165 +0,0 @@ -package api - -import ( - "net/http" - "time" - - "crypto/hmac" - "crypto/sha256" - "encoding/base64" - "encoding/json" - "errors" - "fmt" - "strings" - - "github.com/SkycoinProject/skycoin/src/cipher" - wh "github.com/SkycoinProject/skycoin/src/util/http" -) - -const ( - // CSRFHeaderName is the name of the CSRF header - CSRFHeaderName = "X-CSRF-Token" - - // CSRFMaxAge is the lifetime of a CSRF token in seconds - CSRFMaxAge = time.Second * 30 - - csrfSecretLength = 64 - - csrfNonceLength = 64 -) - -var ( - // ErrCSRFInvalid is returned when the the CSRF token is in invalid format - ErrCSRFInvalid = errors.New("invalid CSRF token") - // ErrCSRFInvalidSignature is returned when the signature of the csrf token is invalid - ErrCSRFInvalidSignature = errors.New("invalid CSRF token signature") - // ErrCSRFExpired is returned when the csrf token has expired - ErrCSRFExpired = errors.New("csrf token expired") -) - -var csrfSecretKey []byte - -func init() { - csrfSecretKey = cipher.RandByte(csrfSecretLength) -} - -// CSRFToken csrf token -type CSRFToken struct { - Nonce []byte - ExpiresAt time.Time -} - -// newCSRFToken generates a new CSRF Token -func newCSRFToken() (string, error) { - return newCSRFTokenWithTime(time.Now().Add(CSRFMaxAge)) -} - -func newCSRFTokenWithTime(expiresAt time.Time) (string, error) { - token := &CSRFToken{ - Nonce: cipher.RandByte(csrfNonceLength), - ExpiresAt: expiresAt, - } - - tokenJSON, err := json.Marshal(token) - if err != nil { - return "", err - } - - h := hmac.New(sha256.New, csrfSecretKey) - _, err = h.Write([]byte(tokenJSON)) - if err != nil { - return "", err - } - - sig := base64.RawURLEncoding.EncodeToString(h.Sum(nil)) - - signingString := 
base64.RawURLEncoding.EncodeToString(tokenJSON) - - return strings.Join([]string{signingString, sig}, "."), nil -} - -// verifyCSRFToken checks validity of the given token -func verifyCSRFToken(headerToken string) error { - tokenParts := strings.Split(headerToken, ".") - if len(tokenParts) != 2 { - return ErrCSRFInvalid - } - - signingString, err := base64.RawURLEncoding.DecodeString(tokenParts[0]) - if err != nil { - return err - } - - h := hmac.New(sha256.New, csrfSecretKey) - _, err = h.Write([]byte(signingString)) - if err != nil { - return err - } - - sig := base64.RawURLEncoding.EncodeToString(h.Sum(nil)) - - if sig != tokenParts[1] { - return ErrCSRFInvalidSignature - } - - var csrfToken CSRFToken - err = json.Unmarshal(signingString, &csrfToken) - if err != nil { - return err - } - - if time.Now().After(csrfToken.ExpiresAt) { - return ErrCSRFExpired - } - - return nil -} - -// Creates a new CSRF token. Previous CSRF tokens are invalidated by this call. -// URI: /api/v1/csrf -// Method: GET -// Response: -// csrf_token: CSRF token to use in POST requests -func getCSRFToken(disabled bool) http.HandlerFunc { - return func(w http.ResponseWriter, r *http.Request) { - if r.Method != http.MethodGet { - wh.Error405(w) - return - } - - if disabled { - logger.Warning("CSRF check disabled") - wh.Error404(w, "") - return - } - - // generate a new token - csrfToken, err := newCSRFToken() - if err != nil { - logger.Error(err) - wh.Error500(w, fmt.Sprintf("Failed to create a csrf token: %v", err)) - return - } - - wh.SendJSONOr500(logger, w, &map[string]string{"csrf_token": csrfToken}) - } -} - -// CSRFCheck verifies X-CSRF-Token header value -func CSRFCheck(apiVersion string, disabled bool, handler http.Handler) http.Handler { - return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - if !disabled { - switch r.Method { - case http.MethodPost, http.MethodPut, http.MethodDelete: - token := r.Header.Get(CSRFHeaderName) - if err := verifyCSRFToken(token); err 
!= nil { - logger.Errorf("CSRF token invalid: %v", err) - writeError(w, apiVersion, http.StatusForbidden, err.Error()) - return - } - } - } - - handler.ServeHTTP(w, r) - }) -} diff --git a/vendor/github.com/SkycoinProject/skycoin/src/api/explorer.go b/vendor/github.com/SkycoinProject/skycoin/src/api/explorer.go deleted file mode 100644 index 115e766..0000000 --- a/vendor/github.com/SkycoinProject/skycoin/src/api/explorer.go +++ /dev/null @@ -1,242 +0,0 @@ -package api - -import ( - "fmt" - "net/http" - "strconv" - - "github.com/SkycoinProject/skycoin/src/cipher" - "github.com/SkycoinProject/skycoin/src/readable" - "github.com/SkycoinProject/skycoin/src/util/droplet" - wh "github.com/SkycoinProject/skycoin/src/util/http" - "github.com/SkycoinProject/skycoin/src/util/mathutil" -) - -// CoinSupply records the coin supply info -type CoinSupply struct { - // Coins distributed beyond the project: - CurrentSupply string `json:"current_supply"` - // TotalSupply is CurrentSupply plus coins held by the distribution addresses that are spendable - TotalSupply string `json:"total_supply"` - // MaxSupply is the maximum number of coins to be distributed ever - MaxSupply string `json:"max_supply"` - // CurrentCoinHourSupply is coins hours in non distribution addresses - CurrentCoinHourSupply string `json:"current_coinhour_supply"` - // TotalCoinHourSupply is coin hours in all addresses including unlocked distribution addresses - TotalCoinHourSupply string `json:"total_coinhour_supply"` - // Distribution addresses which count towards total supply - UnlockedAddresses []string `json:"unlocked_distribution_addresses"` - // Distribution addresses which are locked and do not count towards total supply - LockedAddresses []string `json:"locked_distribution_addresses"` -} - -func newAddrSet(addrs []cipher.Address) map[cipher.Address]struct{} { - s := make(map[cipher.Address]struct{}, len(addrs)) - for _, a := range addrs { - s[a] = struct{}{} - } - return s -} - -// coinSupplyHandler 
returns coin distribution supply stats -// Method: GET -// URI: /api/v1/coinSupply -func coinSupplyHandler(gateway Gatewayer) http.HandlerFunc { - return func(w http.ResponseWriter, r *http.Request) { - if r.Method != http.MethodGet { - wh.Error405(w) - return - } - - allUnspents, err := gateway.GetUnspentOutputsSummary(nil) - if err != nil { - err = fmt.Errorf("gateway.GetUnspentOutputsSummary failed: %v", err) - wh.Error500(w, err.Error()) - return - } - - dist := gateway.VisorConfig().Distribution - - unlockedAddrs := dist.UnlockedAddressesDecoded() - // Search map of unlocked addresses, used to filter unspents - unlockedAddrSet := newAddrSet(unlockedAddrs) - - var unlockedSupply uint64 - // check confirmed unspents only - for _, u := range allUnspents.Confirmed { - // check if address is an unlocked distribution address - if _, ok := unlockedAddrSet[u.Body.Address]; ok { - var err error - unlockedSupply, err = mathutil.AddUint64(unlockedSupply, u.Body.Coins) - if err != nil { - err = fmt.Errorf("uint64 overflow while adding up unlocked supply coins: %v", err) - wh.Error500(w, err.Error()) - return - } - } - } - - // "total supply" is the number of coins unlocked. - // Each distribution address was allocated distribution.AddressInitialBalance coins. 
- totalSupply := uint64(len(unlockedAddrs)) * dist.AddressInitialBalance() - totalSupply *= droplet.Multiplier - - // "current supply" is the number of coins distributed from the unlocked pool - currentSupply := totalSupply - unlockedSupply - - currentSupplyStr, err := droplet.ToString(currentSupply) - if err != nil { - err = fmt.Errorf("Failed to convert coins to string: %v", err) - wh.Error500(w, err.Error()) - return - } - - totalSupplyStr, err := droplet.ToString(totalSupply) - if err != nil { - err = fmt.Errorf("Failed to convert coins to string: %v", err) - wh.Error500(w, err.Error()) - return - } - - maxSupplyStr, err := droplet.ToString(dist.MaxCoinSupply * droplet.Multiplier) - if err != nil { - err = fmt.Errorf("Failed to convert coins to string: %v", err) - wh.Error500(w, err.Error()) - return - } - - // locked distribution addresses - lockedAddrs := dist.LockedAddressesDecoded() - lockedAddrSet := newAddrSet(lockedAddrs) - - // get total coins hours which excludes locked distribution addresses - var totalCoinHours uint64 - for _, out := range allUnspents.Confirmed { - if _, ok := lockedAddrSet[out.Body.Address]; !ok { - var err error - totalCoinHours, err = mathutil.AddUint64(totalCoinHours, out.CalculatedHours) - if err != nil { - err = fmt.Errorf("uint64 overflow while adding up total coin hours: %v", err) - wh.Error500(w, err.Error()) - return - } - } - } - - // get current coin hours which excludes all distribution addresses - var currentCoinHours uint64 - for _, out := range allUnspents.Confirmed { - // check if address not in locked distribution addresses - if _, ok := lockedAddrSet[out.Body.Address]; !ok { - // check if address not in unlocked distribution addresses - if _, ok := unlockedAddrSet[out.Body.Address]; !ok { - currentCoinHours += out.CalculatedHours - } - } - } - - if err != nil { - err = fmt.Errorf("Failed to get total coinhours: %v", err) - wh.Error500(w, err.Error()) - return - } - - cs := CoinSupply{ - CurrentSupply: 
currentSupplyStr, - TotalSupply: totalSupplyStr, - MaxSupply: maxSupplyStr, - CurrentCoinHourSupply: strconv.FormatUint(currentCoinHours, 10), - TotalCoinHourSupply: strconv.FormatUint(totalCoinHours, 10), - UnlockedAddresses: dist.UnlockedAddresses(), - LockedAddresses: dist.LockedAddresses(), - } - - wh.SendJSONOr500(logger, w, cs) - } -} - -// Richlist contains top address balances -type Richlist struct { - Richlist []readable.RichlistBalance `json:"richlist"` -} - -// richlistHandler returns the top skycoin holders -// Method: GET -// URI: /richlist?n=${number}&include-distribution=${bool} -// Args: -// n [int, number of results to include] -// include-distribution [bool, include the distribution addresses in the richlist] -func richlistHandler(gateway Gatewayer) http.HandlerFunc { - return func(w http.ResponseWriter, r *http.Request) { - if r.Method != http.MethodGet { - wh.Error405(w) - return - } - - var topn int - topnStr := r.FormValue("n") - if topnStr == "" { - topn = 20 - } else { - var err error - topn, err = strconv.Atoi(topnStr) - if err != nil { - wh.Error400(w, "invalid n") - return - } - } - - var includeDistribution bool - includeDistributionStr := r.FormValue("include-distribution") - if includeDistributionStr == "" { - includeDistribution = false - } else { - var err error - includeDistribution, err = strconv.ParseBool(includeDistributionStr) - if err != nil { - wh.Error400(w, "invalid include-distribution") - return - } - } - - richlist, err := gateway.GetRichlist(includeDistribution) - if err != nil { - wh.Error500(w, err.Error()) - return - } - - if topn > 0 && topn < len(richlist) { - richlist = richlist[:topn] - } - - readableRichlist, err := readable.NewRichlistBalances(richlist) - if err != nil { - wh.Error500(w, err.Error()) - return - } - - wh.SendJSONOr500(logger, w, Richlist{ - Richlist: readableRichlist, - }) - } -} - -// addressCountHandler returns the total number of unique address that have coins -// Method: GET -// URI: 
/addresscount -func addressCountHandler(gateway Gatewayer) http.HandlerFunc { - return func(w http.ResponseWriter, r *http.Request) { - if r.Method != http.MethodGet { - wh.Error405(w) - return - } - - addrCount, err := gateway.AddressCount() - if err != nil { - wh.Error500(w, err.Error()) - return - } - - wh.SendJSONOr500(logger, w, &map[string]uint64{"count": addrCount}) - } -} diff --git a/vendor/github.com/SkycoinProject/skycoin/src/api/gateway.go b/vendor/github.com/SkycoinProject/skycoin/src/api/gateway.go deleted file mode 100644 index 83c2a7e..0000000 --- a/vendor/github.com/SkycoinProject/skycoin/src/api/gateway.go +++ /dev/null @@ -1,120 +0,0 @@ -package api - -import ( - "time" - - "github.com/SkycoinProject/skycoin/src/cipher" - "github.com/SkycoinProject/skycoin/src/coin" - "github.com/SkycoinProject/skycoin/src/daemon" - "github.com/SkycoinProject/skycoin/src/kvstorage" - "github.com/SkycoinProject/skycoin/src/transaction" - "github.com/SkycoinProject/skycoin/src/visor" - "github.com/SkycoinProject/skycoin/src/visor/historydb" - "github.com/SkycoinProject/skycoin/src/wallet" -) - -//go:generate mockery -name Gatewayer -case underscore -inpkg -testonly - -// Gateway bundles daemon.Daemon, Visor, wallet.Service and kvstorage.Manager into a single object -type Gateway struct { - *daemon.Daemon - *visor.Visor - *wallet.Service - *kvstorage.Manager -} - -// NewGateway creates a Gateway -func NewGateway(d *daemon.Daemon, v *visor.Visor, w *wallet.Service, m *kvstorage.Manager) *Gateway { - return &Gateway{ - Daemon: d, - Visor: v, - Service: w, - Manager: m, - } -} - -// Gatewayer interface for Gateway methods -type Gatewayer interface { - Daemoner - Visorer - Walleter - Storer -} - -// Daemoner interface for daemon.Daemon methods used by the API -type Daemoner interface { - DaemonConfig() daemon.DaemonConfig - GetConnection(addr string) (*daemon.Connection, error) - GetConnections(f func(c daemon.Connection) bool) ([]daemon.Connection, error) - 
DisconnectByGnetID(gnetID uint64) error - GetDefaultConnections() []string - GetTrustConnections() []string - GetExchgConnection() []string - GetBlockchainProgress(headSeq uint64) *daemon.BlockchainProgress - InjectBroadcastTransaction(txn coin.Transaction) error - InjectTransaction(txn coin.Transaction) error -} - -// Visorer interface for visor.Visor methods used by the API -type Visorer interface { - VisorConfig() visor.Config - StartedAt() time.Time - HeadBkSeq() (uint64, bool, error) - GetBlockchainMetadata() (*visor.BlockchainMetadata, error) - ResendUnconfirmedTxns() ([]cipher.SHA256, error) - GetSignedBlockByHash(hash cipher.SHA256) (*coin.SignedBlock, error) - GetSignedBlockByHashVerbose(hash cipher.SHA256) (*coin.SignedBlock, [][]visor.TransactionInput, error) - GetSignedBlockBySeq(seq uint64) (*coin.SignedBlock, error) - GetSignedBlockBySeqVerbose(seq uint64) (*coin.SignedBlock, [][]visor.TransactionInput, error) - GetBlocks(seqs []uint64) ([]coin.SignedBlock, error) - GetBlocksVerbose(seqs []uint64) ([]coin.SignedBlock, [][][]visor.TransactionInput, error) - GetBlocksInRange(start, end uint64) ([]coin.SignedBlock, error) - GetBlocksInRangeVerbose(start, end uint64) ([]coin.SignedBlock, [][][]visor.TransactionInput, error) - GetLastBlocks(num uint64) ([]coin.SignedBlock, error) - GetLastBlocksVerbose(num uint64) ([]coin.SignedBlock, [][][]visor.TransactionInput, error) - GetUnspentOutputsSummary(filters []visor.OutputsFilter) (*visor.UnspentOutputsSummary, error) - GetBalanceOfAddresses(addrs []cipher.Address) ([]wallet.BalancePair, error) - VerifyTxnVerbose(txn *coin.Transaction, signed visor.TxnSignedFlag) ([]visor.TransactionInput, bool, error) - AddressCount() (uint64, error) - GetUxOutByID(id cipher.SHA256) (*historydb.UxOut, error) - GetSpentOutputsForAddresses(addr []cipher.Address) ([][]historydb.UxOut, error) - GetVerboseTransactionsForAddress(a cipher.Address) ([]visor.Transaction, [][]visor.TransactionInput, error) - 
GetRichlist(includeDistribution bool) (visor.Richlist, error) - GetAllUnconfirmedTransactions() ([]visor.UnconfirmedTransaction, error) - GetAllUnconfirmedTransactionsVerbose() ([]visor.UnconfirmedTransaction, [][]visor.TransactionInput, error) - GetTransaction(txid cipher.SHA256) (*visor.Transaction, error) - GetTransactionWithInputs(txid cipher.SHA256) (*visor.Transaction, []visor.TransactionInput, error) - GetTransactions(flts []visor.TxFilter) ([]visor.Transaction, error) - GetTransactionsWithInputs(flts []visor.TxFilter) ([]visor.Transaction, [][]visor.TransactionInput, error) - AddressesActivity(addrs []cipher.Address) ([]bool, error) - GetWalletUnconfirmedTransactions(wltID string) ([]visor.UnconfirmedTransaction, error) - GetWalletUnconfirmedTransactionsVerbose(wltID string) ([]visor.UnconfirmedTransaction, [][]visor.TransactionInput, error) - GetWalletBalance(wltID string) (wallet.BalancePair, wallet.AddressBalances, error) - CreateTransaction(p transaction.Params, wp visor.CreateTransactionParams) (*coin.Transaction, []visor.TransactionInput, error) - WalletCreateTransaction(wltID string, p transaction.Params, wp visor.CreateTransactionParams) (*coin.Transaction, []visor.TransactionInput, error) - WalletCreateTransactionSigned(wltID string, password []byte, p transaction.Params, wp visor.CreateTransactionParams) (*coin.Transaction, []visor.TransactionInput, error) - WalletSignTransaction(wltID string, password []byte, txn *coin.Transaction, signIndexes []int) (*coin.Transaction, []visor.TransactionInput, error) -} - -// Walleter interface for wallet.Service methods used by the API -type Walleter interface { - UnloadWallet(wltID string) error - EncryptWallet(wltID string, password []byte) (wallet.Wallet, error) - DecryptWallet(wltID string, password []byte) (wallet.Wallet, error) - GetWalletSeed(wltID string, password []byte) (string, string, error) - CreateWallet(wltName string, options wallet.Options, bg wallet.TransactionsFinder) (wallet.Wallet, error) 
- RecoverWallet(wltID, seed, seedPassphrase string, password []byte) (wallet.Wallet, error) - NewAddresses(wltID string, password []byte, n uint64) ([]cipher.Address, error) - GetWallet(wltID string) (wallet.Wallet, error) - GetWallets() (wallet.Wallets, error) - UpdateWalletLabel(wltID, label string) error - WalletDir() (string, error) -} - -// Storer interface for kvstorage.Manager methods used by the API -type Storer interface { - GetStorageValue(storageType kvstorage.Type, key string) (string, error) - GetAllStorageValues(storageType kvstorage.Type) (map[string]string, error) - AddStorageValue(storageType kvstorage.Type, key, val string) error - RemoveStorageValue(storageType kvstorage.Type, key string) error -} diff --git a/vendor/github.com/SkycoinProject/skycoin/src/api/health.go b/vendor/github.com/SkycoinProject/skycoin/src/api/health.go deleted file mode 100644 index a10db54..0000000 --- a/vendor/github.com/SkycoinProject/skycoin/src/api/health.go +++ /dev/null @@ -1,118 +0,0 @@ -package api - -import ( - "fmt" - "net/http" - "time" - - "github.com/SkycoinProject/skycoin/src/daemon" - "github.com/SkycoinProject/skycoin/src/params" - "github.com/SkycoinProject/skycoin/src/readable" - wh "github.com/SkycoinProject/skycoin/src/util/http" -) - -// BlockchainMetadata extends visor.BlockchainMetadata to include the time since the last block -type BlockchainMetadata struct { - readable.BlockchainMetadata - TimeSinceLastBlock wh.Duration `json:"time_since_last_block"` -} - -// HealthResponse is returned by the /health endpoint -type HealthResponse struct { - BlockchainMetadata BlockchainMetadata `json:"blockchain"` - Version readable.BuildInfo `json:"version"` - CoinName string `json:"coin"` - DaemonUserAgent string `json:"user_agent"` - OpenConnections int `json:"open_connections"` - OutgoingConnections int `json:"outgoing_connections"` - IncomingConnections int `json:"incoming_connections"` - Uptime wh.Duration `json:"uptime"` - CSRFEnabled bool 
`json:"csrf_enabled"` - HeaderCheckEnabled bool `json:"header_check_enabled"` - CSPEnabled bool `json:"csp_enabled"` - WalletAPIEnabled bool `json:"wallet_api_enabled"` - GUIEnabled bool `json:"gui_enabled"` - BlockPublisher bool `json:"block_publisher"` - UserVerifyTxn readable.VerifyTxn `json:"user_verify_transaction"` - UnconfirmedVerifyTxn readable.VerifyTxn `json:"unconfirmed_verify_transaction"` - StartedAt int64 `json:"started_at"` - Fiber readable.FiberConfig `json:"fiber"` -} - -func getHealthData(c muxConfig, gateway Gatewayer) (*HealthResponse, error) { - metadata, err := gateway.GetBlockchainMetadata() - if err != nil { - return nil, fmt.Errorf("gateway.GetBlockchainMetadata failed: %v", err) - } - - conns, err := gateway.GetConnections(func(c daemon.Connection) bool { - return c.State != daemon.ConnectionStatePending - }) - if err != nil { - return nil, fmt.Errorf("gateway.GetConnections failed: %v", err) - } - - outgoingConns := 0 - incomingConns := 0 - for _, c := range conns { - if c.Outgoing { - outgoingConns++ - } else { - incomingConns++ - } - } - - elapsedBlockTime := time.Now().UTC().Unix() - int64(metadata.HeadBlock.Head.Time) - timeSinceLastBlock := time.Second * time.Duration(elapsedBlockTime) - - _, walletAPIEnabled := c.enabledAPISets[EndpointsWallet] - - userAgent, err := c.health.DaemonUserAgent.Build() - if err != nil { - return nil, err - } - - return &HealthResponse{ - BlockchainMetadata: BlockchainMetadata{ - BlockchainMetadata: readable.NewBlockchainMetadata(*metadata), - TimeSinceLastBlock: wh.FromDuration(timeSinceLastBlock), - }, - Version: c.health.BuildInfo, - CoinName: c.health.Fiber.Name, - Fiber: c.health.Fiber, - DaemonUserAgent: userAgent, - OpenConnections: len(conns), - OutgoingConnections: outgoingConns, - IncomingConnections: incomingConns, - CSRFEnabled: !c.disableCSRF, - HeaderCheckEnabled: !c.disableHeaderCheck, - CSPEnabled: !c.disableCSP, - GUIEnabled: c.enableGUI, - BlockPublisher: c.health.BlockPublisher, - 
WalletAPIEnabled: walletAPIEnabled, - UserVerifyTxn: readable.NewVerifyTxn(params.UserVerifyTxn), - UnconfirmedVerifyTxn: readable.NewVerifyTxn(gateway.DaemonConfig().UnconfirmedVerifyTxn), - Uptime: wh.FromDuration(time.Since(gateway.StartedAt())), - StartedAt: gateway.StartedAt().Unix(), - }, nil -} - -// healthHandler returns node health data -// URI: /api/v1/health -// Method: GET -func healthHandler(c muxConfig, gateway Gatewayer) http.HandlerFunc { - return func(w http.ResponseWriter, r *http.Request) { - if r.Method != http.MethodGet { - wh.Error405(w) - return - } - - health, err := getHealthData(c, gateway) - if err != nil { - wh.Error500(w, err.Error()) - return - } - - wh.SendJSONOr500(logger, w, health) - } -} diff --git a/vendor/github.com/SkycoinProject/skycoin/src/api/http.go b/vendor/github.com/SkycoinProject/skycoin/src/api/http.go deleted file mode 100644 index 517b7eb..0000000 --- a/vendor/github.com/SkycoinProject/skycoin/src/api/http.go +++ /dev/null @@ -1,703 +0,0 @@ -/* -Package api implements the REST API interface -*/ -package api - -import ( - "crypto/tls" - "encoding/json" - "fmt" - "io/ioutil" - "net" - "net/http" - "path/filepath" - "strings" - "time" - "unicode" - - "github.com/NYTimes/gziphandler" - "github.com/rs/cors" - - "github.com/SkycoinProject/skycoin/src/cipher" - "github.com/SkycoinProject/skycoin/src/readable" - "github.com/SkycoinProject/skycoin/src/util/file" - wh "github.com/SkycoinProject/skycoin/src/util/http" - "github.com/SkycoinProject/skycoin/src/util/logging" - "github.com/SkycoinProject/skycoin/src/util/useragent" -) - -var ( - logger = logging.MustGetLogger("api") -) - -const ( - resourceDir = "dist/" - devDir = "dev/" - indexPage = "index.html" - - apiVersion1 = "v1" - apiVersion2 = "v2" - - defaultReadTimeout = time.Second * 10 - defaultWriteTimeout = time.Second * 60 - defaultIdleTimeout = time.Second * 120 - - // EndpointsRead endpoints with no side-effects and no changes in node state - EndpointsRead = 
"READ" - // EndpointsStatus endpoints offer (meta,runtime)data to dashboard and monitoring clients - EndpointsStatus = "STATUS" - // EndpointsTransaction endpoints export operations on transactions that modify node state - EndpointsTransaction = "TXN" - // EndpointsWallet endpoints implement wallet interface - EndpointsWallet = "WALLET" - // EndpointsInsecureWalletSeed endpoints implement wallet interface - EndpointsInsecureWalletSeed = "INSECURE_WALLET_SEED" - // EndpointsPrometheus endpoints for Go application metrics - EndpointsPrometheus = "PROMETHEUS" - // EndpointsNetCtrl endpoints for managing network connections - EndpointsNetCtrl = "NET_CTRL" - // EndpointsStorage endpoints implement interface for key-value storage for arbitrary data - EndpointsStorage = "STORAGE" -) - -// Server exposes an HTTP API -type Server struct { - server *http.Server - listener net.Listener - done chan struct{} -} - -// Config configures Server -type Config struct { - StaticDir string - DisableCSRF bool - DisableHeaderCheck bool - DisableCSP bool - EnableGUI bool - ReadTimeout time.Duration - WriteTimeout time.Duration - IdleTimeout time.Duration - Health HealthConfig - HostWhitelist []string - EnabledAPISets map[string]struct{} - Username string - Password string -} - -// HealthConfig configuration data exposed in /health -type HealthConfig struct { - BuildInfo readable.BuildInfo - Fiber readable.FiberConfig - DaemonUserAgent useragent.Data - BlockPublisher bool -} - -type muxConfig struct { - host string - appLoc string - enableGUI bool - disableCSRF bool - disableHeaderCheck bool - disableCSP bool - enabledAPISets map[string]struct{} - hostWhitelist []string - username string - password string - health HealthConfig -} - -// HTTPResponse represents the http response struct -type HTTPResponse struct { - Error *HTTPError `json:"error,omitempty"` - Data interface{} `json:"data,omitempty"` -} - -// HTTPError is included in an HTTPResponse -type HTTPError struct { - Message string 
`json:"message"` - Code int `json:"code"` -} - -// NewHTTPErrorResponse returns an HTTPResponse with the Error field populated -func NewHTTPErrorResponse(code int, msg string) HTTPResponse { - if msg == "" { - msg = http.StatusText(code) - } - - return HTTPResponse{ - Error: &HTTPError{ - Code: code, - Message: msg, - }, - } -} - -func writeHTTPResponse(w http.ResponseWriter, resp HTTPResponse) { - out, err := json.MarshalIndent(resp, "", " ") - if err != nil { - wh.Error500(w, "json.MarshalIndent failed") - return - } - - w.Header().Add("Content-Type", ContentTypeJSON) - - if resp.Error == nil { - w.WriteHeader(http.StatusOK) - } else { - if resp.Error.Code < 400 || resp.Error.Code >= 600 { - logger.Critical().Errorf("writeHTTPResponse invalid error status code: %d", resp.Error.Code) - w.WriteHeader(http.StatusInternalServerError) - } else { - w.WriteHeader(resp.Error.Code) - } - } - - if _, err := w.Write(out); err != nil { - logger.WithError(err).Error("http Write failed") - } -} - -func create(host string, c Config, gateway Gatewayer) (*Server, error) { - var appLoc string - if c.EnableGUI { - var err error - appLoc, err = file.DetermineResourcePath(c.StaticDir, resourceDir, devDir) - if err != nil { - return nil, err - } - logger.Infof("Web resources directory: %s", appLoc) - } - - if c.DisableCSRF { - logger.Warning("CSRF check disabled") - } - - if c.DisableHeaderCheck { - logger.Warning("Header check disabled") - } - - if c.ReadTimeout == 0 { - c.ReadTimeout = defaultReadTimeout - } - if c.WriteTimeout == 0 { - c.WriteTimeout = defaultWriteTimeout - } - if c.IdleTimeout == 0 { - c.IdleTimeout = defaultIdleTimeout - } - - mc := muxConfig{ - host: host, - appLoc: appLoc, - enableGUI: c.EnableGUI, - disableCSRF: c.DisableCSRF, - disableHeaderCheck: c.DisableHeaderCheck, - disableCSP: c.DisableCSP, - health: c.Health, - enabledAPISets: c.EnabledAPISets, - hostWhitelist: c.HostWhitelist, - username: c.Username, - password: c.Password, - } - - srvMux := 
newServerMux(mc, gateway) - srv := &http.Server{ - Handler: srvMux, - ReadTimeout: c.ReadTimeout, - WriteTimeout: c.WriteTimeout, - IdleTimeout: c.IdleTimeout, - // MaxHeaderBytes: http.DefaultMaxHeaderBytes, // adjust this to allow longer GET queries - } - - return &Server{ - server: srv, - done: make(chan struct{}), - }, nil -} - -// Create creates a new Server instance that listens on HTTP -func Create(host string, c Config, gateway Gatewayer) (*Server, error) { - logger.Warning("HTTPS not in use!") - - listener, err := net.Listen("tcp", host) - if err != nil { - return nil, err - } - - // If the host did not specify a port, allowing the kernel to assign one, - // we need to get the assigned address to know the full hostname - host = listener.Addr().String() - - s, err := create(host, c, gateway) - if err != nil { - if closeErr := s.listener.Close(); closeErr != nil { - logger.WithError(err).Warning("s.listener.Close() error") - } - return nil, err - } - - s.listener = listener - - return s, nil -} - -// CreateHTTPS creates a new Server instance that listens on HTTPS -func CreateHTTPS(host string, c Config, gateway Gatewayer, certFile, keyFile string) (*Server, error) { - cert, err := tls.LoadX509KeyPair(certFile, keyFile) - if err != nil { - return nil, err - } - - logger.Infof("Using %s for the certificate", certFile) - logger.Infof("Using %s for the key", keyFile) - - listener, err := tls.Listen("tcp", host, &tls.Config{ - Certificates: []tls.Certificate{cert}, - }) - if err != nil { - return nil, err - } - - // If the host did not specify a port, allowing the kernel to assign one, - // we need to get the assigned address to know the full hostname - host = listener.Addr().String() - - s, err := create(host, c, gateway) - if err != nil { - if closeErr := s.listener.Close(); closeErr != nil { - logger.WithError(err).Warning("s.listener.Close() error") - } - return nil, err - } - - s.listener = listener - - return s, nil -} - -// Addr returns the listening 
address of the Server -func (s *Server) Addr() string { - if s == nil || s.listener == nil { - return "" - } - return s.listener.Addr().String() -} - -// Serve serves the web interface on the configured host -func (s *Server) Serve() error { - logger.Infof("Starting web interface on %s", s.listener.Addr()) - defer logger.Info("Web interface closed") - defer close(s.done) - - if err := s.server.Serve(s.listener); err != nil { - if err != http.ErrServerClosed { - return err - } - } - return nil -} - -// Shutdown closes the HTTP service. This can only be called after Serve or ServeHTTPS has been called. -func (s *Server) Shutdown() { - if s == nil { - return - } - - logger.Info("Shutting down web interface") - defer logger.Info("Web interface shut down") - if err := s.listener.Close(); err != nil { - logger.WithError(err).Warning("s.listener.Close() error") - } - <-s.done -} - -// newServerMux creates an http.ServeMux with handlers registered -func newServerMux(c muxConfig, gateway Gatewayer) *http.ServeMux { - mux := http.NewServeMux() - - allowedOrigins := []string{fmt.Sprintf("http://%s", c.host)} - for _, s := range c.hostWhitelist { - allowedOrigins = append(allowedOrigins, fmt.Sprintf("http://%s", s)) - } - - corsHandler := cors.New(cors.Options{ - AllowedOrigins: allowedOrigins, - Debug: false, - AllowedMethods: []string{http.MethodGet, http.MethodPost}, - AllowedHeaders: []string{"Origin", "Accept", "Content-Type", "X-Requested-With", CSRFHeaderName}, - AllowCredentials: false, // credentials are not used, but it would be safe to enable if necessary - OptionsPassthrough: false, - }) - - headerCheck := func(apiVersion, host string, hostWhitelist []string, handler http.Handler) http.Handler { - handler = originRefererCheck(apiVersion, host, hostWhitelist, handler) - handler = hostCheck(apiVersion, host, hostWhitelist, handler) - return handler - } - - forMethodAPISets := func(apiVersion string, f http.Handler, methodsAPISets map[string][]string) http.Handler { - 
if len(methodsAPISets) == 0 { - logger.Panic("methodsAPISets should not be empty") - } - - switch apiVersion { - case apiVersion1, apiVersion2: - default: - logger.Panicf("Invalid API version %q", apiVersion) - } - - return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - apiSets := methodsAPISets[r.Method] - - // If no API sets are specified for a given method, return 405 Method Not Allowed - if len(apiSets) == 0 { - switch apiVersion { - case apiVersion1: - wh.Error405(w) - case apiVersion2: - resp := NewHTTPErrorResponse(http.StatusMethodNotAllowed, "") - writeHTTPResponse(w, resp) - } - return - } - - for _, k := range apiSets { - if _, ok := c.enabledAPISets[k]; ok { - f.ServeHTTP(w, r) - return - } - } - - switch apiVersion { - case apiVersion1: - wh.Error403(w, "Endpoint is disabled") - case apiVersion2: - resp := NewHTTPErrorResponse(http.StatusForbidden, "Endpoint is disabled") - writeHTTPResponse(w, resp) - } - }) - } - - webHandlerWithOptionals := func(apiVersion, endpoint string, handlerFunc http.Handler, checkCSRF, checkHeaders bool) { - handler := wh.ElapsedHandler(logger, handlerFunc) - - handler = corsHandler.Handler(handler) - - if checkCSRF { - handler = CSRFCheck(apiVersion, c.disableCSRF, handler) - } - - if checkHeaders { - handler = headerCheck(apiVersion, c.host, c.hostWhitelist, handler) - } - - if apiVersion == apiVersion2 { - handler = ContentTypeJSONRequired(handler) - } - - handler = basicAuth(apiVersion, c.username, c.password, "skycoin daemon", handler) - handler = gziphandler.GzipHandler(handler) - mux.Handle(endpoint, handler) - } - - webHandler := func(apiVersion, endpoint string, handler http.Handler, methodAPISets map[string][]string) { - // methodAPISets can be nil to ignore the concept of API sets for an endpoint. It will always be enabled. 
- // Explicitly check nil, caller should not pass empty initialized map - if methodAPISets != nil { - handler = forMethodAPISets(apiVersion, handler, methodAPISets) - } - - webHandlerWithOptionals(apiVersion, endpoint, handler, true, !c.disableHeaderCheck) - } - - webHandlerV1 := func(endpoint string, handler http.Handler, methodAPISets map[string][]string) { - webHandler(apiVersion1, "/api/v1"+endpoint, handler, methodAPISets) - } - - webHandlerV2 := func(endpoint string, handler http.Handler, methodAPISets map[string][]string) { - webHandler(apiVersion2, "/api/v2"+endpoint, handler, methodAPISets) - } - - indexHandler := newIndexHandler(c.appLoc, c.enableGUI) - if !c.disableCSP { - indexHandler = CSPHandler(indexHandler, ContentSecurityPolicy) - } - webHandler(apiVersion1, "/", indexHandler, nil) - - if c.enableGUI { - fileInfos, err := ioutil.ReadDir(c.appLoc) - if err != nil { - logger.WithError(err).Panicf("ioutil.ReadDir(%s) failed", c.appLoc) - } - - fs := http.FileServer(http.Dir(c.appLoc)) - if !c.disableCSP { - fs = CSPHandler(fs, ContentSecurityPolicy) - } - - for _, fileInfo := range fileInfos { - route := fmt.Sprintf("/%s", fileInfo.Name()) - if fileInfo.IsDir() { - route = route + "/" - } - - webHandler(apiVersion1, route, fs, nil) - } - } - - // get the current CSRF token - csrfHandlerV1 := func(endpoint string, handler http.Handler) { - webHandlerWithOptionals(apiVersion1, "/api/v1"+endpoint, handler, false, !c.disableHeaderCheck) - } - csrfHandlerV1("/csrf", getCSRFToken(c.disableCSRF)) // csrf is always available, regardless of the API set - - // Status endpoints - webHandlerV1("/version", versionHandler(c.health.BuildInfo), nil) // version is always available, regardless of the API set - webHandlerV1("/health", healthHandler(c, gateway), map[string][]string{ - http.MethodGet: []string{EndpointsRead, EndpointsStatus}, - }) - - // Wallet endpoints - webHandlerV1("/wallet", walletHandler(gateway), map[string][]string{ - http.MethodGet: 
[]string{EndpointsWallet}, - }) - webHandlerV1("/wallet/create", walletCreateHandler(gateway), map[string][]string{ - http.MethodPost: []string{EndpointsWallet}, - }) - webHandlerV1("/wallet/newAddress", walletNewAddressesHandler(gateway), map[string][]string{ - http.MethodPost: []string{EndpointsWallet}, - }) - webHandlerV1("/wallet/balance", walletBalanceHandler(gateway), map[string][]string{ - http.MethodGet: []string{EndpointsWallet}, - }) - webHandlerV1("/wallet/transaction", walletCreateTransactionHandler(gateway), map[string][]string{ - http.MethodPost: []string{EndpointsWallet}, - }) - webHandlerV2("/wallet/transaction/sign", walletSignTransactionHandler(gateway), map[string][]string{ - http.MethodPost: []string{EndpointsWallet}, - }) - webHandlerV1("/wallet/transactions", walletTransactionsHandler(gateway), map[string][]string{ - http.MethodGet: []string{EndpointsWallet}, - }) - webHandlerV1("/wallet/update", walletUpdateHandler(gateway), map[string][]string{ - http.MethodPost: []string{EndpointsWallet}, - }) - webHandlerV1("/wallets", walletsHandler(gateway), map[string][]string{ - http.MethodGet: []string{EndpointsWallet}, - }) - webHandlerV1("/wallets/folderName", walletFolderHandler(gateway), map[string][]string{ - http.MethodGet: []string{EndpointsWallet}, - }) - webHandlerV1("/wallet/newSeed", newSeedHandler(), map[string][]string{ - http.MethodGet: []string{EndpointsWallet}, - }) - webHandlerV1("/wallet/seed", walletSeedHandler(gateway), map[string][]string{ - http.MethodPost: []string{EndpointsInsecureWalletSeed}, - }) - webHandlerV2("/wallet/seed/verify", http.HandlerFunc(walletVerifySeedHandler), map[string][]string{ - http.MethodPost: []string{EndpointsWallet}, - }) - - webHandlerV1("/wallet/unload", walletUnloadHandler(gateway), map[string][]string{ - http.MethodPost: []string{EndpointsWallet}, - }) - webHandlerV1("/wallet/encrypt", walletEncryptHandler(gateway), map[string][]string{ - http.MethodPost: []string{EndpointsWallet}, - }) - 
webHandlerV1("/wallet/decrypt", walletDecryptHandler(gateway), map[string][]string{ - http.MethodPost: []string{EndpointsWallet}, - }) - webHandlerV2("/wallet/recover", walletRecoverHandler(gateway), map[string][]string{ - http.MethodPost: []string{EndpointsWallet}, - }) - - // Blockchain interface - webHandlerV1("/blockchain/metadata", blockchainMetadataHandler(gateway), map[string][]string{ - http.MethodGet: []string{EndpointsRead, EndpointsStatus}, - }) - webHandlerV1("/blockchain/progress", blockchainProgressHandler(gateway), map[string][]string{ - http.MethodGet: []string{EndpointsRead, EndpointsStatus}, - }) - webHandlerV1("/block", blockHandler(gateway), map[string][]string{ - http.MethodGet: []string{EndpointsRead}, - }) - webHandlerV1("/blocks", blocksHandler(gateway), map[string][]string{ - http.MethodGet: []string{EndpointsRead}, - http.MethodPost: []string{EndpointsRead}, - }) - webHandlerV1("/last_blocks", lastBlocksHandler(gateway), map[string][]string{ - http.MethodGet: []string{EndpointsRead}, - }) - - // Network stats endpoints - webHandlerV1("/network/connection", connectionHandler(gateway), map[string][]string{ - http.MethodGet: []string{EndpointsRead, EndpointsStatus}, - }) - webHandlerV1("/network/connections", connectionsHandler(gateway), map[string][]string{ - http.MethodGet: []string{EndpointsRead, EndpointsStatus}, - }) - webHandlerV1("/network/defaultConnections", defaultConnectionsHandler(gateway), map[string][]string{ - http.MethodGet: []string{EndpointsRead, EndpointsStatus}, - }) - webHandlerV1("/network/connections/trust", trustConnectionsHandler(gateway), map[string][]string{ - http.MethodGet: []string{EndpointsRead, EndpointsStatus}, - }) - webHandlerV1("/network/connections/exchange", exchgConnectionsHandler(gateway), map[string][]string{ - http.MethodGet: []string{EndpointsRead, EndpointsStatus}, - }) - - // Network admin endpoints - webHandlerV1("/network/connection/disconnect", disconnectHandler(gateway), map[string][]string{ - 
http.MethodPost: []string{EndpointsNetCtrl}, - }) - - // Transaction related endpoints - webHandlerV1("/pendingTxs", pendingTxnsHandler(gateway), map[string][]string{ - http.MethodGet: []string{EndpointsRead}, - }) - webHandlerV1("/transaction", transactionHandler(gateway), map[string][]string{ - http.MethodGet: []string{EndpointsRead}, - }) - webHandlerV2("/transaction", transactionHandlerV2(gateway), map[string][]string{ - // http.MethodGet: []string{EndpointsRead}, - http.MethodPost: []string{EndpointsTransaction}, - }) - webHandlerV2("/transaction/verify", verifyTxnHandler(gateway), map[string][]string{ - http.MethodPost: []string{EndpointsRead}, - }) - webHandlerV1("/transactions", transactionsHandler(gateway), map[string][]string{ - http.MethodGet: []string{EndpointsRead}, - http.MethodPost: []string{EndpointsRead}, - }) - webHandlerV1("/injectTransaction", injectTransactionHandler(gateway), map[string][]string{ - http.MethodPost: []string{EndpointsTransaction, EndpointsWallet}, - }) - webHandlerV1("/resendUnconfirmedTxns", resendUnconfirmedTxnsHandler(gateway), map[string][]string{ - http.MethodPost: []string{EndpointsTransaction, EndpointsWallet}, - }) - webHandlerV1("/rawtx", rawTxnHandler(gateway), map[string][]string{ - http.MethodGet: []string{EndpointsRead}, - }) - - // Unspent output related endpoints - webHandlerV1("/outputs", outputsHandler(gateway), map[string][]string{ - http.MethodGet: []string{EndpointsRead}, - http.MethodPost: []string{EndpointsRead}, - }) - webHandlerV1("/balance", balanceHandler(gateway), map[string][]string{ - http.MethodGet: []string{EndpointsRead}, - http.MethodPost: []string{EndpointsRead}, - }) - webHandlerV1("/uxout", uxOutHandler(gateway), map[string][]string{ - http.MethodGet: []string{EndpointsRead}, - }) - webHandlerV1("/address_uxouts", addrUxOutsHandler(gateway), map[string][]string{ - http.MethodGet: []string{EndpointsRead}, - }) - - // golang process internal metrics for Prometheus - webHandlerV2("/metrics", 
metricsHandler(c, gateway), map[string][]string{ - http.MethodGet: []string{EndpointsPrometheus}, - }) - - // Address related endpoints - webHandlerV2("/address/verify", http.HandlerFunc(addressVerifyHandler), map[string][]string{ - http.MethodPost: []string{EndpointsRead}, - }) - - // Explorer endpoints - webHandlerV1("/coinSupply", coinSupplyHandler(gateway), map[string][]string{ - http.MethodGet: []string{EndpointsRead}, - }) - webHandlerV1("/richlist", richlistHandler(gateway), map[string][]string{ - http.MethodGet: []string{EndpointsRead}, - }) - webHandlerV1("/addresscount", addressCountHandler(gateway), map[string][]string{ - http.MethodGet: []string{EndpointsRead}, - }) - - // Storage endpoint - webHandlerV2("/data", storageHandler(gateway), map[string][]string{ - http.MethodGet: []string{EndpointsStorage}, - http.MethodPost: []string{EndpointsStorage}, - http.MethodDelete: []string{EndpointsStorage}, - }) - - return mux -} - -// newIndexHandler returns a http.Handler for index.html, where index.html is in appLoc -func newIndexHandler(appLoc string, enableGUI bool) http.Handler { - // Serves the main page - return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - if !enableGUI { - wh.Error404(w, "") - return - } - - if r.URL.Path != "/" { - wh.Error404(w, "") - return - } - - if r.URL.Path == "/" { - page := filepath.Join(appLoc, indexPage) - logger.Debugf("Serving index page: %s", page) - http.ServeFile(w, r, page) - } - }) -} - -// splitCommaString splits a string separated by commas or whitespace into tokens -// and returns an array of unique tokens split from that string -func splitCommaString(s string) []string { - words := strings.FieldsFunc(s, func(r rune) bool { - return r == ',' || unicode.IsSpace(r) - }) - - // Deduplicate - var dedupWords []string - wordsMap := make(map[string]struct{}) - for _, w := range words { - if _, ok := wordsMap[w]; !ok { - dedupWords = append(dedupWords, w) - } - wordsMap[w] = struct{}{} - } - - return 
dedupWords -} - -// parseAddressesFromStr parses comma-separated addresses string into []cipher.Address -func parseAddressesFromStr(s string) ([]cipher.Address, error) { - addrsStr := splitCommaString(s) - - addrs := make([]cipher.Address, len(addrsStr)) - for i, s := range addrsStr { - a, err := cipher.DecodeBase58Address(s) - if err != nil { - return nil, fmt.Errorf("address %q is invalid: %v", s, err) - } - - addrs[i] = a - } - - return addrs, nil -} - -// parseAddressesFromStr parses comma-separated hashes string into []cipher.SHA256 -func parseHashesFromStr(s string) ([]cipher.SHA256, error) { - hashesStr := splitCommaString(s) - - hashes := make([]cipher.SHA256, len(hashesStr)) - for i, s := range hashesStr { - h, err := cipher.SHA256FromHex(s) - if err != nil { - return nil, fmt.Errorf("SHA256 hash %q is invalid: %v", s, err) - } - - hashes[i] = h - } - - return hashes, nil -} diff --git a/vendor/github.com/SkycoinProject/skycoin/src/api/metrics.go b/vendor/github.com/SkycoinProject/skycoin/src/api/metrics.go deleted file mode 100644 index fd73b90..0000000 --- a/vendor/github.com/SkycoinProject/skycoin/src/api/metrics.go +++ /dev/null @@ -1,85 +0,0 @@ -package api - -import ( - "net/http" - - "github.com/prometheus/client_golang/prometheus" - "github.com/prometheus/client_golang/prometheus/promhttp" - - wh "github.com/SkycoinProject/skycoin/src/util/http" -) - -var ( - promUnspents = prometheus.NewGauge( - prometheus.GaugeOpts{ - Name: "unspent_outputs", - Help: "Number of unspent outputs", - }) - promUnconfirmedTxns = prometheus.NewGauge( - prometheus.GaugeOpts{ - Name: "unconfirmed_txns", - Help: "Number of unconfirmed transactions", - }) - promTimeSinceLastBlock = prometheus.NewGauge( - prometheus.GaugeOpts{ - Name: "time_since_last_block_seconds", - Help: "Time since the last block created", - }) - promOpenConns = prometheus.NewGauge( - prometheus.GaugeOpts{ - Name: "open_connections", - Help: "Number of open connections", - }) - promOutgoingConns = 
prometheus.NewGauge( - prometheus.GaugeOpts{ - Name: "outgoing_connections", - Help: "Number of outgoing connections", - }) - promIncomingConns = prometheus.NewGauge( - prometheus.GaugeOpts{ - Name: "incoming_connections", - Help: "Number of incoming connections", - }) - promStartedAt = prometheus.NewGauge( - prometheus.GaugeOpts{ - Name: "started_at", - Help: "Node start time, in unixtime", - }) - promLastBlockSeq = prometheus.NewGauge( - prometheus.GaugeOpts{ - Name: "last_block_seq", - Help: "Last block sequence number", - }) -) - -func init() { - prometheus.MustRegister(promUnspents) - prometheus.MustRegister(promUnconfirmedTxns) - prometheus.MustRegister(promTimeSinceLastBlock) - prometheus.MustRegister(promOpenConns) - prometheus.MustRegister(promOutgoingConns) - prometheus.MustRegister(promIncomingConns) - prometheus.MustRegister(promStartedAt) - prometheus.MustRegister(promLastBlockSeq) -} - -func metricsHandler(c muxConfig, gateway Gatewayer) http.HandlerFunc { - return func(w http.ResponseWriter, r *http.Request) { - health, err := getHealthData(c, gateway) - if err != nil { - wh.Error500(w, err.Error()) - return - } - - promUnspents.Set(float64(health.BlockchainMetadata.Unspents)) - promUnconfirmedTxns.Set(float64(health.BlockchainMetadata.Unconfirmed)) - promTimeSinceLastBlock.Set(health.BlockchainMetadata.TimeSinceLastBlock.Seconds()) - promOpenConns.Set(float64(health.OpenConnections)) - promOutgoingConns.Set(float64(health.OutgoingConnections)) - promIncomingConns.Set(float64(health.IncomingConnections)) - promStartedAt.Set(float64(gateway.StartedAt().Unix())) - promLastBlockSeq.Set(float64(health.BlockchainMetadata.Head.BkSeq)) - - promhttp.Handler().ServeHTTP(w, r) - } -} diff --git a/vendor/github.com/SkycoinProject/skycoin/src/api/middleware.go b/vendor/github.com/SkycoinProject/skycoin/src/api/middleware.go deleted file mode 100644 index 357883b..0000000 --- a/vendor/github.com/SkycoinProject/skycoin/src/api/middleware.go +++ /dev/null @@ -1,208 
+0,0 @@ -package api - -import ( - "crypto/subtle" - "fmt" - "net/http" - "net/url" - "strings" - - "github.com/SkycoinProject/skycoin/src/cipher" - wh "github.com/SkycoinProject/skycoin/src/util/http" - "github.com/SkycoinProject/skycoin/src/util/iputil" -) - -// ContentSecurityPolicy represents the value of content-security-policy -// header in http response -const ContentSecurityPolicy = "default-src 'self'" + - "; connect-src 'self' https://api.coinpaprika.com https://swaplab.cc https://version.skycoin.com https://downloads.skycoin.com http://127.0.0.1:9510" + - "; img-src 'self' 'unsafe-inline' data:" + - "; style-src 'self' 'unsafe-inline'" + - "; object-src 'none'" + - "; form-action 'none'" + - "; frame-ancestors 'none'" + - "; block-all-mixed-content" + - "; base-uri 'self'" - -// CSPHandler sets the Content-Security-Policy header -func CSPHandler(handler http.Handler, policy string) http.Handler { - return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - w.Header().Set("Content-Security-Policy", policy) - handler.ServeHTTP(w, r) - }) -} - -// ContentTypeJSONRequired enforces Content-Type: application/json in a POST request. -// Return 415 Unsupported Media Type if the Content-Type is not application/json, -// in the V2 error format. -func ContentTypeJSONRequired(handler http.Handler) http.Handler { - return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - if r.Method == http.MethodPost { - contentType := r.Header.Get("Content-Type") - if !isContentTypeJSON(contentType) { - resp := NewHTTPErrorResponse(http.StatusUnsupportedMediaType, "") - writeHTTPResponse(w, resp) - return - } - } - - handler.ServeHTTP(w, r) - }) -} - -// isContentTypeJSON returns true if the content type is application/json, -// allowing the content-type string to include extra parameters like charset=utf-8, -// for example `Content-Type: application/json; charset=utf-8` will return true. 
-func isContentTypeJSON(contentType string) bool { - return contentType == ContentTypeJSON || strings.HasPrefix(contentType, ContentTypeJSON+";") -} - -// HostCheck checks that the request's Host header is 127.0.0.1:$port or localhost:$port -// if the HTTP interface host is also a localhost address. -// This prevents DNS rebinding attacks, where an attacker uses a DNS rebinding service -// to bypass CORS checks. -// If the HTTP interface host is not a localhost address, -// the Host header is not checked. This is considered a public interface. -// If the Host header is not set, it is not checked. -// All major browsers send the Host header as required by the HTTP spec. -// hostWhitelist allows additional Host header values to be accepted. -func HostCheck(host string, hostWhitelist []string, handler http.Handler) http.Handler { - return hostCheck(apiVersion1, host, hostWhitelist, handler) -} - -func hostCheck(apiVersion, host string, hostWhitelist []string, handler http.Handler) http.Handler { - addr := host - var port uint16 - if strings.Contains(host, ":") { - var err error - addr, port, err = iputil.SplitAddr(host) - if err != nil { - logger.Panic(err) - } - } - - isLocalhost := iputil.IsLocalhost(addr) - - if isLocalhost && port == 0 { - logger.Panic("localhost with no port specified is unsupported") - } - - hostWhitelistMap := make(map[string]struct{}, len(hostWhitelist)+2) - for _, k := range hostWhitelist { - hostWhitelistMap[k] = struct{}{} - } - hostWhitelistMap[fmt.Sprintf("127.0.0.1:%d", port)] = struct{}{} - hostWhitelistMap[fmt.Sprintf("localhost:%d", port)] = struct{}{} - - return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - // NOTE: The "Host" header is not in http.Request.Header, it's put in the http.Request.Host field - _, isWhitelisted := hostWhitelistMap[r.Host] - if isLocalhost && r.Host != "" && !isWhitelisted { - logger.Critical().Errorf("Detected DNS rebind attempt - configured-host=%s header-host=%s", host, r.Host) - 
writeError(w, apiVersion, http.StatusForbidden, "Invalid Host") - return - } - - handler.ServeHTTP(w, r) - }) -} - -// OriginRefererCheck checks the Origin header if present, falling back on Referer. -// The Origin or Referer hostname must match the configured host. -// If neither are present, the request is allowed. All major browsers will set -// at least one of these values. If neither are set, assume it is a request -// from curl/wget. -func OriginRefererCheck(host string, hostWhitelist []string, handler http.Handler) http.Handler { - return originRefererCheck(apiVersion1, host, hostWhitelist, handler) -} - -func originRefererCheck(apiVersion, host string, hostWhitelist []string, handler http.Handler) http.Handler { - hostWhitelistMap := make(map[string]struct{}, len(hostWhitelist)+2) - for _, k := range hostWhitelist { - hostWhitelistMap[k] = struct{}{} - } - - if addr, port, _ := iputil.SplitAddr(host); iputil.IsLocalhost(addr) { //nolint:errcheck - hostWhitelistMap[fmt.Sprintf("127.0.0.1:%d", port)] = struct{}{} - hostWhitelistMap[fmt.Sprintf("localhost:%d", port)] = struct{}{} - } else { - hostWhitelistMap[host] = struct{}{} - } - - return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - origin := r.Header.Get("Origin") - referer := r.Header.Get("Referer") - toCheck := origin - toCheckHeader := "Origin" - if toCheck == "" { - toCheck = referer - toCheckHeader = "Referer" - } - - if toCheck != "" { - u, err := url.Parse(toCheck) - if err != nil { - logger.Critical().Errorf("Invalid URL in %s header: %s %v", toCheckHeader, toCheck, err) - writeError(w, apiVersion, http.StatusForbidden, "Invalid URL in Origin or Referer header") - return - } - - if _, isWhitelisted := hostWhitelistMap[u.Host]; !isWhitelisted { - logger.Critical().Errorf("%s header value %s does not match host and is not whitelisted", toCheckHeader, toCheck) - writeError(w, apiVersion, http.StatusForbidden, "Invalid Origin or Referer") - return - } - } - - handler.ServeHTTP(w, 
r) - }) -} - -func basicAuth(apiVersion, username, password, realm string, f http.Handler) http.HandlerFunc { - needsAuth := username != "" || password != "" - usernamePasswordHash := cipher.SumSHA256(append([]byte(username), []byte(password)...)) - authHeader := fmt.Sprintf("Basic realm=%q", realm) - - return func(w http.ResponseWriter, r *http.Request) { - user, pass, ok := r.BasicAuth() - - if needsAuth { - if !ok { - w.Header().Set("WWW-Authenticate", authHeader) - writeError(w, apiVersion, http.StatusUnauthorized, "") - return - } - - userPassHash := cipher.SumSHA256(append([]byte(user), []byte(pass)...)) - - if subtle.ConstantTimeCompare(userPassHash[:], usernamePasswordHash[:]) != 1 { - w.Header().Set("WWW-Authenticate", authHeader) - writeError(w, apiVersion, http.StatusUnauthorized, "") - return - } - } else { - // If auth is not configured but the request provides auth, reject - // This will avoid a mistake where the daemon is not configured with auth, - // but the client is, and does not realize the daemon is not configured with auth - // because all requests are accepted - if user != "" || pass != "" { - w.Header().Set("WWW-Authenticate", authHeader) - writeError(w, apiVersion, http.StatusUnauthorized, "") - return - } - } - - f.ServeHTTP(w, r) - } -} - -func writeError(w http.ResponseWriter, apiVersion string, code int, msg string) { - switch apiVersion { - case apiVersion1: - wh.ErrorXXX(w, code, msg) - case apiVersion2: - writeHTTPResponse(w, NewHTTPErrorResponse(code, msg)) - default: - wh.Error500(w, "Invalid internal API version") - } -} diff --git a/vendor/github.com/SkycoinProject/skycoin/src/api/network.go b/vendor/github.com/SkycoinProject/skycoin/src/api/network.go deleted file mode 100644 index 7d3a33a..0000000 --- a/vendor/github.com/SkycoinProject/skycoin/src/api/network.go +++ /dev/null @@ -1,228 +0,0 @@ -package api - -// APIs for network-related information - -import ( - "fmt" - "net/http" - "sort" - "strconv" - "strings" - - 
"github.com/SkycoinProject/skycoin/src/daemon" - "github.com/SkycoinProject/skycoin/src/readable" - wh "github.com/SkycoinProject/skycoin/src/util/http" -) - -// connectionHandler returns a specific connection -// URI: /api/v1/network/connections -// Method: GET -// Args: -// addr - An IP:Port string -func connectionHandler(gateway Gatewayer) http.HandlerFunc { - return func(w http.ResponseWriter, r *http.Request) { - if r.Method != http.MethodGet { - wh.Error405(w) - return - } - - addr := r.FormValue("addr") - if addr == "" { - wh.Error400(w, "addr is required") - return - } - - c, err := gateway.GetConnection(addr) - if err != nil { - wh.Error500(w, err.Error()) - return - } - - if c == nil { - wh.Error404(w, "") - return - } - - wh.SendJSONOr500(logger, w, readable.NewConnection(c)) - } -} - -// Connections wraps []Connection -type Connections struct { - Connections []readable.Connection `json:"connections"` -} - -// NewConnections copies []daemon.Connection to a struct with json tags -func NewConnections(dconns []daemon.Connection) Connections { - conns := make([]readable.Connection, len(dconns)) - for i, dc := range dconns { - conns[i] = readable.NewConnection(&dc) - } - - return Connections{ - Connections: conns, - } -} - -// connectionsHandler returns all outgoing connections -// URI: /api/v1/network/connections -// Method: GET -// Args: -// states: [optional] comma-separated list of connection states ("pending", "connected" or "introduced"). Defaults to "connected,introduced" -// direction: [optional] "outgoing" or "incoming". If not provided, both are included. 
-func connectionsHandler(gateway Gatewayer) http.HandlerFunc { - return func(w http.ResponseWriter, r *http.Request) { - if r.Method != http.MethodGet { - wh.Error405(w) - return - } - - formStates := r.FormValue("states") - statesMap := make(map[daemon.ConnectionState]struct{}, 3) - if formStates != "" { - states := strings.Split(formStates, ",") - for _, s := range states { - switch daemon.ConnectionState(s) { - case daemon.ConnectionStatePending, - daemon.ConnectionStateConnected, - daemon.ConnectionStateIntroduced: - statesMap[daemon.ConnectionState(s)] = struct{}{} - default: - wh.Error400(w, fmt.Sprintf("Invalid state in states. Valid states are %q, %q or %q", daemon.ConnectionStatePending, daemon.ConnectionStateConnected, daemon.ConnectionStateIntroduced)) - return - } - } - } - - // "connected" and "introduced" are the defaults, if not specified - if len(statesMap) == 0 { - statesMap[daemon.ConnectionStateConnected] = struct{}{} - statesMap[daemon.ConnectionStateIntroduced] = struct{}{} - } - - direction := r.FormValue("direction") - switch direction { - case "incoming", "outgoing", "": - default: - wh.Error400(w, "Invalid direction. Valid directions are \"outgoing\" or \"incoming\"") - return - } - - conns, err := gateway.GetConnections(func(c daemon.Connection) bool { - switch direction { - case "outgoing": - if !c.Outgoing { - return false - } - case "incoming": - if c.Outgoing { - return false - } - } - - if _, ok := statesMap[c.State]; !ok { - return false - } - - return true - }) - - if err != nil { - wh.Error500(w, err.Error()) - return - } - - wh.SendJSONOr500(logger, w, NewConnections(conns)) - } -} - -// defaultConnectionsHandler returns the list of default hardcoded bootstrap addresses. -// They are not necessarily connected to. 
-// URI: /api/v1/network/defaultConnections -// Method: GET -func defaultConnectionsHandler(gateway Gatewayer) http.HandlerFunc { - return func(w http.ResponseWriter, r *http.Request) { - if r.Method != http.MethodGet { - wh.Error405(w) - return - } - - conns := gateway.GetDefaultConnections() - sort.Strings(conns) - - wh.SendJSONOr500(logger, w, conns) - } -} - -// trustConnectionsHandler returns all trusted connections -// In the default configuration, these will be a subset of the default hardcoded bootstrap addresses -// URI: /api/v1/network/trust -// Method: GET -func trustConnectionsHandler(gateway Gatewayer) http.HandlerFunc { - return func(w http.ResponseWriter, r *http.Request) { - if r.Method != http.MethodGet { - wh.Error405(w) - return - } - - conns := gateway.GetTrustConnections() - sort.Strings(conns) - - wh.SendJSONOr500(logger, w, conns) - } -} - -// exchgConnectionsHandler returns all connections found through peer exchange -// URI: /api/v1/network/exchange -// Method: GET -func exchgConnectionsHandler(gateway Gatewayer) http.HandlerFunc { - return func(w http.ResponseWriter, r *http.Request) { - if r.Method != http.MethodGet { - wh.Error405(w) - return - } - - conns := gateway.GetExchgConnection() - sort.Strings(conns) - - wh.SendJSONOr500(logger, w, conns) - } -} - -// disconnectHandler disconnects a connection by ID or address -// URI: /api/v1/network/connection/disconnect -// Method: POST -// Args: -// id: ID of the connection -func disconnectHandler(gateway Gatewayer) http.HandlerFunc { - return func(w http.ResponseWriter, r *http.Request) { - if r.Method != http.MethodPost { - wh.Error405(w) - return - } - - formID := r.FormValue("id") - if formID == "" { - wh.Error400(w, "id is required") - return - } - - id, err := strconv.ParseUint(formID, 10, 64) - if err != nil || id == 0 { // gnet IDs are non-zero - wh.Error400(w, "invalid id") - return - } - - if err := gateway.DisconnectByGnetID(uint64(id)); err != nil { - switch err { - case 
daemon.ErrConnectionNotExist: - wh.Error404(w, "") - default: - wh.Error500(w, err.Error()) - } - return - } - - wh.SendJSONOr500(logger, w, struct{}{}) - } -} diff --git a/vendor/github.com/SkycoinProject/skycoin/src/api/outputs.go b/vendor/github.com/SkycoinProject/skycoin/src/api/outputs.go deleted file mode 100644 index db6288d..0000000 --- a/vendor/github.com/SkycoinProject/skycoin/src/api/outputs.go +++ /dev/null @@ -1,77 +0,0 @@ -package api - -import ( - "fmt" - "net/http" - - "github.com/SkycoinProject/skycoin/src/readable" - wh "github.com/SkycoinProject/skycoin/src/util/http" - "github.com/SkycoinProject/skycoin/src/visor" -) - -// outputsHandler returns UxOuts filtered by a set of addresses or a set of hashes -// URI: /api/v1/outputs -// Method: GET, POST -// Args: -// addrs: comma-separated list of addresses -// hashes: comma-separated list of uxout hashes -// If neither addrs nor hashes are specificed, return all unspent outputs. -// If only one filter is specified, then return outputs match the filter. -// Both filters cannot be specified. 
-func outputsHandler(gateway Gatewayer) http.HandlerFunc { - return func(w http.ResponseWriter, r *http.Request) { - if r.Method != http.MethodGet && r.Method != http.MethodPost { - wh.Error405(w) - return - } - - addrStr := r.FormValue("addrs") - hashStr := r.FormValue("hashes") - - if addrStr != "" && hashStr != "" { - wh.Error400(w, "addrs and hashes cannot be specified together") - return - } - - var filters []visor.OutputsFilter - - if addrStr != "" { - addrs, err := parseAddressesFromStr(addrStr) - if err != nil { - wh.Error400(w, err.Error()) - return - } - - if len(addrs) > 0 { - filters = append(filters, visor.FbyAddresses(addrs)) - } - } - - if hashStr != "" { - hashes, err := parseHashesFromStr(hashStr) - if err != nil { - wh.Error400(w, err.Error()) - return - } - - if len(hashes) > 0 { - filters = append(filters, visor.FbyHashes(hashes)) - } - } - - summary, err := gateway.GetUnspentOutputsSummary(filters) - if err != nil { - err = fmt.Errorf("gateway.GetUnspentOutputsSummary failed: %v", err) - wh.Error500(w, err.Error()) - return - } - - rSummary, err := readable.NewUnspentOutputsSummary(summary) - if err != nil { - wh.Error500(w, err.Error()) - return - } - - wh.SendJSONOr500(logger, w, rSummary) - } -} diff --git a/vendor/github.com/SkycoinProject/skycoin/src/api/spend.go b/vendor/github.com/SkycoinProject/skycoin/src/api/spend.go deleted file mode 100644 index 94bd61b..0000000 --- a/vendor/github.com/SkycoinProject/skycoin/src/api/spend.go +++ /dev/null @@ -1,748 +0,0 @@ -package api - -import ( - "encoding/json" - "errors" - "fmt" - "net/http" - "strconv" - - "github.com/shopspring/decimal" - - "github.com/SkycoinProject/skycoin/src/cipher" - "github.com/SkycoinProject/skycoin/src/coin" - "github.com/SkycoinProject/skycoin/src/params" - "github.com/SkycoinProject/skycoin/src/transaction" - "github.com/SkycoinProject/skycoin/src/util/droplet" - "github.com/SkycoinProject/skycoin/src/util/fee" - wh "github.com/SkycoinProject/skycoin/src/util/http" 
- "github.com/SkycoinProject/skycoin/src/util/mathutil" - "github.com/SkycoinProject/skycoin/src/visor" - "github.com/SkycoinProject/skycoin/src/visor/blockdb" - "github.com/SkycoinProject/skycoin/src/wallet" -) - -// CreateTransactionResponse is returned by /wallet/transaction -type CreateTransactionResponse struct { - Transaction CreatedTransaction `json:"transaction"` - EncodedTransaction string `json:"encoded_transaction"` -} - -// NewCreateTransactionResponse creates a CreateTransactionResponse -func NewCreateTransactionResponse(txn *coin.Transaction, inputs []visor.TransactionInput) (*CreateTransactionResponse, error) { - cTxn, err := NewCreatedTransaction(txn, inputs) - if err != nil { - return nil, err - } - - txnHex, err := txn.SerializeHex() - if err != nil { - return nil, err - } - - return &CreateTransactionResponse{ - Transaction: *cTxn, - EncodedTransaction: txnHex, - }, nil -} - -// CreatedTransaction represents a transaction created by /wallet/transaction -type CreatedTransaction struct { - Length uint32 `json:"length"` - Type uint8 `json:"type"` - TxID string `json:"txid"` - InnerHash string `json:"inner_hash"` - Fee string `json:"fee"` - - Sigs []string `json:"sigs"` - In []CreatedTransactionInput `json:"inputs"` - Out []CreatedTransactionOutput `json:"outputs"` -} - -// NewCreatedTransaction returns a CreatedTransaction -func NewCreatedTransaction(txn *coin.Transaction, inputs []visor.TransactionInput) (*CreatedTransaction, error) { - if len(txn.In) != len(inputs) { - return nil, errors.New("len(txn.In) != len(inputs)") - } - - var outputHours uint64 - for _, o := range txn.Out { - var err error - outputHours, err = mathutil.AddUint64(outputHours, o.Hours) - if err != nil { - return nil, err - } - } - - var inputHours uint64 - for _, i := range inputs { - var err error - inputHours, err = mathutil.AddUint64(inputHours, i.CalculatedHours) - if err != nil { - return nil, err - } - } - - if inputHours < outputHours { - return nil, 
errors.New("inputHours unexpectedly less than output hours") - } - - fee := inputHours - outputHours - - sigs := make([]string, len(txn.Sigs)) - for i, s := range txn.Sigs { - sigs[i] = s.Hex() - } - - txID := txn.Hash() - out := make([]CreatedTransactionOutput, len(txn.Out)) - for i, o := range txn.Out { - co, err := NewCreatedTransactionOutput(o, txID) - if err != nil { - return nil, err - } - out[i] = *co - } - - in := make([]CreatedTransactionInput, len(inputs)) - for i, o := range inputs { - ci, err := NewCreatedTransactionInput(o) - if err != nil { - return nil, err - } - in[i] = *ci - } - - return &CreatedTransaction{ - Length: txn.Length, - Type: txn.Type, - TxID: txID.Hex(), - InnerHash: txn.InnerHash.Hex(), - Fee: fmt.Sprint(fee), - - Sigs: sigs, - In: in, - Out: out, - }, nil -} - -// ToTransaction converts a CreatedTransaction back to a coin.Transaction -func (r *CreatedTransaction) ToTransaction() (*coin.Transaction, error) { - t := coin.Transaction{} - - t.Length = r.Length - t.Type = r.Type - - var err error - t.InnerHash, err = cipher.SHA256FromHex(r.InnerHash) - if err != nil { - return nil, err - } - - sigs := make([]cipher.Sig, len(r.Sigs)) - for i, s := range r.Sigs { - sigs[i], err = cipher.SigFromHex(s) - if err != nil { - return nil, err - } - } - - t.Sigs = sigs - - in := make([]cipher.SHA256, len(r.In)) - for i, n := range r.In { - in[i], err = cipher.SHA256FromHex(n.UxID) - if err != nil { - return nil, err - } - } - - t.In = in - - out := make([]coin.TransactionOutput, len(r.Out)) - for i, o := range r.Out { - addr, err := cipher.DecodeBase58Address(o.Address) - if err != nil { - return nil, err - } - - coins, err := droplet.FromString(o.Coins) - if err != nil { - return nil, err - } - - hours, err := strconv.ParseUint(o.Hours, 10, 64) - if err != nil { - return nil, err - } - - out[i] = coin.TransactionOutput{ - Address: addr, - Coins: coins, - Hours: hours, - } - } - - t.Out = out - - hash, err := cipher.SHA256FromHex(r.TxID) - if err 
!= nil { - return nil, err - } - - if t.Hash() != hash { - return nil, fmt.Errorf("readable.Transaction.Hash %s does not match parsed transaction hash %s", t.Hash().Hex(), hash.Hex()) - } - - return &t, nil -} - -// CreatedTransactionOutput is a transaction output -type CreatedTransactionOutput struct { - UxID string `json:"uxid"` - Address string `json:"address"` - Coins string `json:"coins"` - Hours string `json:"hours"` -} - -// NewCreatedTransactionOutput creates CreatedTransactionOutput -func NewCreatedTransactionOutput(out coin.TransactionOutput, txid cipher.SHA256) (*CreatedTransactionOutput, error) { - coins, err := droplet.ToString(out.Coins) - if err != nil { - return nil, err - } - - return &CreatedTransactionOutput{ - UxID: out.UxID(txid).Hex(), - Address: out.Address.String(), - Coins: coins, - Hours: fmt.Sprint(out.Hours), - }, nil -} - -// CreatedTransactionInput is a verbose transaction input -type CreatedTransactionInput struct { - UxID string `json:"uxid"` - Address string `json:"address,omitempty"` - Coins string `json:"coins,omitempty"` - Hours string `json:"hours,omitempty"` - CalculatedHours string `json:"calculated_hours,omitempty"` - Time uint64 `json:"timestamp,omitempty"` - Block uint64 `json:"block,omitempty"` - TxID string `json:"txid,omitempty"` -} - -// NewCreatedTransactionInput creates CreatedTransactionInput -func NewCreatedTransactionInput(out visor.TransactionInput) (*CreatedTransactionInput, error) { - coins, err := droplet.ToString(out.UxOut.Body.Coins) - if err != nil { - return nil, err - } - - if out.UxOut.Body.SrcTransaction.Null() { - return nil, errors.New("NewCreatedTransactionInput UxOut.SrcTransaction is not initialized") - } - - addr := out.UxOut.Body.Address.String() - hours := fmt.Sprint(out.UxOut.Body.Hours) - calculatedHours := fmt.Sprint(out.CalculatedHours) - txID := out.UxOut.Body.SrcTransaction.Hex() - - return &CreatedTransactionInput{ - UxID: out.UxOut.Hash().Hex(), - Address: addr, - Coins: coins, - Hours: 
hours, - CalculatedHours: calculatedHours, - Time: out.UxOut.Head.Time, - Block: out.UxOut.Head.BkSeq, - TxID: txID, - }, nil -} - -// createTransactionRequest is sent to POST /api/v2/transaction -type createTransactionRequest struct { - IgnoreUnconfirmed bool `json:"ignore_unconfirmed"` - HoursSelection hoursSelection `json:"hours_selection"` - ChangeAddress *wh.Address `json:"change_address,omitempty"` - To []receiver `json:"to"` - UxOuts []wh.SHA256 `json:"unspents,omitempty"` - Addresses []wh.Address `json:"addresses,omitempty"` -} - -// hoursSelection defines options for hours distribution -type hoursSelection struct { - Type string `json:"type"` - Mode string `json:"mode"` - ShareFactor *decimal.Decimal `json:"share_factor,omitempty"` -} - -// receiver specifies a spend destination -type receiver struct { - Address wh.Address `json:"address"` - Coins wh.Coins `json:"coins"` - Hours *wh.Hours `json:"hours,omitempty"` -} - -// Validate validates createTransactionRequest data -func (r createTransactionRequest) Validate() error { - if r.ChangeAddress != nil && r.ChangeAddress.Null() { - return errors.New("change_address must not be the null address") - } - - switch r.HoursSelection.Type { - case transaction.HoursSelectionTypeAuto: - for i, to := range r.To { - if to.Hours != nil { - return fmt.Errorf("to[%d].hours must not be specified for auto hours_selection.mode", i) - } - } - - switch r.HoursSelection.Mode { - case transaction.HoursSelectionModeShare: - case "": - return errors.New("missing hours_selection.mode") - default: - return errors.New("invalid hours_selection.mode") - } - - case transaction.HoursSelectionTypeManual: - for i, to := range r.To { - if to.Hours == nil { - return fmt.Errorf("to[%d].hours must be specified for manual hours_selection.mode", i) - } - } - - if r.HoursSelection.Mode != "" { - return errors.New("hours_selection.mode cannot be used for manual hours_selection.type") - } - - case "": - return errors.New("missing 
hours_selection.type") - default: - return errors.New("invalid hours_selection.type") - } - - if r.HoursSelection.ShareFactor == nil { - if r.HoursSelection.Mode == transaction.HoursSelectionModeShare { - return errors.New("missing hours_selection.share_factor when hours_selection.mode is share") - } - } else { - if r.HoursSelection.Mode != transaction.HoursSelectionModeShare { - return errors.New("hours_selection.share_factor can only be used when hours_selection.mode is share") - } - - switch { - case r.HoursSelection.ShareFactor.LessThan(decimal.New(0, 0)): - return errors.New("hours_selection.share_factor cannot be negative") - case r.HoursSelection.ShareFactor.GreaterThan(decimal.New(1, 0)): - return errors.New("hours_selection.share_factor cannot be more than 1") - } - } - - if len(r.UxOuts) != 0 && len(r.Addresses) != 0 { - return errors.New("unspents and addresses cannot be combined") - } - - addressMap := make(map[cipher.Address]struct{}, len(r.Addresses)) - for i, a := range r.Addresses { - if a.Null() { - return fmt.Errorf("addresses[%d] is empty", i) - } - - if _, ok := addressMap[a.Address]; ok { - return errors.New("addresses contains duplicate values") - } - - addressMap[a.Address] = struct{}{} - } - - // Check for duplicate spending uxouts - uxouts := make(map[cipher.SHA256]struct{}, len(r.UxOuts)) - for _, o := range r.UxOuts { - if _, ok := uxouts[o.SHA256]; ok { - return errors.New("unspents contains duplicate values") - } - - uxouts[o.SHA256] = struct{}{} - } - - if len(r.To) == 0 { - return errors.New("to is empty") - } - - for i, to := range r.To { - if to.Address.Null() { - return fmt.Errorf("to[%d].address is empty", i) - } - - if to.Coins == 0 { - return fmt.Errorf("to[%d].coins must not be zero", i) - } - - if to.Coins.Value()%params.UserVerifyTxn.MaxDropletDivisor() != 0 { - return fmt.Errorf("to[%d].coins has too many decimal places", i) - } - } - - // Check for duplicate created outputs, a transaction can't have outputs with - // the 
same (address, coins, hours) - // Auto mode would distribute hours to the outputs and could hypothetically - // avoid assigning duplicate hours in many cases, but the complexity for doing - // so is very high, so also reject duplicate (address, coins) for auto mode. - outputs := make(map[coin.TransactionOutput]struct{}, len(r.To)) - for _, to := range r.To { - var hours uint64 - if to.Hours != nil { - hours = to.Hours.Value() - } - - txo := coin.TransactionOutput{ - Address: to.Address.Address, - Coins: to.Coins.Value(), - Hours: hours, - } - - if _, ok := outputs[txo]; ok { - return errors.New("to contains duplicate values") - } - - outputs[txo] = struct{}{} - } - - return nil -} - -// TransactionParams converts createTransactionRequest to transaction.Params -func (r createTransactionRequest) TransactionParams() transaction.Params { - to := make([]coin.TransactionOutput, len(r.To)) - for i, t := range r.To { - var hours uint64 - if t.Hours != nil { - hours = t.Hours.Value() - } - - to[i] = coin.TransactionOutput{ - Address: t.Address.Address, - Coins: t.Coins.Value(), - Hours: hours, - } - } - - var changeAddress *cipher.Address - if r.ChangeAddress != nil { - changeAddress = &r.ChangeAddress.Address - } - - return transaction.Params{ - HoursSelection: transaction.HoursSelection{ - Type: r.HoursSelection.Type, - Mode: r.HoursSelection.Mode, - ShareFactor: r.HoursSelection.ShareFactor, - }, - ChangeAddress: changeAddress, - To: to, - } -} - -func (r createTransactionRequest) VisorParams() visor.CreateTransactionParams { - return visor.CreateTransactionParams{ - IgnoreUnconfirmed: r.IgnoreUnconfirmed, - Addresses: r.addresses(), - UxOuts: r.uxOuts(), - } -} - -func (r createTransactionRequest) addresses() []cipher.Address { - if len(r.Addresses) == 0 { - return nil - } - addresses := make([]cipher.Address, len(r.Addresses)) - for i, a := range r.Addresses { - addresses[i] = a.Address - } - return addresses -} - -func (r createTransactionRequest) uxOuts() 
[]cipher.SHA256 { - if len(r.UxOuts) == 0 { - return nil - } - uxouts := make([]cipher.SHA256, len(r.UxOuts)) - for i, o := range r.UxOuts { - uxouts[i] = o.SHA256 - } - return uxouts -} - -// transactionHandlerV2 creates a transaction from provided outputs and parameters -// Method: POST -// URI: /api/v2/transaction -// Args: JSON body -func transactionHandlerV2(gateway Gatewayer) http.HandlerFunc { - return func(w http.ResponseWriter, r *http.Request) { - if r.Method != http.MethodPost { - resp := NewHTTPErrorResponse(http.StatusMethodNotAllowed, "") - writeHTTPResponse(w, resp) - return - } - - var req createTransactionRequest - if err := json.NewDecoder(r.Body).Decode(&req); err != nil { - resp := NewHTTPErrorResponse(http.StatusBadRequest, err.Error()) - writeHTTPResponse(w, resp) - return - } - - if err := req.Validate(); err != nil { - resp := NewHTTPErrorResponse(http.StatusBadRequest, err.Error()) - writeHTTPResponse(w, resp) - return - } - - // Check that addresses or unspents are not empty - // This is not checked in Validate() because POST /api/v1/wallet/transaction - // allows both to be empty - if len(req.Addresses) == 0 && len(req.UxOuts) == 0 { - resp := NewHTTPErrorResponse(http.StatusBadRequest, "one of addresses or unspents must not be empty") - writeHTTPResponse(w, resp) - return - } - - txn, inputs, err := gateway.CreateTransaction(req.TransactionParams(), req.VisorParams()) - if err != nil { - var resp HTTPResponse - switch err.(type) { - case blockdb.ErrUnspentNotExist, transaction.Error, visor.UserError, wallet.Error: - resp = NewHTTPErrorResponse(http.StatusBadRequest, err.Error()) - default: - switch err { - case fee.ErrTxnNoFee, fee.ErrTxnInsufficientCoinHours: - resp = NewHTTPErrorResponse(http.StatusBadRequest, err.Error()) - default: - resp = NewHTTPErrorResponse(http.StatusInternalServerError, err.Error()) - } - } - writeHTTPResponse(w, resp) - return - } - - txnResp, err := NewCreateTransactionResponse(txn, inputs) - if err != nil { 
- resp := NewHTTPErrorResponse(http.StatusInternalServerError, fmt.Sprintf("NewCreateTransactionResponse failed: %v", err)) - writeHTTPResponse(w, resp) - return - } - - writeHTTPResponse(w, HTTPResponse{ - Data: txnResp, - }) - } -} - -// walletCreateTransactionRequest is sent to POST /api/v1/wallet/transaction -type walletCreateTransactionRequest struct { - Unsigned bool `json:"unsigned"` - WalletID string `json:"wallet_id"` - Password string `json:"password"` - createTransactionRequest -} - -// Validate validates walletCreateTransactionRequest data -func (r walletCreateTransactionRequest) Validate() error { - if r.WalletID == "" { - return errors.New("missing wallet_id") - } - - if r.Unsigned && len(r.Password) != 0 { - return errors.New("password must not be used for unsigned transactions") - } - - return r.createTransactionRequest.Validate() -} - -// walletCreateTransactionHandler creates a transaction -// Method: POST -// URI: /api/v1/wallet/transaction -// Args: JSON body -func walletCreateTransactionHandler(gateway Gatewayer) http.HandlerFunc { - return func(w http.ResponseWriter, r *http.Request) { - if r.Method != http.MethodPost { - wh.Error405(w) - return - } - - if !isContentTypeJSON(r.Header.Get("Content-Type")) { - wh.Error415(w) - return - } - - var req walletCreateTransactionRequest - err := json.NewDecoder(r.Body).Decode(&req) - if err != nil { - logger.WithError(err).Error("Invalid create transaction request") - wh.Error400(w, err.Error()) - return - } - - if err := req.Validate(); err != nil { - logger.WithError(err).Error("Invalid create transaction request") - wh.Error400(w, err.Error()) - return - } - - var txn *coin.Transaction - var inputs []visor.TransactionInput - if req.Unsigned { - txn, inputs, err = gateway.WalletCreateTransaction(req.WalletID, req.TransactionParams(), req.VisorParams()) - } else { - txn, inputs, err = gateway.WalletCreateTransactionSigned(req.WalletID, []byte(req.Password), req.TransactionParams(), req.VisorParams()) 
- } - if err != nil { - switch err.(type) { - case wallet.Error: - switch err { - case wallet.ErrWalletAPIDisabled: - wh.Error403(w, "") - case wallet.ErrWalletNotExist: - wh.Error404(w, err.Error()) - default: - wh.Error400(w, err.Error()) - } - case blockdb.ErrUnspentNotExist, - transaction.Error, - visor.UserError: - wh.Error400(w, err.Error()) - default: - switch err { - case fee.ErrTxnNoFee, - fee.ErrTxnInsufficientCoinHours: - wh.Error400(w, err.Error()) - default: - wh.Error500(w, err.Error()) - } - } - return - } - - txnResp, err := NewCreateTransactionResponse(txn, inputs) - if err != nil { - err = fmt.Errorf("NewCreateTransactionResponse failed: %v", err) - wh.Error500(w, err.Error()) - return - } - - wh.SendJSONOr500(logger, w, txnResp) - } -} - -// WalletSignTransactionRequest is the request body object for /api/v2/wallet/transaction/sign -type WalletSignTransactionRequest struct { - WalletID string `json:"wallet_id"` - Password string `json:"password"` - EncodedTransaction string `json:"encoded_transaction"` - SignIndexes []int `json:"sign_indexes"` -} - -// walletSignTransactionHandler signs an unsigned transaction -// Method: POST -// URI: /api/v2/wallet/transaction/sign -// Args: JSON body -func walletSignTransactionHandler(gateway Gatewayer) http.HandlerFunc { - return func(w http.ResponseWriter, r *http.Request) { - if r.Method != http.MethodPost { - resp := NewHTTPErrorResponse(http.StatusMethodNotAllowed, "") - writeHTTPResponse(w, resp) - return - } - - var req WalletSignTransactionRequest - if err := json.NewDecoder(r.Body).Decode(&req); err != nil { - resp := NewHTTPErrorResponse(http.StatusBadRequest, err.Error()) - writeHTTPResponse(w, resp) - return - } - - if req.WalletID == "" { - resp := NewHTTPErrorResponse(http.StatusBadRequest, "wallet_id is required") - writeHTTPResponse(w, resp) - return - } - - if req.EncodedTransaction == "" { - resp := NewHTTPErrorResponse(http.StatusBadRequest, "encoded_transaction is required") - 
writeHTTPResponse(w, resp) - return - } - - txn, err := decodeTxn(req.EncodedTransaction) - if err != nil { - resp := NewHTTPErrorResponse(http.StatusBadRequest, fmt.Sprintf("Decode transaction failed: %v", err)) - writeHTTPResponse(w, resp) - return - } - - // Check that number of sign_indexes does not exceed number of inputs - if len(req.SignIndexes) > len(txn.In) { - resp := NewHTTPErrorResponse(http.StatusBadRequest, "Too many values in sign_indexes") - writeHTTPResponse(w, resp) - return - } - - // Check that values in sign_indexes are in the range of txn inputs - for _, i := range req.SignIndexes { - if i < 0 || i >= len(txn.In) { - resp := NewHTTPErrorResponse(http.StatusBadRequest, "Value in sign_indexes exceeds range of transaction inputs array") - writeHTTPResponse(w, resp) - return - } - } - - // Check for duplicate values in sign_indexes - signIndexesMap := make(map[int]struct{}, len(req.SignIndexes)) - for _, i := range req.SignIndexes { - if _, ok := signIndexesMap[i]; ok { - resp := NewHTTPErrorResponse(http.StatusBadRequest, "Duplicate value in sign_indexes") - writeHTTPResponse(w, resp) - return - } - signIndexesMap[i] = struct{}{} - } - - signedTxn, inputs, err := gateway.WalletSignTransaction(req.WalletID, []byte(req.Password), txn, req.SignIndexes) - if err != nil { - var resp HTTPResponse - switch err.(type) { - case wallet.Error: - switch err { - case wallet.ErrWalletNotExist: - resp = NewHTTPErrorResponse(http.StatusNotFound, err.Error()) - case wallet.ErrWalletAPIDisabled: - resp = NewHTTPErrorResponse(http.StatusForbidden, err.Error()) - default: - resp = NewHTTPErrorResponse(http.StatusBadRequest, err.Error()) - } - case visor.ErrTxnViolatesSoftConstraint, - visor.ErrTxnViolatesHardConstraint, - visor.ErrTxnViolatesUserConstraint, - blockdb.ErrUnspentNotExist: - resp = NewHTTPErrorResponse(http.StatusBadRequest, err.Error()) - default: - resp = NewHTTPErrorResponse(http.StatusInternalServerError, err.Error()) - } - writeHTTPResponse(w, 
resp) - return - } - - txnResp, err := NewCreateTransactionResponse(signedTxn, inputs) - if err != nil { - resp := NewHTTPErrorResponse(http.StatusInternalServerError, err.Error()) - writeHTTPResponse(w, resp) - return - } - - writeHTTPResponse(w, HTTPResponse{ - Data: txnResp, - }) - } -} diff --git a/vendor/github.com/SkycoinProject/skycoin/src/api/storage.go b/vendor/github.com/SkycoinProject/skycoin/src/api/storage.go deleted file mode 100644 index 9ddf189..0000000 --- a/vendor/github.com/SkycoinProject/skycoin/src/api/storage.go +++ /dev/null @@ -1,190 +0,0 @@ -package api - -import ( - "encoding/json" - "net/http" - - "github.com/SkycoinProject/skycoin/src/kvstorage" -) - -// Dispatches /data endpoint. -// Method: GET, POST, DELETE -// URI: /api/v2/data -func storageHandler(gateway Gatewayer) http.HandlerFunc { - return func(w http.ResponseWriter, r *http.Request) { - switch r.Method { - case http.MethodGet: - getStorageValuesHandler(w, r, gateway) - case http.MethodPost: - addStorageValueHandler(w, r, gateway) - case http.MethodDelete: - removeStorageValueHandler(w, r, gateway) - default: - resp := NewHTTPErrorResponse(http.StatusMethodNotAllowed, "") - writeHTTPResponse(w, resp) - } - } -} - -// serves GET requests for /data enpdoint -func getStorageValuesHandler(w http.ResponseWriter, r *http.Request, gateway Gatewayer) { - storageType := r.FormValue("type") - if storageType == "" { - resp := NewHTTPErrorResponse(http.StatusBadRequest, "type is required") - writeHTTPResponse(w, resp) - return - } - - key := r.FormValue("key") - - if key == "" { - getAllStorageValuesHandler(w, gateway, kvstorage.Type(storageType)) - } else { - getStorageValueHandler(w, gateway, kvstorage.Type(storageType), key) - } -} - -// Returns all existing storage values of a given storage type. 
-// Args: -// type: storage type to get values from -func getAllStorageValuesHandler(w http.ResponseWriter, gateway Gatewayer, storageType kvstorage.Type) { - data, err := gateway.GetAllStorageValues(kvstorage.Type(storageType)) - if err != nil { - var resp HTTPResponse - switch err { - case kvstorage.ErrStorageAPIDisabled: - resp = NewHTTPErrorResponse(http.StatusForbidden, "") - case kvstorage.ErrNoSuchStorage: - resp = NewHTTPErrorResponse(http.StatusNotFound, "storage is not loaded") - case kvstorage.ErrUnknownKVStorageType: - resp = NewHTTPErrorResponse(http.StatusBadRequest, "unknown storage") - default: - resp = NewHTTPErrorResponse(http.StatusInternalServerError, err.Error()) - } - writeHTTPResponse(w, resp) - return - } - - writeHTTPResponse(w, HTTPResponse{ - Data: data, - }) -} - -// Returns value from storage of a given type by key. -// Args: -// key: key for a value to be retrieved -func getStorageValueHandler(w http.ResponseWriter, gateway Gatewayer, storageType kvstorage.Type, key string) { - val, err := gateway.GetStorageValue(storageType, key) - if err != nil { - var resp HTTPResponse - switch err { - case kvstorage.ErrStorageAPIDisabled: - resp = NewHTTPErrorResponse(http.StatusForbidden, "") - case kvstorage.ErrNoSuchStorage: - resp = NewHTTPErrorResponse(http.StatusNotFound, "storage is not loaded") - case kvstorage.ErrUnknownKVStorageType: - resp = NewHTTPErrorResponse(http.StatusBadRequest, "unknown storage") - case kvstorage.ErrNoSuchKey: - resp = NewHTTPErrorResponse(http.StatusNotFound, "") - default: - resp = NewHTTPErrorResponse(http.StatusInternalServerError, err.Error()) - } - writeHTTPResponse(w, resp) - return - } - - writeHTTPResponse(w, HTTPResponse{ - Data: val, - }) -} - -// StorageRequest is the request data for POST /api/v2/data -type StorageRequest struct { - StorageType kvstorage.Type `json:"type"` - Key string `json:"key"` - Val string `json:"val"` -} - -// Adds the value to the storage of a given type -// Args: -// type: 
storage type -// key: key -// val: value -func addStorageValueHandler(w http.ResponseWriter, r *http.Request, gateway Gatewayer) { - var req StorageRequest - if err := json.NewDecoder(r.Body).Decode(&req); err != nil { - resp := NewHTTPErrorResponse(http.StatusBadRequest, err.Error()) - writeHTTPResponse(w, resp) - return - } - - if req.StorageType == "" { - resp := NewHTTPErrorResponse(http.StatusBadRequest, "type is required") - writeHTTPResponse(w, resp) - return - } - - if req.Key == "" { - resp := NewHTTPErrorResponse(http.StatusBadRequest, "key is required") - writeHTTPResponse(w, resp) - return - } - - if err := gateway.AddStorageValue(req.StorageType, req.Key, req.Val); err != nil { - var resp HTTPResponse - switch err { - case kvstorage.ErrStorageAPIDisabled: - resp = NewHTTPErrorResponse(http.StatusForbidden, "") - case kvstorage.ErrNoSuchStorage: - resp = NewHTTPErrorResponse(http.StatusNotFound, "storage is not loaded") - case kvstorage.ErrUnknownKVStorageType: - resp = NewHTTPErrorResponse(http.StatusBadRequest, "unknown storage") - default: - resp = NewHTTPErrorResponse(http.StatusInternalServerError, err.Error()) - } - writeHTTPResponse(w, resp) - return - } - - writeHTTPResponse(w, HTTPResponse{}) -} - -// Removes the value by key from the storage of a given type -// Args: -// type: storage type -// key: key -func removeStorageValueHandler(w http.ResponseWriter, r *http.Request, gateway Gatewayer) { - storageType := r.FormValue("type") - if storageType == "" { - resp := NewHTTPErrorResponse(http.StatusBadRequest, "type is required") - writeHTTPResponse(w, resp) - return - } - - key := r.FormValue("key") - if key == "" { - resp := NewHTTPErrorResponse(http.StatusBadRequest, "key is required") - writeHTTPResponse(w, resp) - return - } - - if err := gateway.RemoveStorageValue(kvstorage.Type(storageType), key); err != nil { - var resp HTTPResponse - switch err { - case kvstorage.ErrStorageAPIDisabled: - resp = NewHTTPErrorResponse(http.StatusForbidden, 
"") - case kvstorage.ErrNoSuchStorage: - resp = NewHTTPErrorResponse(http.StatusNotFound, "storage is not loaded") - case kvstorage.ErrUnknownKVStorageType: - resp = NewHTTPErrorResponse(http.StatusBadRequest, "unknown storage") - case kvstorage.ErrNoSuchKey: - resp = NewHTTPErrorResponse(http.StatusNotFound, "") - default: - resp = NewHTTPErrorResponse(http.StatusInternalServerError, err.Error()) - } - writeHTTPResponse(w, resp) - return - } - - writeHTTPResponse(w, HTTPResponse{}) -} diff --git a/vendor/github.com/SkycoinProject/skycoin/src/api/transaction.go b/vendor/github.com/SkycoinProject/skycoin/src/api/transaction.go deleted file mode 100644 index f32ebd7..0000000 --- a/vendor/github.com/SkycoinProject/skycoin/src/api/transaction.go +++ /dev/null @@ -1,685 +0,0 @@ -package api - -import ( - "encoding/hex" - "encoding/json" - "errors" - "fmt" - "net/http" - "sort" - "strconv" - "strings" - - "github.com/SkycoinProject/skycoin/src/cipher" - "github.com/SkycoinProject/skycoin/src/coin" - "github.com/SkycoinProject/skycoin/src/daemon" - "github.com/SkycoinProject/skycoin/src/readable" - wh "github.com/SkycoinProject/skycoin/src/util/http" - "github.com/SkycoinProject/skycoin/src/util/mathutil" - "github.com/SkycoinProject/skycoin/src/visor" -) - -// pendingTxnsHandler returns pending (unconfirmed) transactions -// Method: GET -// URI: /api/v1/pendingTxs -// Args: -// verbose: [bool] include verbose transaction input data -func pendingTxnsHandler(gateway Gatewayer) http.HandlerFunc { - return func(w http.ResponseWriter, r *http.Request) { - if r.Method != http.MethodGet { - wh.Error405(w) - return - } - - verbose, err := parseBoolFlag(r.FormValue("verbose")) - if err != nil { - wh.Error400(w, "Invalid value for verbose") - return - } - - if verbose { - txns, inputs, err := gateway.GetAllUnconfirmedTransactionsVerbose() - if err != nil { - wh.Error500(w, err.Error()) - return - } - - vb, err := readable.NewUnconfirmedTransactionsVerbose(txns, inputs) - if err != 
nil { - wh.Error500(w, err.Error()) - return - } - - wh.SendJSONOr500(logger, w, vb) - } else { - txns, err := gateway.GetAllUnconfirmedTransactions() - if err != nil { - wh.Error500(w, err.Error()) - return - } - - ret, err := readable.NewUnconfirmedTransactions(txns) - if err != nil { - wh.Error500(w, err.Error()) - return - } - - wh.SendJSONOr500(logger, w, ret) - } - } -} - -// TransactionEncodedResponse represents the data struct of the response to /api/v1/transaction?encoded=1 -type TransactionEncodedResponse struct { - Status readable.TransactionStatus `json:"status"` - Time uint64 `json:"time"` - EncodedTransaction string `json:"encoded_transaction"` -} - -// transactionHandler returns a transaction identified by its txid hash -// Method: GET -// URI: /api/v1/transaction -// Args: -// txid: transaction hash -// verbose: [bool] include verbose transaction input data -// encoded: [bool] return as a raw encoded transaction -func transactionHandler(gateway Gatewayer) http.HandlerFunc { - return func(w http.ResponseWriter, r *http.Request) { - if r.Method != http.MethodGet { - wh.Error405(w) - return - } - - txid := r.FormValue("txid") - if txid == "" { - wh.Error400(w, "txid is empty") - return - } - - verbose, err := parseBoolFlag(r.FormValue("verbose")) - if err != nil { - wh.Error400(w, "Invalid value for verbose") - return - } - - encoded, err := parseBoolFlag(r.FormValue("encoded")) - if err != nil { - wh.Error400(w, "Invalid value for encoded") - return - } - - if verbose && encoded { - wh.Error400(w, "verbose and encoded cannot be combined") - return - } - - h, err := cipher.SHA256FromHex(txid) - if err != nil { - wh.Error400(w, err.Error()) - return - } - - if verbose { - txn, inputs, err := gateway.GetTransactionWithInputs(h) - if err != nil { - wh.Error500(w, err.Error()) - return - } - if txn == nil { - wh.Error404(w, "") - return - } - - rTxn, err := readable.NewTransactionWithStatusVerbose(txn, inputs) - if err != nil { - wh.Error500(w, 
err.Error()) - return - } - - wh.SendJSONOr500(logger, w, rTxn) - return - } - - txn, err := gateway.GetTransaction(h) - if err != nil { - wh.Error500(w, err.Error()) - return - } - if txn == nil { - wh.Error404(w, "") - return - } - - if encoded { - txnHex, err := txn.Transaction.SerializeHex() - if err != nil { - wh.Error500(w, err.Error()) - return - } - - wh.SendJSONOr500(logger, w, TransactionEncodedResponse{ - EncodedTransaction: txnHex, - Status: readable.NewTransactionStatus(txn.Status), - Time: txn.Time, - }) - return - } - - rTxn, err := readable.NewTransactionWithStatus(txn) - if err != nil { - wh.Error500(w, err.Error()) - return - } - - wh.SendJSONOr500(logger, w, rTxn) - } -} - -// TransactionsWithStatus array of transaction results -type TransactionsWithStatus struct { - Transactions []readable.TransactionWithStatus `json:"txns"` -} - -// Sort sorts transactions chronologically, using txid for tiebreaking -func (r TransactionsWithStatus) Sort() { - sort.Slice(r.Transactions, func(i, j int) bool { - a := r.Transactions[i] - b := r.Transactions[j] - - if a.Time == b.Time { - return strings.Compare(a.Transaction.Hash, b.Transaction.Hash) < 0 - } - - return a.Time < b.Time - }) -} - -// NewTransactionsWithStatus converts []Transaction to TransactionsWithStatus -func NewTransactionsWithStatus(txns []visor.Transaction) (*TransactionsWithStatus, error) { - txnRlts := make([]readable.TransactionWithStatus, 0, len(txns)) - for _, txn := range txns { - rTxn, err := readable.NewTransactionWithStatus(&txn) - if err != nil { - return nil, err - } - txnRlts = append(txnRlts, *rTxn) - } - - return &TransactionsWithStatus{ - Transactions: txnRlts, - }, nil -} - -// TransactionsWithStatusVerbose array of transaction results -type TransactionsWithStatusVerbose struct { - Transactions []readable.TransactionWithStatusVerbose `json:"txns"` -} - -// Sort sorts transactions chronologically, using txid for tiebreaking -func (r TransactionsWithStatusVerbose) Sort() { - 
sort.Slice(r.Transactions, func(i, j int) bool { - a := r.Transactions[i] - b := r.Transactions[j] - - if a.Time == b.Time { - return strings.Compare(a.Transaction.Hash, b.Transaction.Hash) < 0 - } - - return a.Time < b.Time - }) -} - -// NewTransactionsWithStatusVerbose converts []Transaction to []TransactionsWithStatusVerbose -func NewTransactionsWithStatusVerbose(txns []visor.Transaction, inputs [][]visor.TransactionInput) (*TransactionsWithStatusVerbose, error) { - if len(txns) != len(inputs) { - return nil, errors.New("NewTransactionsWithStatusVerbose: len(txns) != len(inputs)") - } - - txnRlts := make([]readable.TransactionWithStatusVerbose, len(txns)) - for i, txn := range txns { - rTxn, err := readable.NewTransactionWithStatusVerbose(&txn, inputs[i]) - if err != nil { - return nil, err - } - txnRlts[i] = *rTxn - } - - return &TransactionsWithStatusVerbose{ - Transactions: txnRlts, - }, nil -} - -// Returns transactions that match the filters. -// Method: GET, POST -// URI: /api/v1/transactions -// Args: -// addrs: Comma separated addresses [optional, returns all transactions if no address provided] -// confirmed: Whether the transactions should be confirmed [optional, must be 0 or 1; if not provided, returns all] -// verbose: [bool] include verbose transaction input data -func transactionsHandler(gateway Gatewayer) http.HandlerFunc { - return func(w http.ResponseWriter, r *http.Request) { - if r.Method != http.MethodGet && r.Method != http.MethodPost { - wh.Error405(w) - return - } - - verbose, err := parseBoolFlag(r.FormValue("verbose")) - if err != nil { - wh.Error400(w, "Invalid value for verbose") - return - } - - // Gets 'addrs' parameter value - addrs, err := parseAddressesFromStr(r.FormValue("addrs")) - if err != nil { - wh.Error400(w, fmt.Sprintf("parse parameter: 'addrs' failed: %v", err)) - return - } - - // Initialize transaction filters - flts := []visor.TxFilter{visor.NewAddrsFilter(addrs)} - - // Gets the 'confirmed' parameter value - 
confirmedStr := r.FormValue("confirmed") - if confirmedStr != "" { - confirmed, err := strconv.ParseBool(confirmedStr) - if err != nil { - wh.Error400(w, fmt.Sprintf("invalid 'confirmed' value: %v", err)) - return - } - - flts = append(flts, visor.NewConfirmedTxFilter(confirmed)) - } - - if verbose { - txns, inputs, err := gateway.GetTransactionsWithInputs(flts) - if err != nil { - wh.Error500(w, err.Error()) - return - } - - rTxns, err := NewTransactionsWithStatusVerbose(txns, inputs) - if err != nil { - wh.Error500(w, err.Error()) - return - } - - rTxns.Sort() - - wh.SendJSONOr500(logger, w, rTxns.Transactions) - } else { - txns, err := gateway.GetTransactions(flts) - if err != nil { - wh.Error500(w, err.Error()) - return - } - - rTxns, err := NewTransactionsWithStatus(txns) - if err != nil { - wh.Error500(w, err.Error()) - return - } - - rTxns.Sort() - - wh.SendJSONOr500(logger, w, rTxns.Transactions) - } - } -} - -// InjectTransactionRequest is sent to POST /api/v1/injectTransaction -type InjectTransactionRequest struct { - RawTxn string `json:"rawtx"` - NoBroadcast bool `json:"no_broadcast,omitempty"` -} - -// URI: /api/v1/injectTransaction -// Method: POST -// Content-Type: application/json -// Body: {"rawtx": ""} -// Response: -// 200 - ok, returns the transaction hash in hex as string -// 400 - bad transaction -// 500 - other error -// 503 - network unavailable for broadcasting transaction -func injectTransactionHandler(gateway Gatewayer) http.HandlerFunc { - return func(w http.ResponseWriter, r *http.Request) { - if r.Method != http.MethodPost { - wh.Error405(w) - return - } - - var v InjectTransactionRequest - if err := json.NewDecoder(r.Body).Decode(&v); err != nil { - wh.Error400(w, err.Error()) - return - } - - if v.RawTxn == "" { - wh.Error400(w, "rawtx is required") - return - } - - txn, err := coin.DeserializeTransactionHex(v.RawTxn) - if err != nil { - wh.Error400(w, err.Error()) - return - } - - if v.NoBroadcast { - if err := 
gateway.InjectTransaction(txn); err != nil { - switch err.(type) { - case visor.ErrTxnViolatesUserConstraint, - visor.ErrTxnViolatesHardConstraint, - visor.ErrTxnViolatesSoftConstraint: - wh.Error400(w, err.Error()) - default: - wh.Error500(w, err.Error()) - } - return - } - } else { - if err := gateway.InjectBroadcastTransaction(txn); err != nil { - switch err.(type) { - case visor.ErrTxnViolatesUserConstraint, - visor.ErrTxnViolatesHardConstraint, - visor.ErrTxnViolatesSoftConstraint: - wh.Error400(w, err.Error()) - default: - if daemon.IsBroadcastFailure(err) { - wh.Error503(w, err.Error()) - } else { - wh.Error500(w, err.Error()) - } - } - return - } - } - - wh.SendJSONOr500(logger, w, txn.Hash().Hex()) - } -} - -// ResendResult the result of rebroadcasting transaction -type ResendResult struct { - Txids []string `json:"txids"` -} - -// NewResendResult creates a ResendResult from a list of transaction ID hashes -func NewResendResult(hashes []cipher.SHA256) ResendResult { - txids := make([]string, len(hashes)) - for i, h := range hashes { - txids[i] = h.Hex() - } - return ResendResult{ - Txids: txids, - } -} - -// URI: /api/v1/resendUnconfirmedTxns -// Method: POST -// Broadcasts all unconfirmed transactions from the unconfirmed transaction pool -// Response: -// 200 - ok, returns the transaction hashes that were resent -// 405 - method not POST -// 500 - other error -// 503 - network unavailable for broadcasting transaction -func resendUnconfirmedTxnsHandler(gateway Gatewayer) http.HandlerFunc { - return func(w http.ResponseWriter, r *http.Request) { - if r.Method != http.MethodPost { - wh.Error405(w) - return - } - - hashes, err := gateway.ResendUnconfirmedTxns() - if err != nil { - if daemon.IsBroadcastFailure(err) { - wh.Error503(w, err.Error()) - } else { - wh.Error500(w, err.Error()) - } - return - } - - wh.SendJSONOr500(logger, w, NewResendResult(hashes)) - } -} - -// URI: /api/v1/rawtx -// Method: GET -// Args: -// txid: transaction ID hash -// Returns 
the hex-encoded byte serialization of a transaction. -// The transaction may be confirmed or unconfirmed. -func rawTxnHandler(gateway Gatewayer) http.HandlerFunc { - return func(w http.ResponseWriter, r *http.Request) { - if r.Method != http.MethodGet { - wh.Error405(w) - return - } - - txid := r.FormValue("txid") - if txid == "" { - wh.Error400(w, "txid is empty") - return - } - - h, err := cipher.SHA256FromHex(txid) - if err != nil { - wh.Error400(w, err.Error()) - return - } - - txn, err := gateway.GetTransaction(h) - if err != nil { - wh.Error400(w, err.Error()) - return - } - - if txn == nil { - wh.Error404(w, "") - return - } - - txnHex, err := txn.Transaction.SerializeHex() - if err != nil { - wh.Error500(w, err.Error()) - return - } - - wh.SendJSONOr500(logger, w, txnHex) - } -} - -// VerifyTransactionRequest represents the data struct of the request for /api/v2/transaction/verify -type VerifyTransactionRequest struct { - Unsigned bool `json:"unsigned"` - EncodedTransaction string `json:"encoded_transaction"` -} - -// VerifyTransactionResponse the response data struct for /api/v2/transaction/verify -type VerifyTransactionResponse struct { - Unsigned bool `json:"unsigned"` - Confirmed bool `json:"confirmed"` - Transaction CreatedTransaction `json:"transaction"` -} - -// Decode and verify an encoded transaction -// Method: POST -// URI: /api/v2/transaction/verify -func verifyTxnHandler(gateway Gatewayer) http.HandlerFunc { - return func(w http.ResponseWriter, r *http.Request) { - if r.Method != http.MethodPost { - resp := NewHTTPErrorResponse(http.StatusMethodNotAllowed, "") - writeHTTPResponse(w, resp) - return - } - - var req VerifyTransactionRequest - if err := json.NewDecoder(r.Body).Decode(&req); err != nil { - resp := NewHTTPErrorResponse(http.StatusBadRequest, err.Error()) - writeHTTPResponse(w, resp) - return - } - - if req.EncodedTransaction == "" { - resp := NewHTTPErrorResponse(http.StatusBadRequest, "encoded_transaction is required") - 
writeHTTPResponse(w, resp) - return - } - - txn, err := decodeTxn(req.EncodedTransaction) - if err != nil { - resp := NewHTTPErrorResponse(http.StatusBadRequest, fmt.Sprintf("decode transaction failed: %v", err)) - writeHTTPResponse(w, resp) - return - } - - signed := visor.TxnSigned - if req.Unsigned { - signed = visor.TxnUnsigned - } - - var resp HTTPResponse - inputs, isTxnConfirmed, err := gateway.VerifyTxnVerbose(txn, signed) - if err != nil { - switch err.(type) { - case visor.ErrTxnViolatesSoftConstraint, - visor.ErrTxnViolatesHardConstraint, - visor.ErrTxnViolatesUserConstraint: - resp.Error = &HTTPError{ - Code: http.StatusUnprocessableEntity, - Message: err.Error(), - } - default: - resp := NewHTTPErrorResponse(http.StatusInternalServerError, err.Error()) - writeHTTPResponse(w, resp) - return - } - } - - verifyTxnResp := VerifyTransactionResponse{ - Confirmed: isTxnConfirmed, - Unsigned: !txn.IsFullySigned(), - } - - if len(inputs) != len(txn.In) { - inputs = nil - } - verboseTxn, err := newCreatedTransactionFuzzy(txn, inputs) - if err != nil { - resp := NewHTTPErrorResponse(http.StatusInternalServerError, err.Error()) - writeHTTPResponse(w, resp) - return - } - - verifyTxnResp.Transaction = *verboseTxn - - resp.Data = verifyTxnResp - - if isTxnConfirmed && resp.Error == nil { - resp.Error = &HTTPError{ - Code: http.StatusUnprocessableEntity, - Message: "transaction has been spent", - } - } - - writeHTTPResponse(w, resp) - } -} - -func decodeTxn(encodedTxn string) (*coin.Transaction, error) { - var txn coin.Transaction - b, err := hex.DecodeString(encodedTxn) - if err != nil { - return nil, err - } - - txn, err = coin.DeserializeTransaction(b) - if err != nil { - return nil, err - } - - return &txn, nil -} - -// newCreatedTransactionFuzzy creates a CreatedTransaction but accommodates possibly invalid txn input -func newCreatedTransactionFuzzy(txn *coin.Transaction, inputs []visor.TransactionInput) (*CreatedTransaction, error) { - if len(txn.In) != 
len(inputs) && len(inputs) != 0 { - return nil, errors.New("len(txn.In) != len(inputs)") - } - - var outputHours uint64 - var feeInvalid bool - for _, o := range txn.Out { - var err error - outputHours, err = mathutil.AddUint64(outputHours, o.Hours) - if err != nil { - feeInvalid = true - } - } - - var inputHours uint64 - for _, i := range inputs { - var err error - inputHours, err = mathutil.AddUint64(inputHours, i.CalculatedHours) - if err != nil { - feeInvalid = true - } - } - - if inputHours < outputHours { - feeInvalid = true - } - - var fee uint64 - if !feeInvalid { - fee = inputHours - outputHours - } - - sigs := make([]string, len(txn.Sigs)) - for i, s := range txn.Sigs { - sigs[i] = s.Hex() - } - - txID := txn.Hash() - out := make([]CreatedTransactionOutput, len(txn.Out)) - for i, o := range txn.Out { - co, err := NewCreatedTransactionOutput(o, txID) - if err != nil { - logger.WithError(err).Error("NewCreatedTransactionOutput failed") - continue - } - out[i] = *co - } - - in := make([]CreatedTransactionInput, len(txn.In)) - if len(inputs) == 0 { - for i, h := range txn.In { - in[i] = CreatedTransactionInput{ - UxID: h.Hex(), - } - } - } else { - for i, o := range inputs { - ci, err := NewCreatedTransactionInput(o) - if err != nil { - logger.WithError(err).Error("NewCreatedTransactionInput failed") - continue - } - in[i] = *ci - } - } - - return &CreatedTransaction{ - Length: txn.Length, - Type: txn.Type, - TxID: txID.Hex(), - InnerHash: txn.InnerHash.Hex(), - Fee: fmt.Sprint(fee), - - Sigs: sigs, - In: in, - Out: out, - }, nil -} diff --git a/vendor/github.com/SkycoinProject/skycoin/src/api/uxout.go b/vendor/github.com/SkycoinProject/skycoin/src/api/uxout.go deleted file mode 100644 index 7eb478f..0000000 --- a/vendor/github.com/SkycoinProject/skycoin/src/api/uxout.go +++ /dev/null @@ -1,87 +0,0 @@ -package api - -import ( - "net/http" - - "github.com/SkycoinProject/skycoin/src/cipher" - "github.com/SkycoinProject/skycoin/src/readable" - wh 
"github.com/SkycoinProject/skycoin/src/util/http" -) - -// URI: /api/v1/uxout -// Method: GET -// Args: -// uxid: unspent output ID hash -// Returns an unspent output by ID -func uxOutHandler(gateway Gatewayer) http.HandlerFunc { - return func(w http.ResponseWriter, r *http.Request) { - if r.Method != http.MethodGet { - wh.Error405(w) - return - } - - uxid := r.FormValue("uxid") - if uxid == "" { - wh.Error400(w, "uxid is empty") - return - } - - id, err := cipher.SHA256FromHex(uxid) - if err != nil { - wh.Error400(w, err.Error()) - return - } - - uxout, err := gateway.GetUxOutByID(id) - if err != nil { - wh.Error400(w, err.Error()) - return - } - - if uxout == nil { - wh.Error404(w, "") - return - } - - wh.SendJSONOr500(logger, w, readable.NewSpentOutput(uxout)) - } -} - -// URI: /api/v1/address_uxouts -// Method: GET -// Args: -// address -// Returns the historical, spent outputs associated with an address -func addrUxOutsHandler(gateway Gatewayer) http.HandlerFunc { - return func(w http.ResponseWriter, r *http.Request) { - if r.Method != http.MethodGet { - wh.Error405(w) - return - } - - addr := r.FormValue("address") - if addr == "" { - wh.Error400(w, "address is empty") - return - } - - cipherAddr, err := cipher.DecodeBase58Address(addr) - if err != nil { - wh.Error400(w, err.Error()) - return - } - - uxs, err := gateway.GetSpentOutputsForAddresses([]cipher.Address{cipherAddr}) - if err != nil { - wh.Error400(w, err.Error()) - return - } - - ret := make([]readable.SpentOutput, 0) - for _, u := range uxs { - ret = append(ret, readable.NewSpentOutputs(u)...) 
- } - - wh.SendJSONOr500(logger, w, ret) - } -} diff --git a/vendor/github.com/SkycoinProject/skycoin/src/api/version.go b/vendor/github.com/SkycoinProject/skycoin/src/api/version.go deleted file mode 100644 index bdbd0d8..0000000 --- a/vendor/github.com/SkycoinProject/skycoin/src/api/version.go +++ /dev/null @@ -1,22 +0,0 @@ -package api - -import ( - "net/http" - - "github.com/SkycoinProject/skycoin/src/readable" - wh "github.com/SkycoinProject/skycoin/src/util/http" -) - -// versionHandler returns the application version info -// URI: /api/v1/version -// Method: GET -func versionHandler(bi readable.BuildInfo) http.HandlerFunc { - return func(w http.ResponseWriter, r *http.Request) { - if r.Method != http.MethodGet { - wh.Error405(w) - return - } - - wh.SendJSONOr500(logger, w, bi) - } -} diff --git a/vendor/github.com/SkycoinProject/skycoin/src/api/wallet.go b/vendor/github.com/SkycoinProject/skycoin/src/api/wallet.go deleted file mode 100644 index 5ca4e97..0000000 --- a/vendor/github.com/SkycoinProject/skycoin/src/api/wallet.go +++ /dev/null @@ -1,986 +0,0 @@ -package api - -// APIs for wallet-related information - -import ( - "encoding/json" - "fmt" - "net/http" - "sort" - "strconv" - - "github.com/SkycoinProject/skycoin/src/cipher/bip39" - "github.com/SkycoinProject/skycoin/src/cipher/bip44" - "github.com/SkycoinProject/skycoin/src/readable" - wh "github.com/SkycoinProject/skycoin/src/util/http" - "github.com/SkycoinProject/skycoin/src/wallet" -) - -// UnconfirmedTxnsResponse contains unconfirmed transaction data -type UnconfirmedTxnsResponse struct { - Transactions []readable.UnconfirmedTransactions `json:"transactions"` -} - -// UnconfirmedTxnsVerboseResponse contains verbose unconfirmed transaction data -type UnconfirmedTxnsVerboseResponse struct { - Transactions []readable.UnconfirmedTransactionVerbose `json:"transactions"` -} - -// BalanceResponse address balance summary struct -type BalanceResponse struct { - readable.BalancePair - Addresses 
readable.AddressBalances `json:"addresses"` -} - -// WalletResponse wallet response struct for http apis -type WalletResponse struct { - Meta readable.WalletMeta `json:"meta"` - Entries []readable.WalletEntry `json:"entries"` -} - -// NewWalletResponse creates WalletResponse struct from wallet.Wallet -func NewWalletResponse(w wallet.Wallet) (*WalletResponse, error) { - var wr WalletResponse - - wr.Meta.Coin = w.Coin() - wr.Meta.Filename = w.Filename() - wr.Meta.Label = w.Label() - wr.Meta.Type = w.Type() - wr.Meta.Version = w.Version() - wr.Meta.CryptoType = w.CryptoType() - wr.Meta.Encrypted = w.IsEncrypted() - wr.Meta.Timestamp = w.Timestamp() - - switch w.Type() { - case wallet.WalletTypeBip44: - bip44Coin := w.Bip44Coin() - wr.Meta.Bip44Coin = &bip44Coin - case wallet.WalletTypeXPub: - wr.Meta.XPub = w.XPub() - } - - entries := w.GetEntries() - wr.Entries = make([]readable.WalletEntry, len(entries)) - - for i, e := range entries { - wr.Entries[i] = readable.WalletEntry{ - Address: e.Address.String(), - Public: e.Public.Hex(), - } - - switch w.Type() { - // Copy these values to another ref to avoid having a pointer - // to an element of Entry which could affect GC of the Entry, - // which could cause retention/copying of secret data in the Entry. - // This is speculative. I don't know if this matters to the go runtime - case wallet.WalletTypeBip44: - childNumber := e.ChildNumber - wr.Entries[i].ChildNumber = &childNumber - change := e.Change - wr.Entries[i].Change = &change - case wallet.WalletTypeXPub: - childNumber := e.ChildNumber - wr.Entries[i].ChildNumber = &childNumber - } - } - - return &wr, nil -} - -// Returns the wallet's balance, both confirmed and predicted. The predicted -// balance is the confirmed balance minus the pending spends. 
-// URI: /api/v1/wallet/balance -// Method: GET -// Args: -// id: wallet id [required] -func walletBalanceHandler(gateway Gatewayer) http.HandlerFunc { - return func(w http.ResponseWriter, r *http.Request) { - if r.Method != http.MethodGet { - wh.Error405(w) - return - } - wltID := r.FormValue("id") - if wltID == "" { - wh.Error400(w, "missing wallet id") - return - } - - walletBalance, addressBalances, err := gateway.GetWalletBalance(wltID) - if err != nil { - logger.Errorf("Get wallet balance failed: %v", err) - switch err { - case wallet.ErrWalletNotExist: - wh.Error404(w, "") - case wallet.ErrWalletAPIDisabled: - wh.Error403(w, "") - default: - wh.Error500(w, err.Error()) - } - return - } - - wh.SendJSONOr500(logger, w, BalanceResponse{ - BalancePair: readable.NewBalancePair(walletBalance), - Addresses: readable.NewAddressBalances(addressBalances), - }) - } -} - -// Returns the balance of one or more addresses, both confirmed and predicted. The predicted -// balance is the confirmed balance minus the pending spends. 
-// URI: /api/v1s/balance -// Method: GET, POST -// Args: -// addrs: command separated list of addresses [required] -func balanceHandler(gateway Gatewayer) http.HandlerFunc { - return func(w http.ResponseWriter, r *http.Request) { - if r.Method != http.MethodGet && r.Method != http.MethodPost { - wh.Error405(w) - return - } - - addrsParam := r.FormValue("addrs") - addrs, err := parseAddressesFromStr(addrsParam) - if err != nil { - wh.Error400(w, err.Error()) - return - } - - if len(addrs) == 0 { - wh.Error400(w, "addrs is required") - return - } - - bals, err := gateway.GetBalanceOfAddresses(addrs) - if err != nil { - err = fmt.Errorf("gateway.GetBalanceOfAddresses failed: %v", err) - wh.Error500(w, err.Error()) - return - } - - // create map of address to balance - addressBalances := make(readable.AddressBalances, len(addrs)) - for idx, addr := range addrs { - addressBalances[addr.String()] = readable.NewBalancePair(bals[idx]) - } - - var balance wallet.BalancePair - for _, bal := range bals { - var err error - balance.Confirmed, err = balance.Confirmed.Add(bal.Confirmed) - if err != nil { - wh.Error500(w, err.Error()) - return - } - - balance.Predicted, err = balance.Predicted.Add(bal.Predicted) - if err != nil { - wh.Error500(w, err.Error()) - return - } - } - - wh.SendJSONOr500(logger, w, BalanceResponse{ - BalancePair: readable.NewBalancePair(balance), - Addresses: addressBalances, - }) - } -} - -// Loads wallet from seed, will scan ahead N address and -// load addresses till the last one that have coins. 
-// URI: /api/v1/wallet/create -// Method: POST -// Args: -// seed: wallet seed [required] -// seed-passphrase: wallet seed passphrase [optional, bip44 type wallet only] -// type: wallet type [required, one of "deterministic", "bip44" or "xpub"] -// bip44-coin: BIP44 coin type [optional, defaults to 8000 (skycoin's coin type), only valid if type is "bip44"] -// xpub: xpub key [required for xpub wallets] -// label: wallet label [required] -// scan: the number of addresses to scan ahead for balances [optional, must be > 0] -// encrypt: bool value, whether encrypt the wallet [optional] -// password: password for encrypting wallet [optional, must be provided if "encrypt" is set] -func walletCreateHandler(gateway Gatewayer) http.HandlerFunc { - return func(w http.ResponseWriter, r *http.Request) { - if r.Method != http.MethodPost { - wh.Error405(w) - return - } - - walletType := r.FormValue("type") - if walletType == "" { - wh.Error400(w, "missing type") - return - } - - seed := r.FormValue("seed") - switch walletType { - case wallet.WalletTypeDeterministic, wallet.WalletTypeBip44: - if seed == "" { - wh.Error400(w, "missing seed") - return - } - } - - label := r.FormValue("label") - if label == "" { - wh.Error400(w, "missing label") - return - } - - password := r.FormValue("password") - defer func() { - password = "" - }() - - var encrypt bool - encryptStr := r.FormValue("encrypt") - if encryptStr != "" { - var err error - encrypt, err = strconv.ParseBool(encryptStr) - if err != nil { - wh.Error400(w, fmt.Sprintf("invalid encrypt value: %v", err)) - return - } - } - - if encrypt && len(password) == 0 { - wh.Error400(w, "missing password") - return - } - - if !encrypt && len(password) > 0 { - wh.Error400(w, "encrypt must be true as password is provided") - return - } - - scanNStr := r.FormValue("scan") - var scanN uint64 = 1 - if scanNStr != "" { - var err error - scanN, err = strconv.ParseUint(scanNStr, 10, 64) - if err != nil { - wh.Error400(w, "invalid scan value") - 
return - } - } - - if scanN == 0 { - wh.Error400(w, "scan must be > 0") - return - } - - var bip44Coin *bip44.CoinType - bip44CoinStr := r.FormValue("bip44-coin") - if bip44CoinStr != "" { - if walletType != wallet.WalletTypeBip44 { - wh.Error400(w, "bip44-coin is only valid for bip44 type wallets") - return - } - - bip44CoinInt, err := strconv.ParseUint(bip44CoinStr, 10, 32) - if err != nil { - wh.Error400(w, "invalid bip44-coin value") - return - } - - c := bip44.CoinType(bip44CoinInt) - bip44Coin = &c - } - - wlt, err := gateway.CreateWallet("", wallet.Options{ - Seed: seed, - Label: label, - Encrypt: encrypt, - Password: []byte(password), - ScanN: scanN, - Type: walletType, - SeedPassphrase: r.FormValue("seed-passphrase"), - Bip44Coin: bip44Coin, - XPub: r.FormValue("xpub"), - }, gateway) - if err != nil { - switch err.(type) { - case wallet.Error: - switch err { - case wallet.ErrWalletAPIDisabled: - wh.Error403(w, "") - return - default: - wh.Error400(w, err.Error()) - return - } - default: - wh.Error500(w, err.Error()) - return - } - - } - - rlt, err := NewWalletResponse(wlt) - if err != nil { - wh.Error500(w, err.Error()) - return - } - wh.SendJSONOr500(logger, w, rlt) - } -} - -// Genreates new addresses -// URI: /api/v1/wallet/newAddress -// Method: POST -// Args: -// id: wallet id [required] -// num: number of address need to create [optional, if not set the default value is 1] -// password: wallet password [optional, must be provided if the wallet is encrypted] -func walletNewAddressesHandler(gateway Gatewayer) http.HandlerFunc { - return func(w http.ResponseWriter, r *http.Request) { - if r.Method != http.MethodPost { - wh.Error405(w) - return - } - - wltID := r.FormValue("id") - if wltID == "" { - wh.Error400(w, "missing wallet id") - return - } - - // Compute the number of addresses to create, default is 1 - var n uint64 = 1 - var err error - num := r.FormValue("num") - if num != "" { - n, err = strconv.ParseUint(num, 10, 64) - if err != nil { - 
wh.Error400(w, "invalid num value") - return - } - } - - password := r.FormValue("password") - defer func() { - password = "" - }() - - addrs, err := gateway.NewAddresses(wltID, []byte(password), n) - if err != nil { - switch err { - case wallet.ErrWalletAPIDisabled: - wh.Error403(w, "") - default: - wh.Error400(w, err.Error()) - } - return - } - - var rlt = struct { - Addresses []string `json:"addresses"` - }{} - - for _, a := range addrs { - rlt.Addresses = append(rlt.Addresses, a.String()) - } - - wh.SendJSONOr500(logger, w, rlt) - } -} - -// Update wallet label -// URI: /api/v1/wallet/update -// Method: POST -// Args: -// id: wallet id [required] -// label: the label the wallet will be updated to [required] -func walletUpdateHandler(gateway Gatewayer) http.HandlerFunc { - return func(w http.ResponseWriter, r *http.Request) { - if r.Method != http.MethodPost { - wh.Error405(w) - return - } - - // Update wallet - wltID := r.FormValue("id") - if wltID == "" { - wh.Error400(w, "missing wallet id") - return - } - - label := r.FormValue("label") - if label == "" { - wh.Error400(w, "missing label") - return - } - - if err := gateway.UpdateWalletLabel(wltID, label); err != nil { - logger.Errorf("update wallet label failed: %v", err) - - switch err { - case wallet.ErrWalletNotExist: - wh.Error404(w, "") - case wallet.ErrWalletAPIDisabled: - wh.Error403(w, "") - default: - wh.Error500(w, err.Error()) - } - return - } - - wh.SendJSONOr500(logger, w, "success") - } -} - -// Returns a wallet by id -// URI: /api/v1/wallet -// Method: GET -// Args: -// id: wallet id [required] -func walletHandler(gateway Gatewayer) http.HandlerFunc { - return func(w http.ResponseWriter, r *http.Request) { - if r.Method != http.MethodGet { - wh.Error405(w) - return - } - - wltID := r.FormValue("id") - if wltID == "" { - wh.Error400(w, fmt.Sprintf("missing wallet id")) - return - } - - wlt, err := gateway.GetWallet(wltID) - if err != nil { - switch err { - case wallet.ErrWalletAPIDisabled: - 
wh.Error403(w, "") - default: - wh.Error400(w, err.Error()) - } - return - } - rlt, err := NewWalletResponse(wlt) - if err != nil { - wh.Error500(w, err.Error()) - return - } - wh.SendJSONOr500(logger, w, rlt) - } -} - -// walletTransactionsHandler returns all unconfirmed transactions for all addresses in a given wallet -// URI: /api/v1/wallet/transactions -// Method: GET -// Args: -// id: wallet id [required] -// verbose: [bool] include verbose transaction input data -func walletTransactionsHandler(gateway Gatewayer) http.HandlerFunc { - return func(w http.ResponseWriter, r *http.Request) { - if r.Method != http.MethodGet { - wh.Error405(w) - return - } - - verbose, err := parseBoolFlag(r.FormValue("verbose")) - if err != nil { - wh.Error400(w, "Invalid value for verbose") - return - } - - wltID := r.FormValue("id") - if wltID == "" { - wh.Error400(w, "missing wallet id") - return - } - - handleWalletError := func(err error) { - switch err { - case nil: - case wallet.ErrWalletNotExist: - wh.Error404(w, "") - case wallet.ErrWalletAPIDisabled: - wh.Error403(w, "") - default: - wh.Error500(w, err.Error()) - } - } - - if verbose { - txns, inputs, err := gateway.GetWalletUnconfirmedTransactionsVerbose(wltID) - if err != nil { - logger.Errorf("get wallet unconfirmed transactions verbose failed: %v", err) - handleWalletError(err) - return - } - - vb := make([]readable.UnconfirmedTransactionVerbose, len(txns)) - for i, txn := range txns { - v, err := readable.NewUnconfirmedTransactionVerbose(&txn, inputs[i]) - if err != nil { - wh.Error500(w, err.Error()) - return - } - vb[i] = *v - } - - wh.SendJSONOr500(logger, w, UnconfirmedTxnsVerboseResponse{ - Transactions: vb, - }) - } else { - txns, err := gateway.GetWalletUnconfirmedTransactions(wltID) - if err != nil { - logger.Errorf("get wallet unconfirmed transactions failed: %v", err) - handleWalletError(err) - return - } - - unconfirmedTxns, err := readable.NewUnconfirmedTransactions(txns) - if err != nil { - wh.Error500(w, 
err.Error()) - return - } - - wh.SendJSONOr500(logger, w, UnconfirmedTxnsResponse{ - Transactions: unconfirmedTxns, - }) - } - } -} - -// Returns all loaded wallets -// URI: /api/v1/wallets -// Method: GET -func walletsHandler(gateway Gatewayer) http.HandlerFunc { - return func(w http.ResponseWriter, r *http.Request) { - if r.Method != http.MethodGet { - wh.Error405(w) - return - } - - wlts, err := gateway.GetWallets() - if err != nil { - switch err { - case wallet.ErrWalletAPIDisabled: - wh.Error403(w, "") - default: - wh.Error500(w, err.Error()) - } - return - } - - wrs := make([]*WalletResponse, 0, len(wlts)) - for _, wlt := range wlts { - wr, err := NewWalletResponse(wlt) - if err != nil { - wh.Error500(w, err.Error()) - return - } - - wrs = append(wrs, wr) - } - - sort.Slice(wrs, func(i, j int) bool { - return wrs[i].Meta.Timestamp < wrs[j].Meta.Timestamp - }) - - wh.SendJSONOr500(logger, w, wrs) - } -} - -// WalletFolder struct -type WalletFolder struct { - Address string `json:"address"` -} - -// Returns the wallet directory path -// URI: /api/v1/wallets/folderName -// Method: GET -func walletFolderHandler(s Walleter) http.HandlerFunc { - return func(w http.ResponseWriter, r *http.Request) { - if r.Method != http.MethodGet { - wh.Error405(w) - return - } - - addr, err := s.WalletDir() - if err != nil { - switch err { - case wallet.ErrWalletAPIDisabled: - wh.Error403(w, "") - default: - wh.Error500(w, err.Error()) - } - return - } - ret := WalletFolder{ - Address: addr, - } - wh.SendJSONOr500(logger, w, ret) - } -} - -// Generates wallet seed -// URI: /api/v1/wallet/newSeed -// Method: GET -// Args: -// entropy: entropy bitsize [optional, default value of 128 will be used if not set] -func newSeedHandler() http.HandlerFunc { - return func(w http.ResponseWriter, r *http.Request) { - if r.Method != http.MethodGet { - wh.Error405(w) - return - } - - entropyValue := r.FormValue("entropy") - if entropyValue == "" { - entropyValue = "128" - } - - entropyBits, err 
:= strconv.Atoi(entropyValue) - if err != nil { - wh.Error400(w, "invalid entropy") - return - } - - // Entropy bit size can either be 128 or 256 - if entropyBits != 128 && entropyBits != 256 { - wh.Error400(w, "entropy length must be 128 or 256") - return - } - - entropy, err := bip39.NewEntropy(entropyBits) - if err != nil { - err = fmt.Errorf("bip39.NewEntropy failed: %v", err) - wh.Error500(w, err.Error()) - return - } - - mnemonic, err := bip39.NewMnemonic(entropy) - if err != nil { - err = fmt.Errorf("bip39.NewDefaultMnemonic failed: %v", err) - wh.Error500(w, err.Error()) - return - } - - var rlt = struct { - Seed string `json:"seed"` - }{ - mnemonic, - } - wh.SendJSONOr500(logger, w, rlt) - } -} - -// WalletSeedResponse is returned by /api/v1/wallet/seed -type WalletSeedResponse struct { - Seed string `json:"seed"` - SeedPassphrase string `json:"seed_passphrase,omitempty"` -} - -// Returns seed and seed passphrase of wallet of given id -// URI: /api/v1/wallet/seed -// Method: POST -// Args: -// id: wallet id -// password: wallet password -func walletSeedHandler(gateway Gatewayer) http.HandlerFunc { - return func(w http.ResponseWriter, r *http.Request) { - if r.Method != http.MethodPost { - wh.Error405(w) - return - } - - id := r.FormValue("id") - if id == "" { - wh.Error400(w, "missing wallet id") - return - } - - password := r.FormValue("password") - defer func() { - password = "" - }() - - seed, seedPassphrase, err := gateway.GetWalletSeed(id, []byte(password)) - if err != nil { - switch err { - case wallet.ErrMissingPassword, - wallet.ErrWalletNotEncrypted, - wallet.ErrInvalidPassword: - wh.Error400(w, err.Error()) - case wallet.ErrWalletAPIDisabled, wallet.ErrSeedAPIDisabled: - wh.Error403(w, "") - case wallet.ErrWalletNotExist: - wh.Error404(w, "") - default: - wh.Error500(w, err.Error()) - } - return - } - - v := WalletSeedResponse{ - Seed: seed, - SeedPassphrase: seedPassphrase, - } - - wh.SendJSONOr500(logger, w, v) - } -} - -// VerifySeedRequest is 
the request data for POST /api/v2/wallet/seed/verify -type VerifySeedRequest struct { - Seed string `json:"seed"` -} - -// walletVerifySeedHandler verifies a wallet seed -// Method: POST -// URI: /api/v2/wallet/seed/verify -func walletVerifySeedHandler(w http.ResponseWriter, r *http.Request) { - if r.Method != http.MethodPost { - resp := NewHTTPErrorResponse(http.StatusMethodNotAllowed, "") - writeHTTPResponse(w, resp) - return - } - - var req VerifySeedRequest - if err := json.NewDecoder(r.Body).Decode(&req); err != nil { - resp := NewHTTPErrorResponse(http.StatusBadRequest, err.Error()) - writeHTTPResponse(w, resp) - return - } - - if req.Seed == "" { - resp := NewHTTPErrorResponse(http.StatusBadRequest, "seed is required") - writeHTTPResponse(w, resp) - return - } - - if err := bip39.ValidateMnemonic(req.Seed); err != nil { - resp := NewHTTPErrorResponse(http.StatusUnprocessableEntity, err.Error()) - writeHTTPResponse(w, resp) - return - } - - writeHTTPResponse(w, HTTPResponse{Data: struct{}{}}) -} - -// Unloads wallet from the wallet service -// URI: /api/v1/wallet/unload -// Method: POST -// Args: -// id: wallet id -func walletUnloadHandler(gateway Gatewayer) http.HandlerFunc { - return func(w http.ResponseWriter, r *http.Request) { - if r.Method != http.MethodPost { - wh.Error405(w) - return - } - - id := r.FormValue("id") - if id == "" { - wh.Error400(w, "missing wallet id") - return - } - - if err := gateway.UnloadWallet(id); err != nil { - switch err { - case wallet.ErrWalletAPIDisabled: - wh.Error403(w, "") - default: - wh.Error500(w, err.Error()) - } - } - } -} - -// Encrypts wallet -// URI: /api/v1/wallet/encrypt -// Method: POST -// Args: -// id: wallet id -// password: wallet password -func walletEncryptHandler(gateway Gatewayer) http.HandlerFunc { - return func(w http.ResponseWriter, r *http.Request) { - if r.Method != http.MethodPost { - wh.Error405(w) - return - } - - id := r.FormValue("id") - if id == "" { - wh.Error400(w, "missing wallet id") - 
return - } - - password := r.FormValue("password") - defer func() { - password = "" - }() - - wlt, err := gateway.EncryptWallet(id, []byte(password)) - if err != nil { - switch err { - case wallet.ErrWalletEncrypted, - wallet.ErrMissingPassword, - wallet.ErrInvalidPassword: - wh.Error400(w, err.Error()) - case wallet.ErrWalletAPIDisabled: - wh.Error403(w, "") - case wallet.ErrWalletNotExist: - wh.Error404(w, "") - default: - wh.Error500(w, err.Error()) - } - return - } - - // Make sure the sensitive data are wiped - rlt, err := NewWalletResponse(wlt) - if err != nil { - wh.Error500(w, err.Error()) - return - } - wh.SendJSONOr500(logger, w, rlt) - } -} - -// Decrypts wallet -// URI: /api/v1/wallet/decrypt -// Method: POST -// Args: -// id: wallet id -// password: wallet password -func walletDecryptHandler(gateway Gatewayer) http.HandlerFunc { - return func(w http.ResponseWriter, r *http.Request) { - if r.Method != http.MethodPost { - wh.Error405(w) - return - } - - id := r.FormValue("id") - if id == "" { - wh.Error400(w, "missing wallet id") - return - } - - password := r.FormValue("password") - defer func() { - password = "" - }() - - wlt, err := gateway.DecryptWallet(id, []byte(password)) - if err != nil { - switch err { - case wallet.ErrMissingPassword, - wallet.ErrWalletNotEncrypted, - wallet.ErrInvalidPassword: - wh.Error400(w, err.Error()) - case wallet.ErrWalletAPIDisabled: - wh.Error403(w, "") - case wallet.ErrWalletNotExist: - wh.Error404(w, "") - default: - wh.Error500(w, err.Error()) - } - return - } - - rlt, err := NewWalletResponse(wlt) - if err != nil { - wh.Error500(w, err.Error()) - return - } - wh.SendJSONOr500(logger, w, rlt) - } -} - -// WalletRecoverRequest is the request data for POST /api/v2/wallet/recover -type WalletRecoverRequest struct { - ID string `json:"id"` - Seed string `json:"seed"` - SeedPassphrase string `json:"seed_passphrase"` - Password string `json:"password"` -} - -// URI: /api/v2/wallet/recover -// Method: POST -// Args: -// 
id: wallet id -// seed: wallet seed -// password: [optional] new password -// Recovers an encrypted wallet by providing the seed. -// The first address will be generated from seed and compared to the first address -// of the specified wallet. If they match, the wallet will be regenerated -// with an optional password. -// If the wallet is not encrypted, an error is returned. -func walletRecoverHandler(gateway Gatewayer) http.HandlerFunc { - return func(w http.ResponseWriter, r *http.Request) { - if r.Method != http.MethodPost { - resp := NewHTTPErrorResponse(http.StatusMethodNotAllowed, "") - writeHTTPResponse(w, resp) - return - } - - var req WalletRecoverRequest - if err := json.NewDecoder(r.Body).Decode(&req); err != nil { - resp := NewHTTPErrorResponse(http.StatusBadRequest, err.Error()) - writeHTTPResponse(w, resp) - return - } - - if req.ID == "" { - resp := NewHTTPErrorResponse(http.StatusBadRequest, "id is required") - writeHTTPResponse(w, resp) - return - } - - if req.Seed == "" { - resp := NewHTTPErrorResponse(http.StatusBadRequest, "seed is required") - writeHTTPResponse(w, resp) - return - } - - var password []byte - if req.Password != "" { - password = []byte(req.Password) - } - - defer func() { - req.Seed = "" - req.SeedPassphrase = "" - req.Password = "" - password = nil - }() - - wlt, err := gateway.RecoverWallet(req.ID, req.Seed, req.SeedPassphrase, password) - if err != nil { - var resp HTTPResponse - switch err.(type) { - case wallet.Error: - switch err { - case wallet.ErrWalletNotExist: - resp = NewHTTPErrorResponse(http.StatusNotFound, "") - case wallet.ErrWalletAPIDisabled: - resp = NewHTTPErrorResponse(http.StatusForbidden, "") - default: - resp = NewHTTPErrorResponse(http.StatusBadRequest, err.Error()) - } - default: - resp = NewHTTPErrorResponse(http.StatusInternalServerError, err.Error()) - } - writeHTTPResponse(w, resp) - return - } - - rlt, err := NewWalletResponse(wlt) - if err != nil { - resp := 
NewHTTPErrorResponse(http.StatusInternalServerError, err.Error()) - writeHTTPResponse(w, resp) - return - } - - writeHTTPResponse(w, HTTPResponse{ - Data: rlt, - }) - } -} diff --git a/vendor/github.com/SkycoinProject/skycoin/src/cipher/encoder/README.md b/vendor/github.com/SkycoinProject/skycoin/src/cipher/encoder/README.md deleted file mode 100644 index 20c62e9..0000000 --- a/vendor/github.com/SkycoinProject/skycoin/src/cipher/encoder/README.md +++ /dev/null @@ -1,8 +0,0 @@ -encoder -======= - -[![GoDoc](http://godoc.org/github.com//SkycoinProject/skycoin/src/cipher/encoder?status.png)](http://godoc.org/github.com/SkycoinProject/skycoin/src/cipher/encoder) - -[Godoc generated documentation](https://godoc.org/github.com/SkycoinProject/skycoin/src/cipher/encoder) - -Binary struct encoder for Go. Fork of go's pkg encoding/binary. diff --git a/vendor/github.com/SkycoinProject/skycoin/src/cipher/encoder/encoder.go b/vendor/github.com/SkycoinProject/skycoin/src/cipher/encoder/encoder.go deleted file mode 100644 index da17043..0000000 --- a/vendor/github.com/SkycoinProject/skycoin/src/cipher/encoder/encoder.go +++ /dev/null @@ -1,1027 +0,0 @@ -// Copyright 2009 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package encoder binary implements translation between struct data and byte sequences -// -// Fields can be ignored with the struct tag `enc:"-"` . -// Unexported struct fields are ignored by default . -// -// Fields can be skipped if empty with the struct tag `enc:",omitempty"` -// Note the comma, which follows package json's conventions. -// Only Slice, Map and String types recognize the omitempty tag. -// When omitempty is set, the no data will be written if the value is empty. -// If the value is empty and omitempty is not set, then a length prefix with value 0 would be written. 
-// omitempty can only be used for the last field in the struct -// -// Encoding of maps is supported, but note that the use of them results in non-deterministic output. -// If determinism is required, do not use map. -// -// A length restriction to certain fields can be applied when decoding. -// Use the tag `,maxlen=` on a struct field to apply this restriction. -// `maxlen` works for string and slice types. The length is interpreted as the length -// of the string or the number of elements in the slice. -// Note that maxlen does not affect serialization; it may serialize objects which could fail deserialization. -// Callers should check their length restricted values manually prior to serialization. -package encoder - -import ( - "errors" - "fmt" - "log" - "math" - "reflect" - "strconv" - "strings" -) - -var ( - // ErrBufferUnderflow bytes in input buffer not enough to deserialize expected type - ErrBufferUnderflow = errors.New("Not enough buffer data to deserialize") - // ErrBufferOverflow bytes in output buffer not enough to serialize expected type - ErrBufferOverflow = errors.New("Not enough buffer data to serialize") - // ErrInvalidOmitEmpty field tagged with omitempty and it's not last one in struct - ErrInvalidOmitEmpty = errors.New("omitempty only supported for the final field in the struct") - // ErrRemainingBytes bytes remain in buffer after deserializing object - ErrRemainingBytes = errors.New("Bytes remain in buffer after deserializing object") - // ErrMaxLenExceeded a specified maximum length was exceeded when serializing or deserializing a variable length field - ErrMaxLenExceeded = errors.New("Maximum length exceeded for variable length field") - // ErrMapDuplicateKeys encountered duplicate map keys while decoding a map - ErrMapDuplicateKeys = errors.New("Duplicate keys encountered while decoding a map") - // ErrInvalidBool is returned if the decoder encounters a value other than 0 or 1 for a bool type field - ErrInvalidBool = errors.New("Invalid 
value for bool type") -) - -// SerializeUint32 serializes a uint32 -func SerializeUint32(x uint32) []byte { - var b [4]byte - lePutUint32(b[:], x) - return b[:] -} - -// DeserializeUint32 serializes a uint32 -func DeserializeUint32(buf []byte) (uint32, uint64, error) { - if len(buf) < 4 { - return 0, 0, ErrBufferUnderflow - } - return leUint32(buf[:4]), 4, nil -} - -// SerializeAtomic encodes an integer or boolean contained in `data` to bytes. -// Panics if `data` is not an integer or boolean type. -func SerializeAtomic(data interface{}) []byte { - var b [8]byte - - switch v := data.(type) { - case bool: - if v { - b[0] = 1 - } else { - b[0] = 0 - } - return b[:1] - case int8: - b[0] = byte(v) - return b[:1] - case uint8: - b[0] = v - return b[:1] - case int16: - lePutUint16(b[:2], uint16(v)) - return b[:2] - case uint16: - lePutUint16(b[:2], v) - return b[:2] - case int32: - lePutUint32(b[:4], uint32(v)) - return b[:4] - case uint32: - lePutUint32(b[:4], v) - return b[:4] - case int64: - lePutUint64(b[:8], uint64(v)) - return b[:8] - case uint64: - lePutUint64(b[:8], v) - return b[:8] - default: - log.Panic("SerializeAtomic unhandled type") - return nil - } -} - -// DeserializeAtomic deserializes `in` buffer into `data` -// parameter. Panics if `data` is not an integer or boolean type. -// Returns the number of bytes read. 
-func DeserializeAtomic(in []byte, data interface{}) (uint64, error) { - switch v := data.(type) { - case *bool: - if len(in) < 1 { - return 0, ErrBufferUnderflow - } - if in[0] == 0 { - *v = false - } else { - *v = true - } - return 1, nil - case *int8: - if len(in) < 1 { - return 0, ErrBufferUnderflow - } - *v = int8(in[0]) - return 1, nil - case *uint8: - if len(in) < 1 { - return 0, ErrBufferUnderflow - } - *v = in[0] - return 1, nil - case *int16: - if len(in) < 2 { - return 0, ErrBufferUnderflow - } - *v = int16(leUint16(in[:2])) - return 2, nil - case *uint16: - if len(in) < 2 { - return 0, ErrBufferUnderflow - } - *v = leUint16(in[:2]) - return 2, nil - case *int32: - if len(in) < 4 { - return 0, ErrBufferUnderflow - } - *v = int32(leUint32(in[:4])) - return 4, nil - case *uint32: - if len(in) < 4 { - return 0, ErrBufferUnderflow - } - *v = leUint32(in[:4]) - return 4, nil - case *int64: - if len(in) < 8 { - return 0, ErrBufferUnderflow - } - *v = int64(leUint64(in[:8])) - return 8, nil - case *uint64: - if len(in) < 8 { - return 0, ErrBufferUnderflow - } - *v = leUint64(in[:8]) - return 8, nil - default: - log.Panic("DeserializeAtomic unhandled type") - return 0, nil - } -} - -// SerializeString serializes a string to []byte -func SerializeString(s string) []byte { - v := reflect.ValueOf(s) - size := datasizeWrite(v) - buf := make([]byte, size) - e := &Encoder{ - Buffer: buf, - } - e.value(v) - return buf -} - -// DeserializeString deserializes a string from []byte, returning the string and the number of bytes read -func DeserializeString(in []byte, maxlen int) (string, uint64, error) { - var s string - v := reflect.ValueOf(&s) - v = v.Elem() - - inlen := len(in) - d1 := &Decoder{ - Buffer: make([]byte, inlen), - } - copy(d1.Buffer, in) - - err := d1.value(v, maxlen) - if err != nil { - return "", 0, err - } - - return s, uint64(inlen - len(d1.Buffer)), nil -} - -// DeserializeRaw deserializes `in` buffer into return -// parameter. 
If `data` is not a Pointer or Map type an error -// is returned. If `in` buffer can't be deserialized, -// an error message is returned. -// Returns number of bytes read if no error. -func DeserializeRaw(in []byte, data interface{}) (uint64, error) { - v := reflect.ValueOf(data) - switch v.Kind() { - case reflect.Ptr: - v = v.Elem() - case reflect.Map: - default: - return 0, fmt.Errorf("DeserializeRaw value must be a ptr, is %s", v.Kind().String()) - } - - inlen := len(in) - d1 := &Decoder{ - Buffer: make([]byte, inlen), - } - copy(d1.Buffer, in) - - if err := d1.value(v, 0); err != nil { - return 0, err - } - - return uint64(inlen - len(d1.Buffer)), nil -} - -// DeserializeRawExact deserializes `in` buffer into return -// parameter. If `data` is not a Pointer or Map type an error -// is returned. If `in` buffer can't be deserialized, -// an error message is returned. -// Returns number of bytes read if no error. -// If the number of bytes read does not equal the length of the input buffer, -// ErrRemainingBytes is returned. -func DeserializeRawExact(in []byte, data interface{}) error { - n, err := DeserializeRaw(in, data) - if err != nil { - return err - } - if n != uint64(len(in)) { - return ErrRemainingBytes - } - return nil -} - -// DeserializeRawToValue deserializes `in` buffer into -// `dst`'s type and returns the number of bytes used and -// the value of the buffer. If `data` is not either a -// Pointer type an error is returned. -// If `in` buffer can't be deserialized, the number of bytes read and an error message are returned. 
-func DeserializeRawToValue(in []byte, v reflect.Value) (uint64, error) { - switch v.Kind() { - case reflect.Ptr: - v = v.Elem() - case reflect.Map: - default: - return 0, fmt.Errorf("DeserializeRawToValue value must be a ptr, is %s", v.Kind().String()) - } - - inlen := len(in) - d1 := &Decoder{ - Buffer: make([]byte, inlen), - } - copy(d1.Buffer, in) - - err := d1.value(v, 0) - if err != nil { - return 0, err - } - - return uint64(inlen - len(d1.Buffer)), nil -} - -// Serialize returns serialized basic type-based `data` -// parameter. Encoding is reflect-based. Panics if `data` is not serializable. -func Serialize(data interface{}) []byte { - v := reflect.Indirect(reflect.ValueOf(data)) - size := datasizeWrite(v) - buf := make([]byte, size) - e := &Encoder{ - Buffer: buf, - } - e.value(v) - return buf -} - -// Size returns how many bytes would it take to encode the -// value v, which must be a fixed-size value (struct) or a -// slice of fixed-size values, or a pointer to such data. -// Reflect-based encoding is used. -func Size(v interface{}) uint64 { - return datasizeWrite(reflect.Indirect(reflect.ValueOf(v))) -} - -// isEmpty returns true if a value is "empty". -// Only supports Slice, Map and String. -// All other values are never considered empty. -func isEmpty(v reflect.Value) bool { - t := v.Type() - switch t.Kind() { - case reflect.String: - return v.Len() == 0 - case reflect.Map: - return v.IsNil() || v.Len() == 0 - case reflect.Slice: - return v.IsNil() || v.Len() == 0 - default: - return false - } -} - -// datasizeWrite returns the number of bytes the actual data represented by v occupies in memory. -// For compound structures, it sums the sizes of the elements. Thus, for instance, for a slice -// it returns the length of the slice times the element size and does not count the memory -// occupied by the header. 
-func datasizeWrite(v reflect.Value) uint64 { - t := v.Type() - switch t.Kind() { - case reflect.Interface: - return datasizeWrite(v.Elem()) - - case reflect.Array: - // Arrays are a fixed size, so the length is not written - t := v.Type() - elem := t.Elem() - switch elem.Kind() { - case reflect.Uint8, reflect.Int8: - return uint64(v.Len()) - case reflect.Uint16, reflect.Int16: - return uint64(v.Len()) * 2 - case reflect.Uint32, reflect.Int32, reflect.Float32: - return uint64(v.Len()) * 4 - case reflect.Uint64, reflect.Int64, reflect.Float64: - return uint64(v.Len()) * 8 - default: - size := uint64(0) - for i := 0; i < v.Len(); i++ { - elem := v.Index(i) - s := datasizeWrite(elem) - size += s - } - return size - } - - case reflect.Slice: - t := v.Type() - elem := t.Elem() - switch elem.Kind() { - case reflect.Uint8, reflect.Int8: - return 4 + uint64(v.Len()) - case reflect.Uint16, reflect.Int16: - return 4 + uint64(v.Len())*2 - case reflect.Uint32, reflect.Int32, reflect.Float32: - return 4 + uint64(v.Len())*4 - case reflect.Uint64, reflect.Int64, reflect.Float64: - return 4 + uint64(v.Len())*8 - default: - size := uint64(0) - for i := 0; i < v.Len(); i++ { - elem := v.Index(i) - s := datasizeWrite(elem) - size += s - } - return 4 + size - } - - case reflect.Map: - // length prefix - size := uint64(4) - for _, key := range v.MapKeys() { - s := datasizeWrite(key) - size += s - elem := v.MapIndex(key) - s = datasizeWrite(elem) - size += s - } - return size - - case reflect.Struct: - sum := uint64(0) - nFields := t.NumField() - for i, n := 0, nFields; i < n; i++ { - ff := t.Field(i) - // Skip unexported fields - if ff.PkgPath != "" { - continue - } - - tag := ff.Tag.Get("enc") - omitempty := TagOmitempty(tag) - - if omitempty && i != nFields-1 { - log.Panic(ErrInvalidOmitEmpty) - } - - if len(tag) > 0 && tag[0] == '-' { - continue - } - - fv := v.Field(i) - if !omitempty || !isEmpty(fv) { - s := datasizeWrite(fv) - sum += s - } - } - return sum - - case reflect.Bool: 
- return 1 - - case reflect.String: - return 4 + uint64(v.Len()) - - case reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, - reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, - reflect.Float32, reflect.Float64: - return uint64(t.Size()) - - default: - log.Panicf("invalid type %s", t.String()) - return 0 - } -} - -// TagOmitempty returns true if the tag specifies omitempty -func TagOmitempty(tag string) bool { - return strings.Contains(tag, ",omitempty") -} - -func tagName(tag string) string { //nolint:deadcode,megacheck - commaIndex := strings.Index(tag, ",") - if commaIndex == -1 { - return tag - } - - return tag[:commaIndex] -} - -// TagMaxLen returns the maxlen value tagged on a struct. Returns 0 if no tag is present. -func TagMaxLen(tag string) int { - maxlenIndex := strings.Index(tag, ",maxlen=") - if maxlenIndex == -1 { - return 0 - } - - maxlenRem := tag[maxlenIndex+len(",maxlen="):] - commaIndex := strings.Index(maxlenRem, ",") - if commaIndex != -1 { - maxlenRem = maxlenRem[:commaIndex] - } - - maxlen, err := strconv.Atoi(maxlenRem) - if err != nil { - panic("maxlen must be a number") - } - - return maxlen -} - -/* - Internals -*/ - -func leUint16(b []byte) uint16 { return uint16(b[0]) | uint16(b[1])<<8 } - -func lePutUint16(b []byte, v uint16) { - b[0] = byte(v) - b[1] = byte(v >> 8) -} - -func leUint32(b []byte) uint32 { - return uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24 -} - -func lePutUint32(b []byte, v uint32) { - b[0] = byte(v) - b[1] = byte(v >> 8) - b[2] = byte(v >> 16) - b[3] = byte(v >> 24) -} - -func leUint64(b []byte) uint64 { - return uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | - uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56 -} - -func lePutUint64(b []byte, v uint64) { - b[0] = byte(v) - b[1] = byte(v >> 8) - b[2] = byte(v >> 16) - b[3] = byte(v >> 24) - b[4] = byte(v >> 32) - b[5] = byte(v >> 40) - b[6] = byte(v >> 48) - b[7] = byte(v >> 
56) -} - -// Decoder decodes an object from the skycoin binary encoding format -type Decoder struct { - Buffer []byte -} - -// Encoder encodes an object to the skycoin binary encoding format -type Encoder struct { - Buffer []byte -} - -// Bool decodes bool -func (d *Decoder) Bool() (bool, error) { - if len(d.Buffer) < 1 { - return false, ErrBufferUnderflow - } - x := d.Buffer[0] - d.Buffer = d.Buffer[1:] // advance slice - - switch x { - case 0: - return false, nil - case 1: - return true, nil - default: - return false, ErrInvalidBool - } -} - -// Bool encodes bool -func (e *Encoder) Bool(x bool) { - if x { - e.Buffer[0] = 1 - } else { - e.Buffer[0] = 0 - } - e.Buffer = e.Buffer[1:] -} - -// Uint8 decodes uint8 -func (d *Decoder) Uint8() (uint8, error) { - if len(d.Buffer) < 1 { - return 0, ErrBufferUnderflow - } - - x := d.Buffer[0] - d.Buffer = d.Buffer[1:] // advance slice - return x, nil -} - -// Uint8 encodes uint8 -func (e *Encoder) Uint8(x uint8) { - e.Buffer[0] = x - e.Buffer = e.Buffer[1:] -} - -// Uint16 decodes uint16 -func (d *Decoder) Uint16() (uint16, error) { - if len(d.Buffer) < 2 { - return 0, ErrBufferUnderflow - } - - x := leUint16(d.Buffer[0:2]) - d.Buffer = d.Buffer[2:] - return x, nil -} - -// Uint16 encodes uint16 -func (e *Encoder) Uint16(x uint16) { - lePutUint16(e.Buffer[0:2], x) - e.Buffer = e.Buffer[2:] -} - -// Uint32 decodes a Uint32 -func (d *Decoder) Uint32() (uint32, error) { - if len(d.Buffer) < 4 { - return 0, ErrBufferUnderflow - } - - x := leUint32(d.Buffer[0:4]) - d.Buffer = d.Buffer[4:] - return x, nil -} - -// Uint32 encodes a Uint32 -func (e *Encoder) Uint32(x uint32) { - lePutUint32(e.Buffer[0:4], x) - e.Buffer = e.Buffer[4:] -} - -// Uint64 decodes uint64 -func (d *Decoder) Uint64() (uint64, error) { - if len(d.Buffer) < 8 { - return 0, ErrBufferUnderflow - } - - x := leUint64(d.Buffer[0:8]) - d.Buffer = d.Buffer[8:] - return x, nil -} - -// Uint64 encodes uint64 -func (e *Encoder) Uint64(x uint64) { - 
lePutUint64(e.Buffer[0:8], x) - e.Buffer = e.Buffer[8:] -} - -// ByteSlice encodes []byte -func (e *Encoder) ByteSlice(x []byte) { - e.Uint32(uint32(len(x))) - e.CopyBytes(x) -} - -// CopyBytes copies bytes to the buffer, without a length prefix -func (e *Encoder) CopyBytes(x []byte) { - if len(x) == 0 { - return - } - copy(e.Buffer, x) - e.Buffer = e.Buffer[len(x):] -} - -// Int8 decodes int8 -func (d *Decoder) Int8() (int8, error) { - u, err := d.Uint8() - if err != nil { - return 0, err - } - - return int8(u), nil -} - -// Int8 encodes int8 -func (e *Encoder) Int8(x int8) { - e.Uint8(uint8(x)) -} - -// Int16 decodes int16 -func (d *Decoder) Int16() (int16, error) { - u, err := d.Uint16() - if err != nil { - return 0, err - } - - return int16(u), nil -} - -// Int16 encodes int16 -func (e *Encoder) Int16(x int16) { - e.Uint16(uint16(x)) -} - -// Int32 decodes int32 -func (d *Decoder) Int32() (int32, error) { - u, err := d.Uint32() - if err != nil { - return 0, err - } - - return int32(u), nil -} - -// Int32 encodes int32 -func (e *Encoder) Int32(x int32) { - e.Uint32(uint32(x)) -} - -// Int64 decodes int64 -func (d *Decoder) Int64() (int64, error) { - u, err := d.Uint64() - if err != nil { - return 0, err - } - - return int64(u), nil -} - -// Int64 encodes int64 -func (e *Encoder) Int64(x int64) { - e.Uint64(uint64(x)) -} - -func (d *Decoder) value(v reflect.Value, maxlen int) error { - kind := v.Kind() - switch kind { - case reflect.Array: - - t := v.Type() - elem := t.Elem() - - // Arrays are a fixed size, so the length is not written - length := v.Len() - - switch elem.Kind() { - case reflect.Uint8: - if length > len(d.Buffer) { - return ErrBufferUnderflow - } - - reflect.Copy(v, reflect.ValueOf(d.Buffer[:length])) - d.Buffer = d.Buffer[length:] - default: - for i := 0; i < length; i++ { - if err := d.value(v.Index(i), 0); err != nil { - return err - } - } - } - - case reflect.Map: - ul, err := d.Uint32() - if err != nil { - return err - } - - length := int(ul) 
- if length < 0 || length > len(d.Buffer) { - return ErrBufferUnderflow - } - - if length == 0 { - return nil - } - - t := v.Type() - key := t.Key() - elem := t.Elem() - - if v.IsNil() { - v.Set(reflect.Indirect(reflect.MakeMap(t))) - } - - for i := 0; i < length; i++ { - keyv := reflect.Indirect(reflect.New(key)) - elemv := reflect.Indirect(reflect.New(elem)) - if err := d.value(keyv, 0); err != nil { - return err - } - if err := d.value(elemv, 0); err != nil { - return err - } - v.SetMapIndex(keyv, elemv) - } - - if v.Len() != length { - return ErrMapDuplicateKeys - } - - case reflect.Slice: - ul, err := d.Uint32() - if err != nil { - return err - } - - length := int(ul) - if length < 0 || length > len(d.Buffer) { - return ErrBufferUnderflow - } - - if length == 0 { - return nil - } - - if maxlen > 0 && length > maxlen { - return ErrMaxLenExceeded - } - - t := v.Type() - elem := t.Elem() - - switch elem.Kind() { - case reflect.Uint8: - v.SetBytes(d.Buffer[:length]) - d.Buffer = d.Buffer[length:] - default: - elemvs := reflect.MakeSlice(t, length, length) - for i := 0; i < length; i++ { - elemv := reflect.Indirect(elemvs.Index(i)) - if err := d.value(elemv, 0); err != nil { - return err - } - } - v.Set(elemvs) - } - - case reflect.Struct: - t := v.Type() - nFields := v.NumField() - for i := 0; i < nFields; i++ { - ff := t.Field(i) - // Skip unexported fields - if ff.PkgPath != "" { - continue - } - - tag := ff.Tag.Get("enc") - omitempty := TagOmitempty(tag) - - if omitempty && i != nFields-1 { - log.Panic(ErrInvalidOmitEmpty) - } - - if len(tag) > 0 && tag[0] == '-' { - continue - } - - fv := v.Field(i) - if fv.CanSet() && ff.Name != "_" { - maxlen := TagMaxLen(tag) - - if err := d.value(fv, maxlen); err != nil { - if err == ErrMaxLenExceeded { - return err - } - - // omitempty fields at the end of the buffer are ignored if missing - if !omitempty || len(d.Buffer) != 0 { - return err - } - } - } - } - - case reflect.String: - ul, err := d.Uint32() - if err != nil 
{ - return err - } - - length := int(ul) - if length < 0 || length > len(d.Buffer) { - return ErrBufferUnderflow - } - - if maxlen > 0 && length > maxlen { - return ErrMaxLenExceeded - } - - v.SetString(string(d.Buffer[:length])) - d.Buffer = d.Buffer[length:] - - case reflect.Bool: - b, err := d.Bool() - if err != nil { - return err - } - v.SetBool(b) - case reflect.Int8: - i, err := d.Int8() - if err != nil { - return err - } - v.SetInt(int64(i)) - case reflect.Int16: - i, err := d.Int16() - if err != nil { - return err - } - v.SetInt(int64(i)) - case reflect.Int32: - i, err := d.Int32() - if err != nil { - return err - } - v.SetInt(int64(i)) - case reflect.Int64: - i, err := d.Int64() - if err != nil { - return err - } - v.SetInt(i) - - case reflect.Uint8: - u, err := d.Uint8() - if err != nil { - return err - } - v.SetUint(uint64(u)) - case reflect.Uint16: - u, err := d.Uint16() - if err != nil { - return err - } - v.SetUint(uint64(u)) - case reflect.Uint32: - u, err := d.Uint32() - if err != nil { - return err - } - v.SetUint(uint64(u)) - case reflect.Uint64: - u, err := d.Uint64() - if err != nil { - return err - } - v.SetUint(u) - - case reflect.Float32: - u, err := d.Uint32() - if err != nil { - return err - } - v.SetFloat(float64(math.Float32frombits(u))) - case reflect.Float64: - u, err := d.Uint64() - if err != nil { - return err - } - v.SetFloat(math.Float64frombits(u)) - - default: - log.Panicf("Decode error: kind %s not handled", v.Kind().String()) - } - - return nil -} - -func (e *Encoder) value(v reflect.Value) { - switch v.Kind() { - case reflect.Interface: - e.value(v.Elem()) - - case reflect.Array: - // Arrays are a fixed size, so the length is not written - t := v.Type() - elem := t.Elem() - switch elem.Kind() { - case reflect.Uint8: - reflect.Copy(reflect.ValueOf(e.Buffer), v) - e.Buffer = e.Buffer[v.Len():] - default: - for i := 0; i < v.Len(); i++ { - e.value(v.Index(i)) - } - } - - case reflect.Slice: - t := v.Type() - elem := t.Elem() - 
switch elem.Kind() { - case reflect.Uint8: - e.ByteSlice(v.Bytes()) - default: - e.Uint32(uint32(v.Len())) - for i := 0; i < v.Len(); i++ { - e.value(v.Index(i)) - } - } - - case reflect.Map: - e.Uint32(uint32(v.Len())) - for _, key := range v.MapKeys() { - e.value(key) - e.value(v.MapIndex(key)) - } - - case reflect.Struct: - t := v.Type() - nFields := v.NumField() - for i := 0; i < nFields; i++ { - // see comment for corresponding code in Decoder.value() - ff := t.Field(i) - // Skip unexported fields - if ff.PkgPath != "" { - continue - } - - tag := ff.Tag.Get("enc") - omitempty := TagOmitempty(tag) - - if omitempty && i != nFields-1 { - log.Panic(ErrInvalidOmitEmpty) - } - - if len(tag) > 0 && tag[0] == '-' { - continue - } - - fv := v.Field(i) - if !(omitempty && isEmpty(fv)) && (fv.CanSet() || ff.Name != "_") { - e.value(fv) - } - } - - case reflect.Bool: - e.Bool(v.Bool()) - - case reflect.String: - e.ByteSlice([]byte(v.String())) - - case reflect.Int8: - e.Int8(int8(v.Int())) - case reflect.Int16: - e.Int16(int16(v.Int())) - case reflect.Int32: - e.Int32(int32(v.Int())) - case reflect.Int64: - e.Int64(v.Int()) - - case reflect.Uint8: - e.Uint8(uint8(v.Uint())) - case reflect.Uint16: - e.Uint16(uint16(v.Uint())) - case reflect.Uint32: - e.Uint32(uint32(v.Uint())) - case reflect.Uint64: - e.Uint64(v.Uint()) - - case reflect.Float32: - e.Uint32(math.Float32bits(float32(v.Float()))) - case reflect.Float64: - e.Uint64(math.Float64bits(v.Float())) - - default: - log.Panicf("Encoding unhandled type %s", v.Type().Name()) - } -} diff --git a/vendor/github.com/SkycoinProject/skycoin/src/coin/block.go b/vendor/github.com/SkycoinProject/skycoin/src/coin/block.go deleted file mode 100644 index 00a9a89..0000000 --- a/vendor/github.com/SkycoinProject/skycoin/src/coin/block.go +++ /dev/null @@ -1,236 +0,0 @@ -/* -Package coin defines the core blockchain datastructures. - -This package should not have any dependencies except for go stdlib and cipher. 
-*/ -package coin - -import ( - "fmt" - "log" - - "github.com/SkycoinProject/skycoin/src/cipher" -) - -//go:generate skyencoder -struct BlockHeader -unexported -//go:generate skyencoder -struct BlockBody -unexported - -// MaxBlockTransactions is the maximum number of transactions in a block (see the maxlen struct tag value applied to BlockBody.Transactions) -const MaxBlockTransactions = 65535 - -// Block represents the block struct -type Block struct { - Head BlockHeader - Body BlockBody -} - -// HashPair including current block hash and previous block hash. -type HashPair struct { - Hash cipher.SHA256 - PrevHash cipher.SHA256 -} - -// BlockHeader records the block header -type BlockHeader struct { - Version uint32 - - Time uint64 - BkSeq uint64 // Increment every block - Fee uint64 // Fee in block - - PrevHash cipher.SHA256 // Hash of header of previous block - BodyHash cipher.SHA256 // Hash of transaction block - - UxHash cipher.SHA256 // XOR of sha256 of elements in unspent output set -} - -// BlockBody represents the block body -type BlockBody struct { - Transactions Transactions `enc:",maxlen=65535"` -} - -// SignedBlock signed block -type SignedBlock struct { - Block - Sig cipher.Sig -} - -// VerifySignature verifies that the block is signed by pubkey -func (b SignedBlock) VerifySignature(pubkey cipher.PubKey) error { - return cipher.VerifyPubKeySignedHash(pubkey, b.Sig, b.HashHeader()) -} - -// NewBlock creates new block. 
-func NewBlock(prev Block, currentTime uint64, uxHash cipher.SHA256, txns Transactions, calc FeeCalculator) (*Block, error) { - if len(txns) == 0 { - return nil, fmt.Errorf("Refusing to create block with no transactions") - } - - fee, err := txns.Fees(calc) - if err != nil { - // This should have been caught earlier - return nil, fmt.Errorf("Invalid transaction fees: %v", err) - } - - body := BlockBody{txns} - head := NewBlockHeader(prev.Head, uxHash, currentTime, fee, body) - return &Block{ - Head: head, - Body: body, - }, nil -} - -// NewGenesisBlock creates genesis block -func NewGenesisBlock(genesisAddr cipher.Address, genesisCoins, timestamp uint64) (*Block, error) { - txn := Transaction{} - if err := txn.PushOutput(genesisAddr, genesisCoins, genesisCoins); err != nil { - return nil, err - } - body := BlockBody{Transactions: Transactions{txn}} - prevHash := cipher.SHA256{} - bodyHash := body.Hash() - head := BlockHeader{ - Time: timestamp, - BodyHash: bodyHash, - PrevHash: prevHash, - BkSeq: 0, - Version: 0, - Fee: 0, - UxHash: cipher.SHA256{}, - } - b := &Block{ - Head: head, - Body: body, - } - - return b, nil -} - -// HashHeader return hash of block head. -func (b Block) HashHeader() cipher.SHA256 { - return b.Head.Hash() -} - -// Time return the head time of the block. -func (b Block) Time() uint64 { - return b.Head.Time -} - -// Seq return the head seq of the block. 
-func (b Block) Seq() uint64 { - return b.Head.BkSeq -} - -// Size returns the size of the Block's Transactions, in bytes -func (b Block) Size() (uint32, error) { - return b.Body.Size() -} - -// NewBlockHeader creates block header -func NewBlockHeader(prev BlockHeader, uxHash cipher.SHA256, currentTime, fee uint64, body BlockBody) BlockHeader { - if currentTime <= prev.Time { - log.Panic("Time can only move forward") - } - bodyHash := body.Hash() - prevHash := prev.Hash() - return BlockHeader{ - BodyHash: bodyHash, - Version: prev.Version, - PrevHash: prevHash, - Time: currentTime, - BkSeq: prev.BkSeq + 1, - Fee: fee, - UxHash: uxHash, - } -} - -// Hash return hash of block header -func (bh *BlockHeader) Hash() cipher.SHA256 { - return cipher.SumSHA256(bh.Bytes()) -} - -// Bytes serialize the blockheader and return the byte value. -func (bh *BlockHeader) Bytes() []byte { - buf, err := encodeBlockHeader(bh) - if err != nil { - log.Panicf("encodeBlockHeader failed: %v", err) - } - return buf -} - -// Hash returns the merkle hash of contained transactions -func (bb BlockBody) Hash() cipher.SHA256 { - hashes := make([]cipher.SHA256, len(bb.Transactions)) - for i := range bb.Transactions { - hashes[i] = bb.Transactions[i].Hash() - } - // Merkle hash of transactions - return cipher.Merkle(hashes) -} - -// Size returns the size of Transactions, in bytes -func (bb BlockBody) Size() (uint32, error) { - // We can't use length of self.Bytes() because it has a length prefix - // Need only the sum of transaction sizes - return bb.Transactions.Size() -} - -// Bytes serialize block body, and return the byte value. -func (bb *BlockBody) Bytes() []byte { - buf, err := encodeBlockBody(bb) - if err != nil { - log.Panicf("encodeBlockBody failed: %v", err) - } - return buf -} - -// CreateUnspents creates the expected outputs for a transaction. 
-func CreateUnspents(bh BlockHeader, txn Transaction) UxArray { - var h cipher.SHA256 - // The genesis block uses the null hash as the SrcTransaction [FIXME hardfork] - if bh.BkSeq != 0 { - h = txn.Hash() - } - uxo := make(UxArray, len(txn.Out)) - for i := range txn.Out { - uxo[i] = UxOut{ - Head: UxHead{ - Time: bh.Time, - BkSeq: bh.BkSeq, - }, - Body: UxBody{ - SrcTransaction: h, - Address: txn.Out[i].Address, - Coins: txn.Out[i].Coins, - Hours: txn.Out[i].Hours, - }, - } - } - return uxo -} - -// CreateUnspent creates single unspent output -func CreateUnspent(bh BlockHeader, txn Transaction, outIndex int) (UxOut, error) { - if outIndex < 0 || outIndex >= len(txn.Out) { - return UxOut{}, fmt.Errorf("Transaction out index overflows transaction outputs") - } - - var h cipher.SHA256 - // The genesis block uses the null hash as the SrcTransaction [FIXME hardfork] - if bh.BkSeq != 0 { - h = txn.Hash() - } - - return UxOut{ - Head: UxHead{ - Time: bh.Time, - BkSeq: bh.BkSeq, - }, - Body: UxBody{ - SrcTransaction: h, - Address: txn.Out[outIndex].Address, - Coins: txn.Out[outIndex].Coins, - Hours: txn.Out[outIndex].Hours, - }, - }, nil -} diff --git a/vendor/github.com/SkycoinProject/skycoin/src/coin/block_body_skyencoder.go b/vendor/github.com/SkycoinProject/skycoin/src/coin/block_body_skyencoder.go deleted file mode 100644 index 26cb15e..0000000 --- a/vendor/github.com/SkycoinProject/skycoin/src/coin/block_body_skyencoder.go +++ /dev/null @@ -1,407 +0,0 @@ -// Code generated by github.com/SkycoinProject/skyencoder. DO NOT EDIT. 
- -package coin - -import ( - "errors" - "math" - - "github.com/SkycoinProject/skycoin/src/cipher" - "github.com/SkycoinProject/skycoin/src/cipher/encoder" -) - -// encodeSizeBlockBody computes the size of an encoded object of type BlockBody -func encodeSizeBlockBody(obj *BlockBody) uint64 { - i0 := uint64(0) - - // obj.Transactions - i0 += 4 - for _, x1 := range obj.Transactions { - i1 := uint64(0) - - // x1.Length - i1 += 4 - - // x1.Type - i1++ - - // x1.InnerHash - i1 += 32 - - // x1.Sigs - i1 += 4 - { - i2 := uint64(0) - - // x2 - i2 += 65 - - i1 += uint64(len(x1.Sigs)) * i2 - } - - // x1.In - i1 += 4 - { - i2 := uint64(0) - - // x2 - i2 += 32 - - i1 += uint64(len(x1.In)) * i2 - } - - // x1.Out - i1 += 4 - { - i2 := uint64(0) - - // x2.Address.Version - i2++ - - // x2.Address.Key - i2 += 20 - - // x2.Coins - i2 += 8 - - // x2.Hours - i2 += 8 - - i1 += uint64(len(x1.Out)) * i2 - } - - i0 += i1 - } - - return i0 -} - -// encodeBlockBody encodes an object of type BlockBody to a buffer allocated to the exact size -// required to encode the object. -func encodeBlockBody(obj *BlockBody) ([]byte, error) { - n := encodeSizeBlockBody(obj) - buf := make([]byte, n) - - if err := encodeBlockBodyToBuffer(buf, obj); err != nil { - return nil, err - } - - return buf, nil -} - -// encodeBlockBodyToBuffer encodes an object of type BlockBody to a []byte buffer. -// The buffer must be large enough to encode the object, otherwise an error is returned. 
-func encodeBlockBodyToBuffer(buf []byte, obj *BlockBody) error { - if uint64(len(buf)) < encodeSizeBlockBody(obj) { - return encoder.ErrBufferUnderflow - } - - e := &encoder.Encoder{ - Buffer: buf[:], - } - - // obj.Transactions maxlen check - if len(obj.Transactions) > 65535 { - return encoder.ErrMaxLenExceeded - } - - // obj.Transactions length check - if uint64(len(obj.Transactions)) > math.MaxUint32 { - return errors.New("obj.Transactions length exceeds math.MaxUint32") - } - - // obj.Transactions length - e.Uint32(uint32(len(obj.Transactions))) - - // obj.Transactions - for _, x := range obj.Transactions { - - // x.Length - e.Uint32(x.Length) - - // x.Type - e.Uint8(x.Type) - - // x.InnerHash - e.CopyBytes(x.InnerHash[:]) - - // x.Sigs maxlen check - if len(x.Sigs) > 65535 { - return encoder.ErrMaxLenExceeded - } - - // x.Sigs length check - if uint64(len(x.Sigs)) > math.MaxUint32 { - return errors.New("x.Sigs length exceeds math.MaxUint32") - } - - // x.Sigs length - e.Uint32(uint32(len(x.Sigs))) - - // x.Sigs - for _, x := range x.Sigs { - - // x - e.CopyBytes(x[:]) - - } - - // x.In maxlen check - if len(x.In) > 65535 { - return encoder.ErrMaxLenExceeded - } - - // x.In length check - if uint64(len(x.In)) > math.MaxUint32 { - return errors.New("x.In length exceeds math.MaxUint32") - } - - // x.In length - e.Uint32(uint32(len(x.In))) - - // x.In - for _, x := range x.In { - - // x - e.CopyBytes(x[:]) - - } - - // x.Out maxlen check - if len(x.Out) > 65535 { - return encoder.ErrMaxLenExceeded - } - - // x.Out length check - if uint64(len(x.Out)) > math.MaxUint32 { - return errors.New("x.Out length exceeds math.MaxUint32") - } - - // x.Out length - e.Uint32(uint32(len(x.Out))) - - // x.Out - for _, x := range x.Out { - - // x.Address.Version - e.Uint8(x.Address.Version) - - // x.Address.Key - e.CopyBytes(x.Address.Key[:]) - - // x.Coins - e.Uint64(x.Coins) - - // x.Hours - e.Uint64(x.Hours) - - } - - } - - return nil -} - -// decodeBlockBody decodes an object 
of type BlockBody from a buffer. -// Returns the number of bytes used from the buffer to decode the object. -// If the buffer not long enough to decode the object, returns encoder.ErrBufferUnderflow. -func decodeBlockBody(buf []byte, obj *BlockBody) (uint64, error) { - d := &encoder.Decoder{ - Buffer: buf[:], - } - - { - // obj.Transactions - - ul, err := d.Uint32() - if err != nil { - return 0, err - } - - length := int(ul) - if length < 0 || length > len(d.Buffer) { - return 0, encoder.ErrBufferUnderflow - } - - if length > 65535 { - return 0, encoder.ErrMaxLenExceeded - } - - if length != 0 { - obj.Transactions = make([]Transaction, length) - - for z1 := range obj.Transactions { - { - // obj.Transactions[z1].Length - i, err := d.Uint32() - if err != nil { - return 0, err - } - obj.Transactions[z1].Length = i - } - - { - // obj.Transactions[z1].Type - i, err := d.Uint8() - if err != nil { - return 0, err - } - obj.Transactions[z1].Type = i - } - - { - // obj.Transactions[z1].InnerHash - if len(d.Buffer) < len(obj.Transactions[z1].InnerHash) { - return 0, encoder.ErrBufferUnderflow - } - copy(obj.Transactions[z1].InnerHash[:], d.Buffer[:len(obj.Transactions[z1].InnerHash)]) - d.Buffer = d.Buffer[len(obj.Transactions[z1].InnerHash):] - } - - { - // obj.Transactions[z1].Sigs - - ul, err := d.Uint32() - if err != nil { - return 0, err - } - - length := int(ul) - if length < 0 || length > len(d.Buffer) { - return 0, encoder.ErrBufferUnderflow - } - - if length > 65535 { - return 0, encoder.ErrMaxLenExceeded - } - - if length != 0 { - obj.Transactions[z1].Sigs = make([]cipher.Sig, length) - - for z3 := range obj.Transactions[z1].Sigs { - { - // obj.Transactions[z1].Sigs[z3] - if len(d.Buffer) < len(obj.Transactions[z1].Sigs[z3]) { - return 0, encoder.ErrBufferUnderflow - } - copy(obj.Transactions[z1].Sigs[z3][:], d.Buffer[:len(obj.Transactions[z1].Sigs[z3])]) - d.Buffer = d.Buffer[len(obj.Transactions[z1].Sigs[z3]):] - } - - } - } - } - - { - // obj.Transactions[z1].In 
- - ul, err := d.Uint32() - if err != nil { - return 0, err - } - - length := int(ul) - if length < 0 || length > len(d.Buffer) { - return 0, encoder.ErrBufferUnderflow - } - - if length > 65535 { - return 0, encoder.ErrMaxLenExceeded - } - - if length != 0 { - obj.Transactions[z1].In = make([]cipher.SHA256, length) - - for z3 := range obj.Transactions[z1].In { - { - // obj.Transactions[z1].In[z3] - if len(d.Buffer) < len(obj.Transactions[z1].In[z3]) { - return 0, encoder.ErrBufferUnderflow - } - copy(obj.Transactions[z1].In[z3][:], d.Buffer[:len(obj.Transactions[z1].In[z3])]) - d.Buffer = d.Buffer[len(obj.Transactions[z1].In[z3]):] - } - - } - } - } - - { - // obj.Transactions[z1].Out - - ul, err := d.Uint32() - if err != nil { - return 0, err - } - - length := int(ul) - if length < 0 || length > len(d.Buffer) { - return 0, encoder.ErrBufferUnderflow - } - - if length > 65535 { - return 0, encoder.ErrMaxLenExceeded - } - - if length != 0 { - obj.Transactions[z1].Out = make([]TransactionOutput, length) - - for z3 := range obj.Transactions[z1].Out { - { - // obj.Transactions[z1].Out[z3].Address.Version - i, err := d.Uint8() - if err != nil { - return 0, err - } - obj.Transactions[z1].Out[z3].Address.Version = i - } - - { - // obj.Transactions[z1].Out[z3].Address.Key - if len(d.Buffer) < len(obj.Transactions[z1].Out[z3].Address.Key) { - return 0, encoder.ErrBufferUnderflow - } - copy(obj.Transactions[z1].Out[z3].Address.Key[:], d.Buffer[:len(obj.Transactions[z1].Out[z3].Address.Key)]) - d.Buffer = d.Buffer[len(obj.Transactions[z1].Out[z3].Address.Key):] - } - - { - // obj.Transactions[z1].Out[z3].Coins - i, err := d.Uint64() - if err != nil { - return 0, err - } - obj.Transactions[z1].Out[z3].Coins = i - } - - { - // obj.Transactions[z1].Out[z3].Hours - i, err := d.Uint64() - if err != nil { - return 0, err - } - obj.Transactions[z1].Out[z3].Hours = i - } - - } - } - } - } - } - } - - return uint64(len(buf) - len(d.Buffer)), nil -} - -// decodeBlockBodyExact decodes 
an object of type BlockBody from a buffer. -// If the buffer not long enough to decode the object, returns encoder.ErrBufferUnderflow. -// If the buffer is longer than required to decode the object, returns encoder.ErrRemainingBytes. -func decodeBlockBodyExact(buf []byte, obj *BlockBody) error { - if n, err := decodeBlockBody(buf, obj); err != nil { - return err - } else if n != uint64(len(buf)) { - return encoder.ErrRemainingBytes - } - - return nil -} diff --git a/vendor/github.com/SkycoinProject/skycoin/src/coin/block_header_skyencoder.go b/vendor/github.com/SkycoinProject/skycoin/src/coin/block_header_skyencoder.go deleted file mode 100644 index f91f527..0000000 --- a/vendor/github.com/SkycoinProject/skycoin/src/coin/block_header_skyencoder.go +++ /dev/null @@ -1,168 +0,0 @@ -// Code generated by github.com/SkycoinProject/skyencoder. DO NOT EDIT. - -package coin - -import "github.com/SkycoinProject/skycoin/src/cipher/encoder" - -// encodeSizeBlockHeader computes the size of an encoded object of type BlockHeader -func encodeSizeBlockHeader(obj *BlockHeader) uint64 { - i0 := uint64(0) - - // obj.Version - i0 += 4 - - // obj.Time - i0 += 8 - - // obj.BkSeq - i0 += 8 - - // obj.Fee - i0 += 8 - - // obj.PrevHash - i0 += 32 - - // obj.BodyHash - i0 += 32 - - // obj.UxHash - i0 += 32 - - return i0 -} - -// encodeBlockHeader encodes an object of type BlockHeader to a buffer allocated to the exact size -// required to encode the object. -func encodeBlockHeader(obj *BlockHeader) ([]byte, error) { - n := encodeSizeBlockHeader(obj) - buf := make([]byte, n) - - if err := encodeBlockHeaderToBuffer(buf, obj); err != nil { - return nil, err - } - - return buf, nil -} - -// encodeBlockHeaderToBuffer encodes an object of type BlockHeader to a []byte buffer. -// The buffer must be large enough to encode the object, otherwise an error is returned. 
-func encodeBlockHeaderToBuffer(buf []byte, obj *BlockHeader) error { - if uint64(len(buf)) < encodeSizeBlockHeader(obj) { - return encoder.ErrBufferUnderflow - } - - e := &encoder.Encoder{ - Buffer: buf[:], - } - - // obj.Version - e.Uint32(obj.Version) - - // obj.Time - e.Uint64(obj.Time) - - // obj.BkSeq - e.Uint64(obj.BkSeq) - - // obj.Fee - e.Uint64(obj.Fee) - - // obj.PrevHash - e.CopyBytes(obj.PrevHash[:]) - - // obj.BodyHash - e.CopyBytes(obj.BodyHash[:]) - - // obj.UxHash - e.CopyBytes(obj.UxHash[:]) - - return nil -} - -// decodeBlockHeader decodes an object of type BlockHeader from a buffer. -// Returns the number of bytes used from the buffer to decode the object. -// If the buffer not long enough to decode the object, returns encoder.ErrBufferUnderflow. -func decodeBlockHeader(buf []byte, obj *BlockHeader) (uint64, error) { - d := &encoder.Decoder{ - Buffer: buf[:], - } - - { - // obj.Version - i, err := d.Uint32() - if err != nil { - return 0, err - } - obj.Version = i - } - - { - // obj.Time - i, err := d.Uint64() - if err != nil { - return 0, err - } - obj.Time = i - } - - { - // obj.BkSeq - i, err := d.Uint64() - if err != nil { - return 0, err - } - obj.BkSeq = i - } - - { - // obj.Fee - i, err := d.Uint64() - if err != nil { - return 0, err - } - obj.Fee = i - } - - { - // obj.PrevHash - if len(d.Buffer) < len(obj.PrevHash) { - return 0, encoder.ErrBufferUnderflow - } - copy(obj.PrevHash[:], d.Buffer[:len(obj.PrevHash)]) - d.Buffer = d.Buffer[len(obj.PrevHash):] - } - - { - // obj.BodyHash - if len(d.Buffer) < len(obj.BodyHash) { - return 0, encoder.ErrBufferUnderflow - } - copy(obj.BodyHash[:], d.Buffer[:len(obj.BodyHash)]) - d.Buffer = d.Buffer[len(obj.BodyHash):] - } - - { - // obj.UxHash - if len(d.Buffer) < len(obj.UxHash) { - return 0, encoder.ErrBufferUnderflow - } - copy(obj.UxHash[:], d.Buffer[:len(obj.UxHash)]) - d.Buffer = d.Buffer[len(obj.UxHash):] - } - - return uint64(len(buf) - len(d.Buffer)), nil -} - -// decodeBlockHeaderExact 
decodes an object of type BlockHeader from a buffer. -// If the buffer not long enough to decode the object, returns encoder.ErrBufferUnderflow. -// If the buffer is longer than required to decode the object, returns encoder.ErrRemainingBytes. -func decodeBlockHeaderExact(buf []byte, obj *BlockHeader) error { - if n, err := decodeBlockHeader(buf, obj); err != nil { - return err - } else if n != uint64(len(buf)) { - return encoder.ErrRemainingBytes - } - - return nil -} diff --git a/vendor/github.com/SkycoinProject/skycoin/src/coin/outputs.go b/vendor/github.com/SkycoinProject/skycoin/src/coin/outputs.go deleted file mode 100644 index 457e5eb..0000000 --- a/vendor/github.com/SkycoinProject/skycoin/src/coin/outputs.go +++ /dev/null @@ -1,327 +0,0 @@ -package coin - -import ( - "bytes" - "errors" - "fmt" - "log" - "sort" - - "github.com/SkycoinProject/skycoin/src/cipher" - "github.com/SkycoinProject/skycoin/src/util/mathutil" -) - -/* - Unspent Outputs -*/ - -/* -- hash must only depend on factors known to sender --- hash cannot depend on block executed --- hash cannot depend on sequence number --- hash may depend on nonce - -- hash must depend only on factors known to sender --- needed to minimize divergence during block chain forks -- it should be difficult to create outputs with duplicate ids - -- Uxhash cannot depend on time or block it was created -- time is still needed for -*/ - -/* - For each transaction, keep track of - - order created - - order spent (for rollbacks) -*/ - -//go:generate skyencoder -struct UxHead -unexported -//go:generate skyencoder -struct UxBody -unexported - -// UxOut represents uxout -type UxOut struct { - Head UxHead - Body UxBody //hashed part - //Meta UxMeta -} - -// UxHead metadata (not hashed) -type UxHead struct { - Time uint64 //time of block it was created in - BkSeq uint64 //block it was created in, used to calculate depth - // SpSeq uint64 //block it was spent in -} - -// UxBody uxbody -type UxBody struct { - SrcTransaction 
cipher.SHA256 // Inner Hash of Transaction - Address cipher.Address // Address of receiver - Coins uint64 // Number of coins - Hours uint64 // Coin hours -} - -// Hash returns the hash of UxBody -func (uo *UxOut) Hash() cipher.SHA256 { - return uo.Body.Hash() -} - -// SnapshotHash returns hash of UxBody + UxHead -func (uo *UxOut) SnapshotHash() cipher.SHA256 { - n1 := encodeSizeUxBody(&uo.Body) - n2 := encodeSizeUxHead(&uo.Head) - buf := make([]byte, n1+n2) - - if err := encodeUxBodyToBuffer(buf[:n1], &uo.Body); err != nil { - log.Panicf("encodeUxBodyToBuffer failed: %v", err) - } - if err := encodeUxHeadToBuffer(buf[n1:], &uo.Head); err != nil { - log.Panicf("encodeUxHeadToBuffer failed: %v", err) - } - - return cipher.SumSHA256(buf) -} - -// Hash returns hash of uxbody -func (ub *UxBody) Hash() cipher.SHA256 { - buf, err := encodeUxBody(ub) - if err != nil { - log.Panicf("encodeUxBody failed: %v", err) - } - return cipher.SumSHA256(buf) -} - -/* - Make independent of block rate? - Then need creation time of output - Creation time of transaction cant be hashed -*/ - -// ErrAddEarnedCoinHoursAdditionOverflow is returned by UxOut.CoinHours() if during the addition of base coin -// hours to additional earned coin hours, the value would overflow a uint64. -// Callers may choose to ignore this errors and use 0 as the coinhours value instead. -// This affects one existing spent output, spent in block 13277. -var ErrAddEarnedCoinHoursAdditionOverflow = errors.New("UxOut.CoinHours addition of earned coin hours overflow") - -// CoinHours Calculate coinhour balance of output. t is the current unix utc time. 
-func (uo *UxOut) CoinHours(t uint64) (uint64, error) { - if t < uo.Head.Time { - log.Printf("Calculating coin hours with t < head time") - return uo.Body.Hours, nil - } - - seconds := t - uo.Head.Time // number of seconds - - // Calculate whole coin seconds - wholeCoins := uo.Body.Coins / 1e6 - wholeCoinSeconds, err := mathutil.MultUint64(seconds, wholeCoins) - if err != nil { - err := fmt.Errorf("UxOut.CoinHours: Calculating whole coin seconds overflows uint64 seconds=%d coins=%d uxid=%s", seconds, wholeCoins, uo.Hash().Hex()) - log.Printf("%v", err) - return 0, err - } - - // Calculate remainder droplet seconds - remainderDroplets := uo.Body.Coins % 1e6 - dropletSeconds, err := mathutil.MultUint64(seconds, remainderDroplets) - if err != nil { - err := fmt.Errorf("UxOut.CoinHours: Calculating droplet seconds overflows uint64 seconds=%d droplets=%d uxid=%s", seconds, remainderDroplets, uo.Hash().Hex()) - log.Printf("%v", err) - return 0, err - } - - // Add coinSeconds and seconds earned by droplets, rounded off - coinSeconds := wholeCoinSeconds + dropletSeconds/1e6 - - coinHours := coinSeconds / 3600 // coin hours - totalHours, err := mathutil.AddUint64(uo.Body.Hours, coinHours) // starting+earned - if err != nil { - log.Printf("%v uxid=%s", ErrAddEarnedCoinHoursAdditionOverflow, uo.Hash().Hex()) - return 0, ErrAddEarnedCoinHoursAdditionOverflow - } - return totalHours, nil -} - -// UxHashSet set mapping from UxHash to a placeholder value -type UxHashSet map[cipher.SHA256]struct{} - -// UxArray Array of Outputs -// Used by unspent output pool, spent tests -type UxArray []UxOut - -// Hashes returns Array of hashes for the Ux in the UxArray -func (ua UxArray) Hashes() []cipher.SHA256 { - hashes := make([]cipher.SHA256, len(ua)) - for i, ux := range ua { - hashes[i] = ux.Hash() - } - return hashes -} - -// HasDupes checks the UxArray for outputs which have the same hash -func (ua UxArray) HasDupes() bool { - m := make(UxHashSet, len(ua)) - for i := range ua { - h := 
ua[i].Hash() - if _, ok := m[h]; ok { - return true - } - m[h] = struct{}{} - } - return false -} - -// Set returns the UxArray as a hash to struct{} map to be used as a set. -// Should only be used for membership detection -func (ua UxArray) Set() UxHashSet { - m := make(UxHashSet, len(ua)) - for i := range ua { - m[ua[i].Hash()] = struct{}{} - } - return m -} - -// Sort sorts UxArray -func (ua UxArray) Sort() { - sort.Sort(ua) -} - -// Len returns length of UxArray -func (ua UxArray) Len() int { - return len(ua) -} - -// Less checks if UxArray[i] < UxArray[j] -func (ua UxArray) Less(i, j int) bool { - hash1 := ua[i].Hash() - hash2 := ua[j].Hash() - return bytes.Compare(hash1[:], hash2[:]) < 0 -} - -// Swap swaps value of UxArray[i] and UxArray[j] -func (ua UxArray) Swap(i, j int) { - ua[i], ua[j] = ua[j], ua[i] -} - -// Coins returns the total coins -func (ua UxArray) Coins() (uint64, error) { - var coins uint64 - for _, ux := range ua { - var err error - coins, err = mathutil.AddUint64(coins, ux.Body.Coins) - if err != nil { - return 0, errors.New("UxArray.Coins addition overflow") - } - } - - return coins, nil -} - -// CoinHours returns the total coin hours -func (ua UxArray) CoinHours(headTime uint64) (uint64, error) { - var hours uint64 - for _, ux := range ua { - uxHours, err := ux.CoinHours(headTime) - if err != nil { - return 0, err - } - - hours, err = mathutil.AddUint64(hours, uxHours) - if err != nil { - return 0, errors.New("UxArray.CoinHours addition overflow") - } - } - return hours, nil -} - -// AddressUxOuts maps address with uxarray -type AddressUxOuts map[cipher.Address]UxArray - -// NewAddressUxOuts creates address uxouts map -func NewAddressUxOuts(uxs UxArray) AddressUxOuts { - uxo := make(AddressUxOuts) - for _, ux := range uxs { - uxo[ux.Body.Address] = append(uxo[ux.Body.Address], ux) - } - return uxo -} - -// Keys returns the Address keys -func (auo AddressUxOuts) Keys() []cipher.Address { - addrs := make([]cipher.Address, len(auo)) - i := 
0 - for k := range auo { - addrs[i] = k - i++ - } - return addrs -} - -// Flatten converts an AddressUxOuts map to a UxArray -func (auo AddressUxOuts) Flatten() UxArray { - oxs := make(UxArray, 0, len(auo)) - for _, uxs := range auo { - for i := range uxs { - oxs = append(oxs, uxs[i]) - } - } - return oxs -} - -// Sub returns a new set of unspents, with unspents found in other removed. -// No address's unspent set will be empty -// Depreciate this: only visor uses it -func (auo AddressUxOuts) Sub(other AddressUxOuts) AddressUxOuts { - ox := make(AddressUxOuts, len(auo)) - for a, uxs := range auo { - if suxs, ok := other[a]; ok { - ouxs := uxs.Sub(suxs) - if len(ouxs) > 0 { - ox[a] = ouxs - } - } else { - ox[a] = uxs - } - } - return ox -} - -// Add returns a new unspents, with merged unspents -func (auo AddressUxOuts) Add(other AddressUxOuts) AddressUxOuts { - ox := make(AddressUxOuts, len(auo)) - for a, o := range auo { - ox[a] = o - } - - for a, uxs := range other { - if suxs, ok := ox[a]; ok { - ox[a] = suxs.Add(uxs) - } else { - ox[a] = uxs - } - } - return ox -} - -// Sub returns a new UxArray with elements in other removed from self -// Deprecate -func (ua UxArray) Sub(other UxArray) UxArray { - uxa := make(UxArray, 0) - m := other.Set() - for i := range ua { - if _, ok := m[ua[i].Hash()]; !ok { - uxa = append(uxa, ua[i]) - } - } - return uxa -} - -// Add returns a new UxArray with merged elements -func (ua UxArray) Add(other UxArray) UxArray { - m := ua.Set() - for i := range other { - if _, ok := m[other[i].Hash()]; !ok { - ua = append(ua, other[i]) - } - } - return ua -} diff --git a/vendor/github.com/SkycoinProject/skycoin/src/coin/transaction_inputs_skyencoder.go b/vendor/github.com/SkycoinProject/skycoin/src/coin/transaction_inputs_skyencoder.go deleted file mode 100644 index af07767..0000000 --- a/vendor/github.com/SkycoinProject/skycoin/src/coin/transaction_inputs_skyencoder.go +++ /dev/null @@ -1,135 +0,0 @@ -// Code generated by 
github.com/SkycoinProject/skyencoder. DO NOT EDIT. - -package coin - -import ( - "errors" - "math" - - "github.com/SkycoinProject/skycoin/src/cipher" - "github.com/SkycoinProject/skycoin/src/cipher/encoder" -) - -// encodeSizeTransactionInputs computes the size of an encoded object of type transactionInputs -func encodeSizeTransactionInputs(obj *transactionInputs) uint64 { - i0 := uint64(0) - - // obj.In - i0 += 4 - { - i1 := uint64(0) - - // x1 - i1 += 32 - - i0 += uint64(len(obj.In)) * i1 - } - - return i0 -} - -// encodeTransactionInputs encodes an object of type transactionInputs to a buffer allocated to the exact size -// required to encode the object. -func encodeTransactionInputs(obj *transactionInputs) ([]byte, error) { - n := encodeSizeTransactionInputs(obj) - buf := make([]byte, n) - - if err := encodeTransactionInputsToBuffer(buf, obj); err != nil { - return nil, err - } - - return buf, nil -} - -// encodeTransactionInputsToBuffer encodes an object of type transactionInputs to a []byte buffer. -// The buffer must be large enough to encode the object, otherwise an error is returned. -func encodeTransactionInputsToBuffer(buf []byte, obj *transactionInputs) error { - if uint64(len(buf)) < encodeSizeTransactionInputs(obj) { - return encoder.ErrBufferUnderflow - } - - e := &encoder.Encoder{ - Buffer: buf[:], - } - - // obj.In maxlen check - if len(obj.In) > 65535 { - return encoder.ErrMaxLenExceeded - } - - // obj.In length check - if uint64(len(obj.In)) > math.MaxUint32 { - return errors.New("obj.In length exceeds math.MaxUint32") - } - - // obj.In length - e.Uint32(uint32(len(obj.In))) - - // obj.In - for _, x := range obj.In { - - // x - e.CopyBytes(x[:]) - - } - - return nil -} - -// decodeTransactionInputs decodes an object of type transactionInputs from a buffer. -// Returns the number of bytes used from the buffer to decode the object. -// If the buffer not long enough to decode the object, returns encoder.ErrBufferUnderflow. 
-func decodeTransactionInputs(buf []byte, obj *transactionInputs) (uint64, error) { - d := &encoder.Decoder{ - Buffer: buf[:], - } - - { - // obj.In - - ul, err := d.Uint32() - if err != nil { - return 0, err - } - - length := int(ul) - if length < 0 || length > len(d.Buffer) { - return 0, encoder.ErrBufferUnderflow - } - - if length > 65535 { - return 0, encoder.ErrMaxLenExceeded - } - - if length != 0 { - obj.In = make([]cipher.SHA256, length) - - for z1 := range obj.In { - { - // obj.In[z1] - if len(d.Buffer) < len(obj.In[z1]) { - return 0, encoder.ErrBufferUnderflow - } - copy(obj.In[z1][:], d.Buffer[:len(obj.In[z1])]) - d.Buffer = d.Buffer[len(obj.In[z1]):] - } - - } - } - } - - return uint64(len(buf) - len(d.Buffer)), nil -} - -// decodeTransactionInputsExact decodes an object of type transactionInputs from a buffer. -// If the buffer not long enough to decode the object, returns encoder.ErrBufferUnderflow. -// If the buffer is longer than required to decode the object, returns encoder.ErrRemainingBytes. -func decodeTransactionInputsExact(buf []byte, obj *transactionInputs) error { - if n, err := decodeTransactionInputs(buf, obj); err != nil { - return err - } else if n != uint64(len(buf)) { - return encoder.ErrRemainingBytes - } - - return nil -} diff --git a/vendor/github.com/SkycoinProject/skycoin/src/coin/transaction_outputs_skyencoder.go b/vendor/github.com/SkycoinProject/skycoin/src/coin/transaction_outputs_skyencoder.go deleted file mode 100644 index 2169757..0000000 --- a/vendor/github.com/SkycoinProject/skycoin/src/coin/transaction_outputs_skyencoder.go +++ /dev/null @@ -1,179 +0,0 @@ -// Code generated by github.com/SkycoinProject/skyencoder. DO NOT EDIT. 
- -package coin - -import ( - "errors" - "math" - - "github.com/SkycoinProject/skycoin/src/cipher/encoder" -) - -// encodeSizeTransactionOutputs computes the size of an encoded object of type transactionOutputs -func encodeSizeTransactionOutputs(obj *transactionOutputs) uint64 { - i0 := uint64(0) - - // obj.Out - i0 += 4 - { - i1 := uint64(0) - - // x1.Address.Version - i1++ - - // x1.Address.Key - i1 += 20 - - // x1.Coins - i1 += 8 - - // x1.Hours - i1 += 8 - - i0 += uint64(len(obj.Out)) * i1 - } - - return i0 -} - -// encodeTransactionOutputs encodes an object of type transactionOutputs to a buffer allocated to the exact size -// required to encode the object. -func encodeTransactionOutputs(obj *transactionOutputs) ([]byte, error) { - n := encodeSizeTransactionOutputs(obj) - buf := make([]byte, n) - - if err := encodeTransactionOutputsToBuffer(buf, obj); err != nil { - return nil, err - } - - return buf, nil -} - -// encodeTransactionOutputsToBuffer encodes an object of type transactionOutputs to a []byte buffer. -// The buffer must be large enough to encode the object, otherwise an error is returned. -func encodeTransactionOutputsToBuffer(buf []byte, obj *transactionOutputs) error { - if uint64(len(buf)) < encodeSizeTransactionOutputs(obj) { - return encoder.ErrBufferUnderflow - } - - e := &encoder.Encoder{ - Buffer: buf[:], - } - - // obj.Out maxlen check - if len(obj.Out) > 65535 { - return encoder.ErrMaxLenExceeded - } - - // obj.Out length check - if uint64(len(obj.Out)) > math.MaxUint32 { - return errors.New("obj.Out length exceeds math.MaxUint32") - } - - // obj.Out length - e.Uint32(uint32(len(obj.Out))) - - // obj.Out - for _, x := range obj.Out { - - // x.Address.Version - e.Uint8(x.Address.Version) - - // x.Address.Key - e.CopyBytes(x.Address.Key[:]) - - // x.Coins - e.Uint64(x.Coins) - - // x.Hours - e.Uint64(x.Hours) - - } - - return nil -} - -// decodeTransactionOutputs decodes an object of type transactionOutputs from a buffer. 
-// Returns the number of bytes used from the buffer to decode the object. -// If the buffer not long enough to decode the object, returns encoder.ErrBufferUnderflow. -func decodeTransactionOutputs(buf []byte, obj *transactionOutputs) (uint64, error) { - d := &encoder.Decoder{ - Buffer: buf[:], - } - - { - // obj.Out - - ul, err := d.Uint32() - if err != nil { - return 0, err - } - - length := int(ul) - if length < 0 || length > len(d.Buffer) { - return 0, encoder.ErrBufferUnderflow - } - - if length > 65535 { - return 0, encoder.ErrMaxLenExceeded - } - - if length != 0 { - obj.Out = make([]TransactionOutput, length) - - for z1 := range obj.Out { - { - // obj.Out[z1].Address.Version - i, err := d.Uint8() - if err != nil { - return 0, err - } - obj.Out[z1].Address.Version = i - } - - { - // obj.Out[z1].Address.Key - if len(d.Buffer) < len(obj.Out[z1].Address.Key) { - return 0, encoder.ErrBufferUnderflow - } - copy(obj.Out[z1].Address.Key[:], d.Buffer[:len(obj.Out[z1].Address.Key)]) - d.Buffer = d.Buffer[len(obj.Out[z1].Address.Key):] - } - - { - // obj.Out[z1].Coins - i, err := d.Uint64() - if err != nil { - return 0, err - } - obj.Out[z1].Coins = i - } - - { - // obj.Out[z1].Hours - i, err := d.Uint64() - if err != nil { - return 0, err - } - obj.Out[z1].Hours = i - } - - } - } - } - - return uint64(len(buf) - len(d.Buffer)), nil -} - -// decodeTransactionOutputsExact decodes an object of type transactionOutputs from a buffer. -// If the buffer not long enough to decode the object, returns encoder.ErrBufferUnderflow. -// If the buffer is longer than required to decode the object, returns encoder.ErrRemainingBytes. 
-func decodeTransactionOutputsExact(buf []byte, obj *transactionOutputs) error { - if n, err := decodeTransactionOutputs(buf, obj); err != nil { - return err - } else if n != uint64(len(buf)) { - return encoder.ErrRemainingBytes - } - - return nil -} diff --git a/vendor/github.com/SkycoinProject/skycoin/src/coin/transaction_skyencoder.go b/vendor/github.com/SkycoinProject/skycoin/src/coin/transaction_skyencoder.go deleted file mode 100644 index 538f1c9..0000000 --- a/vendor/github.com/SkycoinProject/skycoin/src/coin/transaction_skyencoder.go +++ /dev/null @@ -1,357 +0,0 @@ -// Code generated by github.com/SkycoinProject/skyencoder. DO NOT EDIT. - -package coin - -import ( - "errors" - "math" - - "github.com/SkycoinProject/skycoin/src/cipher" - "github.com/SkycoinProject/skycoin/src/cipher/encoder" -) - -// encodeSizeTransaction computes the size of an encoded object of type Transaction -func encodeSizeTransaction(obj *Transaction) uint64 { - i0 := uint64(0) - - // obj.Length - i0 += 4 - - // obj.Type - i0++ - - // obj.InnerHash - i0 += 32 - - // obj.Sigs - i0 += 4 - { - i1 := uint64(0) - - // x1 - i1 += 65 - - i0 += uint64(len(obj.Sigs)) * i1 - } - - // obj.In - i0 += 4 - { - i1 := uint64(0) - - // x1 - i1 += 32 - - i0 += uint64(len(obj.In)) * i1 - } - - // obj.Out - i0 += 4 - { - i1 := uint64(0) - - // x1.Address.Version - i1++ - - // x1.Address.Key - i1 += 20 - - // x1.Coins - i1 += 8 - - // x1.Hours - i1 += 8 - - i0 += uint64(len(obj.Out)) * i1 - } - - return i0 -} - -// encodeTransaction encodes an object of type Transaction to a buffer allocated to the exact size -// required to encode the object. -func encodeTransaction(obj *Transaction) ([]byte, error) { - n := encodeSizeTransaction(obj) - buf := make([]byte, n) - - if err := encodeTransactionToBuffer(buf, obj); err != nil { - return nil, err - } - - return buf, nil -} - -// encodeTransactionToBuffer encodes an object of type Transaction to a []byte buffer. 
-// The buffer must be large enough to encode the object, otherwise an error is returned. -func encodeTransactionToBuffer(buf []byte, obj *Transaction) error { - if uint64(len(buf)) < encodeSizeTransaction(obj) { - return encoder.ErrBufferUnderflow - } - - e := &encoder.Encoder{ - Buffer: buf[:], - } - - // obj.Length - e.Uint32(obj.Length) - - // obj.Type - e.Uint8(obj.Type) - - // obj.InnerHash - e.CopyBytes(obj.InnerHash[:]) - - // obj.Sigs maxlen check - if len(obj.Sigs) > 65535 { - return encoder.ErrMaxLenExceeded - } - - // obj.Sigs length check - if uint64(len(obj.Sigs)) > math.MaxUint32 { - return errors.New("obj.Sigs length exceeds math.MaxUint32") - } - - // obj.Sigs length - e.Uint32(uint32(len(obj.Sigs))) - - // obj.Sigs - for _, x := range obj.Sigs { - - // x - e.CopyBytes(x[:]) - - } - - // obj.In maxlen check - if len(obj.In) > 65535 { - return encoder.ErrMaxLenExceeded - } - - // obj.In length check - if uint64(len(obj.In)) > math.MaxUint32 { - return errors.New("obj.In length exceeds math.MaxUint32") - } - - // obj.In length - e.Uint32(uint32(len(obj.In))) - - // obj.In - for _, x := range obj.In { - - // x - e.CopyBytes(x[:]) - - } - - // obj.Out maxlen check - if len(obj.Out) > 65535 { - return encoder.ErrMaxLenExceeded - } - - // obj.Out length check - if uint64(len(obj.Out)) > math.MaxUint32 { - return errors.New("obj.Out length exceeds math.MaxUint32") - } - - // obj.Out length - e.Uint32(uint32(len(obj.Out))) - - // obj.Out - for _, x := range obj.Out { - - // x.Address.Version - e.Uint8(x.Address.Version) - - // x.Address.Key - e.CopyBytes(x.Address.Key[:]) - - // x.Coins - e.Uint64(x.Coins) - - // x.Hours - e.Uint64(x.Hours) - - } - - return nil -} - -// decodeTransaction decodes an object of type Transaction from a buffer. -// Returns the number of bytes used from the buffer to decode the object. -// If the buffer not long enough to decode the object, returns encoder.ErrBufferUnderflow. 
-func decodeTransaction(buf []byte, obj *Transaction) (uint64, error) { - d := &encoder.Decoder{ - Buffer: buf[:], - } - - { - // obj.Length - i, err := d.Uint32() - if err != nil { - return 0, err - } - obj.Length = i - } - - { - // obj.Type - i, err := d.Uint8() - if err != nil { - return 0, err - } - obj.Type = i - } - - { - // obj.InnerHash - if len(d.Buffer) < len(obj.InnerHash) { - return 0, encoder.ErrBufferUnderflow - } - copy(obj.InnerHash[:], d.Buffer[:len(obj.InnerHash)]) - d.Buffer = d.Buffer[len(obj.InnerHash):] - } - - { - // obj.Sigs - - ul, err := d.Uint32() - if err != nil { - return 0, err - } - - length := int(ul) - if length < 0 || length > len(d.Buffer) { - return 0, encoder.ErrBufferUnderflow - } - - if length > 65535 { - return 0, encoder.ErrMaxLenExceeded - } - - if length != 0 { - obj.Sigs = make([]cipher.Sig, length) - - for z1 := range obj.Sigs { - { - // obj.Sigs[z1] - if len(d.Buffer) < len(obj.Sigs[z1]) { - return 0, encoder.ErrBufferUnderflow - } - copy(obj.Sigs[z1][:], d.Buffer[:len(obj.Sigs[z1])]) - d.Buffer = d.Buffer[len(obj.Sigs[z1]):] - } - - } - } - } - - { - // obj.In - - ul, err := d.Uint32() - if err != nil { - return 0, err - } - - length := int(ul) - if length < 0 || length > len(d.Buffer) { - return 0, encoder.ErrBufferUnderflow - } - - if length > 65535 { - return 0, encoder.ErrMaxLenExceeded - } - - if length != 0 { - obj.In = make([]cipher.SHA256, length) - - for z1 := range obj.In { - { - // obj.In[z1] - if len(d.Buffer) < len(obj.In[z1]) { - return 0, encoder.ErrBufferUnderflow - } - copy(obj.In[z1][:], d.Buffer[:len(obj.In[z1])]) - d.Buffer = d.Buffer[len(obj.In[z1]):] - } - - } - } - } - - { - // obj.Out - - ul, err := d.Uint32() - if err != nil { - return 0, err - } - - length := int(ul) - if length < 0 || length > len(d.Buffer) { - return 0, encoder.ErrBufferUnderflow - } - - if length > 65535 { - return 0, encoder.ErrMaxLenExceeded - } - - if length != 0 { - obj.Out = make([]TransactionOutput, length) - - for z1 
:= range obj.Out { - { - // obj.Out[z1].Address.Version - i, err := d.Uint8() - if err != nil { - return 0, err - } - obj.Out[z1].Address.Version = i - } - - { - // obj.Out[z1].Address.Key - if len(d.Buffer) < len(obj.Out[z1].Address.Key) { - return 0, encoder.ErrBufferUnderflow - } - copy(obj.Out[z1].Address.Key[:], d.Buffer[:len(obj.Out[z1].Address.Key)]) - d.Buffer = d.Buffer[len(obj.Out[z1].Address.Key):] - } - - { - // obj.Out[z1].Coins - i, err := d.Uint64() - if err != nil { - return 0, err - } - obj.Out[z1].Coins = i - } - - { - // obj.Out[z1].Hours - i, err := d.Uint64() - if err != nil { - return 0, err - } - obj.Out[z1].Hours = i - } - - } - } - } - - return uint64(len(buf) - len(d.Buffer)), nil -} - -// decodeTransactionExact decodes an object of type Transaction from a buffer. -// If the buffer not long enough to decode the object, returns encoder.ErrBufferUnderflow. -// If the buffer is longer than required to decode the object, returns encoder.ErrRemainingBytes. -func decodeTransactionExact(buf []byte, obj *Transaction) error { - if n, err := decodeTransaction(buf, obj); err != nil { - return err - } else if n != uint64(len(buf)) { - return encoder.ErrRemainingBytes - } - - return nil -} diff --git a/vendor/github.com/SkycoinProject/skycoin/src/coin/transactions.go b/vendor/github.com/SkycoinProject/skycoin/src/coin/transactions.go deleted file mode 100644 index 5d340ac..0000000 --- a/vendor/github.com/SkycoinProject/skycoin/src/coin/transactions.go +++ /dev/null @@ -1,788 +0,0 @@ -package coin - -import ( - "bytes" - "encoding/hex" - "errors" - "fmt" - "log" - "math" - "sort" - - "github.com/SkycoinProject/skycoin/src/cipher" - "github.com/SkycoinProject/skycoin/src/util/mathutil" -) - -var ( - // DebugLevel1 checks for extremely unlikely conditions (10e-40) - DebugLevel1 = true - // DebugLevel2 enable checks for impossible conditions - DebugLevel2 = true -) - -//go:generate skyencoder -struct Transaction -unexported -//go:generate skyencoder 
-struct transactionInputs -//go:generate skyencoder -struct transactionOutputs - -type transactionInputs struct { - In []cipher.SHA256 `enc:",maxlen=65535"` -} - -type transactionOutputs struct { - Out []TransactionOutput `enc:",maxlen=65535"` -} - -/* -Transaction with N inputs, M ouputs is -- 32 bytes constant -- 32+65 bytes per input -- 21+8+8 bytes per output - -Skycoin Transactions are -- 97 bytes per input + 37 bytes per output + 37 bytes -Bitcoin Transactions are -- 180 bytes per input + 34 bytes per output + 10 bytes - -Sigs is the array of signatures -- the Nth signature is the authorization to spend the Nth output consumed in transaction -- the hash signed is SHA256sum of transaction inner hash and the hash of output being spent - -The inner hash is SHA256 hash of the serialization of Input and Output array -The outer hash is the hash of the whole transaction serialization -*/ - -// Transaction transaction struct -type Transaction struct { - Length uint32 // length prefix - Type uint8 // transaction type - InnerHash cipher.SHA256 // inner hash SHA256 of In[],Out[] - - Sigs []cipher.Sig `enc:",maxlen=65535"` // list of signatures, 64+1 bytes each - In []cipher.SHA256 `enc:",maxlen=65535"` // ouputs being spent - Out []TransactionOutput `enc:",maxlen=65535"` // ouputs being created -} - -// TransactionOutput hash output/name is function of Hash -type TransactionOutput struct { - Address cipher.Address // address to send to - Coins uint64 // amount to be sent in coins - Hours uint64 // amount to be sent in coin hours -} - -// Verify attempts to determine if the transaction is well formed. 
-// Verify cannot check transaction signatures, it needs the address from unspents -// Verify cannot check if outputs being spent exist -// Verify cannot check if the transaction would create or destroy coins -// or if the inputs have the required coin base -func (txn *Transaction) Verify() error { - return txn.verify(true) -} - -// VerifyUnsigned attempts to determine if the transaction is well formed, -// but requires the transaction to have at least one null signature. -// Verify cannot check transaction signatures, it needs the address from unspents -// Verify cannot check if outputs being spent exist -// Verify cannot check if the transaction would create or destroy coins -// or if the inputs have the required coin base -func (txn *Transaction) VerifyUnsigned() error { - return txn.verify(false) -} - -func (txn *Transaction) verify(signed bool) error { - if len(txn.In) == 0 { - return errors.New("No inputs") - } - if len(txn.Out) == 0 { - return errors.New("No outputs") - } - - // Check signature index fields - if len(txn.Sigs) != len(txn.In) { - return errors.New("Invalid number of signatures") - } - if len(txn.Sigs) > math.MaxUint16 { - return errors.New("Too many signatures and inputs") - } - - if len(txn.Out) > math.MaxUint16 { - return errors.New("Too many ouptuts") - } - - // Check duplicate inputs - uxOuts := make(map[cipher.SHA256]struct{}, len(txn.In)) - for i := range txn.In { - uxOuts[txn.In[i]] = struct{}{} - } - if len(uxOuts) != len(txn.In) { - return errors.New("Duplicate spend") - } - - if txn.Type != 0 { - return errors.New("transaction type invalid") - } - - // Prevent zero coin outputs - // Artificial restriction to prevent spam - for _, txo := range txn.Out { - if txo.Coins == 0 { - return errors.New("Zero coin output") - } - } - - // Check output coin integer overflow - coins := uint64(0) - for _, to := range txn.Out { - var err error - coins, err = mathutil.AddUint64(coins, to.Coins) - if err != nil { - return errors.New("Output coins 
overflow") - } - } - - // Check that Size and Hash can be computed - txnSize, txnHash, err := txn.SizeHash() - if err != nil { - return err - } - - // Check txn Size set correctly - if txn.Length != txnSize { - return errors.New("Incorrect transaction length") - } - - // Check for duplicate potential outputs - outputs := make(map[cipher.SHA256]struct{}, len(txn.Out)) - uxb := UxBody{ - SrcTransaction: txnHash, - } - for _, to := range txn.Out { - uxb.Coins = to.Coins - uxb.Hours = to.Hours - uxb.Address = to.Address - outputs[uxb.Hash()] = struct{}{} - } - if len(outputs) != len(txn.Out) { - return errors.New("Duplicate output in transaction") - } - - // Check inner hash - innerHash, err := txn.hashInner() - if err != nil { - return fmt.Errorf("HashInner failed: %v", err) - } - - if innerHash != txn.InnerHash { - return errors.New("InnerHash does not match computed hash") - } - - // Validate signatures - for i, sig := range txn.Sigs { - if sig.Null() { - // Check that signed transactions do not have any null signatures - if signed { - return errors.New("Unsigned input in transaction") - } - // Ignore null signatures if the transaction is unsigned - continue - } - - hash := cipher.AddSHA256(txn.InnerHash, txn.In[i]) - if err := cipher.VerifySignatureRecoverPubKey(sig, hash); err != nil { - return err - } - } - - // Check that unsigned transactions have at least one non-null signature - if !signed { - if !txn.hasNullSignature() { - return errors.New("Unsigned transaction must contain a null signature") - } - } - - return nil -} - -func (txn Transaction) verifyInputSignaturesPrelude(uxIn UxArray) error { - if len(txn.In) != len(uxIn) { - return errors.New("txn.In != uxIn") - } - if len(txn.In) != len(txn.Sigs) { - return errors.New("txn.In != txn.Sigs") - } - if txn.InnerHash != txn.HashInner() { - return errors.New("Invalid Tx Inner Hash") - } - for i := range txn.In { - if txn.In[i] != uxIn[i].Hash() { - return errors.New("Ux hash mismatch") - } - } - return nil -} 
- -// VerifyInputSignatures verifies the inputs and signatures -func (txn Transaction) VerifyInputSignatures(uxIn UxArray) error { - if err := txn.verifyInputSignaturesPrelude(uxIn); err != nil { - if DebugLevel2 { - log.Panic(err) - } - return err - } - - // Check signatures against unspent address - for i := range txn.In { - if txn.Sigs[i].Null() { - return errors.New("Unsigned input in transaction") - } - - hash := cipher.AddSHA256(txn.InnerHash, txn.In[i]) // use inner hash, not outer hash - err := cipher.VerifyAddressSignedHash(uxIn[i].Body.Address, txn.Sigs[i], hash) - if err != nil { - return errors.New("Signature not valid for output being spent") - } - } - - return nil -} - -// VerifyPartialInputSignatures verifies the inputs and signatures for signatures that are not null -func (txn Transaction) VerifyPartialInputSignatures(uxIn UxArray) error { - if err := txn.verifyInputSignaturesPrelude(uxIn); err != nil { - if DebugLevel2 { - log.Panic(err) - } - return err - } - - // Check signatures against unspent address for signatures that are not null - for i := range txn.In { - if txn.Sigs[i].Null() { - continue - } - hash := cipher.AddSHA256(txn.InnerHash, txn.In[i]) // use inner hash, not outer hash - err := cipher.VerifyAddressSignedHash(uxIn[i].Body.Address, txn.Sigs[i], hash) - if err != nil { - return errors.New("Signature not valid for output being spent") - } - } - - return nil -} - -// PushInput adds a unspent output hash to the inputs of a Transaction. 
-func (txn *Transaction) PushInput(uxOut cipher.SHA256) error { - if len(txn.In) >= math.MaxUint16 { - return errors.New("Max transaction inputs reached") - } - txn.In = append(txn.In, uxOut) - return nil -} - -// UxID compute transaction output id -func (txOut TransactionOutput) UxID(txID cipher.SHA256) cipher.SHA256 { - var x UxBody - x.Coins = txOut.Coins - x.Hours = txOut.Hours - x.Address = txOut.Address - x.SrcTransaction = txID - return x.Hash() -} - -// PushOutput Adds a TransactionOutput, sending coins & hours to an Address -func (txn *Transaction) PushOutput(dst cipher.Address, coins, hours uint64) error { - if len(txn.Out) >= math.MaxUint16 { - return errors.New("Max transaction outputs reached") - } - txn.Out = append(txn.Out, TransactionOutput{ - Address: dst, - Coins: coins, - Hours: hours, - }) - return nil -} - -// SignInput signs a specific input in the transaction. -// InnerHash should already be set to a valid value. -// Returns an error if the input is already signed -func (txn *Transaction) SignInput(key cipher.SecKey, index int) error { - if index < 0 || index >= len(txn.In) { - return errors.New("Signature index out of range") - } - - if len(txn.Sigs) == 0 { - txn.Sigs = make([]cipher.Sig, len(txn.In)) - } - if len(txn.In) != len(txn.Sigs) { - return errors.New("Number of signatures does not match number of inputs") - } - - if !txn.Sigs[index].Null() { - return errors.New("Input already signed") - } - - h := cipher.AddSHA256(txn.InnerHash, txn.In[index]) - txn.Sigs[index] = cipher.MustSignHash(h, key) - - return nil -} - -// SignInputs signs all inputs in the transaction -func (txn *Transaction) SignInputs(keys []cipher.SecKey) { - if len(keys) != len(txn.In) { - log.Panic("Invalid number of keys") - } - if len(keys) > math.MaxUint16 { - log.Panic("Too many keys") - } - if len(keys) == 0 { - log.Panic("No keys") - } - if len(txn.Sigs) > 0 && txn.hasNonNullSignature() { - log.Panic("Transaction has been signed") - } - - txn.InnerHash = 
txn.HashInner() // update hash - - sigs := make([]cipher.Sig, len(txn.In)) - for i, k := range keys { - h := cipher.AddSHA256(txn.InnerHash, txn.In[i]) // hash to sign - sigs[i] = cipher.MustSignHash(h, k) - } - txn.Sigs = sigs -} - -// Size returns the encoded byte size of the transaction -func (txn *Transaction) Size() (uint32, error) { - buf, err := txn.Serialize() - if err != nil { - return 0, err - } - return mathutil.IntToUint32(len(buf)) -} - -// IsFullyUnsigned returns true if the transaction is not signed for any input. -// Unsigned transactions have a full signature array, but the signatures are null. -// Returns true if the signatures array is empty. -func (txn *Transaction) IsFullyUnsigned() bool { - for _, s := range txn.Sigs { - if !s.Null() { - return false - } - } - - return true -} - -// IsFullySigned returns true if the transaction is fully signed. -// Returns true if the signatures array is empty. -func (txn *Transaction) IsFullySigned() bool { - if len(txn.Sigs) == 0 { - return false - } - - for _, s := range txn.Sigs { - if s.Null() { - return false - } - } - - return true -} - -// hasNonNullSignature returns true if the transaction has at least one non-null signature -func (txn *Transaction) hasNonNullSignature() bool { - for _, s := range txn.Sigs { - if !s.Null() { - return true - } - } - - return false -} - -// hasNullSignature returns true if the transaction has at least one null signature -func (txn *Transaction) hasNullSignature() bool { - for _, s := range txn.Sigs { - if s.Null() { - return true - } - } - - return false -} - -// Hash an entire Transaction struct, including the TransactionHeader -func (txn *Transaction) Hash() cipher.SHA256 { - b, err := txn.Serialize() - if err != nil { - log.Panicf("Hash: txn.Serialize failed: %v", err) - } - return cipher.SumSHA256(b) -} - -// SizeHash returns the encoded size and the hash of it (avoids duplicate encoding) -func (txn *Transaction) SizeHash() (uint32, cipher.SHA256, error) { - b, err 
:= txn.Serialize() - if err != nil { - return 0, cipher.SHA256{}, err - } - s, err := mathutil.IntToUint32(len(b)) - if err != nil { - return 0, cipher.SHA256{}, err - } - return s, cipher.SumSHA256(b), nil -} - -// UpdateHeader saves the txn body hash to TransactionHeader.Hash -func (txn *Transaction) UpdateHeader() error { - s, err := txn.Size() - if err != nil { - return err - } - txn.Length = s - txn.Type = byte(0x00) - txn.InnerHash = txn.HashInner() - return nil -} - -// HashInner hashes only the Transaction Inputs & Outputs -// This is what is signed -// Client hashes the inner hash with hash of output being spent and signs it with private key -func (txn *Transaction) HashInner() cipher.SHA256 { - h, err := txn.hashInner() - if err != nil { - log.Panicf("hashInner failed: %v", err) - } - return h -} - -func (txn *Transaction) hashInner() (cipher.SHA256, error) { - txnInputs := &transactionInputs{ - In: txn.In, - } - txnOutputs := &transactionOutputs{ - Out: txn.Out, - } - n1 := encodeSizeTransactionInputs(txnInputs) - n2 := encodeSizeTransactionOutputs(txnOutputs) - buf := make([]byte, n1+n2) - - if err := encodeTransactionInputsToBuffer(buf[:n1], txnInputs); err != nil { - return cipher.SHA256{}, fmt.Errorf("encodeTransactionInputsToBuffer failed: %v", err) - } - - if err := encodeTransactionOutputsToBuffer(buf[n1:], txnOutputs); err != nil { - return cipher.SHA256{}, fmt.Errorf("encodeTransactionOutputsToBuffer failed: %v", err) - } - - return cipher.SumSHA256(buf), nil -} - -// MustSerialize serializes the transaction to bytes, panics on error. -// Serialization can fail if the transaction has too many elements in its arrays -func (txn *Transaction) MustSerialize() []byte { - b, err := encodeTransaction(txn) - if err != nil { - log.Panicf("encodeTransaction failed: %v", err) - } - return b -} - -// Serialize serializes the transaction to bytes. 
-// Serialization can fail if the transaction has too many elements in its arrays -func (txn *Transaction) Serialize() ([]byte, error) { - return encodeTransaction(txn) -} - -// MustSerializeHex serializes the transaction to a hex string, panics on error. -// Serialization can fail if the transaction has too many elements in its arrays -func (txn *Transaction) MustSerializeHex() string { - return hex.EncodeToString(txn.MustSerialize()) -} - -// SerializeHex serializes the transaction to a hex string. -// Serialization can fail if the transaction has too many elements in its arrays -func (txn *Transaction) SerializeHex() (string, error) { - b, err := txn.Serialize() - if err != nil { - return "", err - } - return hex.EncodeToString(b), nil -} - -// MustDeserializeTransaction deserializes a transaction, panics on error -func MustDeserializeTransaction(b []byte) Transaction { - txn, err := DeserializeTransaction(b) - if err != nil { - log.Panicf("Failed to deserialize transaction: %v", err) - } - return txn -} - -// DeserializeTransaction deserializes a transaction -func DeserializeTransaction(b []byte) (Transaction, error) { - txn := Transaction{} - if err := decodeTransactionExact(b, &txn); err != nil { - return Transaction{}, fmt.Errorf("Invalid transaction: %v", err) - } - return txn, nil -} - -// MustDeserializeTransactionHex deserializes a transaction hex string, panics on error -func MustDeserializeTransactionHex(s string) Transaction { - txn, err := DeserializeTransactionHex(s) - if err != nil { - log.Panicf("Failed to deserialize transaction: %v", err) - } - return txn -} - -// DeserializeTransactionHex deserializes a transaction hex string -func DeserializeTransactionHex(s string) (Transaction, error) { - b, err := hex.DecodeString(s) - if err != nil { - return Transaction{}, err - } - return DeserializeTransaction(b) -} - -// OutputHours returns the coin hours sent as outputs. This does not include the fee. 
-func (txn *Transaction) OutputHours() (uint64, error) { - hours := uint64(0) - for i := range txn.Out { - var err error - hours, err = mathutil.AddUint64(hours, txn.Out[i].Hours) - if err != nil { - return 0, errors.New("Transaction output hours overflow") - } - } - return hours, nil -} - -// Transactions transaction slice -type Transactions []Transaction - -// Fees calculates all the fees in Transactions -func (txns Transactions) Fees(calc FeeCalculator) (uint64, error) { - total := uint64(0) - for i := range txns { - fee, err := calc(&txns[i]) - if err != nil { - return 0, err - } - - total, err = mathutil.AddUint64(total, fee) - if err != nil { - return 0, errors.New("Transactions fee totals overflow") - } - } - return total, nil -} - -// Hashes caculate transactions hashes -func (txns Transactions) Hashes() []cipher.SHA256 { - hashes := make([]cipher.SHA256, len(txns)) - for i := range txns { - hashes[i] = txns[i].Hash() - } - return hashes -} - -// Size returns the sum of contained Transactions' sizes. It is not the size if -// serialized, since that would have a length prefix. 
-func (txns Transactions) Size() (uint32, error) { - var size uint32 - for i := range txns { - s, err := txns[i].Size() - if err != nil { - return 0, err - } - - size, err = mathutil.AddUint32(size, s) - if err != nil { - return 0, err - } - } - - return size, nil -} - -// TruncateBytesTo returns the first n transactions whose total size is less than or equal to size -func (txns Transactions) TruncateBytesTo(size uint32) (Transactions, error) { - var total uint32 - for i := range txns { - pending, err := txns[i].Size() - if err != nil { - return nil, err - } - - pendingTotal, err := mathutil.AddUint32(total, pending) - if err != nil { - return txns[:i], nil - } - - if pendingTotal > size { - return txns[:i], nil - } - - total = pendingTotal - } - - return txns, nil -} - -// SortableTransactions allows sorting transactions by fee & hash -type SortableTransactions struct { - Transactions Transactions - Fees []uint64 - Hashes []cipher.SHA256 -} - -// FeeCalculator given a transaction, return its fee or an error if the fee cannot be calculated -type FeeCalculator func(*Transaction) (uint64, error) - -// SortTransactions returns transactions sorted by fee per kB, and sorted by lowest hash if tied. -// Transactions that fail in fee computation are excluded -func SortTransactions(txns Transactions, feeCalc FeeCalculator) (Transactions, error) { - sorted, err := NewSortableTransactions(txns, feeCalc) - if err != nil { - return nil, err - } - sorted.Sort() - return sorted.Transactions, nil -} - -// NewSortableTransactions returns an array of txns that can be sorted by fee. 
-// On creation, fees are calculated, and if any txns have invalid fee, there are removed from consideration -func NewSortableTransactions(txns Transactions, feeCalc FeeCalculator) (*SortableTransactions, error) { - newTxns := make(Transactions, len(txns)) - fees := make([]uint64, len(txns)) - hashes := make([]cipher.SHA256, len(txns)) - j := 0 - for i := range txns { - fee, err := feeCalc(&txns[i]) - if err != nil { - continue - } - - size, hash, err := txns[i].SizeHash() - if err != nil { - return nil, err - } - - // Calculate fee priority based on fee per kb - feeKB, err := mathutil.MultUint64(fee, 1024) - - // If the fee * 1024 would exceed math.MaxUint64, set it to math.MaxUint64 so that - // this transaction can still be processed - if err != nil { - feeKB = math.MaxUint64 - } - - newTxns[j] = txns[i] - hashes[j] = hash - fees[j] = feeKB / uint64(size) - j++ - } - - return &SortableTransactions{ - Transactions: newTxns[:j], - Fees: fees[:j], - Hashes: hashes[:j], - }, nil -} - -// Sort sorts by tx fee, and then by hash if fee equal -func (txns SortableTransactions) Sort() { - sort.Sort(txns) -} - -// Len returns length of transactions -func (txns SortableTransactions) Len() int { - return len(txns.Transactions) -} - -// Less default sorting is fees descending, hash ascending if fees equal -func (txns SortableTransactions) Less(i, j int) bool { - if txns.Fees[i] == txns.Fees[j] { - // If fees match, hashes are sorted ascending - return bytes.Compare(txns.Hashes[i][:], txns.Hashes[j][:]) < 0 - } - // Fees are sorted descending - return txns.Fees[i] > txns.Fees[j] -} - -// Swap swaps txns -func (txns SortableTransactions) Swap(i, j int) { - txns.Transactions[i], txns.Transactions[j] = txns.Transactions[j], txns.Transactions[i] - txns.Fees[i], txns.Fees[j] = txns.Fees[j], txns.Fees[i] - txns.Hashes[i], txns.Hashes[j] = txns.Hashes[j], txns.Hashes[i] -} - -// VerifyTransactionCoinsSpending checks that coins are not destroyed or created by the transaction -func 
VerifyTransactionCoinsSpending(uxIn UxArray, uxOut UxArray) error { - coinsIn := uint64(0) - for i := range uxIn { - var err error - coinsIn, err = mathutil.AddUint64(coinsIn, uxIn[i].Body.Coins) - if err != nil { - return errors.New("Transaction input coins overflow") - } - } - - coinsOut := uint64(0) - for i := range uxOut { - var err error - coinsOut, err = mathutil.AddUint64(coinsOut, uxOut[i].Body.Coins) - if err != nil { - return errors.New("Transaction output coins overflow") - } - } - - if coinsIn < coinsOut { - return errors.New("Insufficient coins") - } - if coinsIn > coinsOut { - return errors.New("Transactions may not destroy coins") - } - - return nil -} - -// VerifyTransactionHoursSpending checks that hours are not created by the transaction -func VerifyTransactionHoursSpending(headTime uint64, uxIn UxArray, uxOut UxArray) error { - hoursIn := uint64(0) - for i := range uxIn { - uxHours, err := uxIn[i].CoinHours(headTime) - if err != nil { - // If the error was specifically an overflow when adding the - // earned coin hours to the base coin hours, treat the uxHours as 0. - // Block 13277 spends an input which overflows in this way, - // so the block will not sync if an error is returned. - if err == ErrAddEarnedCoinHoursAdditionOverflow { - uxHours = 0 - } else { - return err - } - } - - hoursIn, err = mathutil.AddUint64(hoursIn, uxHours) - if err != nil { - return errors.New("Transaction input hours overflow") - } - } - - hoursOut := uint64(0) - for i := range uxOut { - // NOTE: addition of hours is not checked for overflow here because - // this would invalidate existing blocks which had overflowed hours. - // Hours overflow checks are handled as a "soft" constraint in the network - // until those blocks are repaired. 
- hoursOut += uxOut[i].Body.Hours - } - - if hoursIn < hoursOut { - return errors.New("Insufficient coin hours") - } - return nil -} diff --git a/vendor/github.com/SkycoinProject/skycoin/src/coin/ux_body_skyencoder.go b/vendor/github.com/SkycoinProject/skycoin/src/coin/ux_body_skyencoder.go deleted file mode 100644 index f5e4026..0000000 --- a/vendor/github.com/SkycoinProject/skycoin/src/coin/ux_body_skyencoder.go +++ /dev/null @@ -1,138 +0,0 @@ -// Code generated by github.com/SkycoinProject/skyencoder. DO NOT EDIT. - -package coin - -import "github.com/SkycoinProject/skycoin/src/cipher/encoder" - -// encodeSizeUxBody computes the size of an encoded object of type UxBody -func encodeSizeUxBody(obj *UxBody) uint64 { - i0 := uint64(0) - - // obj.SrcTransaction - i0 += 32 - - // obj.Address.Version - i0++ - - // obj.Address.Key - i0 += 20 - - // obj.Coins - i0 += 8 - - // obj.Hours - i0 += 8 - - return i0 -} - -// encodeUxBody encodes an object of type UxBody to a buffer allocated to the exact size -// required to encode the object. -func encodeUxBody(obj *UxBody) ([]byte, error) { - n := encodeSizeUxBody(obj) - buf := make([]byte, n) - - if err := encodeUxBodyToBuffer(buf, obj); err != nil { - return nil, err - } - - return buf, nil -} - -// encodeUxBodyToBuffer encodes an object of type UxBody to a []byte buffer. -// The buffer must be large enough to encode the object, otherwise an error is returned. -func encodeUxBodyToBuffer(buf []byte, obj *UxBody) error { - if uint64(len(buf)) < encodeSizeUxBody(obj) { - return encoder.ErrBufferUnderflow - } - - e := &encoder.Encoder{ - Buffer: buf[:], - } - - // obj.SrcTransaction - e.CopyBytes(obj.SrcTransaction[:]) - - // obj.Address.Version - e.Uint8(obj.Address.Version) - - // obj.Address.Key - e.CopyBytes(obj.Address.Key[:]) - - // obj.Coins - e.Uint64(obj.Coins) - - // obj.Hours - e.Uint64(obj.Hours) - - return nil -} - -// decodeUxBody decodes an object of type UxBody from a buffer. 
-// Returns the number of bytes used from the buffer to decode the object. -// If the buffer not long enough to decode the object, returns encoder.ErrBufferUnderflow. -func decodeUxBody(buf []byte, obj *UxBody) (uint64, error) { - d := &encoder.Decoder{ - Buffer: buf[:], - } - - { - // obj.SrcTransaction - if len(d.Buffer) < len(obj.SrcTransaction) { - return 0, encoder.ErrBufferUnderflow - } - copy(obj.SrcTransaction[:], d.Buffer[:len(obj.SrcTransaction)]) - d.Buffer = d.Buffer[len(obj.SrcTransaction):] - } - - { - // obj.Address.Version - i, err := d.Uint8() - if err != nil { - return 0, err - } - obj.Address.Version = i - } - - { - // obj.Address.Key - if len(d.Buffer) < len(obj.Address.Key) { - return 0, encoder.ErrBufferUnderflow - } - copy(obj.Address.Key[:], d.Buffer[:len(obj.Address.Key)]) - d.Buffer = d.Buffer[len(obj.Address.Key):] - } - - { - // obj.Coins - i, err := d.Uint64() - if err != nil { - return 0, err - } - obj.Coins = i - } - - { - // obj.Hours - i, err := d.Uint64() - if err != nil { - return 0, err - } - obj.Hours = i - } - - return uint64(len(buf) - len(d.Buffer)), nil -} - -// decodeUxBodyExact decodes an object of type UxBody from a buffer. -// If the buffer not long enough to decode the object, returns encoder.ErrBufferUnderflow. -// If the buffer is longer than required to decode the object, returns encoder.ErrRemainingBytes. -func decodeUxBodyExact(buf []byte, obj *UxBody) error { - if n, err := decodeUxBody(buf, obj); err != nil { - return err - } else if n != uint64(len(buf)) { - return encoder.ErrRemainingBytes - } - - return nil -} diff --git a/vendor/github.com/SkycoinProject/skycoin/src/coin/ux_head_skyencoder.go b/vendor/github.com/SkycoinProject/skycoin/src/coin/ux_head_skyencoder.go deleted file mode 100644 index 5563be7..0000000 --- a/vendor/github.com/SkycoinProject/skycoin/src/coin/ux_head_skyencoder.go +++ /dev/null @@ -1,93 +0,0 @@ -// Code generated by github.com/SkycoinProject/skyencoder. DO NOT EDIT. 
- -package coin - -import "github.com/SkycoinProject/skycoin/src/cipher/encoder" - -// encodeSizeUxHead computes the size of an encoded object of type UxHead -func encodeSizeUxHead(obj *UxHead) uint64 { - i0 := uint64(0) - - // obj.Time - i0 += 8 - - // obj.BkSeq - i0 += 8 - - return i0 -} - -// encodeUxHead encodes an object of type UxHead to a buffer allocated to the exact size -// required to encode the object. -func encodeUxHead(obj *UxHead) ([]byte, error) { - n := encodeSizeUxHead(obj) - buf := make([]byte, n) - - if err := encodeUxHeadToBuffer(buf, obj); err != nil { - return nil, err - } - - return buf, nil -} - -// encodeUxHeadToBuffer encodes an object of type UxHead to a []byte buffer. -// The buffer must be large enough to encode the object, otherwise an error is returned. -func encodeUxHeadToBuffer(buf []byte, obj *UxHead) error { - if uint64(len(buf)) < encodeSizeUxHead(obj) { - return encoder.ErrBufferUnderflow - } - - e := &encoder.Encoder{ - Buffer: buf[:], - } - - // obj.Time - e.Uint64(obj.Time) - - // obj.BkSeq - e.Uint64(obj.BkSeq) - - return nil -} - -// decodeUxHead decodes an object of type UxHead from a buffer. -// Returns the number of bytes used from the buffer to decode the object. -// If the buffer not long enough to decode the object, returns encoder.ErrBufferUnderflow. -func decodeUxHead(buf []byte, obj *UxHead) (uint64, error) { - d := &encoder.Decoder{ - Buffer: buf[:], - } - - { - // obj.Time - i, err := d.Uint64() - if err != nil { - return 0, err - } - obj.Time = i - } - - { - // obj.BkSeq - i, err := d.Uint64() - if err != nil { - return 0, err - } - obj.BkSeq = i - } - - return uint64(len(buf) - len(d.Buffer)), nil -} - -// decodeUxHeadExact decodes an object of type UxHead from a buffer. -// If the buffer not long enough to decode the object, returns encoder.ErrBufferUnderflow. -// If the buffer is longer than required to decode the object, returns encoder.ErrRemainingBytes. 
-func decodeUxHeadExact(buf []byte, obj *UxHead) error { - if n, err := decodeUxHead(buf, obj); err != nil { - return err - } else if n != uint64(len(buf)) { - return encoder.ErrRemainingBytes - } - - return nil -} diff --git a/vendor/github.com/SkycoinProject/skycoin/src/daemon/announce_blocks_message_skyencoder.go b/vendor/github.com/SkycoinProject/skycoin/src/daemon/announce_blocks_message_skyencoder.go deleted file mode 100644 index ca165ca..0000000 --- a/vendor/github.com/SkycoinProject/skycoin/src/daemon/announce_blocks_message_skyencoder.go +++ /dev/null @@ -1,78 +0,0 @@ -// Code generated by github.com/SkycoinProject/skyencoder. DO NOT EDIT. - -package daemon - -import "github.com/SkycoinProject/skycoin/src/cipher/encoder" - -// encodeSizeAnnounceBlocksMessage computes the size of an encoded object of type AnnounceBlocksMessage -func encodeSizeAnnounceBlocksMessage(obj *AnnounceBlocksMessage) uint64 { - i0 := uint64(0) - - // obj.MaxBkSeq - i0 += 8 - - return i0 -} - -// encodeAnnounceBlocksMessage encodes an object of type AnnounceBlocksMessage to a buffer allocated to the exact size -// required to encode the object. -func encodeAnnounceBlocksMessage(obj *AnnounceBlocksMessage) ([]byte, error) { - n := encodeSizeAnnounceBlocksMessage(obj) - buf := make([]byte, n) - - if err := encodeAnnounceBlocksMessageToBuffer(buf, obj); err != nil { - return nil, err - } - - return buf, nil -} - -// encodeAnnounceBlocksMessageToBuffer encodes an object of type AnnounceBlocksMessage to a []byte buffer. -// The buffer must be large enough to encode the object, otherwise an error is returned. 
-func encodeAnnounceBlocksMessageToBuffer(buf []byte, obj *AnnounceBlocksMessage) error { - if uint64(len(buf)) < encodeSizeAnnounceBlocksMessage(obj) { - return encoder.ErrBufferUnderflow - } - - e := &encoder.Encoder{ - Buffer: buf[:], - } - - // obj.MaxBkSeq - e.Uint64(obj.MaxBkSeq) - - return nil -} - -// decodeAnnounceBlocksMessage decodes an object of type AnnounceBlocksMessage from a buffer. -// Returns the number of bytes used from the buffer to decode the object. -// If the buffer not long enough to decode the object, returns encoder.ErrBufferUnderflow. -func decodeAnnounceBlocksMessage(buf []byte, obj *AnnounceBlocksMessage) (uint64, error) { - d := &encoder.Decoder{ - Buffer: buf[:], - } - - { - // obj.MaxBkSeq - i, err := d.Uint64() - if err != nil { - return 0, err - } - obj.MaxBkSeq = i - } - - return uint64(len(buf) - len(d.Buffer)), nil -} - -// decodeAnnounceBlocksMessageExact decodes an object of type AnnounceBlocksMessage from a buffer. -// If the buffer not long enough to decode the object, returns encoder.ErrBufferUnderflow. -// If the buffer is longer than required to decode the object, returns encoder.ErrRemainingBytes. -func decodeAnnounceBlocksMessageExact(buf []byte, obj *AnnounceBlocksMessage) error { - if n, err := decodeAnnounceBlocksMessage(buf, obj); err != nil { - return err - } else if n != uint64(len(buf)) { - return encoder.ErrRemainingBytes - } - - return nil -} diff --git a/vendor/github.com/SkycoinProject/skycoin/src/daemon/announce_txns_message_skyencoder.go b/vendor/github.com/SkycoinProject/skycoin/src/daemon/announce_txns_message_skyencoder.go deleted file mode 100644 index e8406f4..0000000 --- a/vendor/github.com/SkycoinProject/skycoin/src/daemon/announce_txns_message_skyencoder.go +++ /dev/null @@ -1,135 +0,0 @@ -// Code generated by github.com/SkycoinProject/skyencoder. DO NOT EDIT. 
- -package daemon - -import ( - "errors" - "math" - - "github.com/SkycoinProject/skycoin/src/cipher" - "github.com/SkycoinProject/skycoin/src/cipher/encoder" -) - -// encodeSizeAnnounceTxnsMessage computes the size of an encoded object of type AnnounceTxnsMessage -func encodeSizeAnnounceTxnsMessage(obj *AnnounceTxnsMessage) uint64 { - i0 := uint64(0) - - // obj.Transactions - i0 += 4 - { - i1 := uint64(0) - - // x1 - i1 += 32 - - i0 += uint64(len(obj.Transactions)) * i1 - } - - return i0 -} - -// encodeAnnounceTxnsMessage encodes an object of type AnnounceTxnsMessage to a buffer allocated to the exact size -// required to encode the object. -func encodeAnnounceTxnsMessage(obj *AnnounceTxnsMessage) ([]byte, error) { - n := encodeSizeAnnounceTxnsMessage(obj) - buf := make([]byte, n) - - if err := encodeAnnounceTxnsMessageToBuffer(buf, obj); err != nil { - return nil, err - } - - return buf, nil -} - -// encodeAnnounceTxnsMessageToBuffer encodes an object of type AnnounceTxnsMessage to a []byte buffer. -// The buffer must be large enough to encode the object, otherwise an error is returned. -func encodeAnnounceTxnsMessageToBuffer(buf []byte, obj *AnnounceTxnsMessage) error { - if uint64(len(buf)) < encodeSizeAnnounceTxnsMessage(obj) { - return encoder.ErrBufferUnderflow - } - - e := &encoder.Encoder{ - Buffer: buf[:], - } - - // obj.Transactions maxlen check - if len(obj.Transactions) > 256 { - return encoder.ErrMaxLenExceeded - } - - // obj.Transactions length check - if uint64(len(obj.Transactions)) > math.MaxUint32 { - return errors.New("obj.Transactions length exceeds math.MaxUint32") - } - - // obj.Transactions length - e.Uint32(uint32(len(obj.Transactions))) - - // obj.Transactions - for _, x := range obj.Transactions { - - // x - e.CopyBytes(x[:]) - - } - - return nil -} - -// decodeAnnounceTxnsMessage decodes an object of type AnnounceTxnsMessage from a buffer. -// Returns the number of bytes used from the buffer to decode the object. 
-// If the buffer not long enough to decode the object, returns encoder.ErrBufferUnderflow. -func decodeAnnounceTxnsMessage(buf []byte, obj *AnnounceTxnsMessage) (uint64, error) { - d := &encoder.Decoder{ - Buffer: buf[:], - } - - { - // obj.Transactions - - ul, err := d.Uint32() - if err != nil { - return 0, err - } - - length := int(ul) - if length < 0 || length > len(d.Buffer) { - return 0, encoder.ErrBufferUnderflow - } - - if length > 256 { - return 0, encoder.ErrMaxLenExceeded - } - - if length != 0 { - obj.Transactions = make([]cipher.SHA256, length) - - for z1 := range obj.Transactions { - { - // obj.Transactions[z1] - if len(d.Buffer) < len(obj.Transactions[z1]) { - return 0, encoder.ErrBufferUnderflow - } - copy(obj.Transactions[z1][:], d.Buffer[:len(obj.Transactions[z1])]) - d.Buffer = d.Buffer[len(obj.Transactions[z1]):] - } - - } - } - } - - return uint64(len(buf) - len(d.Buffer)), nil -} - -// decodeAnnounceTxnsMessageExact decodes an object of type AnnounceTxnsMessage from a buffer. -// If the buffer not long enough to decode the object, returns encoder.ErrBufferUnderflow. -// If the buffer is longer than required to decode the object, returns encoder.ErrRemainingBytes. 
-func decodeAnnounceTxnsMessageExact(buf []byte, obj *AnnounceTxnsMessage) error { - if n, err := decodeAnnounceTxnsMessage(buf, obj); err != nil { - return err - } else if n != uint64(len(buf)) { - return encoder.ErrRemainingBytes - } - - return nil -} diff --git a/vendor/github.com/SkycoinProject/skycoin/src/daemon/announced_txns.go b/vendor/github.com/SkycoinProject/skycoin/src/daemon/announced_txns.go deleted file mode 100644 index 3ca38bd..0000000 --- a/vendor/github.com/SkycoinProject/skycoin/src/daemon/announced_txns.go +++ /dev/null @@ -1,44 +0,0 @@ -package daemon - -import ( - "sync" - "time" - - "github.com/SkycoinProject/skycoin/src/cipher" -) - -type announcedTxnsCache struct { - sync.Mutex - cache map[cipher.SHA256]int64 -} - -func newAnnouncedTxnsCache() *announcedTxnsCache { - return &announcedTxnsCache{ - cache: make(map[cipher.SHA256]int64), - } -} - -func (c *announcedTxnsCache) add(txns []cipher.SHA256) { - c.Lock() - defer c.Unlock() - - t := time.Now().UTC().UnixNano() - for _, txn := range txns { - c.cache[txn] = t - } -} - -func (c *announcedTxnsCache) flush() map[cipher.SHA256]int64 { - c.Lock() - defer c.Unlock() - - if len(c.cache) == 0 { - return nil - } - - cache := c.cache - - c.cache = make(map[cipher.SHA256]int64) - - return cache -} diff --git a/vendor/github.com/SkycoinProject/skycoin/src/daemon/connections.go b/vendor/github.com/SkycoinProject/skycoin/src/daemon/connections.go deleted file mode 100644 index dd94678..0000000 --- a/vendor/github.com/SkycoinProject/skycoin/src/daemon/connections.go +++ /dev/null @@ -1,554 +0,0 @@ -package daemon - -import ( - "errors" - "fmt" - "sync" - "time" - - "github.com/sirupsen/logrus" - - "github.com/SkycoinProject/skycoin/src/cipher" - "github.com/SkycoinProject/skycoin/src/params" - "github.com/SkycoinProject/skycoin/src/util/iputil" - "github.com/SkycoinProject/skycoin/src/util/useragent" -) - -// ConnectionState connection state in the state machine -// Connections have three states: 
"pending", "connected" and "introduced" -// A connection in the "pending" state has been selected to establish a TCP connection, -// but the connection has not been established yet. -// Only outgoing connections will ever be in the "pending" state; -// incoming connections begin at the "connected" state. -// A connection in the "connected" state has established a TCP connection, -// but has not completed the introduction handshake. -// A connection in the "introduced" state has completed the introduction handshake. -type ConnectionState string - -const ( - // ConnectionStatePending prior to establishing a connection - ConnectionStatePending ConnectionState = "pending" - // ConnectionStateConnected connected, but not introduced - ConnectionStateConnected ConnectionState = "connected" - // ConnectionStateIntroduced connection has introduced itself - ConnectionStateIntroduced ConnectionState = "introduced" -) - -var ( - // ErrConnectionNotExist connection does not exist when performing an operation that requires it to exist - ErrConnectionNotExist = errors.New("Connection does not exist") - // ErrConnectionExists connection exists in Connections - ErrConnectionExists = errors.New("Connection exists") - // ErrConnectionIPMirrorExists connection exists for a given base IP and mirror - ErrConnectionIPMirrorExists = errors.New("Connection exists with this base IP and mirror") - // ErrConnectionStateNotConnected connect state is not "connected" - ErrConnectionStateNotConnected = errors.New("Connection state is not \"connected\"") - // ErrConnectionGnetIDMismatch gnet ID in argument does not match gnet ID on record - ErrConnectionGnetIDMismatch = errors.New("Connection gnet ID does not match") - // ErrConnectionAlreadyIntroduced attempted to make invalid state transition from introduced state - ErrConnectionAlreadyIntroduced = errors.New("Connection is already in introduced state") - // ErrConnectionAlreadyConnected attempted to make invalid state transition from connected 
state - ErrConnectionAlreadyConnected = errors.New("Connection is already in connected state") - // ErrInvalidGnetID invalid gnet ID value used as argument - ErrInvalidGnetID = errors.New("Invalid gnet ID") -) - -// ConnectionDetails connection data managed by daemon -type ConnectionDetails struct { - State ConnectionState - Outgoing bool - ConnectedAt time.Time - Mirror uint32 - ListenPort uint16 - ProtocolVersion int32 - Height uint64 - UserAgent useragent.Data - UnconfirmedVerifyTxn params.VerifyTxn - GenesisHash cipher.SHA256 -} - -// HasIntroduced returns true if the connection has introduced -func (c ConnectionDetails) HasIntroduced() bool { - switch c.State { - case ConnectionStateIntroduced: - return true - default: - return false - } -} - -type connection struct { - Addr string - ConnectionDetails - gnetID uint64 -} - -// ListenAddr returns the addr that connection listens on, if available -func (c *connection) ListenAddr() string { - if c.ListenPort == 0 { - return "" - } - - ip, _, err := iputil.SplitAddr(c.Addr) - if err != nil { - logger.Critical().WithError(err).WithField("addr", c.Addr).Error("connection.ListenAddr addr could not be split") - return "" - } - - return fmt.Sprintf("%s:%d", ip, c.ListenPort) -} - -// Connections manages a collection of Connection -type Connections struct { - conns map[string]*connection - mirrors map[uint32]map[string]uint16 - ipCounts map[string]int - gnetIDs map[uint64]string - listenAddrs map[string][]string - sync.Mutex -} - -// NewConnections creates Connections -func NewConnections() *Connections { - return &Connections{ - conns: make(map[string]*connection, 32), - mirrors: make(map[uint32]map[string]uint16, 32), - ipCounts: make(map[string]int, 32), - gnetIDs: make(map[uint64]string, 32), - listenAddrs: make(map[string][]string, 32), - } -} - -// pending adds a new pending outgoing connection -func (c *Connections) pending(addr string) (*connection, error) { - c.Lock() - defer c.Unlock() - - ip, port, err := 
iputil.SplitAddr(addr) - if err != nil { - logger.Critical().WithField("addr", addr).WithError(err).Error("Connections.pending called with invalid addr") - return nil, err - } - - if _, ok := c.conns[addr]; ok { - return nil, ErrConnectionExists - } - - c.ipCounts[ip]++ - - conn := &connection{ - Addr: addr, - ConnectionDetails: ConnectionDetails{ - State: ConnectionStatePending, - Outgoing: true, - ListenPort: port, - }, - } - - c.conns[addr] = conn - c.listenAddrs[addr] = append(c.listenAddrs[addr], addr) - - logger.WithField("addr", addr).Debug("Connections.pending") - - return c.conns[addr], nil -} - -// connected the connection has connected -func (c *Connections) connected(addr string, gnetID uint64) (*connection, error) { - c.Lock() - defer c.Unlock() - - fields := logrus.Fields{ - "addr": addr, - "gnetID": gnetID, - } - - if gnetID == 0 { - logger.Critical().WithFields(fields).WithError(ErrInvalidGnetID).Error("Connections.connected called with invalid gnetID") - return nil, ErrInvalidGnetID - } - - ip, _, err := iputil.SplitAddr(addr) - if err != nil { - logger.Critical().WithFields(fields).WithError(err).Error("Connections.connected called with invalid addr") - return nil, err - } - - conn := c.conns[addr] - - if conn == nil { - c.ipCounts[ip]++ - - conn = &connection{ - Addr: addr, - } - - c.conns[addr] = conn - } else { - fields := logrus.Fields{ - "addr": addr, - "gnetID": gnetID, - "state": conn.State, - "outgoing": conn.Outgoing, - "connGnetID": conn.gnetID, - } - - switch conn.State { - case ConnectionStatePending: - case ConnectionStateConnected: - logger.Critical().WithFields(fields).Error("Connections.connected called on already connected connection") - return nil, ErrConnectionAlreadyConnected - case ConnectionStateIntroduced: - logger.Critical().WithFields(fields).Error("Connections.connected called on already introduced connection") - return nil, ErrConnectionAlreadyIntroduced - default: - logger.WithFields(fields).Panic("Connection state 
invalid") - } - } - - c.gnetIDs[gnetID] = addr - conn.gnetID = gnetID - conn.ConnectedAt = time.Now().UTC() - conn.State = ConnectionStateConnected - - fields["outgoing"] = conn.Outgoing - - logger.WithFields(fields).Debug("Connections.connected") - - return conn, nil -} - -// introduced the connection has introduced itself -func (c *Connections) introduced(addr string, gnetID uint64, m *IntroductionMessage) (*connection, error) { - c.Lock() - defer c.Unlock() - - fields := logrus.Fields{ - "addr": addr, - "gnetID": gnetID, - } - - if gnetID == 0 { - logger.Critical().WithFields(fields).WithError(ErrInvalidGnetID).Error("Connections.introduced called with invalid gnetID") - return nil, ErrInvalidGnetID - } - - ip, _, err := iputil.SplitAddr(addr) - if err != nil { - logger.Critical().WithFields(fields).WithError(err).Error("Connections.introduced called with invalid addr") - return nil, err - } - - conn := c.conns[addr] - if conn == nil { - return nil, ErrConnectionNotExist - } - - fields["outgoing"] = conn.Outgoing - - errorFields := logrus.Fields{ - "state": conn.State, - "connGnetID": conn.gnetID, - } - - switch conn.State { - case ConnectionStatePending: - logger.Critical().WithFields(fields).WithFields(errorFields).Error("Connections.introduced called on pending connection") - return nil, ErrConnectionStateNotConnected - case ConnectionStateConnected: - if gnetID != conn.gnetID { - logger.Critical().WithFields(fields).WithFields(errorFields).Error("Connections.introduced called with different gnet ID") - return nil, ErrConnectionGnetIDMismatch - } - case ConnectionStateIntroduced: - logger.Critical().WithFields(fields).WithFields(errorFields).Error("Connections.introduced called on already introduced connection") - return nil, ErrConnectionAlreadyIntroduced - default: - logger.WithFields(fields).WithFields(errorFields).Panic("invalid connection state") - } - - if err := c.canUpdateMirror(ip, m.Mirror); err != nil { - 
logger.WithFields(fields).WithFields(errorFields).WithField("mirror", m.Mirror).WithError(err).Debug("canUpdateMirror failed") - return nil, err - } - - // For outgoing connections, which are created by pending, - // the listen port is set from the addr's port number. - // Since we are connecting to it, it is presumed to be that peer's open listening port. - // A misbehaving peer could report a different ListenPort in their IntroductionMessage, - // but it shouldn't affect our records. - if conn.Outgoing && conn.ListenPort != m.ListenPort { - logger.Critical().WithFields(fields).WithFields(logrus.Fields{ - "connListenPort": conn.ListenPort, - "introListenPort": m.ListenPort, - }).Warning("Outgoing connection's ListenPort does not match reported IntroductionMessage ListenPort") - } - - listenPort := conn.ListenPort - if !conn.Outgoing { - listenPort = m.ListenPort - } - - if err := c.updateMirror(ip, m.Mirror, listenPort); err != nil { - logger.WithFields(fields).WithField("mirror", m.Mirror).WithError(err).Panic("updateMirror failed, but shouldn't") - } - - conn.State = ConnectionStateIntroduced - conn.Mirror = m.Mirror - conn.ProtocolVersion = m.ProtocolVersion - conn.ListenPort = listenPort - conn.UserAgent = m.UserAgent - conn.UnconfirmedVerifyTxn = m.UnconfirmedVerifyTxn - conn.GenesisHash = m.GenesisHash - - if !conn.Outgoing { - listenAddr := conn.ListenAddr() - c.listenAddrs[listenAddr] = append(c.listenAddrs[listenAddr], addr) - } - - logger.WithFields(fields).Debug("Connections.introduced") - - return conn, nil -} - -// get returns a connection by address -func (c *Connections) get(addr string) *connection { - c.Lock() - defer c.Unlock() - - return c.conns[addr] -} - -func (c *Connections) getByListenAddr(listenAddr string) []*connection { - c.Lock() - defer c.Unlock() - - addrs := c.listenAddrs[listenAddr] - if len(addrs) == 0 { - return nil - } - - conns := make([]*connection, len(addrs)) - for i, a := range addrs { - conns[i] = c.conns[a] - } - - return 
conns -} - -func (c *Connections) getByGnetID(gnetID uint64) *connection { - c.Lock() - defer c.Unlock() - - if gnetID == 0 { - return nil - } - - addr := c.gnetIDs[gnetID] - if addr == "" { - return nil - } - - return c.conns[addr] -} - -// modify modifies a connection. -// It is unsafe to modify the Mirror value with this method -func (c *Connections) modify(addr string, gnetID uint64, f func(c *ConnectionDetails)) error { - conn := c.conns[addr] - if conn == nil { - return ErrConnectionNotExist - } - - if conn.gnetID != gnetID { - return ErrConnectionGnetIDMismatch - } - - // copy and modify - cd := conn.ConnectionDetails - - f(&cd) - - // compare to original - if cd.Mirror != conn.ConnectionDetails.Mirror { - logger.WithFields(logrus.Fields{ - "addr": addr, - "gnetID": gnetID, - }).Panic("Connections.modify connection Mirror was changed") - } - - if cd.ListenPort != conn.ConnectionDetails.ListenPort { - logger.WithFields(logrus.Fields{ - "addr": addr, - "gnetID": gnetID, - }).Panic("Connections.modify connection ListenPort was changed") - } - - conn.ConnectionDetails = cd - - return nil -} - -// SetHeight sets the height for a connection -func (c *Connections) SetHeight(addr string, gnetID uint64, height uint64) error { - c.Lock() - defer c.Unlock() - - return c.modify(addr, gnetID, func(c *ConnectionDetails) { - c.Height = height - }) -} - -func (c *Connections) updateMirror(ip string, mirror uint32, port uint16) error { - x := c.mirrors[mirror] - if x == nil { - x = make(map[string]uint16, 2) - } - - if _, ok := x[ip]; ok { - return ErrConnectionIPMirrorExists - } - - x[ip] = port - c.mirrors[mirror] = x - - return nil -} - -// canUpdateMirror returns false if a connection already exists with the same base IP and mirror value. -// This prevents duplicate connections to/from a single client. 
-func (c *Connections) canUpdateMirror(ip string, mirror uint32) error { - x := c.mirrors[mirror] - if x == nil { - return nil - } - - if _, ok := x[ip]; ok { - return ErrConnectionIPMirrorExists - } - - return nil -} - -// IPCount returns the number of connections for a given base IP (without port) -func (c *Connections) IPCount(ip string) int { - c.Lock() - defer c.Unlock() - return c.ipCounts[ip] -} - -// Len returns number of connections -func (c *Connections) Len() int { - c.Lock() - defer c.Unlock() - return len(c.conns) -} - -// OutgoingLen returns number of outgoing connections -func (c *Connections) OutgoingLen() int { - c.Lock() - defer c.Unlock() - n := 0 - for _, conn := range c.conns { - if conn.Outgoing { - n++ - } - } - return n -} - -// PendingLen returns the number of status pending connections -func (c *Connections) PendingLen() int { - c.Lock() - defer c.Unlock() - n := 0 - for _, conn := range c.conns { - if conn.State == ConnectionStatePending { - n++ - } - } - return n -} - -// remove removes connection. Returns an error if the addr is invalid. -// If a connection with this address does not exist, nothing happens. 
-func (c *Connections) remove(addr string, gnetID uint64) error { - c.Lock() - defer c.Unlock() - - ip, _, err := iputil.SplitAddr(addr) - if err != nil { - logger.Critical().WithError(err).Error("Connections.remove called with invalid addr") - return err - } - - conn := c.conns[addr] - if conn == nil { - return ErrConnectionNotExist - } - - fields := logrus.Fields{ - "addr": addr, - "connGnetID": conn.gnetID, - "gnetID": gnetID, - "listenPort": conn.ListenPort, - } - - if conn.gnetID != gnetID { - logger.Critical().WithFields(fields).Warning("Connections.remove gnetID does not match") - return ErrConnectionGnetIDMismatch - } - - x, ok := c.mirrors[conn.Mirror] - if ok { - if x[ip] != conn.ListenPort { - logger.Critical().WithFields(fields).Warning("Indexed IP+Mirror value found but the ListenPort doesn't match") - } - - delete(x, ip) - } - - if len(x) == 0 { - delete(c.mirrors, conn.Mirror) - } - - if c.ipCounts[ip] > 0 { - c.ipCounts[ip]-- - } else { - logger.Critical().WithFields(fields).Warning("ipCount was already 0 when removing existing address") - } - - listenAddr := conn.ListenAddr() - if listenAddr != "" { - addrs := c.listenAddrs[listenAddr] - for i, a := range addrs { - if a == conn.Addr { - addrs = append(addrs[:i], addrs[i+1:]...) 
- break - } - } - if len(addrs) == 0 { - delete(c.listenAddrs, listenAddr) - } else { - c.listenAddrs[listenAddr] = addrs - } - } - - delete(c.gnetIDs, conn.gnetID) - delete(c.conns, addr) - - return nil -} - -// all returns a copy of all connections -func (c *Connections) all() []connection { - c.Lock() - defer c.Unlock() - - conns := make([]connection, 0, len(c.conns)) - for _, c := range c.conns { - conns = append(conns, *c) - } - - return conns -} diff --git a/vendor/github.com/SkycoinProject/skycoin/src/daemon/daemon.go b/vendor/github.com/SkycoinProject/skycoin/src/daemon/daemon.go deleted file mode 100644 index a2cfbde..0000000 --- a/vendor/github.com/SkycoinProject/skycoin/src/daemon/daemon.go +++ /dev/null @@ -1,1798 +0,0 @@ -/* -Package daemon controls the networking layer of the skycoin daemon -*/ -package daemon - -import ( - "errors" - "fmt" - "math/rand" - "reflect" - "sort" - "strings" - "sync" - "time" - - "github.com/sirupsen/logrus" - - "github.com/SkycoinProject/skycoin/src/cipher" - "github.com/SkycoinProject/skycoin/src/coin" - "github.com/SkycoinProject/skycoin/src/daemon/gnet" - "github.com/SkycoinProject/skycoin/src/daemon/pex" - "github.com/SkycoinProject/skycoin/src/params" - "github.com/SkycoinProject/skycoin/src/util/elapse" - "github.com/SkycoinProject/skycoin/src/util/fee" - "github.com/SkycoinProject/skycoin/src/util/iputil" - "github.com/SkycoinProject/skycoin/src/util/logging" - "github.com/SkycoinProject/skycoin/src/util/useragent" - "github.com/SkycoinProject/skycoin/src/visor" - "github.com/SkycoinProject/skycoin/src/visor/dbutil" -) - -var ( - // ErrNetworkingDisabled is returned if networking is disabled - ErrNetworkingDisabled = errors.New("Networking is disabled") - // ErrNoPeerAcceptsTxn is returned if no peer will propagate a transaction broadcasted with BroadcastUserTransaction - ErrNoPeerAcceptsTxn = errors.New("No peer will propagate this transaction") - - logger = logging.MustGetLogger("daemon") -) - -// 
IsBroadcastFailure returns true if an error indicates that a broadcast operation failed -func IsBroadcastFailure(err error) bool { - switch err { - case ErrNetworkingDisabled, - gnet.ErrPoolEmpty, - gnet.ErrNoMatchingConnections, - gnet.ErrNoReachableConnections, - gnet.ErrNoAddresses: - return true - default: - return false - } -} - -const ( - daemonRunDurationThreshold = time.Millisecond * 200 -) - -// Config subsystem configurations -type Config struct { - Daemon DaemonConfig - Messages MessagesConfig - Pool PoolConfig - Pex pex.Config -} - -// NewConfig returns a Config with defaults set -func NewConfig() Config { - return Config{ - Daemon: NewDaemonConfig(), - Pool: NewPoolConfig(), - Pex: pex.NewConfig(), - Messages: NewMessagesConfig(), - } -} - -// preprocess preprocess for config -func (cfg *Config) preprocess() (Config, error) { - config := *cfg - if config.Daemon.LocalhostOnly { - if config.Daemon.Address == "" { - local, err := iputil.LocalhostIP() - if err != nil { - logger.WithError(err).Panic("Failed to obtain localhost IP") - } - config.Daemon.Address = local - } else { - if !iputil.IsLocalhost(config.Daemon.Address) { - logger.WithField("addr", config.Daemon.Address).Panic("Invalid address for localhost-only") - } - } - config.Pex.AllowLocalhost = true - } - config.Pool.port = config.Daemon.Port - config.Pool.address = config.Daemon.Address - - if config.Daemon.DisableNetworking { - logger.Info("Networking is disabled") - config.Pex.Disabled = true - config.Daemon.DisableIncomingConnections = true - config.Daemon.DisableOutgoingConnections = true - } else { - if config.Daemon.DisableIncomingConnections { - logger.Info("Incoming connections are disabled.") - } - if config.Daemon.DisableOutgoingConnections { - logger.Info("Outgoing connections are disabled.") - } - } - - if config.Daemon.MaxConnections < config.Daemon.MaxOutgoingConnections { - return Config{}, errors.New("MaxOutgoingConnections cannot be more than MaxConnections") - } - - if 
config.Daemon.MaxPendingConnections > config.Daemon.MaxOutgoingConnections { - config.Daemon.MaxPendingConnections = config.Daemon.MaxOutgoingConnections - } - - config.Pool.MaxConnections = config.Daemon.MaxConnections - config.Pool.MaxOutgoingConnections = config.Daemon.MaxOutgoingConnections - config.Pool.MaxIncomingMessageLength = int(config.Daemon.MaxIncomingMessageLength) - config.Pool.MaxOutgoingMessageLength = int(config.Daemon.MaxOutgoingMessageLength) - - // MaxOutgoingMessageLength must be able to fit a GiveBlocksMessage with at least one maximum-sized block, - // otherwise it cannot send certain blocks. - // Blocks are the largest object sent over the network, so MaxBlockTransactionsSize is used as an upper limit - maxSizeGBM := maxSizeGiveBlocksMessage(config.Daemon.MaxBlockTransactionsSize) - if config.Daemon.MaxOutgoingMessageLength < maxSizeGBM { - return Config{}, fmt.Errorf("MaxOutgoingMessageLength must be >= %d", maxSizeGBM) - } - - userAgent, err := config.Daemon.UserAgent.Build() - if err != nil { - return Config{}, err - } - if userAgent == "" { - return Config{}, errors.New("user agent is required") - } - config.Daemon.userAgent = userAgent - - return config, nil -} - -// maxSizeGiveBlocksMessage return the encoded size of a GiveBlocksMessage -// with a single signed block of the largest possible size -func maxSizeGiveBlocksMessage(maxBlockSize uint32) uint64 { - size := uint64(4) // message type prefix - size += encodeSizeGiveBlocksMessage(&GiveBlocksMessage{}) // size of an empty GiveBlocksMessage - size += encodeSizeSignedBlock(&coin.SignedBlock{}) // size of an empty SignedBlock - size += uint64(maxBlockSize) // maximum size of all transactions in a block - return size -} - -// DaemonConfig configuration for the Daemon -type DaemonConfig struct { //nolint:golint - // Protocol version. TODO -- manage version better - ProtocolVersion int32 - // Minimum accepted protocol version - MinProtocolVersion int32 - // IP Address to serve on. 
Leave empty for automatic assignment - Address string - // BlockchainPubkey blockchain pubkey string - BlockchainPubkey cipher.PubKey - // GenesisHash genesis block hash - GenesisHash cipher.SHA256 - // TCP/UDP port for connections - Port int - // Directory where application data is stored - DataDirectory string - // How often to check and initiate an outgoing connection to a trusted connection if needed - OutgoingTrustedRate time.Duration - // How often to check and initiate an outgoing connection if needed - OutgoingRate time.Duration - // How often to re-attempt to fill any missing private (aka required) connections - PrivateRate time.Duration - // Maximum number of connections - MaxConnections int - // Number of outgoing connections to maintain - MaxOutgoingConnections int - // Maximum number of connections to try at once - MaxPendingConnections int - // How long to wait for a version packet - IntroductionWait time.Duration - // How often to check for peers that have decided to stop communicating - CullInvalidRate time.Duration - // How often to update the database with transaction announcement timestamps - FlushAnnouncedTxnsRate time.Duration - // How many connections are allowed from the same base IP - IPCountsMax int - // Disable all networking activity - DisableNetworking bool - // Don't make outgoing connections - DisableOutgoingConnections bool - // Don't allow incoming connections - DisableIncomingConnections bool - // Run on localhost and only connect to localhost peers - LocalhostOnly bool - // Log ping and pong messages - LogPings bool - // How often to request blocks from peers - BlocksRequestRate time.Duration - // How often to announce our blocks to peers - BlocksAnnounceRate time.Duration - // How many blocks to request in a GetBlocksMessage - GetBlocksRequestCount uint64 - // Maximum number of blocks to respond with to a GetBlocksMessage - MaxGetBlocksResponseCount uint64 - // Max announce txns hash number - MaxTxnAnnounceNum int - // How often 
new blocks are created by the signing node, in seconds - BlockCreationInterval uint64 - // How often to check the unconfirmed pool for transactions that become valid - UnconfirmedRefreshRate time.Duration - // How often to remove transactions that become permanently invalid from the unconfirmed pool - UnconfirmedRemoveInvalidRate time.Duration - // Default "trusted" peers - DefaultConnections []string - // User agent (sent in introduction messages) - UserAgent useragent.Data - userAgent string // parsed from UserAgent in preprocess() - // Transaction verification parameters for unconfirmed transactions - UnconfirmedVerifyTxn params.VerifyTxn - // Random nonce value for detecting self-connection in introduction messages - Mirror uint32 - // Maximum size of incoming messages - MaxIncomingMessageLength uint64 - // Maximum size of incoming messages - MaxOutgoingMessageLength uint64 - // Maximum total size of transactions in a block - MaxBlockTransactionsSize uint32 -} - -// NewDaemonConfig creates daemon config -func NewDaemonConfig() DaemonConfig { - return DaemonConfig{ - ProtocolVersion: 2, - MinProtocolVersion: 2, - Address: "", - Port: 6677, - OutgoingRate: time.Second * 5, - OutgoingTrustedRate: time.Millisecond * 100, - PrivateRate: time.Second * 5, - MaxConnections: 128, - MaxOutgoingConnections: 8, - MaxPendingConnections: 8, - IntroductionWait: time.Second * 30, - CullInvalidRate: time.Second * 3, - FlushAnnouncedTxnsRate: time.Second * 3, - IPCountsMax: 3, - DisableNetworking: false, - DisableOutgoingConnections: false, - DisableIncomingConnections: false, - LocalhostOnly: false, - LogPings: true, - BlocksRequestRate: time.Second * 60, - BlocksAnnounceRate: time.Second * 60, - GetBlocksRequestCount: 20, - MaxGetBlocksResponseCount: 20, - MaxTxnAnnounceNum: 16, - BlockCreationInterval: 10, - UnconfirmedRefreshRate: time.Minute, - UnconfirmedRemoveInvalidRate: time.Minute, - Mirror: rand.New(rand.NewSource(time.Now().UTC().UnixNano())).Uint32(), - 
UnconfirmedVerifyTxn: params.UserVerifyTxn, - MaxOutgoingMessageLength: 256 * 1024, - MaxIncomingMessageLength: 1024 * 1024, - MaxBlockTransactionsSize: 32768, - } -} - -//go:generate mockery -name daemoner -case underscore -inpkg -testonly - -// daemoner Daemon interface -type daemoner interface { - Disconnect(addr string, r gnet.DisconnectReason) error - DaemonConfig() DaemonConfig - sendMessage(addr string, msg gnet.Message) error - broadcastMessage(msg gnet.Message) ([]uint64, error) - disconnectNow(addr string, r gnet.DisconnectReason) error - addPeers(addrs []string) int - recordPeerHeight(addr string, gnetID, height uint64) - getSignedBlocksSince(seq, count uint64) ([]coin.SignedBlock, error) - headBkSeq() (uint64, bool, error) - executeSignedBlock(b coin.SignedBlock) error - filterKnownUnconfirmed(txns []cipher.SHA256) ([]cipher.SHA256, error) - getKnownUnconfirmed(txns []cipher.SHA256) (coin.Transactions, error) - requestBlocksFromAddr(addr string) error - announceAllValidTxns() error - pexConfig() pex.Config - injectTransaction(txn coin.Transaction) (bool, *visor.ErrTxnViolatesSoftConstraint, error) - recordMessageEvent(m asyncMessage, c *gnet.MessageContext) error - connectionIntroduced(addr string, gnetID uint64, m *IntroductionMessage) (*connection, error) - sendRandomPeers(addr string) error -} - -// Daemon stateful properties of the daemon -type Daemon struct { - // Daemon configuration - config DaemonConfig - - // Components - Messages *Messages - pool *Pool - pex *pex.Pex - visor *visor.Visor - - // Cache of announced transactions that are flushed to the database periodically - announcedTxns *announcedTxnsCache - // Cache of connection metadata - connections *Connections - // connect, disconnect, message, error events channel - events chan interface{} - // quit channel - quit chan struct{} - // done channel - done chan struct{} -} - -// New returns a Daemon with primitives allocated -func New(config Config, v *visor.Visor) (*Daemon, error) { - 
config, err := config.preprocess() - if err != nil { - return nil, err - } - - pex, err := pex.New(config.Pex) - if err != nil { - return nil, err - } - - d := &Daemon{ - config: config.Daemon, - Messages: NewMessages(config.Messages), - pex: pex, - visor: v, - - announcedTxns: newAnnouncedTxnsCache(), - connections: NewConnections(), - events: make(chan interface{}, config.Pool.EventChannelSize), - quit: make(chan struct{}), - done: make(chan struct{}), - } - - d.pool, err = NewPool(config.Pool, d) - if err != nil { - return nil, err - } - - d.Messages.Config.Register() - - return d, nil -} - -// ConnectEvent generated when a client connects -type ConnectEvent struct { - GnetID uint64 - Addr string - Solicited bool -} - -// DisconnectEvent generated when a connection terminated -type DisconnectEvent struct { - GnetID uint64 - Addr string - Reason gnet.DisconnectReason -} - -// ConnectFailureEvent represent a failure to connect/dial a connection, with context -type ConnectFailureEvent struct { - Addr string - Solicited bool - Error error -} - -// messageEvent encapsulates a deserialized message from the network -type messageEvent struct { - Message asyncMessage - Context *gnet.MessageContext -} - -// Shutdown terminates all subsystems safely -func (dm *Daemon) Shutdown() { - defer logger.Info("Daemon shutdown complete") - - // close daemon run loop first to avoid creating new connection after - // the connection pool is shutdown. 
- logger.Info("Stopping the daemon run loop") - close(dm.quit) - - logger.Info("Shutting down Pool") - dm.pool.Shutdown() - - logger.Info("Shutting down Pex") - dm.pex.Shutdown() - - <-dm.done -} - -// Run main loop for peer/connection management -func (dm *Daemon) Run() error { - defer logger.Info("Daemon closed") - defer close(dm.done) - - logger.Infof("Daemon UserAgent is %s", dm.config.userAgent) - logger.Infof("Daemon unconfirmed BurnFactor is %d", dm.config.UnconfirmedVerifyTxn.BurnFactor) - logger.Infof("Daemon unconfirmed MaxTransactionSize is %d", dm.config.UnconfirmedVerifyTxn.MaxTransactionSize) - logger.Infof("Daemon unconfirmed MaxDropletPrecision is %d", dm.config.UnconfirmedVerifyTxn.MaxDropletPrecision) - - errC := make(chan error, 5) - var wg sync.WaitGroup - - wg.Add(1) - go func() { - defer wg.Done() - if err := dm.pex.Run(); err != nil { - logger.WithError(err).Error("daemon.Pex.Run failed") - errC <- err - } - }() - - wg.Add(1) - go func() { - defer wg.Done() - if dm.config.DisableIncomingConnections { - if err := dm.pool.RunOffline(); err != nil { - logger.WithError(err).Error("daemon.Pool.RunOffline failed") - errC <- err - } - } else { - if err := dm.pool.Run(); err != nil { - logger.WithError(err).Error("daemon.Pool.Run failed") - errC <- err - } - } - }() - - blockInterval := time.Duration(dm.config.BlockCreationInterval) - blockCreationTicker := time.NewTicker(time.Second * blockInterval) - if !dm.visor.Config.IsBlockPublisher { - blockCreationTicker.Stop() - } - - unconfirmedRefreshTicker := time.NewTicker(dm.config.UnconfirmedRefreshRate) - defer unconfirmedRefreshTicker.Stop() - unconfirmedRemoveInvalidTicker := time.NewTicker(dm.config.UnconfirmedRemoveInvalidRate) - defer unconfirmedRemoveInvalidTicker.Stop() - blocksRequestTicker := time.NewTicker(dm.config.BlocksRequestRate) - defer blocksRequestTicker.Stop() - blocksAnnounceTicker := time.NewTicker(dm.config.BlocksAnnounceRate) - defer blocksAnnounceTicker.Stop() - - // 
outgoingTrustedConnectionsTicker is used to maintain at least one connection to a trusted peer. - // This may be configured at a very frequent rate, so if no trusted connections could be reached, - // there could be a lot of churn. - // The additional outgoingTrustedConnectionsTicker parameters are used to - // skip ticks of the outgoingTrustedConnectionsTicker in the event of total failure. - // outgoingTrustedConnectionsTickerSkipDuration is the minimum time to wait between - // ticks in the event of total failure. - outgoingTrustedConnectionsTicker := time.NewTicker(dm.config.OutgoingTrustedRate) - defer outgoingTrustedConnectionsTicker.Stop() - outgoingTrustedConnectionsTickerSkipDuration := time.Second * 5 - outgoingTrustedConnectionsTickerSkip := false - var outgoingTrustedConnectionsTickerSkipStart time.Time - - privateConnectionsTicker := time.NewTicker(dm.config.PrivateRate) - defer privateConnectionsTicker.Stop() - cullInvalidTicker := time.NewTicker(dm.config.CullInvalidRate) - defer cullInvalidTicker.Stop() - outgoingConnectionsTicker := time.NewTicker(dm.config.OutgoingRate) - defer outgoingConnectionsTicker.Stop() - requestPeersTicker := time.NewTicker(dm.pex.Config.RequestRate) - defer requestPeersTicker.Stop() - clearStaleConnectionsTicker := time.NewTicker(dm.pool.Config.ClearStaleRate) - defer clearStaleConnectionsTicker.Stop() - idleCheckTicker := time.NewTicker(dm.pool.Config.IdleCheckRate) - defer idleCheckTicker.Stop() - - flushAnnouncedTxnsTicker := time.NewTicker(dm.config.FlushAnnouncedTxnsRate) - defer flushAnnouncedTxnsTicker.Stop() - - // Connect to all trusted peers on startup to try to ensure a connection establishes quickly. - // The number of connections to default peers is restricted; - // if multiple connections succeed, extra connections beyond the limit will be disconnected. 
- if !dm.config.DisableOutgoingConnections { - wg.Add(1) - go func() { - defer wg.Done() - dm.connectToTrustedPeers() - }() - } - - var setupErr error - elapser := elapse.NewElapser(daemonRunDurationThreshold, logger) - - // Process SendResults in a separate goroutine, otherwise SendResults - // will fill up much faster than can be processed by the daemon run loop - // dm.handleMessageSendResult must take care not to perform any operation - // that would violate thread safety, since it is not serialized by the daemon run loop - wg.Add(1) - go func() { - defer wg.Done() - elapser := elapse.NewElapser(daemonRunDurationThreshold, logger) - loop: - for { - elapser.CheckForDone() - select { - case <-dm.quit: - break loop - - case r := <-dm.pool.Pool.SendResults: - // Process message sending results - elapser.Register("dm.Pool.Pool.SendResults") - if dm.config.DisableNetworking { - logger.Error("There should be nothing in SendResults") - return - } - dm.handleMessageSendResult(r) - } - } - }() - -loop: - for { - elapser.CheckForDone() - select { - case <-dm.quit: - break loop - - case <-cullInvalidTicker.C: - // Remove connections that failed to complete the handshake - elapser.Register("cullInvalidTicker") - if !dm.config.DisableNetworking { - dm.cullInvalidConnections() - } - - case <-requestPeersTicker.C: - // Request peers via PEX - elapser.Register("requestPeersTicker") - if dm.pex.Config.Disabled { - continue - } - - if dm.pex.IsFull() { - continue - } - - m := NewGetPeersMessage() - if _, err := dm.broadcastMessage(m); err != nil { - logger.WithError(err).Error("Broadcast GetPeersMessage failed") - continue - } - - case <-clearStaleConnectionsTicker.C: - // Remove connections that haven't said anything in a while - elapser.Register("clearStaleConnectionsTicker") - if !dm.config.DisableNetworking { - conns, err := dm.pool.getStaleConnections() - if err != nil { - logger.WithError(err).Error("getStaleConnections failed") - continue - } - - for _, addr := range conns 
{ - if err := dm.Disconnect(addr, ErrDisconnectIdle); err != nil { - logger.WithError(err).WithField("addr", addr).Error("Disconnect") - } - } - } - - case <-idleCheckTicker.C: - // Sends pings as needed - elapser.Register("idleCheckTicker") - if !dm.config.DisableNetworking { - dm.pool.sendPings() - } - - case <-outgoingConnectionsTicker.C: - // Fill up our outgoing connections - elapser.Register("outgoingConnectionsTicker") - dm.connectToRandomPeer() - - case <-outgoingTrustedConnectionsTicker.C: - // Try to maintain at least one trusted connection - elapser.Register("outgoingTrustedConnectionsTicker") - // If connecting to a trusted peer totally fails, make sure to wait longer between further attempts - if outgoingTrustedConnectionsTickerSkip { - if time.Since(outgoingTrustedConnectionsTickerSkipStart) < outgoingTrustedConnectionsTickerSkipDuration { - continue - } - } - - if err := dm.maybeConnectToTrustedPeer(); err != nil && err != ErrNetworkingDisabled { - logger.Critical().WithError(err).Error("maybeConnectToTrustedPeer") - outgoingTrustedConnectionsTickerSkip = true - outgoingTrustedConnectionsTickerSkipStart = time.Now() - } else { - outgoingTrustedConnectionsTickerSkip = false - } - - case <-privateConnectionsTicker.C: - // Always try to stay connected to our private peers - // TODO (also, connect to all of them on start) - elapser.Register("privateConnectionsTicker") - if !dm.config.DisableOutgoingConnections { - dm.makePrivateConnections() - } - - case r := <-dm.events: - elapser.Register("dm.event") - if dm.config.DisableNetworking { - logger.Critical().Error("Networking is disabled, there should be no events") - } else { - dm.handleEvent(r) - } - - case <-flushAnnouncedTxnsTicker.C: - elapser.Register("flushAnnouncedTxnsTicker") - txns := dm.announcedTxns.flush() - - if err := dm.visor.SetTransactionsAnnounced(txns); err != nil { - logger.WithError(err).Error("Failed to set unconfirmed txn announce time") - } - - case <-blockCreationTicker.C: - // 
Create blocks, if block publisher - elapser.Register("blockCreationTicker.C") - if dm.visor.Config.IsBlockPublisher { - sb, err := dm.createAndPublishBlock() - if err != nil { - logger.WithError(err).Error("Failed to create and publish block") - continue - } - - // Not a critical error, but we want it visible in logs - head := sb.Block.Head - logger.Critical().WithFields(logrus.Fields{ - "version": head.Version, - "seq": head.BkSeq, - "time": head.Time, - }).Info("Created and published a new block") - } - - case <-unconfirmedRefreshTicker.C: - elapser.Register("unconfirmedRefreshTicker") - // Get the transactions that turn to valid - validTxns, err := dm.visor.RefreshUnconfirmed() - if err != nil { - logger.WithError(err).Error("dm.Visor.RefreshUnconfirmed failed") - continue - } - // Announce these transactions - if err := dm.announceTxnHashes(validTxns); err != nil { - logger.WithError(err).Warning("announceTxnHashes failed") - } - - case <-unconfirmedRemoveInvalidTicker.C: - elapser.Register("unconfirmedRemoveInvalidTicker") - // Remove transactions that become invalid (violating hard constraints) - removedTxns, err := dm.visor.RemoveInvalidUnconfirmed() - if err != nil { - logger.WithError(err).Error("dm.Visor.RemoveInvalidUnconfirmed failed") - continue - } - if len(removedTxns) > 0 { - logger.Infof("Remove %d txns from pool that began violating hard constraints", len(removedTxns)) - } - - case <-blocksRequestTicker.C: - elapser.Register("blocksRequestTicker") - if err := dm.requestBlocks(); err != nil { - logger.WithError(err).Warning("requestBlocks failed") - } - - case <-blocksAnnounceTicker.C: - elapser.Register("blocksAnnounceTicker") - if err := dm.announceBlocks(); err != nil { - logger.WithError(err).Warning("announceBlocks failed") - } - - case setupErr = <-errC: - logger.WithError(setupErr).Error("read from errc") - break loop - } - } - - if setupErr != nil { - return setupErr - } - - wg.Wait() - - return nil -} - -// Connects to a given peer. 
Returns an error if no connection attempt was -// made. If the connection attempt itself fails, the error is sent to -// the connectionErrors channel. -func (dm *Daemon) connectToPeer(p pex.Peer) error { - if dm.config.DisableOutgoingConnections { - return errors.New("Outgoing connections disabled") - } - - a, _, err := iputil.SplitAddr(p.Addr) - if err != nil { - logger.Critical().WithField("addr", p.Addr).WithError(err).Warning("PEX gave us an invalid peer") - return errors.New("Invalid peer") - } - - if dm.config.LocalhostOnly && !iputil.IsLocalhost(a) { - return errors.New("Not localhost") - } - - if c := dm.connections.get(p.Addr); c != nil { - return errors.New("Already connected to this peer") - } - - cnt := dm.connections.IPCount(a) - if !dm.config.LocalhostOnly && cnt != 0 { - return errors.New("Already connected to a peer with this base IP") - } - - logger.WithField("addr", p.Addr).Debug("Establishing outgoing connection") - - if _, err := dm.connections.pending(p.Addr); err != nil { - logger.Critical().WithError(err).WithField("addr", p.Addr).Error("dm.connections.pending failed") - return err - } - - go func() { - if err := dm.pool.Pool.Connect(p.Addr); err != nil { - dm.events <- ConnectFailureEvent{ - Addr: p.Addr, - Solicited: true, - Error: err, - } - } - }() - return nil -} - -// Connects to all private peers -func (dm *Daemon) makePrivateConnections() { - if dm.config.DisableOutgoingConnections { - return - } - - peers := dm.pex.Private() - for _, p := range peers { - logger.WithField("addr", p.Addr).Info("Private peer attempt") - if err := dm.connectToPeer(p); err != nil { - logger.WithField("addr", p.Addr).WithError(err).Debug("Did not connect to private peer") - } - } -} - -// connectToTrustedPeers tries to connect to all trusted peers -func (dm *Daemon) connectToTrustedPeers() { - if dm.config.DisableOutgoingConnections { - return - } - - logger.Info("Connect to trusted peers") - // Make connections to all trusted peers to try to ensure a 
connection - // MaxDefaultPeerOutgoingConnections limits will be enforced in gnet - // after connections have been established, so not all trusted peers will be connected to. - peers := dm.pex.TrustedPublic() - for _, p := range peers { - if err := dm.connectToPeer(p); err != nil { - logger.WithError(err).WithField("addr", p.Addr).Warning("connect to trusted peer failed") - } - } -} - -// maybeConnectToTrustedPeer tries to connect to one trusted peer if there are no trusted connections -func (dm *Daemon) maybeConnectToTrustedPeer() error { - if dm.config.DisableOutgoingConnections { - return ErrNetworkingDisabled - } - - peers := dm.pex.TrustedPublic() - for _, p := range peers { - // Don't make a connection if we have a trusted peer connection - if len(dm.connections.getByListenAddr(p.Addr)) != 0 { - return nil - } - } - - connected := false - for _, p := range peers { - if err := dm.connectToPeer(p); err != nil { - logger.WithError(err).WithField("addr", p.Addr).Warning("maybeConnectToTrustedPeer: connectToPeer failed") - continue - } - connected = true - break - } - - if !connected { - return errors.New("Could not connect to any trusted peer") - } - - return nil -} - -// connectToRandomPeer attempts to connect to a random peer. If it fails, the peer is removed. -func (dm *Daemon) connectToRandomPeer() { - if dm.config.DisableOutgoingConnections { - return - } - if dm.connections.OutgoingLen() >= dm.config.MaxOutgoingConnections { - return - } - if dm.connections.PendingLen() >= dm.config.MaxPendingConnections { - return - } - if dm.connections.Len() >= dm.config.MaxConnections { - return - } - - // Make a connection to a random (public) peer - peers := dm.pex.RandomPublic(dm.config.MaxOutgoingConnections - dm.connections.OutgoingLen()) - for _, p := range peers { - if err := dm.connectToPeer(p); err != nil { - logger.WithError(err).WithField("addr", p.Addr).Warning("connectToPeer failed") - } - } - - // TODO -- don't reset if not needed? 
- if len(peers) == 0 { - dm.pex.ResetAllRetryTimes() - } -} - -// Removes connections who haven't sent a version after connecting -func (dm *Daemon) cullInvalidConnections() { - now := time.Now().UTC() - for _, c := range dm.connections.all() { - if c.State != ConnectionStateConnected { - continue - } - - if c.ConnectedAt.Add(dm.config.IntroductionWait).Before(now) { - logger.WithField("addr", c.Addr).Info("Disconnecting peer for not sending a version") - if err := dm.Disconnect(c.Addr, ErrDisconnectIntroductionTimeout); err != nil { - logger.WithError(err).WithField("addr", c.Addr).Error("Disconnect") - } - } - } -} - -func (dm *Daemon) isTrustedPeer(addr string) bool { - peer, ok := dm.pex.GetPeer(addr) - if !ok { - return false - } - - return peer.Trusted -} - -// recordMessageEvent records an asyncMessage to the messageEvent chan. Do not access -// messageEvent directly. -func (dm *Daemon) recordMessageEvent(m asyncMessage, c *gnet.MessageContext) error { - dm.events <- messageEvent{ - Message: m, - Context: c, - } - return nil -} - -func (dm *Daemon) handleEvent(e interface{}) { - switch x := e.(type) { - case messageEvent: - dm.onMessageEvent(x) - case ConnectEvent: - dm.onConnectEvent(x) - case DisconnectEvent: - dm.onDisconnectEvent(x) - case ConnectFailureEvent: - dm.onConnectFailure(x) - default: - logger.WithFields(logrus.Fields{ - "type": fmt.Sprintf("%T", e), - "value": fmt.Sprintf("%+v", e), - }).Panic("Invalid object in events queue") - } -} - -func (dm *Daemon) onMessageEvent(e messageEvent) { - // If the connection does not exist or the gnet ID is different, abort message processing - // This can occur because messageEvents for a given connection may occur - // after that connection has disconnected. 
- c := dm.connections.get(e.Context.Addr) - if c == nil { - logger.WithFields(logrus.Fields{ - "addr": e.Context.Addr, - "messageType": fmt.Sprintf("%T", e.Message), - }).Info("onMessageEvent no connection found") - return - } - - if c.gnetID != e.Context.ConnID { - logger.WithFields(logrus.Fields{ - "addr": e.Context.Addr, - "connGnetID": c.gnetID, - "contextGnetID": e.Context.ConnID, - "messageType": fmt.Sprintf("%T", e.Message), - }).Info("onMessageEvent connection gnetID does not match") - return - } - - // The first message received must be INTR, DISC or GIVP - if !c.HasIntroduced() { - switch e.Message.(type) { - case *IntroductionMessage, *DisconnectMessage, *GivePeersMessage: - default: - logger.WithFields(logrus.Fields{ - "addr": e.Context.Addr, - "messageType": fmt.Sprintf("%T", e.Message), - }).Info("needsIntro but first message is not INTR, DISC or GIVP") - if err := dm.Disconnect(e.Context.Addr, ErrDisconnectNoIntroduction); err != nil { - logger.WithError(err).WithField("addr", e.Context.Addr).Error("Disconnect") - } - return - } - } - - e.Message.process(dm) -} - -func (dm *Daemon) onConnectEvent(e ConnectEvent) { - fields := logrus.Fields{ - "addr": e.Addr, - "outgoing": e.Solicited, - "gnetID": e.GnetID, - } - logger.WithFields(fields).Info("onConnectEvent") - - // Update the connections state machine first - c, err := dm.connections.connected(e.Addr, e.GnetID) - if err != nil { - logger.Critical().WithError(err).WithFields(fields).Error("connections.Connected failed") - if err := dm.Disconnect(e.Addr, ErrDisconnectUnexpectedError); err != nil { - logger.WithError(err).WithFields(fields).Error("Disconnect") - } - return - } - - // The connection should already be known as outgoing/solicited due to an earlier connections.pending call. - // If they do not match, there is e.Addr flaw in the concept or implementation of the state machine. 
- if c.Outgoing != e.Solicited { - logger.Critical().WithFields(fields).Warning("Connection.Outgoing does not match ConnectEvent.Solicited state") - } - - if dm.ipCountMaxed(e.Addr) { - logger.WithFields(fields).Info("Max connections for this IP address reached, disconnecting") - if err := dm.Disconnect(e.Addr, ErrDisconnectIPLimitReached); err != nil { - logger.WithError(err).WithFields(fields).Error("Disconnect") - } - return - } - - logger.WithFields(fields).Debug("Sending introduction message") - - if err := dm.sendMessage(e.Addr, NewIntroductionMessage( - dm.config.Mirror, - dm.config.ProtocolVersion, - dm.pool.Pool.Config.Port, - dm.config.BlockchainPubkey, - dm.config.userAgent, - dm.config.UnconfirmedVerifyTxn, - dm.config.GenesisHash, - )); err != nil { - logger.WithFields(fields).WithError(err).Error("Send IntroductionMessage failed") - return - } -} - -func (dm *Daemon) onDisconnectEvent(e DisconnectEvent) { - fields := logrus.Fields{ - "addr": e.Addr, - "reason": e.Reason, - "gnetID": e.GnetID, - } - logger.WithFields(fields).Info("onDisconnectEvent") - - if err := dm.connections.remove(e.Addr, e.GnetID); err != nil { - logger.WithError(err).WithFields(fields).Error("connections.Remove failed") - return - } - - // TODO -- blacklist peer for certain reasons, not just remove - switch e.Reason { - case ErrDisconnectIntroductionTimeout, - ErrDisconnectBlockchainPubkeyNotMatched, - ErrDisconnectInvalidExtraData, - ErrDisconnectInvalidUserAgent: - if !dm.isTrustedPeer(e.Addr) { - dm.pex.RemovePeer(e.Addr) - } - case ErrDisconnectNoIntroduction, - ErrDisconnectVersionNotSupported, - ErrDisconnectSelf: - dm.pex.IncreaseRetryTimes(e.Addr) - default: - switch e.Reason.Error() { - case "read failed: EOF": - dm.pex.IncreaseRetryTimes(e.Addr) - } - } -} - -func (dm *Daemon) onConnectFailure(c ConnectFailureEvent) { - // Remove the pending connection from connections and update the retry times in pex - logger.WithField("addr", 
c.Addr).WithError(c.Error).Debug("onConnectFailure") - - // onConnectFailure should only trigger for "pending" connections which have gnet ID 0; - // connections in any other state will have a nonzero gnet ID. - // if the connection is in a different state, the gnet ID will not match, the connection - // won't be removed and we'll receive an error. - // If this happens, it is a bug, and the connections state may be corrupted. - if err := dm.connections.remove(c.Addr, 0); err != nil { - logger.Critical().WithField("addr", c.Addr).WithError(err).Error("connections.remove") - } - - if strings.HasSuffix(c.Error.Error(), "connect: connection refused") { - dm.pex.IncreaseRetryTimes(c.Addr) - } -} - -// onGnetDisconnect triggered when a gnet.Connection terminates -func (dm *Daemon) onGnetDisconnect(addr string, gnetID uint64, reason gnet.DisconnectReason) { - dm.events <- DisconnectEvent{ - GnetID: gnetID, - Addr: addr, - Reason: reason, - } -} - -// onGnetConnect Triggered when a gnet.Connection connects -func (dm *Daemon) onGnetConnect(addr string, gnetID uint64, solicited bool) { - dm.events <- ConnectEvent{ - GnetID: gnetID, - Addr: addr, - Solicited: solicited, - } -} - -// onGnetConnectFailure triggered when a gnet.Connection fails to connect -func (dm *Daemon) onGnetConnectFailure(addr string, solicited bool, err error) { - dm.events <- ConnectFailureEvent{ - Addr: addr, - Solicited: solicited, - Error: err, - } -} - -// Returns whether the ipCount maximum has been reached. -// Always false when using LocalhostOnly config. -func (dm *Daemon) ipCountMaxed(addr string) bool { - ip, _, err := iputil.SplitAddr(addr) - if err != nil { - logger.Critical().WithField("addr", addr).Error("ipCountMaxed called with invalid addr") - return true - } - - return !dm.config.LocalhostOnly && dm.connections.IPCount(ip) >= dm.config.IPCountsMax -} - -// When an async message send finishes, its result is handled by this. 
-// This method must take care to perform only thread-safe actions, since it is called -// outside of the daemon run loop -func (dm *Daemon) handleMessageSendResult(r gnet.SendResult) { - if r.Error != nil { - var lg logrus.FieldLogger - if r.Error == gnet.ErrMsgExceedsMaxLen { - lg = logger.Critical() - } else { - lg = logger - } - - lg.WithError(r.Error).WithFields(logrus.Fields{ - "addr": r.Addr, - "msgType": reflect.TypeOf(r.Message), - }).Warning("Failed to send message") - return - } - - if m, ok := r.Message.(SendingTxnsMessage); ok { - dm.announcedTxns.add(m.GetFiltered()) - } - - if m, ok := r.Message.(*DisconnectMessage); ok { - if err := dm.disconnectNow(r.Addr, m.reason); err != nil { - logger.WithError(err).WithField("addr", r.Addr).Warning("disconnectNow") - } - } -} - -// requestBlocks sends a GetBlocksMessage to all connections -func (dm *Daemon) requestBlocks() error { - if dm.config.DisableNetworking { - return ErrNetworkingDisabled - } - - headSeq, ok, err := dm.visor.HeadBkSeq() - if err != nil { - return err - } - if !ok { - return errors.New("Cannot request blocks, there is no head block") - } - - m := NewGetBlocksMessage(headSeq, dm.config.GetBlocksRequestCount) - - if _, err := dm.broadcastMessage(m); err != nil { - logger.WithError(err).Debug("Broadcast GetBlocksMessage failed") - return err - } - - return nil -} - -// announceBlocks sends an AnnounceBlocksMessage to all connections -func (dm *Daemon) announceBlocks() error { - if dm.config.DisableNetworking { - return ErrNetworkingDisabled - } - - headSeq, ok, err := dm.visor.HeadBkSeq() - if err != nil { - return err - } - if !ok { - return errors.New("Cannot announce blocks, there is no head block") - } - - m := NewAnnounceBlocksMessage(headSeq) - - if _, err := dm.broadcastMessage(m); err != nil { - logger.WithError(err).Debug("Broadcast AnnounceBlocksMessage failed") - return err - } - - return nil -} - -// createAndPublishBlock creates a block from unconfirmed transactions and sends 
it to the network. -// Will panic if not running as a block publisher. -// Will not create a block if outgoing connections are disabled. -// If the block was created but the broadcast failed, the error will be non-nil but the -// SignedBlock value will not be empty. -// TODO -- refactor this method -- it should either always create a block and maybe broadcast it, -// or use a database transaction to rollback block publishing if broadcast failed (however, this will cause a slow DB write) -func (dm *Daemon) createAndPublishBlock() (*coin.SignedBlock, error) { - if dm.config.DisableNetworking { - return nil, ErrNetworkingDisabled - } - - sb, err := dm.visor.CreateAndExecuteBlock() - if err != nil { - return nil, err - } - - err = dm.broadcastBlock(sb) - - return &sb, err -} - -// ResendUnconfirmedTxns resends all unconfirmed transactions and returns the hashes that were successfully rebroadcast. -// It does not return an error if broadcasting fails. -func (dm *Daemon) ResendUnconfirmedTxns() ([]cipher.SHA256, error) { - if dm.config.DisableNetworking { - return nil, ErrNetworkingDisabled - } - - txns, err := dm.visor.GetAllUnconfirmedTransactions() - if err != nil { - return nil, err - } - - var txids []cipher.SHA256 - for i := range txns { - txnHash := txns[i].Transaction.Hash() - logger.WithField("txid", txnHash.Hex()).Debug("Rebroadcast transaction") - if _, err := dm.BroadcastTransaction(txns[i].Transaction); err == nil { - txids = append(txids, txnHash) - } - } - - return txids, nil -} - -// BroadcastTransaction broadcasts a single transaction to all peers. 
-func (dm *Daemon) BroadcastTransaction(txn coin.Transaction) ([]uint64, error) { - if dm.config.DisableNetworking { - return nil, ErrNetworkingDisabled - } - - m := NewGiveTxnsMessage(coin.Transactions{txn}, dm.config.MaxOutgoingMessageLength) - if len(m.Transactions) != 1 { - logger.Critical().Error("NewGiveTxnsMessage truncated its only transaction") - } - - ids, err := dm.broadcastMessage(m) - if err != nil { - logger.WithError(err).Error("Broadcast GiveTxnsMessage failed") - return nil, err - } - - logger.Debugf("BroadcastTransaction to %d conns", len(ids)) - - return ids, nil -} - -// BroadcastUserTransaction broadcasts a single transaction to all peers. -// Returns an error if no peers that would propagate the transaction could be reached. -func (dm *Daemon) BroadcastUserTransaction(txn coin.Transaction, head *coin.SignedBlock, inputs coin.UxArray) error { - ids, err := dm.BroadcastTransaction(txn) - if err != nil { - return err - } - - accepts, err := checkBroadcastTxnRecipients(dm.connections, ids, txn, head, inputs) - if err != nil { - logger.WithError(err).Error("BroadcastUserTransaction") - return err - } - - logger.Debugf("BroadcastUserTransaction transaction propagated by %d/%d conns", accepts, len(ids)) - - return nil -} - -// checkBroadcastTxnRecipients checks whether or not the recipients of a txn broadcast would accept the transaction as valid, -// based upon their reported txn verification parameters. -// If no recipient would accept the txn, an error is returned. -// The number of recipients that claim to accept the transaction is returned. -func checkBroadcastTxnRecipients(connections *Connections, ids []uint64, txn coin.Transaction, head *coin.SignedBlock, inputs coin.UxArray) (int, error) { - // Check if the connections will accept our transaction as valid. - // Clients v24 and earlier do not propagate soft-invalid transactions. - // Clients v24 and earlier do not advertise a user agent. 
- // Clients v24 and earlier do not advertise their transaction verification parameters, - // but will use defaults of BurnFactor=2, MaxTransactionSize=32768, MaxDropletPrecision=3. - // If none of the connections will propagate our transaction, return an error. - accepts := 0 - - for _, id := range ids { - c := connections.getByGnetID(id) - if c == nil { - continue - } - - if !c.HasIntroduced() { - continue - } - - // If the peer has not set their user agent, they are v24 or earlier. - // v24 and earlier will not propagate a transaction that does not pass soft-validation. - // Check if our transaction would pass their soft-validation, using the hardcoded defaults - // that are used by v24 and earlier. - if c.UserAgent.Empty() { - if err := verifyUserTxnAgainstPeer(txn, head, inputs, params.VerifyTxn{ - BurnFactor: 2, - MaxTransactionSize: 32 * 1024, - MaxDropletPrecision: 3, - }); err != nil { - logger.WithFields(logrus.Fields{ - "addr": c.Addr, - "gnetID": c.gnetID, - }).Debug("Peer will not propagate this transaction") - continue - } - } - - accepts++ - } - - if accepts == 0 { - return 0, ErrNoPeerAcceptsTxn - } - - return accepts, nil -} - -// verifyUserTxnAgainstPeer returns an error if a user-created transaction would not pass soft-validation -// according to a peer's reported verification parameters -func verifyUserTxnAgainstPeer(txn coin.Transaction, head *coin.SignedBlock, inputs coin.UxArray, verifyParams params.VerifyTxn) error { - // Check the droplet precision - for _, o := range txn.Out { - if err := params.DropletPrecisionCheck(verifyParams.MaxDropletPrecision, o.Coins); err != nil { - return err - } - } - - // Check the txn size - txnSize, err := txn.Size() - if err != nil { - logger.Critical().WithError(err).Error("txn.Size failed unexpectedly") - return err - } - - if txnSize > verifyParams.MaxTransactionSize { - return visor.ErrTxnExceedsMaxBlockSize - } - - // Check the coinhour burn fee - f, err := fee.TransactionFee(&txn, head.Time(), inputs) 
- if err != nil { - return err - } - - if err := fee.VerifyTransactionFee(&txn, f, verifyParams.BurnFactor); err != nil { - return err - } - - return nil -} - -// Disconnect sends a DisconnectMessage to a peer. After the DisconnectMessage is sent, the peer is disconnected. -// This allows all pending messages to be sent. Any message queued after a DisconnectMessage is unlikely to be sent -// to the peer (but possible). -func (dm *Daemon) Disconnect(addr string, r gnet.DisconnectReason) error { - logger.WithFields(logrus.Fields{ - "addr": addr, - "reason": r, - }).Debug("Sending DisconnectMessage") - return dm.sendMessage(addr, NewDisconnectMessage(r)) -} - -// Implements private daemoner interface methods: - -// requestBlocksFromAddr sends a GetBlocksMessage to one connected address -func (dm *Daemon) requestBlocksFromAddr(addr string) error { - if dm.config.DisableNetworking { - return ErrNetworkingDisabled - } - - headSeq, ok, err := dm.visor.HeadBkSeq() - if err != nil { - return err - } - if !ok { - return errors.New("Cannot request blocks from addr, there is no head block") - } - - m := NewGetBlocksMessage(headSeq, dm.config.GetBlocksRequestCount) - return dm.sendMessage(addr, m) -} - -// broadcastBlock sends a signed block to all connections -func (dm *Daemon) broadcastBlock(sb coin.SignedBlock) error { - if dm.config.DisableNetworking { - return ErrNetworkingDisabled - } - - m := NewGiveBlocksMessage([]coin.SignedBlock{sb}, dm.config.MaxOutgoingMessageLength) - if len(m.Blocks) != 1 { - logger.Critical().Error("NewGiveBlocksMessage truncated its only block") - } - - _, err := dm.broadcastMessage(m) - return err -} - -// DaemonConfig returns the daemon config -func (dm *Daemon) DaemonConfig() DaemonConfig { - return dm.config -} - -// connectionIntroduced transfers a connection to the "introduced" state in the connections state machine -// and updates other state -func (dm *Daemon) connectionIntroduced(addr string, gnetID uint64, m *IntroductionMessage) 
(*connection, error) { - c, err := dm.connections.introduced(addr, gnetID, m) - if err != nil { - return nil, err - } - - listenAddr := c.ListenAddr() - - fields := logrus.Fields{ - "addr": addr, - "gnetID": m.c.ConnID, - "connGnetID": c.gnetID, - "listenPort": m.ListenPort, - "listenAddr": listenAddr, - } - - if c.Outgoing { - // For successful outgoing connections, mark the peer as having an incoming port in the pex peerlist - // The peer should already be in the peerlist, since we use the peerlist to choose an outgoing connection to make - if err := dm.pex.SetHasIncomingPort(listenAddr, true); err != nil { - logger.Critical().WithError(err).WithFields(fields).Error("pex.SetHasIncomingPort failed") - return nil, err - } - } else { - // For successful incoming connections, add the peer to the peer list, with their self-reported listen port - if err := dm.pex.AddPeer(listenAddr); err != nil { - logger.Critical().WithError(err).WithFields(fields).Error("pex.AddPeer failed") - return nil, err - } - } - - if err := dm.pex.SetUserAgent(listenAddr, c.UserAgent); err != nil { - logger.Critical().WithError(err).WithFields(fields).Error("pex.SetUserAgent failed") - return nil, err - } - - dm.pex.ResetRetryTimes(listenAddr) - - return c, nil -} - -// sendRandomPeers sends a random sample of peers to another peer -func (dm *Daemon) sendRandomPeers(addr string) error { - peers := dm.pex.RandomExchangeable(dm.pex.Config.ReplyCount) - if len(peers) == 0 { - logger.Debug("sendRandomPeers: no peers to send in reply") - return errors.New("No peers available") - } - - m := NewGivePeersMessage(peers, dm.config.MaxOutgoingMessageLength) - - return dm.sendMessage(addr, m) -} - -// announceAllValidTxns broadcasts valid unconfirmed transactions -func (dm *Daemon) announceAllValidTxns() error { - if dm.config.DisableNetworking { - return ErrNetworkingDisabled - } - - // Get valid unconfirmed transaction hashes - hashes, err := dm.visor.GetAllValidUnconfirmedTxHashes() - if err != nil { - 
return err - } - - return dm.announceTxnHashes(hashes) -} - -// announceTxnHashes announces transaction hashes, splitting them into chunks if they exceed MaxTxnAnnounceNum -func (dm *Daemon) announceTxnHashes(hashes []cipher.SHA256) error { - if dm.config.DisableNetworking { - return ErrNetworkingDisabled - } - - // Divide hashes into multiple sets of max size - hashesSet := divideHashes(hashes, dm.config.MaxTxnAnnounceNum) - - for _, hs := range hashesSet { - m := NewAnnounceTxnsMessage(hs, dm.config.MaxOutgoingMessageLength) - if len(m.Transactions) != len(hs) { - logger.Critical().Error("NewAnnounceTxnsMessage truncated hashes that were already split up") - } - if _, err := dm.broadcastMessage(m); err != nil { - logger.WithError(err).Debug("Broadcast AnnounceTxnsMessage failed") - return err - } - } - - return nil -} - -func divideHashes(hashes []cipher.SHA256, n int) [][]cipher.SHA256 { - if len(hashes) == 0 { - return [][]cipher.SHA256{} - } - - var j int - var hashesArray [][]cipher.SHA256 - - if len(hashes) > n { - for i := range hashes { - if len(hashes[j:i]) == n { - hs := make([]cipher.SHA256, n) - copy(hs, hashes[j:i]) - hashesArray = append(hashesArray, hs) - j = i - } - } - } - - hs := make([]cipher.SHA256, len(hashes)-j) - copy(hs, hashes[j:]) - hashesArray = append(hashesArray, hs) - return hashesArray -} - -// sendMessage sends a Message to a Connection and pushes the result onto the SendResults channel. -func (dm *Daemon) sendMessage(addr string, msg gnet.Message) error { - return dm.pool.Pool.SendMessage(addr, msg) -} - -// broadcastMessage sends a Message to all introduced connections in the Pool. -// Returns the gnet IDs of connections that broadcast succeeded for. -// Note that a connection could still fail to receive the message under certain network conditions, -// there is no guarantee that a message was broadcast. 
-func (dm *Daemon) broadcastMessage(msg gnet.Message) ([]uint64, error) { - if dm.config.DisableNetworking { - return nil, ErrNetworkingDisabled - } - - conns := dm.connections.all() - var addrs []string - for _, c := range conns { - if c.HasIntroduced() { - addrs = append(addrs, c.Addr) - } - } - - return dm.pool.Pool.BroadcastMessage(msg, addrs) -} - -// disconnectNow disconnects from a peer immediately without sending a DisconnectMessage. Any pending messages -// will not be sent to the peer. -func (dm *Daemon) disconnectNow(addr string, r gnet.DisconnectReason) error { - return dm.pool.Pool.Disconnect(addr, r) -} - -// pexConfig returns the pex config -func (dm *Daemon) pexConfig() pex.Config { - return dm.pex.Config -} - -// addPeers adds peers to the pex -func (dm *Daemon) addPeers(addrs []string) int { - return dm.pex.AddPeers(addrs) -} - -// recordPeerHeight records the height of specific peer -func (dm *Daemon) recordPeerHeight(addr string, gnetID, height uint64) { - if err := dm.connections.SetHeight(addr, gnetID, height); err != nil { - logger.Critical().WithError(err).WithField("addr", addr).Error("connections.SetHeight failed") - } -} - -// getSignedBlocksSince returns N signed blocks since given seq -func (dm *Daemon) getSignedBlocksSince(seq, count uint64) ([]coin.SignedBlock, error) { - return dm.visor.GetSignedBlocksSince(seq, count) -} - -// headBkSeq returns the head block sequence -func (dm *Daemon) headBkSeq() (uint64, bool, error) { - return dm.visor.HeadBkSeq() -} - -// executeSignedBlock executes the signed block -func (dm *Daemon) executeSignedBlock(b coin.SignedBlock) error { - return dm.visor.ExecuteSignedBlock(b) -} - -// filterKnownUnconfirmed returns unconfirmed txn hashes with known ones removed -func (dm *Daemon) filterKnownUnconfirmed(txns []cipher.SHA256) ([]cipher.SHA256, error) { - return dm.visor.FilterKnownUnconfirmed(txns) -} - -// getKnownUnconfirmed returns unconfirmed txn hashes with known ones removed -func (dm *Daemon) 
getKnownUnconfirmed(txns []cipher.SHA256) (coin.Transactions, error) { - return dm.visor.GetKnownUnconfirmed(txns) -} - -// injectTransaction records a coin.Transaction to the UnconfirmedTxnPool if the txn is not -// already in the blockchain. -// The bool return value is whether or not the transaction was already in the pool. -// If the transaction violates hard constraints, it is rejected, and error will not be nil. -// If the transaction only violates soft constraints, it is still injected, and the soft constraint violation is returned. -func (dm *Daemon) injectTransaction(txn coin.Transaction) (bool, *visor.ErrTxnViolatesSoftConstraint, error) { - return dm.visor.InjectForeignTransaction(txn) -} - -/* Connection management API */ - -// Connection a connection's state within the daemon -type Connection struct { - Addr string - Pex pex.Peer - Gnet GnetConnectionDetails - ConnectionDetails -} - -// GnetConnectionDetails connection data from gnet -type GnetConnectionDetails struct { - ID uint64 - LastSent time.Time - LastReceived time.Time -} - -func newConnection(dc *connection, gc *gnet.Connection, pp *pex.Peer) Connection { - c := Connection{} - - if dc != nil { - c.Addr = dc.Addr - c.ConnectionDetails = dc.ConnectionDetails - } - - if gc != nil { - c.Gnet = GnetConnectionDetails{ - ID: gc.ID, - LastSent: gc.LastSent, - LastReceived: gc.LastReceived, - } - } - - if pp != nil { - c.Pex = *pp - } - - return c -} - -// newConnection creates a Connection from daemon.connection, gnet.Connection and pex.Peer -func (dm *Daemon) newConnection(c *connection) (*Connection, error) { - if c == nil { - return nil, nil - } - - gc, err := dm.pool.Pool.GetConnection(c.Addr) - if err != nil { - return nil, err - } - - var pp *pex.Peer - listenAddr := c.ListenAddr() - if listenAddr != "" { - p, ok := dm.pex.GetPeer(listenAddr) - if ok { - pp = &p - } - } - - cc := newConnection(c, gc, pp) - return &cc, nil -} - -// GetConnections returns solicited (outgoing) connections -func (dm 
*Daemon) GetConnections(f func(c Connection) bool) ([]Connection, error) { - if dm.pool.Pool == nil { - return nil, nil - } - - cs := dm.connections.all() - - conns := make([]Connection, 0) - - for _, c := range cs { - cc, err := dm.newConnection(&c) - if err != nil { - return nil, err - } - - ccc := *cc - - if !f(ccc) { - continue - } - - conns = append(conns, ccc) - } - - // Sort connnections by IP address - sort.Slice(conns, func(i, j int) bool { - return strings.Compare(conns[i].Addr, conns[j].Addr) < 0 - }) - - return conns, nil -} - -// GetDefaultConnections returns the default hardcoded connection addresses -func (dm *Daemon) GetDefaultConnections() []string { - conns := make([]string, len(dm.config.DefaultConnections)) - copy(conns[:], dm.config.DefaultConnections[:]) - return conns -} - -// GetConnection returns a *Connection of specific address -func (dm *Daemon) GetConnection(addr string) (*Connection, error) { - c := dm.connections.get(addr) - if c == nil { - return nil, nil - } - - return dm.newConnection(c) -} - -// DisconnectByGnetID disconnects a connection by gnet ID -func (dm *Daemon) DisconnectByGnetID(gnetID uint64) error { - c := dm.connections.getByGnetID(gnetID) - if c == nil { - return ErrConnectionNotExist - } - - return dm.Disconnect(c.Addr, ErrDisconnectRequestedByOperator) -} - -// GetTrustConnections returns all trusted connections -func (dm *Daemon) GetTrustConnections() []string { - return dm.pex.Trusted().ToAddrs() -} - -// GetExchgConnection returns all connections to peers found through peer exchange -func (dm *Daemon) GetExchgConnection() []string { - return dm.pex.RandomExchangeable(0).ToAddrs() -} - -/* Peer Blockchain Status API */ - -// BlockchainProgress is the current blockchain syncing status -type BlockchainProgress struct { - // Our current blockchain length - Current uint64 - // Our best guess at true blockchain length - Highest uint64 - // Individual blockchain length reports from peers - Peers []PeerBlockchainHeight -} 
- -// newBlockchainProgress creates BlockchainProgress from the local head blockchain sequence number -// and a list of remote peers -func newBlockchainProgress(headSeq uint64, conns []connection) *BlockchainProgress { - peers := newPeerBlockchainHeights(conns) - - return &BlockchainProgress{ - Current: headSeq, - Highest: EstimateBlockchainHeight(headSeq, peers), - Peers: peers, - } -} - -// PeerBlockchainHeight records blockchain height for an address -type PeerBlockchainHeight struct { - Address string - Height uint64 -} - -func newPeerBlockchainHeights(conns []connection) []PeerBlockchainHeight { - peers := make([]PeerBlockchainHeight, 0, len(conns)) - for _, c := range conns { - if c.State != ConnectionStatePending { - peers = append(peers, PeerBlockchainHeight{ - Address: c.Addr, - Height: c.Height, - }) - } - } - return peers -} - -// EstimateBlockchainHeight estimates the blockchain sync height. -// The highest height reported amongst all peers, and including the node itself, is returned. -func EstimateBlockchainHeight(headSeq uint64, peers []PeerBlockchainHeight) uint64 { - for _, c := range peers { - if c.Height > headSeq { - headSeq = c.Height - } - } - return headSeq -} - -// GetBlockchainProgress returns a *BlockchainProgress -func (dm *Daemon) GetBlockchainProgress(headSeq uint64) *BlockchainProgress { - conns := dm.connections.all() - return newBlockchainProgress(headSeq, conns) -} - -// InjectBroadcastTransaction injects transaction to the unconfirmed pool and broadcasts it. -// If the transaction violates either hard or soft constraints, it is neither injected nor broadcast. -// If the broadcast fails (due to no connections), the transaction is not injected. -// However, the broadcast may fail in practice, without returning an error, -// so this is not foolproof. -// This method is to be used by user-initiated transaction injections. 
-// For transactions received over the network, use daemon.injectTransaction and check the result to -// decide on repropagation. -func (dm *Daemon) InjectBroadcastTransaction(txn coin.Transaction) error { - return dm.visor.WithUpdateTx("daemon.InjectBroadcastTransaction", func(tx *dbutil.Tx) error { - _, head, inputs, err := dm.visor.InjectUserTransactionTx(tx, txn) - if err != nil { - logger.WithError(err).Error("InjectUserTransactionTx failed") - return err - } - - if err := dm.BroadcastUserTransaction(txn, head, inputs); err != nil { - logger.WithError(err).Error("BroadcastUserTransaction failed") - return err - } - - return nil - }) -} - -// InjectTransaction injects transaction to the unconfirmed pool but does not broadcast it. -// If the transaction violates either hard or soft constraints, it is not injected. -// This method is to be used by user-initiated transaction injections. -// For transactions received over the network, use daemon.injectTransaction and check the result to -// decide on repropagation. -func (dm *Daemon) InjectTransaction(txn coin.Transaction) error { - _, _, _, err := dm.visor.InjectUserTransaction(txn) - return err -} diff --git a/vendor/github.com/SkycoinProject/skycoin/src/daemon/disconnect_message_skyencoder.go b/vendor/github.com/SkycoinProject/skycoin/src/daemon/disconnect_message_skyencoder.go deleted file mode 100644 index 3e8ed07..0000000 --- a/vendor/github.com/SkycoinProject/skycoin/src/daemon/disconnect_message_skyencoder.go +++ /dev/null @@ -1,118 +0,0 @@ -// Code generated by github.com/SkycoinProject/skyencoder. DO NOT EDIT. 
- -package daemon - -import ( - "errors" - "math" - - "github.com/SkycoinProject/skycoin/src/cipher/encoder" -) - -// encodeSizeDisconnectMessage computes the size of an encoded object of type DisconnectMessage -func encodeSizeDisconnectMessage(obj *DisconnectMessage) uint64 { - i0 := uint64(0) - - // obj.ReasonCode - i0 += 2 - - // obj.Reserved - i0 += 4 + uint64(len(obj.Reserved)) - - return i0 -} - -// encodeDisconnectMessage encodes an object of type DisconnectMessage to a buffer allocated to the exact size -// required to encode the object. -func encodeDisconnectMessage(obj *DisconnectMessage) ([]byte, error) { - n := encodeSizeDisconnectMessage(obj) - buf := make([]byte, n) - - if err := encodeDisconnectMessageToBuffer(buf, obj); err != nil { - return nil, err - } - - return buf, nil -} - -// encodeDisconnectMessageToBuffer encodes an object of type DisconnectMessage to a []byte buffer. -// The buffer must be large enough to encode the object, otherwise an error is returned. -func encodeDisconnectMessageToBuffer(buf []byte, obj *DisconnectMessage) error { - if uint64(len(buf)) < encodeSizeDisconnectMessage(obj) { - return encoder.ErrBufferUnderflow - } - - e := &encoder.Encoder{ - Buffer: buf[:], - } - - // obj.ReasonCode - e.Uint16(obj.ReasonCode) - - // obj.Reserved length check - if uint64(len(obj.Reserved)) > math.MaxUint32 { - return errors.New("obj.Reserved length exceeds math.MaxUint32") - } - - // obj.Reserved length - e.Uint32(uint32(len(obj.Reserved))) - - // obj.Reserved copy - e.CopyBytes(obj.Reserved) - - return nil -} - -// decodeDisconnectMessage decodes an object of type DisconnectMessage from a buffer. -// Returns the number of bytes used from the buffer to decode the object. -// If the buffer not long enough to decode the object, returns encoder.ErrBufferUnderflow. 
-func decodeDisconnectMessage(buf []byte, obj *DisconnectMessage) (uint64, error) { - d := &encoder.Decoder{ - Buffer: buf[:], - } - - { - // obj.ReasonCode - i, err := d.Uint16() - if err != nil { - return 0, err - } - obj.ReasonCode = i - } - - { - // obj.Reserved - - ul, err := d.Uint32() - if err != nil { - return 0, err - } - - length := int(ul) - if length < 0 || length > len(d.Buffer) { - return 0, encoder.ErrBufferUnderflow - } - - if length != 0 { - obj.Reserved = make([]byte, length) - - copy(obj.Reserved[:], d.Buffer[:length]) - d.Buffer = d.Buffer[length:] - } - } - - return uint64(len(buf) - len(d.Buffer)), nil -} - -// decodeDisconnectMessageExact decodes an object of type DisconnectMessage from a buffer. -// If the buffer not long enough to decode the object, returns encoder.ErrBufferUnderflow. -// If the buffer is longer than required to decode the object, returns encoder.ErrRemainingBytes. -func decodeDisconnectMessageExact(buf []byte, obj *DisconnectMessage) error { - if n, err := decodeDisconnectMessage(buf, obj); err != nil { - return err - } else if n != uint64(len(buf)) { - return encoder.ErrRemainingBytes - } - - return nil -} diff --git a/vendor/github.com/SkycoinProject/skycoin/src/daemon/errors.go b/vendor/github.com/SkycoinProject/skycoin/src/daemon/errors.go deleted file mode 100644 index 8cb21b5..0000000 --- a/vendor/github.com/SkycoinProject/skycoin/src/daemon/errors.go +++ /dev/null @@ -1,112 +0,0 @@ -package daemon - -import ( - "errors" - - "github.com/SkycoinProject/skycoin/src/daemon/gnet" -) - -var ( - // ErrDisconnectVersionNotSupported version is below minimum supported version - ErrDisconnectVersionNotSupported gnet.DisconnectReason = errors.New("Version is below minimum supported version") - // ErrDisconnectIntroductionTimeout timeout - ErrDisconnectIntroductionTimeout gnet.DisconnectReason = errors.New("Introduction timeout") - // ErrDisconnectIsBlacklisted is blacklisted - ErrDisconnectIsBlacklisted gnet.DisconnectReason = 
errors.New("Blacklisted") - // ErrDisconnectSelf self connnect - ErrDisconnectSelf gnet.DisconnectReason = errors.New("Self connect") - // ErrDisconnectConnectedTwice connect twice - ErrDisconnectConnectedTwice gnet.DisconnectReason = errors.New("Already connected") - // ErrDisconnectIdle idle - ErrDisconnectIdle gnet.DisconnectReason = errors.New("Idle") - // ErrDisconnectNoIntroduction no introduction - ErrDisconnectNoIntroduction gnet.DisconnectReason = errors.New("First message was not an Introduction") - // ErrDisconnectIPLimitReached ip limit reached - ErrDisconnectIPLimitReached gnet.DisconnectReason = errors.New("Maximum number of connections for this IP was reached") - // ErrDisconnectUnexpectedError this is returned when a seemingly impossible error is encountered, e.g. net.Conn.Addr() returns an invalid ip:port - ErrDisconnectUnexpectedError gnet.DisconnectReason = errors.New("Unexpected error") - // ErrDisconnectMaxOutgoingConnectionsReached is returned when connection pool size is greater than the maximum allowed - ErrDisconnectMaxOutgoingConnectionsReached gnet.DisconnectReason = errors.New("Maximum outgoing connections was reached") - // ErrDisconnectBlockchainPubkeyNotMatched is returned when the blockchain pubkey in introduction does not match - ErrDisconnectBlockchainPubkeyNotMatched gnet.DisconnectReason = errors.New("Blockchain pubkey does not match") - // ErrDisconnectBlockchainPubkeyNotProvided is returned when the blockchain pubkey in introduction is not provided - ErrDisconnectBlockchainPubkeyNotProvided gnet.DisconnectReason = errors.New("Blockchain pubkey is not provided") - // ErrDisconnectInvalidExtraData is returned when extra field can't be parsed - ErrDisconnectInvalidExtraData gnet.DisconnectReason = errors.New("Invalid extra data in message") - // ErrDisconnectReceivedDisconnect received a DisconnectMessage - ErrDisconnectReceivedDisconnect gnet.DisconnectReason = errors.New("Received DisconnectMessage") - // 
ErrDisconnectInvalidUserAgent is returned if the peer provides an invalid user agent - ErrDisconnectInvalidUserAgent gnet.DisconnectReason = errors.New("Invalid user agent") - // ErrDisconnectRequestedByOperator the operator of the node requested a disconnect - ErrDisconnectRequestedByOperator gnet.DisconnectReason = errors.New("Disconnect requested by the node operator") - // ErrDisconnectPeerlistFull the peerlist is full - ErrDisconnectPeerlistFull gnet.DisconnectReason = errors.New("Peerlist is full") - // ErrDisconnectInvalidBurnFactor invalid burn factor in introduction message - ErrDisconnectInvalidBurnFactor gnet.DisconnectReason = errors.New("Invalid burn factor in introduction message") - // ErrDisconnectInvalidMaxTransactionSize invalid max transaction size in introduction message - ErrDisconnectInvalidMaxTransactionSize gnet.DisconnectReason = errors.New("Invalid max transaction size in introduction message") - // ErrDisconnectInvalidMaxDropletPrecision invalid max droplet precision in introduction message - ErrDisconnectInvalidMaxDropletPrecision gnet.DisconnectReason = errors.New("Invalid max droplet precision in introduction message") - - // ErrDisconnectUnknownReason used when mapping an unknown reason code to an error. Is not sent over the network. 
- ErrDisconnectUnknownReason gnet.DisconnectReason = errors.New("Unknown DisconnectReason") - - disconnectReasonCodes = map[gnet.DisconnectReason]uint16{ - ErrDisconnectUnknownReason: 0, - - ErrDisconnectVersionNotSupported: 1, - ErrDisconnectIntroductionTimeout: 2, - ErrDisconnectIsBlacklisted: 3, - ErrDisconnectSelf: 4, - ErrDisconnectConnectedTwice: 5, - ErrDisconnectIdle: 6, - ErrDisconnectNoIntroduction: 7, - ErrDisconnectIPLimitReached: 8, - ErrDisconnectUnexpectedError: 9, - ErrDisconnectMaxOutgoingConnectionsReached: 10, - ErrDisconnectBlockchainPubkeyNotMatched: 11, - ErrDisconnectInvalidExtraData: 12, - ErrDisconnectReceivedDisconnect: 13, - ErrDisconnectInvalidUserAgent: 14, - ErrDisconnectRequestedByOperator: 15, - ErrDisconnectPeerlistFull: 16, - ErrDisconnectInvalidBurnFactor: 17, - ErrDisconnectInvalidMaxTransactionSize: 18, - ErrDisconnectInvalidMaxDropletPrecision: 19, - - // gnet codes are registered here, but they are not sent in a DISC - // message by gnet. Only daemon sends a DISC packet. - // If gnet chooses to disconnect it will not send a DISC packet. 
- gnet.ErrDisconnectSetReadDeadlineFailed: 1001, - gnet.ErrDisconnectInvalidMessageLength: 1002, - gnet.ErrDisconnectMalformedMessage: 1003, - gnet.ErrDisconnectUnknownMessage: 1004, - gnet.ErrDisconnectShutdown: 1005, - gnet.ErrDisconnectMessageDecodeUnderflow: 1006, - gnet.ErrDisconnectTruncatedMessageID: 1007, - } - - disconnectCodeReasons map[uint16]gnet.DisconnectReason -) - -func init() { - disconnectCodeReasons = make(map[uint16]gnet.DisconnectReason, len(disconnectReasonCodes)) - - for r, c := range disconnectReasonCodes { - disconnectCodeReasons[c] = r - } -} - -// DisconnectReasonToCode maps a gnet.DisconnectReason to a 16-byte code -func DisconnectReasonToCode(r gnet.DisconnectReason) uint16 { - return disconnectReasonCodes[r] -} - -// DisconnectCodeToReason maps a disconnect code to a gnet.DisconnectReason -func DisconnectCodeToReason(c uint16) gnet.DisconnectReason { - r, ok := disconnectCodeReasons[c] - if !ok { - return ErrDisconnectUnknownReason - } - return r -} diff --git a/vendor/github.com/SkycoinProject/skycoin/src/daemon/get_blocks_message_skyencoder.go b/vendor/github.com/SkycoinProject/skycoin/src/daemon/get_blocks_message_skyencoder.go deleted file mode 100644 index 0ddfa8e..0000000 --- a/vendor/github.com/SkycoinProject/skycoin/src/daemon/get_blocks_message_skyencoder.go +++ /dev/null @@ -1,93 +0,0 @@ -// Code generated by github.com/SkycoinProject/skyencoder. DO NOT EDIT. - -package daemon - -import "github.com/SkycoinProject/skycoin/src/cipher/encoder" - -// encodeSizeGetBlocksMessage computes the size of an encoded object of type GetBlocksMessage -func encodeSizeGetBlocksMessage(obj *GetBlocksMessage) uint64 { - i0 := uint64(0) - - // obj.LastBlock - i0 += 8 - - // obj.RequestedBlocks - i0 += 8 - - return i0 -} - -// encodeGetBlocksMessage encodes an object of type GetBlocksMessage to a buffer allocated to the exact size -// required to encode the object. 
-func encodeGetBlocksMessage(obj *GetBlocksMessage) ([]byte, error) { - n := encodeSizeGetBlocksMessage(obj) - buf := make([]byte, n) - - if err := encodeGetBlocksMessageToBuffer(buf, obj); err != nil { - return nil, err - } - - return buf, nil -} - -// encodeGetBlocksMessageToBuffer encodes an object of type GetBlocksMessage to a []byte buffer. -// The buffer must be large enough to encode the object, otherwise an error is returned. -func encodeGetBlocksMessageToBuffer(buf []byte, obj *GetBlocksMessage) error { - if uint64(len(buf)) < encodeSizeGetBlocksMessage(obj) { - return encoder.ErrBufferUnderflow - } - - e := &encoder.Encoder{ - Buffer: buf[:], - } - - // obj.LastBlock - e.Uint64(obj.LastBlock) - - // obj.RequestedBlocks - e.Uint64(obj.RequestedBlocks) - - return nil -} - -// decodeGetBlocksMessage decodes an object of type GetBlocksMessage from a buffer. -// Returns the number of bytes used from the buffer to decode the object. -// If the buffer not long enough to decode the object, returns encoder.ErrBufferUnderflow. -func decodeGetBlocksMessage(buf []byte, obj *GetBlocksMessage) (uint64, error) { - d := &encoder.Decoder{ - Buffer: buf[:], - } - - { - // obj.LastBlock - i, err := d.Uint64() - if err != nil { - return 0, err - } - obj.LastBlock = i - } - - { - // obj.RequestedBlocks - i, err := d.Uint64() - if err != nil { - return 0, err - } - obj.RequestedBlocks = i - } - - return uint64(len(buf) - len(d.Buffer)), nil -} - -// decodeGetBlocksMessageExact decodes an object of type GetBlocksMessage from a buffer. -// If the buffer not long enough to decode the object, returns encoder.ErrBufferUnderflow. -// If the buffer is longer than required to decode the object, returns encoder.ErrRemainingBytes. 
-func decodeGetBlocksMessageExact(buf []byte, obj *GetBlocksMessage) error { - if n, err := decodeGetBlocksMessage(buf, obj); err != nil { - return err - } else if n != uint64(len(buf)) { - return encoder.ErrRemainingBytes - } - - return nil -} diff --git a/vendor/github.com/SkycoinProject/skycoin/src/daemon/get_txns_message_skyencoder.go b/vendor/github.com/SkycoinProject/skycoin/src/daemon/get_txns_message_skyencoder.go deleted file mode 100644 index e6ae83c..0000000 --- a/vendor/github.com/SkycoinProject/skycoin/src/daemon/get_txns_message_skyencoder.go +++ /dev/null @@ -1,135 +0,0 @@ -// Code generated by github.com/SkycoinProject/skyencoder. DO NOT EDIT. - -package daemon - -import ( - "errors" - "math" - - "github.com/SkycoinProject/skycoin/src/cipher" - "github.com/SkycoinProject/skycoin/src/cipher/encoder" -) - -// encodeSizeGetTxnsMessage computes the size of an encoded object of type GetTxnsMessage -func encodeSizeGetTxnsMessage(obj *GetTxnsMessage) uint64 { - i0 := uint64(0) - - // obj.Transactions - i0 += 4 - { - i1 := uint64(0) - - // x1 - i1 += 32 - - i0 += uint64(len(obj.Transactions)) * i1 - } - - return i0 -} - -// encodeGetTxnsMessage encodes an object of type GetTxnsMessage to a buffer allocated to the exact size -// required to encode the object. -func encodeGetTxnsMessage(obj *GetTxnsMessage) ([]byte, error) { - n := encodeSizeGetTxnsMessage(obj) - buf := make([]byte, n) - - if err := encodeGetTxnsMessageToBuffer(buf, obj); err != nil { - return nil, err - } - - return buf, nil -} - -// encodeGetTxnsMessageToBuffer encodes an object of type GetTxnsMessage to a []byte buffer. -// The buffer must be large enough to encode the object, otherwise an error is returned. 
-func encodeGetTxnsMessageToBuffer(buf []byte, obj *GetTxnsMessage) error { - if uint64(len(buf)) < encodeSizeGetTxnsMessage(obj) { - return encoder.ErrBufferUnderflow - } - - e := &encoder.Encoder{ - Buffer: buf[:], - } - - // obj.Transactions maxlen check - if len(obj.Transactions) > 256 { - return encoder.ErrMaxLenExceeded - } - - // obj.Transactions length check - if uint64(len(obj.Transactions)) > math.MaxUint32 { - return errors.New("obj.Transactions length exceeds math.MaxUint32") - } - - // obj.Transactions length - e.Uint32(uint32(len(obj.Transactions))) - - // obj.Transactions - for _, x := range obj.Transactions { - - // x - e.CopyBytes(x[:]) - - } - - return nil -} - -// decodeGetTxnsMessage decodes an object of type GetTxnsMessage from a buffer. -// Returns the number of bytes used from the buffer to decode the object. -// If the buffer not long enough to decode the object, returns encoder.ErrBufferUnderflow. -func decodeGetTxnsMessage(buf []byte, obj *GetTxnsMessage) (uint64, error) { - d := &encoder.Decoder{ - Buffer: buf[:], - } - - { - // obj.Transactions - - ul, err := d.Uint32() - if err != nil { - return 0, err - } - - length := int(ul) - if length < 0 || length > len(d.Buffer) { - return 0, encoder.ErrBufferUnderflow - } - - if length > 256 { - return 0, encoder.ErrMaxLenExceeded - } - - if length != 0 { - obj.Transactions = make([]cipher.SHA256, length) - - for z1 := range obj.Transactions { - { - // obj.Transactions[z1] - if len(d.Buffer) < len(obj.Transactions[z1]) { - return 0, encoder.ErrBufferUnderflow - } - copy(obj.Transactions[z1][:], d.Buffer[:len(obj.Transactions[z1])]) - d.Buffer = d.Buffer[len(obj.Transactions[z1]):] - } - - } - } - } - - return uint64(len(buf) - len(d.Buffer)), nil -} - -// decodeGetTxnsMessageExact decodes an object of type GetTxnsMessage from a buffer. -// If the buffer not long enough to decode the object, returns encoder.ErrBufferUnderflow. 
-// If the buffer is longer than required to decode the object, returns encoder.ErrRemainingBytes. -func decodeGetTxnsMessageExact(buf []byte, obj *GetTxnsMessage) error { - if n, err := decodeGetTxnsMessage(buf, obj); err != nil { - return err - } else if n != uint64(len(buf)) { - return encoder.ErrRemainingBytes - } - - return nil -} diff --git a/vendor/github.com/SkycoinProject/skycoin/src/daemon/give_blocks_message_skyencoder.go b/vendor/github.com/SkycoinProject/skycoin/src/daemon/give_blocks_message_skyencoder.go deleted file mode 100644 index eaa84a6..0000000 --- a/vendor/github.com/SkycoinProject/skycoin/src/daemon/give_blocks_message_skyencoder.go +++ /dev/null @@ -1,579 +0,0 @@ -// Code generated by github.com/SkycoinProject/skyencoder. DO NOT EDIT. - -package daemon - -import ( - "errors" - "math" - - "github.com/SkycoinProject/skycoin/src/cipher" - "github.com/SkycoinProject/skycoin/src/cipher/encoder" - "github.com/SkycoinProject/skycoin/src/coin" -) - -// encodeSizeGiveBlocksMessage computes the size of an encoded object of type GiveBlocksMessage -func encodeSizeGiveBlocksMessage(obj *GiveBlocksMessage) uint64 { - i0 := uint64(0) - - // obj.Blocks - i0 += 4 - for _, x1 := range obj.Blocks { - i1 := uint64(0) - - // x1.Block.Head.Version - i1 += 4 - - // x1.Block.Head.Time - i1 += 8 - - // x1.Block.Head.BkSeq - i1 += 8 - - // x1.Block.Head.Fee - i1 += 8 - - // x1.Block.Head.PrevHash - i1 += 32 - - // x1.Block.Head.BodyHash - i1 += 32 - - // x1.Block.Head.UxHash - i1 += 32 - - // x1.Block.Body.Transactions - i1 += 4 - for _, x2 := range x1.Block.Body.Transactions { - i2 := uint64(0) - - // x2.Length - i2 += 4 - - // x2.Type - i2++ - - // x2.InnerHash - i2 += 32 - - // x2.Sigs - i2 += 4 - { - i3 := uint64(0) - - // x3 - i3 += 65 - - i2 += uint64(len(x2.Sigs)) * i3 - } - - // x2.In - i2 += 4 - { - i3 := uint64(0) - - // x3 - i3 += 32 - - i2 += uint64(len(x2.In)) * i3 - } - - // x2.Out - i2 += 4 - { - i3 := uint64(0) - - // x3.Address.Version - i3++ - - // 
x3.Address.Key - i3 += 20 - - // x3.Coins - i3 += 8 - - // x3.Hours - i3 += 8 - - i2 += uint64(len(x2.Out)) * i3 - } - - i1 += i2 - } - - // x1.Sig - i1 += 65 - - i0 += i1 - } - - return i0 -} - -// encodeGiveBlocksMessage encodes an object of type GiveBlocksMessage to a buffer allocated to the exact size -// required to encode the object. -func encodeGiveBlocksMessage(obj *GiveBlocksMessage) ([]byte, error) { - n := encodeSizeGiveBlocksMessage(obj) - buf := make([]byte, n) - - if err := encodeGiveBlocksMessageToBuffer(buf, obj); err != nil { - return nil, err - } - - return buf, nil -} - -// encodeGiveBlocksMessageToBuffer encodes an object of type GiveBlocksMessage to a []byte buffer. -// The buffer must be large enough to encode the object, otherwise an error is returned. -func encodeGiveBlocksMessageToBuffer(buf []byte, obj *GiveBlocksMessage) error { - if uint64(len(buf)) < encodeSizeGiveBlocksMessage(obj) { - return encoder.ErrBufferUnderflow - } - - e := &encoder.Encoder{ - Buffer: buf[:], - } - - // obj.Blocks maxlen check - if len(obj.Blocks) > 128 { - return encoder.ErrMaxLenExceeded - } - - // obj.Blocks length check - if uint64(len(obj.Blocks)) > math.MaxUint32 { - return errors.New("obj.Blocks length exceeds math.MaxUint32") - } - - // obj.Blocks length - e.Uint32(uint32(len(obj.Blocks))) - - // obj.Blocks - for _, x := range obj.Blocks { - - // x.Block.Head.Version - e.Uint32(x.Block.Head.Version) - - // x.Block.Head.Time - e.Uint64(x.Block.Head.Time) - - // x.Block.Head.BkSeq - e.Uint64(x.Block.Head.BkSeq) - - // x.Block.Head.Fee - e.Uint64(x.Block.Head.Fee) - - // x.Block.Head.PrevHash - e.CopyBytes(x.Block.Head.PrevHash[:]) - - // x.Block.Head.BodyHash - e.CopyBytes(x.Block.Head.BodyHash[:]) - - // x.Block.Head.UxHash - e.CopyBytes(x.Block.Head.UxHash[:]) - - // x.Block.Body.Transactions maxlen check - if len(x.Block.Body.Transactions) > 65535 { - return encoder.ErrMaxLenExceeded - } - - // x.Block.Body.Transactions length check - if 
uint64(len(x.Block.Body.Transactions)) > math.MaxUint32 { - return errors.New("x.Block.Body.Transactions length exceeds math.MaxUint32") - } - - // x.Block.Body.Transactions length - e.Uint32(uint32(len(x.Block.Body.Transactions))) - - // x.Block.Body.Transactions - for _, x := range x.Block.Body.Transactions { - - // x.Length - e.Uint32(x.Length) - - // x.Type - e.Uint8(x.Type) - - // x.InnerHash - e.CopyBytes(x.InnerHash[:]) - - // x.Sigs maxlen check - if len(x.Sigs) > 65535 { - return encoder.ErrMaxLenExceeded - } - - // x.Sigs length check - if uint64(len(x.Sigs)) > math.MaxUint32 { - return errors.New("x.Sigs length exceeds math.MaxUint32") - } - - // x.Sigs length - e.Uint32(uint32(len(x.Sigs))) - - // x.Sigs - for _, x := range x.Sigs { - - // x - e.CopyBytes(x[:]) - - } - - // x.In maxlen check - if len(x.In) > 65535 { - return encoder.ErrMaxLenExceeded - } - - // x.In length check - if uint64(len(x.In)) > math.MaxUint32 { - return errors.New("x.In length exceeds math.MaxUint32") - } - - // x.In length - e.Uint32(uint32(len(x.In))) - - // x.In - for _, x := range x.In { - - // x - e.CopyBytes(x[:]) - - } - - // x.Out maxlen check - if len(x.Out) > 65535 { - return encoder.ErrMaxLenExceeded - } - - // x.Out length check - if uint64(len(x.Out)) > math.MaxUint32 { - return errors.New("x.Out length exceeds math.MaxUint32") - } - - // x.Out length - e.Uint32(uint32(len(x.Out))) - - // x.Out - for _, x := range x.Out { - - // x.Address.Version - e.Uint8(x.Address.Version) - - // x.Address.Key - e.CopyBytes(x.Address.Key[:]) - - // x.Coins - e.Uint64(x.Coins) - - // x.Hours - e.Uint64(x.Hours) - - } - - } - - // x.Sig - e.CopyBytes(x.Sig[:]) - - } - - return nil -} - -// decodeGiveBlocksMessage decodes an object of type GiveBlocksMessage from a buffer. -// Returns the number of bytes used from the buffer to decode the object. -// If the buffer not long enough to decode the object, returns encoder.ErrBufferUnderflow. 
-func decodeGiveBlocksMessage(buf []byte, obj *GiveBlocksMessage) (uint64, error) { - d := &encoder.Decoder{ - Buffer: buf[:], - } - - { - // obj.Blocks - - ul, err := d.Uint32() - if err != nil { - return 0, err - } - - length := int(ul) - if length < 0 || length > len(d.Buffer) { - return 0, encoder.ErrBufferUnderflow - } - - if length > 128 { - return 0, encoder.ErrMaxLenExceeded - } - - if length != 0 { - obj.Blocks = make([]coin.SignedBlock, length) - - for z1 := range obj.Blocks { - { - // obj.Blocks[z1].Block.Head.Version - i, err := d.Uint32() - if err != nil { - return 0, err - } - obj.Blocks[z1].Block.Head.Version = i - } - - { - // obj.Blocks[z1].Block.Head.Time - i, err := d.Uint64() - if err != nil { - return 0, err - } - obj.Blocks[z1].Block.Head.Time = i - } - - { - // obj.Blocks[z1].Block.Head.BkSeq - i, err := d.Uint64() - if err != nil { - return 0, err - } - obj.Blocks[z1].Block.Head.BkSeq = i - } - - { - // obj.Blocks[z1].Block.Head.Fee - i, err := d.Uint64() - if err != nil { - return 0, err - } - obj.Blocks[z1].Block.Head.Fee = i - } - - { - // obj.Blocks[z1].Block.Head.PrevHash - if len(d.Buffer) < len(obj.Blocks[z1].Block.Head.PrevHash) { - return 0, encoder.ErrBufferUnderflow - } - copy(obj.Blocks[z1].Block.Head.PrevHash[:], d.Buffer[:len(obj.Blocks[z1].Block.Head.PrevHash)]) - d.Buffer = d.Buffer[len(obj.Blocks[z1].Block.Head.PrevHash):] - } - - { - // obj.Blocks[z1].Block.Head.BodyHash - if len(d.Buffer) < len(obj.Blocks[z1].Block.Head.BodyHash) { - return 0, encoder.ErrBufferUnderflow - } - copy(obj.Blocks[z1].Block.Head.BodyHash[:], d.Buffer[:len(obj.Blocks[z1].Block.Head.BodyHash)]) - d.Buffer = d.Buffer[len(obj.Blocks[z1].Block.Head.BodyHash):] - } - - { - // obj.Blocks[z1].Block.Head.UxHash - if len(d.Buffer) < len(obj.Blocks[z1].Block.Head.UxHash) { - return 0, encoder.ErrBufferUnderflow - } - copy(obj.Blocks[z1].Block.Head.UxHash[:], d.Buffer[:len(obj.Blocks[z1].Block.Head.UxHash)]) - d.Buffer = 
d.Buffer[len(obj.Blocks[z1].Block.Head.UxHash):] - } - - { - // obj.Blocks[z1].Block.Body.Transactions - - ul, err := d.Uint32() - if err != nil { - return 0, err - } - - length := int(ul) - if length < 0 || length > len(d.Buffer) { - return 0, encoder.ErrBufferUnderflow - } - - if length > 65535 { - return 0, encoder.ErrMaxLenExceeded - } - - if length != 0 { - obj.Blocks[z1].Block.Body.Transactions = make([]coin.Transaction, length) - - for z5 := range obj.Blocks[z1].Block.Body.Transactions { - { - // obj.Blocks[z1].Block.Body.Transactions[z5].Length - i, err := d.Uint32() - if err != nil { - return 0, err - } - obj.Blocks[z1].Block.Body.Transactions[z5].Length = i - } - - { - // obj.Blocks[z1].Block.Body.Transactions[z5].Type - i, err := d.Uint8() - if err != nil { - return 0, err - } - obj.Blocks[z1].Block.Body.Transactions[z5].Type = i - } - - { - // obj.Blocks[z1].Block.Body.Transactions[z5].InnerHash - if len(d.Buffer) < len(obj.Blocks[z1].Block.Body.Transactions[z5].InnerHash) { - return 0, encoder.ErrBufferUnderflow - } - copy(obj.Blocks[z1].Block.Body.Transactions[z5].InnerHash[:], d.Buffer[:len(obj.Blocks[z1].Block.Body.Transactions[z5].InnerHash)]) - d.Buffer = d.Buffer[len(obj.Blocks[z1].Block.Body.Transactions[z5].InnerHash):] - } - - { - // obj.Blocks[z1].Block.Body.Transactions[z5].Sigs - - ul, err := d.Uint32() - if err != nil { - return 0, err - } - - length := int(ul) - if length < 0 || length > len(d.Buffer) { - return 0, encoder.ErrBufferUnderflow - } - - if length > 65535 { - return 0, encoder.ErrMaxLenExceeded - } - - if length != 0 { - obj.Blocks[z1].Block.Body.Transactions[z5].Sigs = make([]cipher.Sig, length) - - for z7 := range obj.Blocks[z1].Block.Body.Transactions[z5].Sigs { - { - // obj.Blocks[z1].Block.Body.Transactions[z5].Sigs[z7] - if len(d.Buffer) < len(obj.Blocks[z1].Block.Body.Transactions[z5].Sigs[z7]) { - return 0, encoder.ErrBufferUnderflow - } - copy(obj.Blocks[z1].Block.Body.Transactions[z5].Sigs[z7][:], 
d.Buffer[:len(obj.Blocks[z1].Block.Body.Transactions[z5].Sigs[z7])]) - d.Buffer = d.Buffer[len(obj.Blocks[z1].Block.Body.Transactions[z5].Sigs[z7]):] - } - - } - } - } - - { - // obj.Blocks[z1].Block.Body.Transactions[z5].In - - ul, err := d.Uint32() - if err != nil { - return 0, err - } - - length := int(ul) - if length < 0 || length > len(d.Buffer) { - return 0, encoder.ErrBufferUnderflow - } - - if length > 65535 { - return 0, encoder.ErrMaxLenExceeded - } - - if length != 0 { - obj.Blocks[z1].Block.Body.Transactions[z5].In = make([]cipher.SHA256, length) - - for z7 := range obj.Blocks[z1].Block.Body.Transactions[z5].In { - { - // obj.Blocks[z1].Block.Body.Transactions[z5].In[z7] - if len(d.Buffer) < len(obj.Blocks[z1].Block.Body.Transactions[z5].In[z7]) { - return 0, encoder.ErrBufferUnderflow - } - copy(obj.Blocks[z1].Block.Body.Transactions[z5].In[z7][:], d.Buffer[:len(obj.Blocks[z1].Block.Body.Transactions[z5].In[z7])]) - d.Buffer = d.Buffer[len(obj.Blocks[z1].Block.Body.Transactions[z5].In[z7]):] - } - - } - } - } - - { - // obj.Blocks[z1].Block.Body.Transactions[z5].Out - - ul, err := d.Uint32() - if err != nil { - return 0, err - } - - length := int(ul) - if length < 0 || length > len(d.Buffer) { - return 0, encoder.ErrBufferUnderflow - } - - if length > 65535 { - return 0, encoder.ErrMaxLenExceeded - } - - if length != 0 { - obj.Blocks[z1].Block.Body.Transactions[z5].Out = make([]coin.TransactionOutput, length) - - for z7 := range obj.Blocks[z1].Block.Body.Transactions[z5].Out { - { - // obj.Blocks[z1].Block.Body.Transactions[z5].Out[z7].Address.Version - i, err := d.Uint8() - if err != nil { - return 0, err - } - obj.Blocks[z1].Block.Body.Transactions[z5].Out[z7].Address.Version = i - } - - { - // obj.Blocks[z1].Block.Body.Transactions[z5].Out[z7].Address.Key - if len(d.Buffer) < len(obj.Blocks[z1].Block.Body.Transactions[z5].Out[z7].Address.Key) { - return 0, encoder.ErrBufferUnderflow - } - 
copy(obj.Blocks[z1].Block.Body.Transactions[z5].Out[z7].Address.Key[:], d.Buffer[:len(obj.Blocks[z1].Block.Body.Transactions[z5].Out[z7].Address.Key)]) - d.Buffer = d.Buffer[len(obj.Blocks[z1].Block.Body.Transactions[z5].Out[z7].Address.Key):] - } - - { - // obj.Blocks[z1].Block.Body.Transactions[z5].Out[z7].Coins - i, err := d.Uint64() - if err != nil { - return 0, err - } - obj.Blocks[z1].Block.Body.Transactions[z5].Out[z7].Coins = i - } - - { - // obj.Blocks[z1].Block.Body.Transactions[z5].Out[z7].Hours - i, err := d.Uint64() - if err != nil { - return 0, err - } - obj.Blocks[z1].Block.Body.Transactions[z5].Out[z7].Hours = i - } - - } - } - } - } - } - } - - { - // obj.Blocks[z1].Sig - if len(d.Buffer) < len(obj.Blocks[z1].Sig) { - return 0, encoder.ErrBufferUnderflow - } - copy(obj.Blocks[z1].Sig[:], d.Buffer[:len(obj.Blocks[z1].Sig)]) - d.Buffer = d.Buffer[len(obj.Blocks[z1].Sig):] - } - - } - } - } - - return uint64(len(buf) - len(d.Buffer)), nil -} - -// decodeGiveBlocksMessageExact decodes an object of type GiveBlocksMessage from a buffer. -// If the buffer not long enough to decode the object, returns encoder.ErrBufferUnderflow. -// If the buffer is longer than required to decode the object, returns encoder.ErrRemainingBytes. -func decodeGiveBlocksMessageExact(buf []byte, obj *GiveBlocksMessage) error { - if n, err := decodeGiveBlocksMessage(buf, obj); err != nil { - return err - } else if n != uint64(len(buf)) { - return encoder.ErrRemainingBytes - } - - return nil -} diff --git a/vendor/github.com/SkycoinProject/skycoin/src/daemon/give_peers_message_skyencoder.go b/vendor/github.com/SkycoinProject/skycoin/src/daemon/give_peers_message_skyencoder.go deleted file mode 100644 index af71acb..0000000 --- a/vendor/github.com/SkycoinProject/skycoin/src/daemon/give_peers_message_skyencoder.go +++ /dev/null @@ -1,149 +0,0 @@ -// Code generated by github.com/SkycoinProject/skyencoder. DO NOT EDIT. 
- -package daemon - -import ( - "errors" - "math" - - "github.com/SkycoinProject/skycoin/src/cipher/encoder" -) - -// encodeSizeGivePeersMessage computes the size of an encoded object of type GivePeersMessage -func encodeSizeGivePeersMessage(obj *GivePeersMessage) uint64 { - i0 := uint64(0) - - // obj.Peers - i0 += 4 - { - i1 := uint64(0) - - // x1.IP - i1 += 4 - - // x1.Port - i1 += 2 - - i0 += uint64(len(obj.Peers)) * i1 - } - - return i0 -} - -// encodeGivePeersMessage encodes an object of type GivePeersMessage to a buffer allocated to the exact size -// required to encode the object. -func encodeGivePeersMessage(obj *GivePeersMessage) ([]byte, error) { - n := encodeSizeGivePeersMessage(obj) - buf := make([]byte, n) - - if err := encodeGivePeersMessageToBuffer(buf, obj); err != nil { - return nil, err - } - - return buf, nil -} - -// encodeGivePeersMessageToBuffer encodes an object of type GivePeersMessage to a []byte buffer. -// The buffer must be large enough to encode the object, otherwise an error is returned. -func encodeGivePeersMessageToBuffer(buf []byte, obj *GivePeersMessage) error { - if uint64(len(buf)) < encodeSizeGivePeersMessage(obj) { - return encoder.ErrBufferUnderflow - } - - e := &encoder.Encoder{ - Buffer: buf[:], - } - - // obj.Peers maxlen check - if len(obj.Peers) > 512 { - return encoder.ErrMaxLenExceeded - } - - // obj.Peers length check - if uint64(len(obj.Peers)) > math.MaxUint32 { - return errors.New("obj.Peers length exceeds math.MaxUint32") - } - - // obj.Peers length - e.Uint32(uint32(len(obj.Peers))) - - // obj.Peers - for _, x := range obj.Peers { - - // x.IP - e.Uint32(x.IP) - - // x.Port - e.Uint16(x.Port) - - } - - return nil -} - -// decodeGivePeersMessage decodes an object of type GivePeersMessage from a buffer. -// Returns the number of bytes used from the buffer to decode the object. -// If the buffer not long enough to decode the object, returns encoder.ErrBufferUnderflow. 
-func decodeGivePeersMessage(buf []byte, obj *GivePeersMessage) (uint64, error) { - d := &encoder.Decoder{ - Buffer: buf[:], - } - - { - // obj.Peers - - ul, err := d.Uint32() - if err != nil { - return 0, err - } - - length := int(ul) - if length < 0 || length > len(d.Buffer) { - return 0, encoder.ErrBufferUnderflow - } - - if length > 512 { - return 0, encoder.ErrMaxLenExceeded - } - - if length != 0 { - obj.Peers = make([]IPAddr, length) - - for z1 := range obj.Peers { - { - // obj.Peers[z1].IP - i, err := d.Uint32() - if err != nil { - return 0, err - } - obj.Peers[z1].IP = i - } - - { - // obj.Peers[z1].Port - i, err := d.Uint16() - if err != nil { - return 0, err - } - obj.Peers[z1].Port = i - } - - } - } - } - - return uint64(len(buf) - len(d.Buffer)), nil -} - -// decodeGivePeersMessageExact decodes an object of type GivePeersMessage from a buffer. -// If the buffer not long enough to decode the object, returns encoder.ErrBufferUnderflow. -// If the buffer is longer than required to decode the object, returns encoder.ErrRemainingBytes. -func decodeGivePeersMessageExact(buf []byte, obj *GivePeersMessage) error { - if n, err := decodeGivePeersMessage(buf, obj); err != nil { - return err - } else if n != uint64(len(buf)) { - return encoder.ErrRemainingBytes - } - - return nil -} diff --git a/vendor/github.com/SkycoinProject/skycoin/src/daemon/give_txns_message_skyencoder.go b/vendor/github.com/SkycoinProject/skycoin/src/daemon/give_txns_message_skyencoder.go deleted file mode 100644 index 3524fc1..0000000 --- a/vendor/github.com/SkycoinProject/skycoin/src/daemon/give_txns_message_skyencoder.go +++ /dev/null @@ -1,408 +0,0 @@ -// Code generated by github.com/SkycoinProject/skyencoder. DO NOT EDIT. 
- -package daemon - -import ( - "errors" - "math" - - "github.com/SkycoinProject/skycoin/src/cipher" - "github.com/SkycoinProject/skycoin/src/cipher/encoder" - "github.com/SkycoinProject/skycoin/src/coin" -) - -// encodeSizeGiveTxnsMessage computes the size of an encoded object of type GiveTxnsMessage -func encodeSizeGiveTxnsMessage(obj *GiveTxnsMessage) uint64 { - i0 := uint64(0) - - // obj.Transactions - i0 += 4 - for _, x1 := range obj.Transactions { - i1 := uint64(0) - - // x1.Length - i1 += 4 - - // x1.Type - i1++ - - // x1.InnerHash - i1 += 32 - - // x1.Sigs - i1 += 4 - { - i2 := uint64(0) - - // x2 - i2 += 65 - - i1 += uint64(len(x1.Sigs)) * i2 - } - - // x1.In - i1 += 4 - { - i2 := uint64(0) - - // x2 - i2 += 32 - - i1 += uint64(len(x1.In)) * i2 - } - - // x1.Out - i1 += 4 - { - i2 := uint64(0) - - // x2.Address.Version - i2++ - - // x2.Address.Key - i2 += 20 - - // x2.Coins - i2 += 8 - - // x2.Hours - i2 += 8 - - i1 += uint64(len(x1.Out)) * i2 - } - - i0 += i1 - } - - return i0 -} - -// encodeGiveTxnsMessage encodes an object of type GiveTxnsMessage to a buffer allocated to the exact size -// required to encode the object. -func encodeGiveTxnsMessage(obj *GiveTxnsMessage) ([]byte, error) { - n := encodeSizeGiveTxnsMessage(obj) - buf := make([]byte, n) - - if err := encodeGiveTxnsMessageToBuffer(buf, obj); err != nil { - return nil, err - } - - return buf, nil -} - -// encodeGiveTxnsMessageToBuffer encodes an object of type GiveTxnsMessage to a []byte buffer. -// The buffer must be large enough to encode the object, otherwise an error is returned. 
-func encodeGiveTxnsMessageToBuffer(buf []byte, obj *GiveTxnsMessage) error { - if uint64(len(buf)) < encodeSizeGiveTxnsMessage(obj) { - return encoder.ErrBufferUnderflow - } - - e := &encoder.Encoder{ - Buffer: buf[:], - } - - // obj.Transactions maxlen check - if len(obj.Transactions) > 256 { - return encoder.ErrMaxLenExceeded - } - - // obj.Transactions length check - if uint64(len(obj.Transactions)) > math.MaxUint32 { - return errors.New("obj.Transactions length exceeds math.MaxUint32") - } - - // obj.Transactions length - e.Uint32(uint32(len(obj.Transactions))) - - // obj.Transactions - for _, x := range obj.Transactions { - - // x.Length - e.Uint32(x.Length) - - // x.Type - e.Uint8(x.Type) - - // x.InnerHash - e.CopyBytes(x.InnerHash[:]) - - // x.Sigs maxlen check - if len(x.Sigs) > 65535 { - return encoder.ErrMaxLenExceeded - } - - // x.Sigs length check - if uint64(len(x.Sigs)) > math.MaxUint32 { - return errors.New("x.Sigs length exceeds math.MaxUint32") - } - - // x.Sigs length - e.Uint32(uint32(len(x.Sigs))) - - // x.Sigs - for _, x := range x.Sigs { - - // x - e.CopyBytes(x[:]) - - } - - // x.In maxlen check - if len(x.In) > 65535 { - return encoder.ErrMaxLenExceeded - } - - // x.In length check - if uint64(len(x.In)) > math.MaxUint32 { - return errors.New("x.In length exceeds math.MaxUint32") - } - - // x.In length - e.Uint32(uint32(len(x.In))) - - // x.In - for _, x := range x.In { - - // x - e.CopyBytes(x[:]) - - } - - // x.Out maxlen check - if len(x.Out) > 65535 { - return encoder.ErrMaxLenExceeded - } - - // x.Out length check - if uint64(len(x.Out)) > math.MaxUint32 { - return errors.New("x.Out length exceeds math.MaxUint32") - } - - // x.Out length - e.Uint32(uint32(len(x.Out))) - - // x.Out - for _, x := range x.Out { - - // x.Address.Version - e.Uint8(x.Address.Version) - - // x.Address.Key - e.CopyBytes(x.Address.Key[:]) - - // x.Coins - e.Uint64(x.Coins) - - // x.Hours - e.Uint64(x.Hours) - - } - - } - - return nil -} - -// 
decodeGiveTxnsMessage decodes an object of type GiveTxnsMessage from a buffer. -// Returns the number of bytes used from the buffer to decode the object. -// If the buffer not long enough to decode the object, returns encoder.ErrBufferUnderflow. -func decodeGiveTxnsMessage(buf []byte, obj *GiveTxnsMessage) (uint64, error) { - d := &encoder.Decoder{ - Buffer: buf[:], - } - - { - // obj.Transactions - - ul, err := d.Uint32() - if err != nil { - return 0, err - } - - length := int(ul) - if length < 0 || length > len(d.Buffer) { - return 0, encoder.ErrBufferUnderflow - } - - if length > 256 { - return 0, encoder.ErrMaxLenExceeded - } - - if length != 0 { - obj.Transactions = make([]coin.Transaction, length) - - for z1 := range obj.Transactions { - { - // obj.Transactions[z1].Length - i, err := d.Uint32() - if err != nil { - return 0, err - } - obj.Transactions[z1].Length = i - } - - { - // obj.Transactions[z1].Type - i, err := d.Uint8() - if err != nil { - return 0, err - } - obj.Transactions[z1].Type = i - } - - { - // obj.Transactions[z1].InnerHash - if len(d.Buffer) < len(obj.Transactions[z1].InnerHash) { - return 0, encoder.ErrBufferUnderflow - } - copy(obj.Transactions[z1].InnerHash[:], d.Buffer[:len(obj.Transactions[z1].InnerHash)]) - d.Buffer = d.Buffer[len(obj.Transactions[z1].InnerHash):] - } - - { - // obj.Transactions[z1].Sigs - - ul, err := d.Uint32() - if err != nil { - return 0, err - } - - length := int(ul) - if length < 0 || length > len(d.Buffer) { - return 0, encoder.ErrBufferUnderflow - } - - if length > 65535 { - return 0, encoder.ErrMaxLenExceeded - } - - if length != 0 { - obj.Transactions[z1].Sigs = make([]cipher.Sig, length) - - for z3 := range obj.Transactions[z1].Sigs { - { - // obj.Transactions[z1].Sigs[z3] - if len(d.Buffer) < len(obj.Transactions[z1].Sigs[z3]) { - return 0, encoder.ErrBufferUnderflow - } - copy(obj.Transactions[z1].Sigs[z3][:], d.Buffer[:len(obj.Transactions[z1].Sigs[z3])]) - d.Buffer = 
d.Buffer[len(obj.Transactions[z1].Sigs[z3]):] - } - - } - } - } - - { - // obj.Transactions[z1].In - - ul, err := d.Uint32() - if err != nil { - return 0, err - } - - length := int(ul) - if length < 0 || length > len(d.Buffer) { - return 0, encoder.ErrBufferUnderflow - } - - if length > 65535 { - return 0, encoder.ErrMaxLenExceeded - } - - if length != 0 { - obj.Transactions[z1].In = make([]cipher.SHA256, length) - - for z3 := range obj.Transactions[z1].In { - { - // obj.Transactions[z1].In[z3] - if len(d.Buffer) < len(obj.Transactions[z1].In[z3]) { - return 0, encoder.ErrBufferUnderflow - } - copy(obj.Transactions[z1].In[z3][:], d.Buffer[:len(obj.Transactions[z1].In[z3])]) - d.Buffer = d.Buffer[len(obj.Transactions[z1].In[z3]):] - } - - } - } - } - - { - // obj.Transactions[z1].Out - - ul, err := d.Uint32() - if err != nil { - return 0, err - } - - length := int(ul) - if length < 0 || length > len(d.Buffer) { - return 0, encoder.ErrBufferUnderflow - } - - if length > 65535 { - return 0, encoder.ErrMaxLenExceeded - } - - if length != 0 { - obj.Transactions[z1].Out = make([]coin.TransactionOutput, length) - - for z3 := range obj.Transactions[z1].Out { - { - // obj.Transactions[z1].Out[z3].Address.Version - i, err := d.Uint8() - if err != nil { - return 0, err - } - obj.Transactions[z1].Out[z3].Address.Version = i - } - - { - // obj.Transactions[z1].Out[z3].Address.Key - if len(d.Buffer) < len(obj.Transactions[z1].Out[z3].Address.Key) { - return 0, encoder.ErrBufferUnderflow - } - copy(obj.Transactions[z1].Out[z3].Address.Key[:], d.Buffer[:len(obj.Transactions[z1].Out[z3].Address.Key)]) - d.Buffer = d.Buffer[len(obj.Transactions[z1].Out[z3].Address.Key):] - } - - { - // obj.Transactions[z1].Out[z3].Coins - i, err := d.Uint64() - if err != nil { - return 0, err - } - obj.Transactions[z1].Out[z3].Coins = i - } - - { - // obj.Transactions[z1].Out[z3].Hours - i, err := d.Uint64() - if err != nil { - return 0, err - } - obj.Transactions[z1].Out[z3].Hours = i - } - - } - } 
- } - } - } - } - - return uint64(len(buf) - len(d.Buffer)), nil -} - -// decodeGiveTxnsMessageExact decodes an object of type GiveTxnsMessage from a buffer. -// If the buffer not long enough to decode the object, returns encoder.ErrBufferUnderflow. -// If the buffer is longer than required to decode the object, returns encoder.ErrRemainingBytes. -func decodeGiveTxnsMessageExact(buf []byte, obj *GiveTxnsMessage) error { - if n, err := decodeGiveTxnsMessage(buf, obj); err != nil { - return err - } else if n != uint64(len(buf)) { - return encoder.ErrRemainingBytes - } - - return nil -} diff --git a/vendor/github.com/SkycoinProject/skycoin/src/daemon/gnet/README.md b/vendor/github.com/SkycoinProject/skycoin/src/daemon/gnet/README.md deleted file mode 100644 index df43774..0000000 --- a/vendor/github.com/SkycoinProject/skycoin/src/daemon/gnet/README.md +++ /dev/null @@ -1,7 +0,0 @@ -gnet -==== - -[![GoDoc](http://godoc.org/github.com//skycoin/gnet?status.png)](http://godoc.org/github.com/skycoin/gnet) - -[Godoc generated documentation](https://godoc.org/github.com/skycoin/gnet) - diff --git a/vendor/github.com/SkycoinProject/skycoin/src/daemon/gnet/dispatcher.go b/vendor/github.com/SkycoinProject/skycoin/src/daemon/gnet/dispatcher.go deleted file mode 100644 index f878702..0000000 --- a/vendor/github.com/SkycoinProject/skycoin/src/daemon/gnet/dispatcher.go +++ /dev/null @@ -1,209 +0,0 @@ -package gnet - -import ( - "errors" - "fmt" - "math" - "net" - "reflect" - "time" - - "github.com/sirupsen/logrus" - - "github.com/SkycoinProject/skycoin/src/cipher/encoder" - "github.com/SkycoinProject/skycoin/src/util/mathutil" -) - -var ( - // ErrMsgExceedsMaxLen is returned if trying to send a message that exceeds the configured max length - ErrMsgExceedsMaxLen = errors.New("Message exceeds max message length") -) - -// SendResult result of a single message send -type SendResult struct { - Addr string - Message Message - Error error -} - -func newSendResult(addr string, m Message, 
err error) SendResult { - return SendResult{ - Addr: addr, - Message: m, - Error: err, - } -} - -// Serializes a Message over a net.Conn -func sendMessage(conn net.Conn, msg Message, timeout time.Duration, maxMsgLength int) error { - m, err := EncodeMessage(msg) - if err != nil { - return err - } - if len(m) > maxMsgLength { - return ErrMsgExceedsMaxLen - } - return sendByteMessage(conn, m, timeout) -} - -// msgIDStringSafe formats msgID bytes to a string that is safe for logging (e.g. not impacted by ascii control chars) -func msgIDStringSafe(msgID [4]byte) string { - x := fmt.Sprintf("%q", msgID) - return x[1 : len(x)-1] // trim quotes that are added by %q formatting -} - -// Event handler that is called after a Connection sends a complete message -func convertToMessage(id uint64, msg []byte, debugPrint bool) (Message, error) { - msgID := [4]byte{} - if len(msg) < len(msgID) { - logger.WithError(ErrDisconnectTruncatedMessageID).WithField("connID", id).Warning() - return nil, ErrDisconnectTruncatedMessageID - } - - copy(msgID[:], msg[:len(msgID)]) - - if debugPrint { - logger.WithField("msgID", msgIDStringSafe(msgID)).Debug("Received message") - } - - msg = msg[len(msgID):] - t, ok := MessageIDReverseMap[msgID] - if !ok { - logger.WithError(ErrDisconnectUnknownMessage).WithFields(logrus.Fields{ - "msgID": msgIDStringSafe(msgID), - "connID": id, - }).Warning() - return nil, ErrDisconnectUnknownMessage - } - - if debugPrint { - logger.WithFields(logrus.Fields{ - "connID": id, - "messageType": fmt.Sprintf("%v", t), - }).Debugf("convertToMessage") - } - - v := reflect.New(t) - m, ok := (v.Interface()).(Message) - if !ok { - // This occurs only when the user registers an interface that does not - // match the Message interface. 
They should have known about this - // earlier via a call to VerifyMessages - logger.Panic("Message obtained from map does not match Message interface") - return nil, errors.New("MessageIdMaps contain non-Message") - } - - used, err := deserializeMessage(msg, v) - if err != nil { - logger.Critical().WithError(err).WithFields(logrus.Fields{ - "connID": id, - "messageType": fmt.Sprintf("%v", t), - }).Warning("deserializeMessage failed") - return nil, ErrDisconnectMalformedMessage - } - - if used != uint64(len(msg)) { - logger.WithError(ErrDisconnectMessageDecodeUnderflow).WithFields(logrus.Fields{ - "connID": id, - "messageType": fmt.Sprintf("%v", t), - }).Warning() - return nil, ErrDisconnectMessageDecodeUnderflow - } - - return m, nil -} - -// Wraps Serializer.Decode and traps panics as an error -func deserializeMessage(msg []byte, v reflect.Value) (n uint64, err error) { - defer func() { - if r := recover(); r != nil { - logger.Critical().Warningf("Recovering from deserializer panic: %v", r) - switch x := r.(type) { - case string: - err = errors.New(x) - case error: - err = x - default: - err = errors.New("Message deserialization failed") - } - } - }() - - iface := v.Interface() - x, ok := iface.(Serializer) - if !ok { - return 0, errors.New("deserializeMessage object does not have Serializer interface") - } - - return x.Decode(msg) -} - -// EncodeMessage packs a Message into []byte containing length, id and data -func EncodeMessage(msg Serializer) ([]byte, error) { - t := reflect.ValueOf(msg).Elem().Type() - - // Lookup message ID - msgID, succ := MessageIDMap[t] - if !succ { - logger.Panicf("Attempted to serialize message struct not in MessageIDMap: %v", msg) - } - if uint64(len(msgID)) > math.MaxUint32 { - return nil, errors.New("Message ID length exceeds math.MaxUint32") - } - - // Compute size of encoded Message object - bMsgLen := msg.EncodeSize() - if bMsgLen > math.MaxUint32 { - return nil, errors.New("Message length exceeds math.MaxUint32") - } - - // 
Compute message + message ID length - bLen, err := mathutil.AddUint32(uint32(bMsgLen), uint32(len(msgID))) - if err != nil { - return nil, err - } - - // Serialize total message length - bLenPrefix := encoder.SerializeUint32(bLen) - if uint64(len(bLenPrefix)) > math.MaxUint32 { - return nil, errors.New("Message length prefix length exceeds math.MaxUint32") - } - - mLen, err := mathutil.AddUint32(bLen, uint32(len(bLenPrefix))) - if err != nil { - return nil, err - } - - // Allocate message bytes - m := make([]byte, mLen) - - // Write the total message length to the buffer - copy(m[:], bLenPrefix[:]) - - // Write the message ID to the buffer - copy(m[len(bLenPrefix):], msgID[:]) - - // Encode the message into the message buffer - if err := msg.Encode(m[len(bLenPrefix)+len(msgID):]); err != nil { - return nil, err - } - - return m, nil -} - -// Sends []byte over a net.Conn -var sendByteMessage = func(conn net.Conn, msg []byte, timeout time.Duration) error { - deadline := time.Time{} - if timeout != 0 { - deadline = time.Now().Add(timeout) - } - if err := conn.SetWriteDeadline(deadline); err != nil { - return err - } - if _, err := conn.Write(msg); err != nil { - return &WriteError{ - Err: err, - } - } - return nil -} diff --git a/vendor/github.com/SkycoinProject/skycoin/src/daemon/gnet/message.go b/vendor/github.com/SkycoinProject/skycoin/src/daemon/gnet/message.go deleted file mode 100644 index a67aa39..0000000 --- a/vendor/github.com/SkycoinProject/skycoin/src/daemon/gnet/message.go +++ /dev/null @@ -1,140 +0,0 @@ -package gnet - -import ( - "reflect" -) - -const messagePrefixLength = 4 - -// MessagePrefix message prefix identifies a message -type MessagePrefix [messagePrefixLength]byte - -// MessagePrefixFromString creates MessagePrefix from string -func MessagePrefixFromString(prefix string) MessagePrefix { - if len(prefix) == 0 || len(prefix) > 4 { - logger.Panicf("Invalid prefix %s", prefix) - } - p := MessagePrefix{} - for i, c := range prefix { - p[i] = 
byte(c) - } - for i := len(prefix); i < 4; i++ { - p[i] = 0x00 - } - return p -} - -// Serializer serialization interface -type Serializer interface { - EncodeSize() uint64 - Encode([]byte) error - Decode([]byte) (uint64, error) -} - -// Handler message handler interface -type Handler interface { - // State is user-defined application state that is attached to the Dispatcher. - // If a non-nil error is returned, the connection will be disconnected. - Handle(context *MessageContext, state interface{}) error -} - -// Message message interface -type Message interface { - Handler - Serializer -} - -// MessageContext message context -type MessageContext struct { - ConnID uint64 // connection message was received from - Addr string -} - -// NewMessageContext creates MessageContext -func NewMessageContext(conn *Connection) *MessageContext { - if conn.Conn != nil { - return &MessageContext{ConnID: conn.ID, Addr: conn.Addr()} - } - return &MessageContext{ConnID: conn.ID} -} - -// MessageIDMap maps message types to their ids -var MessageIDMap = make(map[reflect.Type]MessagePrefix) - -// MessageIDReverseMap maps message ids to their types -var MessageIDReverseMap = make(map[MessagePrefix]reflect.Type) - -var registeredMsgsCount = 0 - -// RegisterMessage registers a message struct for recognition by the message handlers. 
-func RegisterMessage(prefix MessagePrefix, msg interface{}) { - t := reflect.TypeOf(msg) - id := MessagePrefix{} - copy(id[:], prefix[:]) - _, exists := MessageIDReverseMap[id] - if exists { - logger.Panicf("Attempted to register message prefix %s twice", string(id[:])) - } - _, exists = MessageIDMap[t] - if exists { - logger.Panicf("Attempts to register message type %v twice", t) - } - MessageIDMap[t] = id - MessageIDReverseMap[id] = t - - registeredMsgsCount++ -} - -// VerifyMessages calls logger.Panic if message registration violates sanity checks -func VerifyMessages() { - if registeredMsgsCount != len(MessageIDMap) { - logger.Panic("MessageIDMap was altered without using RegisterMessage") - } - if registeredMsgsCount != len(MessageIDReverseMap) { - logger.Panic("MessageIDReverseMap was altered without using RegisterMessage") - } - - for t, k := range MessageIDMap { - // No empty prefixes allowed - if k[0] == 0x00 { - logger.Panic("No empty message prefixes allowed") - } - // No non-null bytes allowed after a nul byte - hasEmpty := false - for _, b := range k { - if b == 0x00 { - hasEmpty = true - } else if hasEmpty { - logger.Panic("No non-null bytes allowed after a nul byte") - } - } - // All characters must be non-whitespace printable ascii chars/digits - // No punctation - for _, b := range k { - if !((b >= '0' && b <= '9') || (b >= 'A' && b <= 'Z') || - (b >= 'a' && b <= 'z') || b == 0x00) { - logger.Panicf("Invalid prefix byte %v", b) - } - } - - // Confirm that all registered messages support the Message interface - // This should only be untrue if the user modified the message map - // directly - mptr := reflect.PtrTo(t) - if !mptr.Implements(reflect.TypeOf((*Message)(nil)).Elem()) { - logger.Panicf("Invalid message at ID %s: Message must implement the gnet.Message interface", string(k[:])) - } - } - if len(MessageIDMap) != len(MessageIDReverseMap) { - logger.Panic("MessageIdMap mismatch") - } - // No empty prefixes - // All prefixes must be 0 padded 
-} - -// EraseMessages wipes all recorded message types -func EraseMessages() { - MessageIDMap = make(map[reflect.Type]MessagePrefix) - MessageIDReverseMap = make(map[MessagePrefix]reflect.Type) - registeredMsgsCount = 0 -} diff --git a/vendor/github.com/SkycoinProject/skycoin/src/daemon/gnet/pool.go b/vendor/github.com/SkycoinProject/skycoin/src/daemon/gnet/pool.go deleted file mode 100644 index 7142c39..0000000 --- a/vendor/github.com/SkycoinProject/skycoin/src/daemon/gnet/pool.go +++ /dev/null @@ -1,1066 +0,0 @@ -/* -Package gnet is the core networking library -*/ -package gnet - -import ( - "bufio" - "bytes" - "errors" - "fmt" - "net" - "reflect" - "sync" - "time" - - "io" - - "github.com/sirupsen/logrus" - - "github.com/SkycoinProject/skycoin/src/cipher/encoder" - "github.com/SkycoinProject/skycoin/src/daemon/strand" - "github.com/SkycoinProject/skycoin/src/util/elapse" - "github.com/SkycoinProject/skycoin/src/util/logging" -) - -// DisconnectReason is passed to ConnectionPool's DisconnectCallback -type DisconnectReason error - -const ( - receiveMessageDurationThreshold = 500 * time.Millisecond - readLoopDurationThreshold = 10 * time.Second - sendInMsgChanDurationThreshold = 5 * time.Second - sendLoopDurationThreshold = 500 * time.Millisecond -) - -var ( - // ErrDisconnectSetReadDeadlineFailed set read deadline failed - ErrDisconnectSetReadDeadlineFailed DisconnectReason = errors.New("SetReadDeadline failed") - // ErrDisconnectInvalidMessageLength invalid message length - ErrDisconnectInvalidMessageLength DisconnectReason = errors.New("Invalid message length") - // ErrDisconnectMalformedMessage malformed message - ErrDisconnectMalformedMessage DisconnectReason = errors.New("Malformed message body") - // ErrDisconnectUnknownMessage unknown message - ErrDisconnectUnknownMessage DisconnectReason = errors.New("Unknown message ID") - // ErrDisconnectShutdown shutting down the client - ErrDisconnectShutdown DisconnectReason = errors.New("Shutdown") - // 
ErrDisconnectMessageDecodeUnderflow message data did not fully decode to a message object - ErrDisconnectMessageDecodeUnderflow DisconnectReason = errors.New("Message data did not fully decode to a message object") - // ErrDisconnectTruncatedMessageID message data was too short to contain a message ID - ErrDisconnectTruncatedMessageID DisconnectReason = errors.New("Message data was too short to contain a message ID") - - // ErrConnectionPoolClosed error message indicates the connection pool is closed - ErrConnectionPoolClosed = errors.New("Connection pool is closed") - // ErrWriteQueueFull write queue is full - ErrWriteQueueFull = errors.New("Write queue full") - // ErrNoReachableConnections when broadcasting a message, no connections were available to send a message to - ErrNoReachableConnections = errors.New("All pool connections are unreachable at this time") - // ErrNoMatchingConnections when broadcasting a message, no connections were found for the provided addresses - ErrNoMatchingConnections = errors.New("No connections found for broadcast addresses") - // ErrPoolEmpty when broadcasting a message, the connection pool was empty - ErrPoolEmpty = errors.New("Connection pool is empty after filtering connections") - // ErrConnectionExists connection exists - ErrConnectionExists = errors.New("Connection exists") - // ErrMaxIncomingConnectionsReached max incoming connections reached - ErrMaxIncomingConnectionsReached = errors.New("Max incoming connections reached") - // ErrMaxOutgoingConnectionsReached max outgoing connections reached - ErrMaxOutgoingConnectionsReached = errors.New("Max outgoing connections reached") - // ErrMaxOutgoingDefaultConnectionsReached max outgoing default connections reached - ErrMaxOutgoingDefaultConnectionsReached = errors.New("Max outgoing default connections reached") - // ErrNoAddresses no addresses were provided to BroadcastMessage - ErrNoAddresses = errors.New("No addresses provided") - - // Logger - logger = 
logging.MustGetLogger("gnet") -) - -// ReadError connection read error -type ReadError struct { - Err error -} - -func (e ReadError) Error() string { - return fmt.Sprintf("read failed: %v", e.Err) -} - -// WriteError connection read error -type WriteError struct { - Err error -} - -func (e WriteError) Error() string { - return fmt.Sprintf("write failed: %v", e.Err) -} - -// Config gnet config -type Config struct { - // Address to listen on. Leave empty for arbitrary assignment - Address string - // Port to listen on. Set to 0 for arbitrary assignment - Port uint16 - // Maximum total connections. Must be >= MaxOutgoingConnections + MaxDefaultPeerOutgoingConnections. - MaxConnections int - // Maximum outgoing connections - MaxOutgoingConnections int - // Maximum allowed default outgoing connection number - MaxDefaultPeerOutgoingConnections int - // Messages greater than length are rejected and the sender disconnected - MaxIncomingMessageLength int - // Messages greater than length are not sent and an error is reported in a SendResult - MaxOutgoingMessageLength int - // Timeout is the timeout for dialing new connections. Use a - // timeout of 0 to ignore timeout. - DialTimeout time.Duration - // Timeout for reading from a connection. Set to 0 to default to the - // system's timeout - ReadTimeout time.Duration - // Timeout for writing to a connection. Set to 0 to default to the - // system's timeout - WriteTimeout time.Duration - // Message sent event buffers - SendResultsSize int - // Individual connections' send queue size. 
This should be increased - // if send volume per connection is high, so as not to block - ConnectionWriteQueueSize int - // Triggered on client disconnect - DisconnectCallback DisconnectCallback - // Triggered on client connect - ConnectCallback ConnectCallback - // Triggered on client connect failure - ConnectFailureCallback ConnectFailureCallback - // Print debug logs - DebugPrint bool - // Default "trusted" peers - DefaultConnections []string - // Default connections map - defaultConnections map[string]struct{} -} - -// NewConfig returns a Config with defaults set -func NewConfig() Config { - return Config{ - Address: "", - Port: 0, - MaxConnections: 128, - MaxOutgoingMessageLength: 256 * 1024, - MaxIncomingMessageLength: 1024 * 1024, - MaxDefaultPeerOutgoingConnections: 1, - DialTimeout: time.Second * 30, - ReadTimeout: time.Second * 30, - WriteTimeout: time.Second * 30, - SendResultsSize: 2048, - ConnectionWriteQueueSize: 128, - DisconnectCallback: nil, - ConnectCallback: nil, - DebugPrint: false, - defaultConnections: make(map[string]struct{}), - } -} - -const ( - // Byte size of the length prefix in message, sizeof(int32) - messageLengthPrefixSize = 4 -) - -// Connection is stored by the ConnectionPool -type Connection struct { - // Key in ConnectionPool.Pool - ID uint64 - // TCP connection - Conn net.Conn - // Message buffer - Buffer *bytes.Buffer - // Reference back to ConnectionPool container - ConnectionPool *ConnectionPool - // Last time a message was fully parsed and handled - LastReceived time.Time - // Last time a message was sent to the connection - LastSent time.Time - // Message send queue. 
- WriteQueue chan Message - Solicited bool -} - -// NewConnection creates a new Connection tied to a ConnectionPool -func NewConnection(pool *ConnectionPool, id uint64, conn net.Conn, writeQueueSize int, solicited bool) *Connection { - return &Connection{ - ID: id, - Conn: conn, - Buffer: &bytes.Buffer{}, - ConnectionPool: pool, - LastReceived: Now(), - LastSent: Now(), - WriteQueue: make(chan Message, writeQueueSize), - Solicited: solicited, - } -} - -// Addr returns remote address -func (conn *Connection) Addr() string { - return conn.Conn.RemoteAddr().String() -} - -// String returns connection address -func (conn *Connection) String() string { - return conn.Addr() -} - -// Close close the connection and write queue -func (conn *Connection) Close() error { - err := conn.Conn.Close() - close(conn.WriteQueue) - conn.Buffer = &bytes.Buffer{} - return err -} - -// DisconnectCallback triggered on client disconnect -type DisconnectCallback func(addr string, id uint64, reason DisconnectReason) - -// ConnectCallback triggered on client connect -type ConnectCallback func(addr string, id uint64, solicited bool) - -// ConnectFailureCallback trigger on client connect failure -type ConnectFailureCallback func(addr string, solicited bool, err error) - -// ConnectionPool connection pool -type ConnectionPool struct { - // Configuration parameters - Config Config - // Channel for async message sending - SendResults chan SendResult - // All connections, indexed by ConnId - pool map[uint64]*Connection - // All connections, indexed by address - addresses map[string]*Connection - // connected default peer connections - defaultOutgoingConnections map[string]struct{} - // connected outgoing connections - outgoingConnections map[string]struct{} - // User-defined state to be passed into message handlers - messageState interface{} - // Connection ID counter - connID uint64 - // Listening connection - listener net.Listener - listenerLock sync.Mutex - // operations channel - reqC chan 
strand.Request - // quit channel - quit chan struct{} - done chan struct{} - strandDone chan struct{} - wg sync.WaitGroup -} - -// NewConnectionPool creates a new ConnectionPool that will listen on -// Config.Port upon StartListen. State is an application defined object that -// will be passed to a Message's Handle(). -func NewConnectionPool(c Config, state interface{}) (*ConnectionPool, error) { - for _, p := range c.DefaultConnections { - c.defaultConnections[p] = struct{}{} - } - - if c.MaxConnections < c.MaxOutgoingConnections+c.MaxDefaultPeerOutgoingConnections { - return nil, errors.New("MaxConnections must be >= MaxOutgoingConnections + MaxDefaultPeerOutgoingConnections") - } - - return &ConnectionPool{ - Config: c, - pool: make(map[uint64]*Connection), - addresses: make(map[string]*Connection), - defaultOutgoingConnections: make(map[string]struct{}), - outgoingConnections: make(map[string]struct{}), - SendResults: make(chan SendResult, c.SendResultsSize), - messageState: state, - quit: make(chan struct{}), - done: make(chan struct{}), - strandDone: make(chan struct{}), - reqC: make(chan strand.Request), - }, nil -} - -// Run starts the connection pool -func (pool *ConnectionPool) Run() error { - defer close(pool.done) - defer logger.Info("Connection pool closed") - - // The strand processing goroutine must be started before any error can be - // returned from Run(), otherwise the Shutdown() call will block if an error occurred - pool.wg.Add(1) - go func() { - defer pool.wg.Done() - pool.processStrand() - }() - - // start the connection accept loop - addr := fmt.Sprintf("%s:%v", pool.Config.Address, pool.Config.Port) - logger.Infof("Listening for connections on %s...", addr) - - ln, err := net.Listen("tcp", addr) - if err != nil { - return err - } - - pool.listenerLock.Lock() - pool.listener = ln - pool.listenerLock.Unlock() - -loop: - for { - conn, err := ln.Accept() - if err != nil { - // When Accept() returns with a non-nil error, we check the quit - // 
channel to see if we should continue or quit - select { - case <-pool.quit: - break loop - default: - // without the default case the select will block. - logger.Error(err.Error()) - continue - } - } - - pool.wg.Add(1) - go func() { - defer pool.wg.Done() - if err := pool.handleConnection(conn, false); err != nil { - logger.WithFields(logrus.Fields{ - "addr": conn.RemoteAddr(), - "outgoing": false, - }).WithError(err).Error("pool.handleConnection") - } - }() - } - pool.wg.Wait() - return nil -} - -// RunOffline runs the pool in offline mode. No connections will be accepted, -// but strand requests are processed. -func (pool *ConnectionPool) RunOffline() error { - defer close(pool.done) - pool.processStrand() - return nil -} - -func (pool *ConnectionPool) processStrand() { - defer close(pool.strandDone) - for { - select { - case <-pool.quit: - return - case req := <-pool.reqC: - if err := req.Func(); err != nil { - logger.WithField("operation", req.Name).WithError(err).Error("strand req.Func failed") - } - } - } -} - -// Shutdown gracefully shutdown the connection pool -func (pool *ConnectionPool) Shutdown() { - logger.Info("ConnectionPool.Shutdown called") - close(pool.quit) - logger.Info("ConnectionPool.Shutdown closed pool.quit") - - // Wait for all strand() calls to finish - logger.Info("ConnectionPool.Shutdown waiting for strandDone") - <-pool.strandDone - - logger.Info("ConnectionPool.Shutdown closing the listener") - - // Close to listener to prevent new connections - pool.listenerLock.Lock() - if pool.listener != nil { - if err := pool.listener.Close(); err != nil { - logger.WithError(err).Warning("pool.listener.Close error") - } - } - pool.listener = nil - pool.listenerLock.Unlock() - - logger.Info("ConnectionPool.Shutdown disconnecting all connections") - - // In readData, reader.Read() sometimes blocks instead of returning an error when the - // listener is closed. - // Directly close all connections before closing the listener. 
- // TODO -- could conn.Close() block too? - pool.disconnectAll() - - if len(pool.pool) != 0 { - logger.Critical().Warning("pool.pool is not empty after calling pool.disconnectAll()") - } - if len(pool.addresses) != 0 { - logger.Critical().Warning("pool.addresses is not empty after calling pool.disconnectAll()") - } - - logger.Info("ConnectionPool.Shutdown waiting for done") - - <-pool.done -} - -// strand ensures all read and write action of pool's member variable are in one thread -func (pool *ConnectionPool) strand(name string, f func() error) error { - name = fmt.Sprintf("daemon.gnet.ConnectionPool.%s", name) - return strand.Strand(logger, pool.reqC, name, f, pool.quit, ErrConnectionPoolClosed) -} - -// ListeningAddress returns the address on which the ConnectionPool listens on -func (pool *ConnectionPool) ListeningAddress() (net.Addr, error) { - if pool.listener == nil { - return nil, errors.New("Not listening, call StartListen first") - } - return pool.listener.Addr(), nil -} - -func (pool *ConnectionPool) canConnect(a string, solicited bool) error { - if pool.isConnExist(a) { - return ErrConnectionExists - } - - if solicited { - if _, ok := pool.Config.defaultConnections[a]; ok && pool.isMaxOutgoingDefaultConnectionsReached() { - return ErrMaxOutgoingDefaultConnectionsReached - } else if pool.isMaxOutgoingConnectionsReached() { - return ErrMaxOutgoingConnectionsReached - } - } else if pool.isMaxIncomingConnectionsReached() { - return ErrMaxIncomingConnectionsReached - } - - return nil -} - -// newConnection creates a new Connection around a net.Conn. Trying to make a connection -// to an address that is already connected will failed. 
-func (pool *ConnectionPool) newConnection(conn net.Conn, solicited bool) (*Connection, error) { - a := conn.RemoteAddr().String() - - if err := pool.canConnect(a, solicited); err != nil { - return nil, err - } - - if solicited { - pool.outgoingConnections[a] = struct{}{} - - if _, ok := pool.Config.defaultConnections[a]; ok { - pool.defaultOutgoingConnections[a] = struct{}{} - l := len(pool.defaultOutgoingConnections) - logger.WithField("addr", a).Debugf("%d/%d outgoing default connections in use", l, pool.Config.MaxDefaultPeerOutgoingConnections) - } - } - - // ID must start at 1; in case connID overflows back to 0, force it to 1 - pool.connID++ - if pool.connID == 0 { - pool.connID = 1 - } - - nc := NewConnection(pool, pool.connID, conn, pool.Config.ConnectionWriteQueueSize, solicited) - - pool.pool[nc.ID] = nc - pool.addresses[a] = nc - - return nc, nil -} - -// Creates a Connection and begins its read and write loop -func (pool *ConnectionPool) handleConnection(conn net.Conn, solicited bool) error { - defer logger.WithField("addr", conn.RemoteAddr()).Debug("Connection closed") - addr := conn.RemoteAddr().String() - - c, err := func() (c *Connection, err error) { - // TODO -- when limits in newConnection() are reached, should we allow the peer - // to be added anyway, so that we can disconnect it normally and send a disconnect packet? - // It would have to allowed to complete the handshake, otherwise the DISC packet will be ignored - // Or we would have to permit a DISC packet before an INTR - // But the read/write loop would still need to be started - // A ConnectEvent would need to be triggered, or else the DisconnectEvent gnet ID will not match the - // pending connection's zero gnet ID. 
- defer func() { - if err != nil { - if closeErr := conn.Close(); closeErr != nil { - logger.WithError(closeErr).WithField("addr", addr).Error("handleConnection conn.Close") - } - } - }() - - err = pool.strand("handleConnection", func() error { - var err error - c, err = pool.newConnection(conn, solicited) - if err != nil { - return err - } - - if pool.Config.ConnectCallback != nil { - pool.Config.ConnectCallback(c.Addr(), c.ID, solicited) - } - - return nil - }) - - return - }() - - // TODO -- this error is not fully propagated back to a caller of Connect() so the daemon state - // can get stuck in pending - if err != nil { - logger.WithError(err).WithField("addr", conn.RemoteAddr()).Debug("handleConnection: newConnection failed") - if pool.Config.ConnectFailureCallback != nil { - pool.Config.ConnectFailureCallback(addr, solicited, err) - } - return err - } - - msgC := make(chan []byte, 32) - - type methodErr struct { - method string - err error - } - errC := make(chan methodErr, 3) - - var wg sync.WaitGroup - wg.Add(1) - qc := make(chan struct{}) - go func() { - defer wg.Done() - if err := pool.readLoop(c, msgC, qc); err != nil { - errC <- methodErr{ - method: "readLoop", - err: err, - } - } - }() - - wg.Add(1) - go func() { - defer wg.Done() - if err := pool.sendLoop(c, pool.Config.WriteTimeout, pool.Config.MaxOutgoingMessageLength, qc); err != nil { - errC <- methodErr{ - method: "sendLoop", - err: err, - } - } - }() - - wg.Add(1) - go func() { - defer wg.Done() - elapser := elapse.NewElapser(receiveMessageDurationThreshold, logger) - defer elapser.CheckForDone() - - for msg := range msgC { - elapser.Register(fmt.Sprintf("pool.receiveMessage address=%s", addr)) - if err := pool.receiveMessage(c, msg); err != nil { - errC <- methodErr{ - method: "receiveMessage", - err: err, - } - return - } - elapser.CheckForDone() - } - }() - - select { - case <-pool.quit: - if err := conn.Close(); err != nil { - logger.WithError(err).WithField("addr", 
addr).Error("conn.Close") - } - case mErr := <-errC: - err = mErr.err - logger.WithError(mErr.err).WithFields(logrus.Fields{ - "addr": addr, - "method": mErr.method, - }).Error("handleConnection failure") - - // This Disconnect does not send a DISC packet because it is inside gnet. - // A DISC packet is not useful at this point, because the error is most likely - // that the connection is unreachable. - // However, it may also be that the connection sent data that could not be deserialized - // to a message. - if err := pool.Disconnect(c.Addr(), mErr.err); err != nil { - logger.WithError(err).WithField("addr", addr).Error("Disconnect") - } - } - close(qc) - - wg.Wait() - - return err -} - -func (pool *ConnectionPool) readLoop(conn *Connection, msgChan chan []byte, qc chan struct{}) error { - defer close(msgChan) - // read data from connection - reader := bufio.NewReader(conn.Conn) - buf := make([]byte, 1024) - - elapser := elapse.NewElapser(readLoopDurationThreshold, logger) - sendInMsgChanElapser := elapse.NewElapser(sendInMsgChanDurationThreshold, logger) - - defer elapser.CheckForDone() - defer sendInMsgChanElapser.CheckForDone() - - for { - elapser.Register(fmt.Sprintf("readLoop addr=%s", conn.Addr())) - deadline := time.Time{} - if pool.Config.ReadTimeout != 0 { - deadline = time.Now().Add(pool.Config.ReadTimeout) - } - if err := conn.Conn.SetReadDeadline(deadline); err != nil { - return ErrDisconnectSetReadDeadlineFailed - } - data, err := readData(reader, buf) - if err != nil { - return err - } - - if data == nil { - continue - } - - // write data to buffer - if _, err := conn.Buffer.Write(data); err != nil { - return err - } - // decode data - datas, err := decodeData(conn.Buffer, pool.Config.MaxIncomingMessageLength) - if err != nil { - return err - } - for _, d := range datas { - // use select to avoid the goroutine leak, - // because if msgChan has no receiver this goroutine will leak - select { - case <-qc: - return nil - case <-pool.quit: - return nil 
- case msgChan <- d: - default: - return errors.New("readLoop msgChan is closed or full") - } - } - sendInMsgChanElapser.CheckForDone() - } -} - -func (pool *ConnectionPool) sendLoop(conn *Connection, timeout time.Duration, maxMsgLength int, qc chan struct{}) error { - elapser := elapse.NewElapser(sendLoopDurationThreshold, logger) - defer elapser.CheckForDone() - - for { - elapser.CheckForDone() - select { - case <-pool.quit: - return nil - case <-qc: - return nil - case m := <-conn.WriteQueue: - elapser.Register(fmt.Sprintf("conn.WriteQueue address=%s", conn.Addr())) - if m == nil { - continue - } - - err := sendMessage(conn.Conn, m, timeout, maxMsgLength) - - // Update last sent before writing to SendResult, - // this allows a write to SendResult to be used as a sync marker, - // since no further action in this block will happen after the write. - if err == nil { - if err := pool.updateLastSent(conn.Addr(), Now()); err != nil { - logger.WithField("addr", conn.Addr()).WithError(err).Warning("updateLastSent failed") - } - } - - sr := newSendResult(conn.Addr(), m, err) - select { - case <-qc: - return nil - case pool.SendResults <- sr: - default: - logger.WithField("addr", conn.Addr()).Warning("SendResults queue full") - } - - if err != nil { - return err - } - } - } -} - -func readData(reader io.Reader, buf []byte) ([]byte, error) { - c, err := reader.Read(buf) - if err != nil { - return nil, &ReadError{ - Err: err, - } - } - if c == 0 { - return nil, nil - } - data := make([]byte, c) - copy(data, buf) - return data, nil -} - -// decode data from buffer. 
-func decodeData(buf *bytes.Buffer, maxMsgLength int) ([][]byte, error) { - dataArray := [][]byte{} - for buf.Len() > messageLengthPrefixSize { - prefix := buf.Bytes()[:messageLengthPrefixSize] - // decode message length - tmpLength, _, err := encoder.DeserializeUint32(prefix) - if err != nil { - // encoder.DeserializeAtomic should only return an error if there wasn't - // enough data in buf to read the integer, but the prefix buf length - // is already ensured to be long enough - logger.Panicf("encoder.DeserializeUint32 failed unexpectedly: %v", err) - } - - length := int(tmpLength) - - // Disconnect if we received an invalid length - if length < messagePrefixLength { - logger.WithFields(logrus.Fields{ - "length": length, - "messagePrefixLength": messagePrefixLength, - }).Warningf("decodeData: length < messagePrefixLength") - return [][]byte{}, ErrDisconnectInvalidMessageLength - } - - if length > maxMsgLength { - logger.WithFields(logrus.Fields{ - "length": length, - "maxMsgLength": maxMsgLength, - }).Warning("decodeData: length > maxMsgLength") - return [][]byte{}, ErrDisconnectInvalidMessageLength - } - - if buf.Len()-messageLengthPrefixSize < length { - return [][]byte{}, nil - } - - buf.Next(messageLengthPrefixSize) // strip the length prefix - data := make([]byte, length) - _, err = buf.Read(data) - if err != nil { - return [][]byte{}, err - } - - dataArray = append(dataArray, data) - } - return dataArray, nil -} - -// isConnExist check if the connection of address does exist -func (pool *ConnectionPool) isConnExist(addr string) bool { - _, ok := pool.addresses[addr] - return ok -} - -func (pool *ConnectionPool) isMaxIncomingConnectionsReached() bool { - return len(pool.pool) >= (pool.Config.MaxConnections - pool.Config.MaxOutgoingConnections - pool.Config.MaxDefaultPeerOutgoingConnections) -} - -func (pool *ConnectionPool) isMaxOutgoingConnectionsReached() bool { - return len(pool.outgoingConnections) >= pool.Config.MaxOutgoingConnections -} - -func (pool 
*ConnectionPool) isMaxOutgoingDefaultConnectionsReached() bool { - return len(pool.defaultOutgoingConnections) >= pool.Config.MaxDefaultPeerOutgoingConnections -} - -func (pool *ConnectionPool) updateLastSent(addr string, t time.Time) error { - return pool.strand("updateLastSent", func() error { - if conn, ok := pool.addresses[addr]; ok { - conn.LastSent = t - } - return nil - }) -} - -func (pool *ConnectionPool) updateLastRecv(addr string, t time.Time) error { - return pool.strand("updateLastRecv", func() error { - if conn, ok := pool.addresses[addr]; ok { - conn.LastReceived = t - } - return nil - }) -} - -// GetConnection returns a connection copy if exist -func (pool *ConnectionPool) GetConnection(addr string) (*Connection, error) { - var conn *Connection - if err := pool.strand("GetConnection", func() error { - if c, ok := pool.addresses[addr]; ok { - // copy connection - cc := *c - conn = &cc - } - return nil - }); err != nil { - return nil, err - } - - return conn, nil -} - -// Connect to an address -func (pool *ConnectionPool) Connect(address string) error { - if err := pool.strand("canConnect", func() error { - return pool.canConnect(address, true) - }); err != nil { - return err - } - - logger.WithField("addr", address).Debugf("Making TCP connection") - conn, err := net.DialTimeout("tcp", address, pool.Config.DialTimeout) - if err != nil { - return err - } - - pool.wg.Add(1) - go func() { - defer pool.wg.Done() - if err := pool.handleConnection(conn, true); err != nil { - logger.WithFields(logrus.Fields{ - "addr": conn.RemoteAddr(), - "outgoing": true, - }).WithError(err).Error("pool.handleConnection") - } - }() - return nil -} - -// Disconnect removes a connection from the pool by address and invokes DisconnectCallback -func (pool *ConnectionPool) Disconnect(addr string, r DisconnectReason) error { - return pool.strand("Disconnect", func() error { - logger.WithFields(logrus.Fields{ - "addr": addr, - "reason": r, - }).Debug("Disconnecting") - - // checks 
if the address is default node address - isDefaultOutgoingConn := false - if _, ok := pool.Config.defaultConnections[addr]; ok { - if _, ok := pool.outgoingConnections[addr]; ok { - isDefaultOutgoingConn = true - } - } - - conn := pool.disconnect(addr, r) - - if conn == nil { - return errors.New("Disconnect: connection does not exist") - } - - if isDefaultOutgoingConn { - l := len(pool.defaultOutgoingConnections) - logger.Debugf("%d/%d default connections in use", l, pool.Config.MaxDefaultPeerOutgoingConnections) - } - - if pool.Config.DisconnectCallback != nil { - pool.Config.DisconnectCallback(addr, conn.ID, r) - } - - return nil - }) -} - -func (pool *ConnectionPool) disconnect(addr string, r DisconnectReason) *Connection { - conn, ok := pool.addresses[addr] - if !ok { - return nil - } - - fields := logrus.Fields{ - "addr": addr, - "id": conn.ID, - } - - delete(pool.pool, conn.ID) - delete(pool.addresses, addr) - delete(pool.defaultOutgoingConnections, addr) - delete(pool.outgoingConnections, addr) - if err := conn.Close(); err != nil { - logger.WithError(err).WithFields(fields).Error("conn.Close") - } - - logger.WithFields(fields).WithField("reason", r).Debug("Closed connection and removed from pool") - - return conn -} - -// disconnectAll disconnects all connections. 
Only safe to call in Shutdown() -func (pool *ConnectionPool) disconnectAll() { - for _, conn := range pool.pool { - addr := conn.Addr() - pool.disconnect(addr, ErrDisconnectShutdown) - } -} - -// GetConnections returns an copy of pool connections -func (pool *ConnectionPool) GetConnections() ([]Connection, error) { - conns := []Connection{} - if err := pool.strand("GetConnections", func() error { - for _, conn := range pool.pool { - conns = append(conns, *conn) - } - return nil - }); err != nil { - return nil, err - } - return conns, nil -} - -// Size returns the pool size -func (pool *ConnectionPool) Size() (l int, err error) { - err = pool.strand("Size", func() error { - l = len(pool.pool) - return nil - }) - return -} - -// SendMessage sends a Message to a Connection -func (pool *ConnectionPool) SendMessage(addr string, msg Message) error { - if pool.Config.DebugPrint { - logger.WithField("msgType", reflect.TypeOf(msg)).Debug("SendMessage") - } - - return pool.strand("SendMessage", func() error { - if conn, ok := pool.addresses[addr]; ok { - select { - case conn.WriteQueue <- msg: - default: - logger.Critical().WithField("addr", addr).Info("Write queue full") - return ErrWriteQueueFull - } - } else { - return fmt.Errorf("Tried to send %T to %s, but we are not connected", msg, addr) - } - return nil - }) -} - -// BroadcastMessage sends a Message to all connections specified in addrs. -// If a connection does not exist for a given address, it is skipped. -// If no messages were written to any connection, an error is returned. -// Returns the gnet IDs of connections that the message was queued for sending to. -// Note that actual sending can still fail later, if the connection drops before the message is sent. 
-func (pool *ConnectionPool) BroadcastMessage(msg Message, addrs []string) ([]uint64, error) { - if pool.Config.DebugPrint { - logger.WithField("msgType", reflect.TypeOf(msg)).Debug("BroadcastMessage") - } - - if len(addrs) == 0 { - return nil, ErrNoAddresses - } - - queuedConns := make([]uint64, 0, len(addrs)) - - if err := pool.strand("BroadcastMessage", func() error { - if len(pool.pool) == 0 { - return ErrPoolEmpty - } - - fullWriteQueue := 0 - foundConns := 0 - - for _, addr := range addrs { - if conn, ok := pool.addresses[addr]; ok { - foundConns++ - select { - case conn.WriteQueue <- msg: - queuedConns = append(queuedConns, conn.ID) - default: - logger.Critical().WithFields(logrus.Fields{ - "addr": conn.Addr(), - "id": conn.ID, - }).Info("Write queue full") - fullWriteQueue++ - } - } - } - - if foundConns == 0 { - return ErrNoMatchingConnections - } - - if fullWriteQueue == foundConns { - return ErrNoReachableConnections - } - - return nil - }); err != nil { - return nil, err - } - - return queuedConns, nil -} - -// Unpacks incoming bytes to a Message and calls the message handler. If -// the bytes cannot be converted to a Message, the error is returned as the -// first return value. Otherwise, error will be nil and DisconnectReason will -// be the value returned from the message handler. 
-func (pool *ConnectionPool) receiveMessage(c *Connection, msg []byte) error { - m, err := convertToMessage(c.ID, msg, pool.Config.DebugPrint) - if err != nil { - return err - } - if err := pool.updateLastRecv(c.Addr(), Now()); err != nil { - return err - } - return m.Handle(NewMessageContext(c), pool.messageState) -} - -// SendPings sends a ping if our last message sent was over pingRate ago -func (pool *ConnectionPool) SendPings(rate time.Duration, msg Message) error { - now := time.Now().UTC() - var addrs []string - if err := pool.strand("SendPings", func() error { - for _, conn := range pool.pool { - if conn.LastSent.Add(rate).Before(now) { - addrs = append(addrs, conn.Addr()) - } - } - return nil - }); err != nil { - return err - } - - for _, a := range addrs { - if err := pool.SendMessage(a, msg); err != nil { - return err - } - } - - return nil -} - -// GetStaleConnections returns connections that have been idle for longer than idleLimit -func (pool *ConnectionPool) GetStaleConnections(idleLimit time.Duration) ([]string, error) { - now := Now() - var idleConns []string - if err := pool.strand("GetStaleConnections", func() error { - for _, conn := range pool.pool { - if conn.LastReceived.Add(idleLimit).Before(now) { - idleConns = append(idleConns, conn.Addr()) - } - } - return nil - }); err != nil { - return nil, err - } - - return idleConns, nil -} - -// Now returns the current UTC time -func Now() time.Time { - return time.Now().UTC() -} diff --git a/vendor/github.com/SkycoinProject/skycoin/src/daemon/introduction_message_skyencoder.go b/vendor/github.com/SkycoinProject/skycoin/src/daemon/introduction_message_skyencoder.go deleted file mode 100644 index 5c210d0..0000000 --- a/vendor/github.com/SkycoinProject/skycoin/src/daemon/introduction_message_skyencoder.go +++ /dev/null @@ -1,162 +0,0 @@ -// Code generated by github.com/SkycoinProject/skyencoder. DO NOT EDIT. 
- -package daemon - -import ( - "errors" - "math" - - "github.com/SkycoinProject/skycoin/src/cipher/encoder" -) - -// encodeSizeIntroductionMessage computes the size of an encoded object of type IntroductionMessage -func encodeSizeIntroductionMessage(obj *IntroductionMessage) uint64 { - i0 := uint64(0) - - // obj.Mirror - i0 += 4 - - // obj.ListenPort - i0 += 2 - - // obj.ProtocolVersion - i0 += 4 - - // omitempty - if len(obj.Extra) != 0 { - - // obj.Extra - i0 += 4 + uint64(len(obj.Extra)) - - } - - return i0 -} - -// encodeIntroductionMessage encodes an object of type IntroductionMessage to a buffer allocated to the exact size -// required to encode the object. -func encodeIntroductionMessage(obj *IntroductionMessage) ([]byte, error) { - n := encodeSizeIntroductionMessage(obj) - buf := make([]byte, n) - - if err := encodeIntroductionMessageToBuffer(buf, obj); err != nil { - return nil, err - } - - return buf, nil -} - -// encodeIntroductionMessageToBuffer encodes an object of type IntroductionMessage to a []byte buffer. -// The buffer must be large enough to encode the object, otherwise an error is returned. -func encodeIntroductionMessageToBuffer(buf []byte, obj *IntroductionMessage) error { - if uint64(len(buf)) < encodeSizeIntroductionMessage(obj) { - return encoder.ErrBufferUnderflow - } - - e := &encoder.Encoder{ - Buffer: buf[:], - } - - // obj.Mirror - e.Uint32(obj.Mirror) - - // obj.ListenPort - e.Uint16(obj.ListenPort) - - // obj.ProtocolVersion - e.Int32(obj.ProtocolVersion) - - // omitempty - if len(obj.Extra) != 0 { - - // obj.Extra length check - if uint64(len(obj.Extra)) > math.MaxUint32 { - return errors.New("obj.Extra length exceeds math.MaxUint32") - } - - // obj.Extra length - e.Uint32(uint32(len(obj.Extra))) - - // obj.Extra copy - e.CopyBytes(obj.Extra) - - } - - return nil -} - -// decodeIntroductionMessage decodes an object of type IntroductionMessage from a buffer. -// Returns the number of bytes used from the buffer to decode the object. 
-// If the buffer not long enough to decode the object, returns encoder.ErrBufferUnderflow. -func decodeIntroductionMessage(buf []byte, obj *IntroductionMessage) (uint64, error) { - d := &encoder.Decoder{ - Buffer: buf[:], - } - - { - // obj.Mirror - i, err := d.Uint32() - if err != nil { - return 0, err - } - obj.Mirror = i - } - - { - // obj.ListenPort - i, err := d.Uint16() - if err != nil { - return 0, err - } - obj.ListenPort = i - } - - { - // obj.ProtocolVersion - i, err := d.Int32() - if err != nil { - return 0, err - } - obj.ProtocolVersion = i - } - - { - // obj.Extra - - if len(d.Buffer) == 0 { - return uint64(len(buf) - len(d.Buffer)), nil - } - - ul, err := d.Uint32() - if err != nil { - return 0, err - } - - length := int(ul) - if length < 0 || length > len(d.Buffer) { - return 0, encoder.ErrBufferUnderflow - } - - if length != 0 { - obj.Extra = make([]byte, length) - - copy(obj.Extra[:], d.Buffer[:length]) - d.Buffer = d.Buffer[length:] - } - } - - return uint64(len(buf) - len(d.Buffer)), nil -} - -// decodeIntroductionMessageExact decodes an object of type IntroductionMessage from a buffer. -// If the buffer not long enough to decode the object, returns encoder.ErrBufferUnderflow. -// If the buffer is longer than required to decode the object, returns encoder.ErrRemainingBytes. -func decodeIntroductionMessageExact(buf []byte, obj *IntroductionMessage) error { - if n, err := decodeIntroductionMessage(buf, obj); err != nil { - return err - } else if n != uint64(len(buf)) { - return encoder.ErrRemainingBytes - } - - return nil -} diff --git a/vendor/github.com/SkycoinProject/skycoin/src/daemon/ip_addr_skyencoder.go b/vendor/github.com/SkycoinProject/skycoin/src/daemon/ip_addr_skyencoder.go deleted file mode 100644 index 21ca8c3..0000000 --- a/vendor/github.com/SkycoinProject/skycoin/src/daemon/ip_addr_skyencoder.go +++ /dev/null @@ -1,93 +0,0 @@ -// Code generated by github.com/SkycoinProject/skyencoder. DO NOT EDIT. 
- -package daemon - -import "github.com/SkycoinProject/skycoin/src/cipher/encoder" - -// encodeSizeIPAddr computes the size of an encoded object of type IPAddr -func encodeSizeIPAddr(obj *IPAddr) uint64 { - i0 := uint64(0) - - // obj.IP - i0 += 4 - - // obj.Port - i0 += 2 - - return i0 -} - -// encodeIPAddr encodes an object of type IPAddr to a buffer allocated to the exact size -// required to encode the object. -func encodeIPAddr(obj *IPAddr) ([]byte, error) { - n := encodeSizeIPAddr(obj) - buf := make([]byte, n) - - if err := encodeIPAddrToBuffer(buf, obj); err != nil { - return nil, err - } - - return buf, nil -} - -// encodeIPAddrToBuffer encodes an object of type IPAddr to a []byte buffer. -// The buffer must be large enough to encode the object, otherwise an error is returned. -func encodeIPAddrToBuffer(buf []byte, obj *IPAddr) error { - if uint64(len(buf)) < encodeSizeIPAddr(obj) { - return encoder.ErrBufferUnderflow - } - - e := &encoder.Encoder{ - Buffer: buf[:], - } - - // obj.IP - e.Uint32(obj.IP) - - // obj.Port - e.Uint16(obj.Port) - - return nil -} - -// decodeIPAddr decodes an object of type IPAddr from a buffer. -// Returns the number of bytes used from the buffer to decode the object. -// If the buffer not long enough to decode the object, returns encoder.ErrBufferUnderflow. -func decodeIPAddr(buf []byte, obj *IPAddr) (uint64, error) { - d := &encoder.Decoder{ - Buffer: buf[:], - } - - { - // obj.IP - i, err := d.Uint32() - if err != nil { - return 0, err - } - obj.IP = i - } - - { - // obj.Port - i, err := d.Uint16() - if err != nil { - return 0, err - } - obj.Port = i - } - - return uint64(len(buf) - len(d.Buffer)), nil -} - -// decodeIPAddrExact decodes an object of type IPAddr from a buffer. -// If the buffer not long enough to decode the object, returns encoder.ErrBufferUnderflow. -// If the buffer is longer than required to decode the object, returns encoder.ErrRemainingBytes. 
-func decodeIPAddrExact(buf []byte, obj *IPAddr) error { - if n, err := decodeIPAddr(buf, obj); err != nil { - return err - } else if n != uint64(len(buf)) { - return encoder.ErrRemainingBytes - } - - return nil -} diff --git a/vendor/github.com/SkycoinProject/skycoin/src/daemon/messages.go b/vendor/github.com/SkycoinProject/skycoin/src/daemon/messages.go deleted file mode 100644 index 77bde5b..0000000 --- a/vendor/github.com/SkycoinProject/skycoin/src/daemon/messages.go +++ /dev/null @@ -1,1390 +0,0 @@ -package daemon - -import ( - "encoding/binary" - "errors" - "fmt" - "net" - "strings" - - "github.com/sirupsen/logrus" - - "github.com/SkycoinProject/skycoin/src/cipher" - "github.com/SkycoinProject/skycoin/src/cipher/encoder" - "github.com/SkycoinProject/skycoin/src/coin" - "github.com/SkycoinProject/skycoin/src/daemon/gnet" - "github.com/SkycoinProject/skycoin/src/daemon/pex" - "github.com/SkycoinProject/skycoin/src/params" - "github.com/SkycoinProject/skycoin/src/util/iputil" - "github.com/SkycoinProject/skycoin/src/util/useragent" -) - -// Message represent a packet to be serialized over the network by -// the gnet encoder. -// They must implement the gnet.Message interface -// All concurrent daemon write operations are synchronized by the daemon's -// DaemonLoop(). -// Message do this by caching the gnet.MessageContext received in Handle() -// and placing itself on the messageEvent channel. -// When the message is retrieved from the messageEvent channel, its process() -// method is called. 
- -// MessageConfig config contains a gnet.Message's 4byte prefix and a -// reference interface -type MessageConfig struct { - Prefix gnet.MessagePrefix - Message interface{} -} - -// NewMessageConfig creates message config -func NewMessageConfig(prefix string, m interface{}) MessageConfig { - return MessageConfig{ - Message: m, - Prefix: gnet.MessagePrefixFromString(prefix), - } -} - -//go:generate skyencoder -unexported -struct IntroductionMessage -//go:generate skyencoder -unexported -struct GivePeersMessage -//go:generate skyencoder -unexported -struct GetBlocksMessage -//go:generate skyencoder -unexported -struct GiveBlocksMessage -//go:generate skyencoder -unexported -struct AnnounceBlocksMessage -//go:generate skyencoder -unexported -struct GetTxnsMessage -//go:generate skyencoder -unexported -struct GiveTxnsMessage -//go:generate skyencoder -unexported -struct AnnounceTxnsMessage -//go:generate skyencoder -unexported -struct DisconnectMessage -//go:generate skyencoder -unexported -struct IPAddr -//go:generate skyencoder -unexported -output-path . -package daemon -struct SignedBlock github.com/SkycoinProject/skycoin/src/coin -//go:generate skyencoder -unexported -output-path . 
-package daemon -struct Transaction github.com/SkycoinProject/skycoin/src/coin - -// Creates and populates the message configs -func getMessageConfigs() []MessageConfig { - return []MessageConfig{ - NewMessageConfig("INTR", IntroductionMessage{}), - NewMessageConfig("GETP", GetPeersMessage{}), - NewMessageConfig("GIVP", GivePeersMessage{}), - NewMessageConfig("PING", PingMessage{}), - NewMessageConfig("PONG", PongMessage{}), - NewMessageConfig("GETB", GetBlocksMessage{}), - NewMessageConfig("GIVB", GiveBlocksMessage{}), - NewMessageConfig("ANNB", AnnounceBlocksMessage{}), - NewMessageConfig("GETT", GetTxnsMessage{}), - NewMessageConfig("GIVT", GiveTxnsMessage{}), - NewMessageConfig("ANNT", AnnounceTxnsMessage{}), - NewMessageConfig("DISC", DisconnectMessage{}), - } -} - -// MessagesConfig slice of MessageConfig -type MessagesConfig struct { - // Message ID prefices - Messages []MessageConfig -} - -// NewMessagesConfig creates messages config -func NewMessagesConfig() MessagesConfig { - return MessagesConfig{ - Messages: getMessageConfigs(), - } -} - -// Register registers our Messages with gnet -func (msc *MessagesConfig) Register() { - for _, mc := range msc.Messages { - gnet.RegisterMessage(mc.Prefix, mc.Message) - } - gnet.VerifyMessages() -} - -// Messages messages struct -type Messages struct { - Config MessagesConfig -} - -// NewMessages creates Messages -func NewMessages(c MessagesConfig) *Messages { - return &Messages{ - Config: c, - } -} - -// IPAddr compact representation of IP:Port -type IPAddr struct { - IP uint32 - Port uint16 -} - -// NewIPAddr returns an IPAddr from an ip:port string. 
-func NewIPAddr(addr string) (ipaddr IPAddr, err error) { - ips, port, err := iputil.SplitAddr(addr) - if err != nil { - return - } - - // TODO -- support ipv6 - ipb := net.ParseIP(ips).To4() - if ipb == nil { - err = errors.New("Ignoring IPv6 address") - return - } - - ip := binary.BigEndian.Uint32(ipb) - ipaddr.IP = ip - ipaddr.Port = port - return -} - -// String returns IPAddr as "ip:port" -func (ipa IPAddr) String() string { - ipb := make([]byte, 4) - binary.BigEndian.PutUint32(ipb, ipa.IP) - return fmt.Sprintf("%s:%d", net.IP(ipb).String(), ipa.Port) -} - -// asyncMessage messages that perform an action when received must implement this interface. -// process() is called after the message is pulled off of messageEvent channel. -// Messages should place themselves on the messageEvent channel in their -// Handle() method required by gnet. -type asyncMessage interface { - process(d daemoner) -} - -// GetPeersMessage sent to request peers -type GetPeersMessage struct { - addr string `enc:"-"` -} - -// NewGetPeersMessage creates GetPeersMessage -func NewGetPeersMessage() *GetPeersMessage { - return &GetPeersMessage{} -} - -// EncodeSize implements gnet.Serializer -func (gpm *GetPeersMessage) EncodeSize() uint64 { - return 0 -} - -// Encode implements gnet.Serializer -func (gpm *GetPeersMessage) Encode(buf []byte) error { - return nil -} - -// Decode implements gnet.Serializer -func (gpm *GetPeersMessage) Decode(buf []byte) (uint64, error) { - return 0, nil -} - -// Handle handles message -func (gpm *GetPeersMessage) Handle(mc *gnet.MessageContext, daemon interface{}) error { - gpm.addr = mc.Addr - return daemon.(daemoner).recordMessageEvent(gpm, mc) -} - -// process Notifies the Pex instance that peers were requested -func (gpm *GetPeersMessage) process(d daemoner) { - if d.pexConfig().Disabled { - return - } - - if err := d.sendRandomPeers(gpm.addr); err != nil { - logger.WithField("addr", gpm.addr).WithError(err).Error("SendRandomPeers failed") - } -} - -// 
GivePeersMessage sent in response to GetPeersMessage -type GivePeersMessage struct { - Peers []IPAddr `enc:",maxlen=512"` - c *gnet.MessageContext `enc:"-"` -} - -// NewGivePeersMessage []*pex.Peer is converted to []IPAddr for binary transmission -// If the size of the message would exceed maxMsgLength, the IPAddr slice is truncated. -func NewGivePeersMessage(peers []pex.Peer, maxMsgLength uint64) *GivePeersMessage { - if len(peers) > 512 { - peers = peers[:512] - } - - ipaddrs := make([]IPAddr, 0, len(peers)) - for _, ps := range peers { - ipaddr, err := NewIPAddr(ps.Addr) - if err != nil { - logger.WithError(err).WithField("addr", ps.Addr).Warning("GivePeersMessage skipping invalid address") - continue - } - ipaddrs = append(ipaddrs, ipaddr) - } - - m := &GivePeersMessage{ - Peers: ipaddrs, - } - truncateGivePeersMessage(m, maxMsgLength) - return m -} - -// truncateGivePeersMessage truncates the blocks in GivePeersMessage to fit inside of MaxOutgoingMessageLength -func truncateGivePeersMessage(m *GivePeersMessage, maxMsgLength uint64) { - // The message length will include a 4 byte message type prefix. 
- // Panic if the prefix can't fit, otherwise we can't adjust the uint64 safely - if maxMsgLength < 4 { - logger.Panic("maxMsgLength must be >= 4") - } - - maxMsgLength -= 4 - - // Measure the current message size, if it fits, return - n := m.EncodeSize() - if n <= maxMsgLength { - return - } - - // Measure the size of an empty message - var mm GivePeersMessage - size := mm.EncodeSize() - - // Measure the size of the peers, advancing the slice index until it reaches capacity - index := -1 - for i, ip := range m.Peers { - x := encodeSizeIPAddr(&ip) - if size+x > maxMsgLength { - break - } - size += x - index = i - } - - m.Peers = m.Peers[:index+1] - - if len(m.Peers) == 0 { - logger.Critical().Error("truncateGivePeersMessage truncated peers to an empty slice") - } -} - -// EncodeSize implements gnet.Serializer -func (gpm *GivePeersMessage) EncodeSize() uint64 { - return encodeSizeGivePeersMessage(gpm) -} - -// Encode implements gnet.Serializer -func (gpm *GivePeersMessage) Encode(buf []byte) error { - return encodeGivePeersMessageToBuffer(buf, gpm) -} - -// Decode implements gnet.Serializer -func (gpm *GivePeersMessage) Decode(buf []byte) (uint64, error) { - return decodeGivePeersMessage(buf, gpm) -} - -// GetPeers is required by the pex.GivePeersMessage interface. -// It returns the peers contained in the message as an array of "ip:port" -// strings. 
-func (gpm *GivePeersMessage) GetPeers() []string { - peers := make([]string, len(gpm.Peers)) - for i, ipaddr := range gpm.Peers { - peers[i] = ipaddr.String() - } - return peers -} - -// Handle handle message -func (gpm *GivePeersMessage) Handle(mc *gnet.MessageContext, daemon interface{}) error { - gpm.c = mc - return daemon.(daemoner).recordMessageEvent(gpm, mc) -} - -// process Notifies the Pex instance that peers were received -func (gpm *GivePeersMessage) process(d daemoner) { - if d.pexConfig().Disabled { - return - } - - peers := gpm.GetPeers() - - if len(peers) == 0 { - return - } - - // Cap the number of peers printed in the log to prevent log spam abuse - peersToFmt := peers - if len(peersToFmt) > 30 { - peersToFmt = peersToFmt[:30] - } - peersStr := strings.Join(peersToFmt, ", ") - if len(peers) != len(peersToFmt) { - peersStr += fmt.Sprintf(" and %d more", len(peers)-len(peersToFmt)) - } - - logger.WithFields(logrus.Fields{ - "addr": gpm.c.Addr, - "gnetID": gpm.c.ConnID, - "peers": peersStr, - "count": len(peers), - }).Debug("Received peers via PEX") - - d.addPeers(peers) -} - -// IntroductionMessage is sent on first connect by both parties -type IntroductionMessage struct { - c *gnet.MessageContext `enc:"-"` - UserAgent useragent.Data `enc:"-"` - UnconfirmedVerifyTxn params.VerifyTxn `enc:"-"` - GenesisHash cipher.SHA256 `enc:"-"` - - // Mirror is a random value generated on client startup that is used to identify self-connections - Mirror uint32 - // ListenPort is the port that this client is listening on - ListenPort uint16 - // Protocol version - ProtocolVersion int32 - - // Extra is extra bytes added to the struct to accommodate multiple versions of this packet. - // Currently it contains the blockchain pubkey and user agent but will accept a client that does not provide it. - // If any of this data is provided, it must include a valid blockchain pubkey and a valid user agent string (maxlen=256). 
- // Contents of extra: - // ExtraByte uint32 // length prefix of []byte - // Pubkey cipher.Pubkey // blockchain pubkey - // BurnFactor uint32 // burn factor for announced txns - // MaxTxnSize uint32 // max txn size for announced txns - // MaxDropletPrecision uint8 // maximum number of decimal places for announced txns - // UserAgent string `enc:",maxlen=256"` - // GenesisHash cipher.SHA256 // genesis block hash - Extra []byte `enc:",omitempty"` -} - -// NewIntroductionMessage creates introduction message -func NewIntroductionMessage(mirror uint32, version int32, port uint16, pubkey cipher.PubKey, userAgent string, verifyParams params.VerifyTxn, genesisHash cipher.SHA256) *IntroductionMessage { - return &IntroductionMessage{ - Mirror: mirror, - ProtocolVersion: version, - ListenPort: port, - Extra: newIntroductionMessageExtra(pubkey, userAgent, verifyParams, genesisHash), - } -} - -func newIntroductionMessageExtra(pubkey cipher.PubKey, userAgent string, verifyParams params.VerifyTxn, genesisHash cipher.SHA256) []byte { - if len(userAgent) > useragent.MaxLen { - logger.WithFields(logrus.Fields{ - "userAgent": userAgent, - "maxLen": useragent.MaxLen, - }).Panic("user agent exceeds max len") - } - if userAgent == "" { - logger.Panic("user agent is required") - } - useragent.MustParse(userAgent) - - if err := verifyParams.Validate(); err != nil { - logger.Panic(err) - } - - userAgentSerialized := encoder.SerializeString(userAgent) - verifyParamsSerialized := encoder.Serialize(verifyParams) - - extra := make([]byte, len(pubkey)+len(userAgentSerialized)+len(verifyParamsSerialized)+len(genesisHash)) - - copy(extra[:len(pubkey)], pubkey[:]) - i := len(pubkey) - copy(extra[i:], verifyParamsSerialized) - i += len(verifyParamsSerialized) - copy(extra[i:], userAgentSerialized) - i += len(userAgentSerialized) - copy(extra[i:i+len(genesisHash)], genesisHash[:]) - - return extra -} - -// EncodeSize implements gnet.Serializer -func (intro *IntroductionMessage) EncodeSize() uint64 
{ - return encodeSizeIntroductionMessage(intro) -} - -// Encode implements gnet.Serializer -func (intro *IntroductionMessage) Encode(buf []byte) error { - return encodeIntroductionMessageToBuffer(buf, intro) -} - -// Decode implements gnet.Serializer -func (intro *IntroductionMessage) Decode(buf []byte) (uint64, error) { - return decodeIntroductionMessage(buf, intro) -} - -// Handle records message event in daemon -func (intro *IntroductionMessage) Handle(mc *gnet.MessageContext, daemon interface{}) error { - intro.c = mc - return daemon.(daemoner).recordMessageEvent(intro, mc) -} - -// process an event queued by Handle() -func (intro *IntroductionMessage) process(d daemoner) { - addr := intro.c.Addr - - fields := logrus.Fields{ - "addr": addr, - "gnetID": intro.c.ConnID, - "listenPort": intro.ListenPort, - } - - logger.WithFields(fields).Debug("IntroductionMessage.process") - - if err := intro.Verify(d.DaemonConfig(), logrus.Fields{ - "addr": addr, - "gnetID": intro.c.ConnID, - }); err != nil { - if err := d.Disconnect(addr, err); err != nil { - logger.WithError(err).WithFields(fields).Warning("Disconnect") - } - return - } - - if _, err := d.connectionIntroduced(addr, intro.c.ConnID, intro); err != nil { - logger.WithError(err).WithFields(fields).Warning("connectionIntroduced failed") - var reason gnet.DisconnectReason - switch err { - // It is hypothetically possible that a message would get processed after - // a disconnect event for a given connection. - // In this case, drop the packet. - // Do not perform a disconnect, since this would operate on the new connection. 
- // This should be prevented by an earlier check in daemon.onMessageEvent() - case ErrConnectionGnetIDMismatch, ErrConnectionStateNotConnected, ErrConnectionAlreadyIntroduced: - logger.Critical().WithError(err).WithFields(fields).Warning("IntroductionMessage.process connection state out of order") - return - case ErrConnectionNotExist: - return - case ErrConnectionIPMirrorExists: - reason = ErrDisconnectConnectedTwice - case pex.ErrPeerlistFull: - reason = ErrDisconnectPeerlistFull - // Send more peers before disconnecting - logger.WithFields(fields).Debug("Sending peers before disconnecting due to peer list full") - if err := d.sendRandomPeers(addr); err != nil { - logger.WithError(err).WithFields(fields).Warning("sendRandomPeers failed") - } - default: - reason = ErrDisconnectUnexpectedError - } - - if err := d.Disconnect(addr, reason); err != nil { - logger.WithError(err).WithFields(fields).Warning("Disconnect") - } - - return - } - - // Request blocks immediately after they're confirmed - if err := d.requestBlocksFromAddr(addr); err != nil { - logger.WithError(err).WithFields(fields).Warning("requestBlocksFromAddr") - } else { - logger.WithFields(fields).Debug("Requested blocks") - } - - // Announce unconfirmed txns - if err := d.announceAllValidTxns(); err != nil { - logger.WithError(err).Warning("announceAllValidTxns failed") - } -} - -// Verify checks if the introduction message is valid returning the appropriate error -func (intro *IntroductionMessage) Verify(dc DaemonConfig, logFields logrus.Fields) error { - // Disconnect if this is a self connection (we have the same mirror value) - if intro.Mirror == dc.Mirror { - logger.WithFields(logFields).WithField("mirror", intro.Mirror).Info("Remote mirror value matches ours") - return ErrDisconnectSelf - } - - // Disconnect if peer version is not within the supported range - if intro.ProtocolVersion < dc.MinProtocolVersion { - logger.WithFields(logFields).WithFields(logrus.Fields{ - "protocolVersion": 
intro.ProtocolVersion, - "minProtocolVersion": dc.MinProtocolVersion, - }).Info("protocol version below minimum supported protocol version") - return ErrDisconnectVersionNotSupported - } - - logger.WithFields(logFields).WithField("protocolVersion", intro.ProtocolVersion).Debug("Peer protocol version accepted") - - // v24 does not send blockchain pubkey or user agent - // v25 sends blockchain pubkey and user agent - // v24 and v25 check the blockchain pubkey and user agent, would accept message with no Pubkey and user agent - // v26 would check the blockchain pubkey and reject if not matched or not provided, and parses a user agent - // v26 adds genesis hash - // v27 would require and check the genesis hash - extraLen := len(intro.Extra) - if extraLen == 0 { - logger.WithFields(logFields).Warning("Blockchain pubkey is not provided") - return ErrDisconnectBlockchainPubkeyNotProvided - } - - var bcPubKey cipher.PubKey - if extraLen < len(bcPubKey) { - logger.WithFields(logFields).Warning("Extra data length does not meet the minimum requirement") - return ErrDisconnectInvalidExtraData - } - copy(bcPubKey[:], intro.Extra[:len(bcPubKey)]) - - if dc.BlockchainPubkey != bcPubKey { - logger.WithFields(logFields).WithFields(logrus.Fields{ - "pubkey": bcPubKey.Hex(), - "daemonPubkey": dc.BlockchainPubkey.Hex(), - }).Warning("Blockchain pubkey does not match") - return ErrDisconnectBlockchainPubkeyNotMatched - } - - i := len(bcPubKey) - if extraLen < i+9 { - logger.WithFields(logFields).Warning("IntroductionMessage transaction verification parameters could not be deserialized: not enough data") - return ErrDisconnectInvalidExtraData - } - if err := encoder.DeserializeRawExact(intro.Extra[i:i+9], &intro.UnconfirmedVerifyTxn); err != nil { - // This should not occur due to the previous length check - logger.Critical().WithError(err).WithFields(logFields).Warning("unconfirmedVerifyTxn params could not be deserialized") - return ErrDisconnectInvalidExtraData - } - i += 9 - - if 
err := intro.UnconfirmedVerifyTxn.Validate(); err != nil { - logger.WithError(err).WithFields(logFields).WithFields(logrus.Fields{ - "burnFactor": intro.UnconfirmedVerifyTxn.BurnFactor, - "maxTransactionSize": intro.UnconfirmedVerifyTxn.MaxTransactionSize, - "maxDropletPrecision": intro.UnconfirmedVerifyTxn.MaxDropletPrecision, - }).Warning("Invalid unconfirmedVerifyTxn params") - switch err { - case params.ErrInvalidBurnFactor: - return ErrDisconnectInvalidBurnFactor - case params.ErrInvalidMaxTransactionSize: - return ErrDisconnectInvalidMaxTransactionSize - case params.ErrInvalidMaxDropletPrecision: - return ErrDisconnectInvalidMaxDropletPrecision - default: - return ErrDisconnectUnexpectedError - } - } - - userAgentSerialized := intro.Extra[i:] - userAgent, userAgentLen, err := encoder.DeserializeString(userAgentSerialized, useragent.MaxLen) - if err != nil { - logger.WithError(err).WithFields(logFields).Warning("Extra data user agent string could not be deserialized") - return ErrDisconnectInvalidExtraData - } - - intro.UserAgent, err = useragent.Parse(useragent.Sanitize(userAgent)) - if err != nil { - logger.WithError(err).WithFields(logFields).WithField("userAgent", userAgent).Warning("User agent is invalid") - return ErrDisconnectInvalidUserAgent - } - i += int(userAgentLen) - - remainingLen := extraLen - i - if remainingLen > 0 && remainingLen < len(intro.GenesisHash) { - logger.WithFields(logFields).Warning("Extra data genesis hash could not be deserialized: not enough data") - return ErrDisconnectInvalidExtraData - } - copy(intro.GenesisHash[:], intro.Extra[i:]) - - return nil -} - -// PingMessage Sent to keep a connection alive. A PongMessage is sent in reply. 
-type PingMessage struct { - c *gnet.MessageContext `enc:"-"` -} - -// EncodeSize implements gnet.Serializer -func (ping *PingMessage) EncodeSize() uint64 { - return 0 -} - -// Encode implements gnet.Serializer -func (ping *PingMessage) Encode(buf []byte) error { - return nil -} - -// Decode implements gnet.Serializer -func (ping *PingMessage) Decode(buf []byte) (uint64, error) { - return 0, nil -} - -// Handle implements the Messager interface -func (ping *PingMessage) Handle(mc *gnet.MessageContext, daemon interface{}) error { - ping.c = mc - return daemon.(daemoner).recordMessageEvent(ping, mc) -} - -// process Sends a PongMessage to the sender of PingMessage -func (ping *PingMessage) process(d daemoner) { - fields := logrus.Fields{ - "addr": ping.c.Addr, - "gnetID": ping.c.ConnID, - } - - if d.DaemonConfig().LogPings { - logger.WithFields(fields).Debug("Replying to ping") - } - if err := d.sendMessage(ping.c.Addr, &PongMessage{}); err != nil { - logger.WithFields(fields).WithError(err).Error("Send PongMessage failed") - } -} - -// PongMessage Sent in reply to a PingMessage. No action is taken when this is received. 
-type PongMessage struct { -} - -// EncodeSize implements gnet.Serializer -func (pong *PongMessage) EncodeSize() uint64 { - return 0 -} - -// Encode implements gnet.Serializer -func (pong *PongMessage) Encode(buf []byte) error { - return nil -} - -// Decode implements gnet.Serializer -func (pong *PongMessage) Decode(buf []byte) (uint64, error) { - return 0, nil -} - -// Handle handles message -func (pong *PongMessage) Handle(mc *gnet.MessageContext, daemon interface{}) error { - // There is nothing to do; gnet updates Connection.LastMessage internally - // when this is received - if daemon.(daemoner).DaemonConfig().LogPings { - logger.WithFields(logrus.Fields{ - "addr": mc.Addr, - "gnetID": mc.ConnID, - }).Debug("Received pong") - } - return nil -} - -// DisconnectMessage sent to a peer before disconnecting, indicating the reason for disconnect -type DisconnectMessage struct { - c *gnet.MessageContext `enc:"-"` - reason gnet.DisconnectReason `enc:"-"` - - // Error code - ReasonCode uint16 - - // Reserved for future use - Reserved []byte -} - -// NewDisconnectMessage creates message sent to reject previously received message -func NewDisconnectMessage(reason gnet.DisconnectReason) *DisconnectMessage { - return &DisconnectMessage{ - reason: reason, - ReasonCode: DisconnectReasonToCode(reason), - Reserved: nil, - } -} - -// EncodeSize implements gnet.Serializer -func (dm *DisconnectMessage) EncodeSize() uint64 { - return encodeSizeDisconnectMessage(dm) -} - -// Encode implements gnet.Serializer -func (dm *DisconnectMessage) Encode(buf []byte) error { - return encodeDisconnectMessageToBuffer(buf, dm) -} - -// Decode implements gnet.Serializer -func (dm *DisconnectMessage) Decode(buf []byte) (uint64, error) { - return decodeDisconnectMessage(buf, dm) -} - -// Handle an event queued by Handle() -func (dm *DisconnectMessage) Handle(mc *gnet.MessageContext, daemon interface{}) error { - dm.c = mc - return daemon.(daemoner).recordMessageEvent(dm, mc) -} - -// process 
disconnect message by reflexively disconnecting -func (dm *DisconnectMessage) process(d daemoner) { - logger.WithFields(logrus.Fields{ - "addr": dm.c.Addr, - "gnetID": dm.c.ConnID, - "code": dm.ReasonCode, - "reason": DisconnectCodeToReason(dm.ReasonCode), - }).Infof("DisconnectMessage received") - - if err := d.disconnectNow(dm.c.Addr, ErrDisconnectReceivedDisconnect); err != nil { - logger.WithError(err).WithField("addr", dm.c.Addr).Warning("disconnectNow") - } -} - -// GetBlocksMessage sent to request blocks since LastBlock -type GetBlocksMessage struct { - LastBlock uint64 - RequestedBlocks uint64 - c *gnet.MessageContext `enc:"-"` -} - -// NewGetBlocksMessage creates GetBlocksMessage -func NewGetBlocksMessage(lastBlock, requestedBlocks uint64) *GetBlocksMessage { - return &GetBlocksMessage{ - LastBlock: lastBlock, - RequestedBlocks: requestedBlocks, - } -} - -// EncodeSize implements gnet.Serializer -func (gbm *GetBlocksMessage) EncodeSize() uint64 { - return encodeSizeGetBlocksMessage(gbm) -} - -// Encode implements gnet.Serializer -func (gbm *GetBlocksMessage) Encode(buf []byte) error { - return encodeGetBlocksMessageToBuffer(buf, gbm) -} - -// Decode implements gnet.Serializer -func (gbm *GetBlocksMessage) Decode(buf []byte) (uint64, error) { - return decodeGetBlocksMessage(buf, gbm) -} - -// Handle handles message -func (gbm *GetBlocksMessage) Handle(mc *gnet.MessageContext, daemon interface{}) error { - gbm.c = mc - return daemon.(daemoner).recordMessageEvent(gbm, mc) -} - -// process should send number to be requested, with request -func (gbm *GetBlocksMessage) process(d daemoner) { - dc := d.DaemonConfig() - if dc.DisableNetworking { - return - } - - fields := logrus.Fields{ - "addr": gbm.c.Addr, - "gnetID": gbm.c.ConnID, - } - - // Record this as this peer's highest block - d.recordPeerHeight(gbm.c.Addr, gbm.c.ConnID, gbm.LastBlock) - - // Cap the number of requested blocks (TODO - necessary since we have size limits enforced later?) 
- requestedBlocks := gbm.RequestedBlocks - if requestedBlocks > dc.MaxGetBlocksResponseCount { - logger.WithFields(logrus.Fields{ - "requestedBlocks": requestedBlocks, - "maxRequestedBlocks": dc.MaxGetBlocksResponseCount, - }).WithFields(fields).Debug("GetBlocksMessage.RequestedBlocks value exceeds configured limit, reducing") - requestedBlocks = dc.MaxGetBlocksResponseCount - } - - // Fetch and return signed blocks since LastBlock - blocks, err := d.getSignedBlocksSince(gbm.LastBlock, requestedBlocks) - if err != nil { - logger.WithFields(fields).WithError(err).Error("getSignedBlocksSince failed") - return - } - - if len(blocks) == 0 { - return - } - - logger.WithFields(fields).Debugf("GetBlocksMessage: replying with %d blocks after block %d", len(blocks), gbm.LastBlock) - - m := NewGiveBlocksMessage(blocks, dc.MaxOutgoingMessageLength) - if len(m.Blocks) != len(blocks) { - logger.WithField("startBlockSeq", blocks[0].Head.BkSeq).WithFields(fields).Warningf("NewGiveBlocksMessage truncated %d blocks to %d blocks", len(blocks), len(m.Blocks)) - } - - if err := d.sendMessage(gbm.c.Addr, m); err != nil { - logger.WithFields(fields).WithError(err).Error("Send GiveBlocksMessage failed") - } -} - -// GiveBlocksMessage sent in response to GetBlocksMessage, or unsolicited -type GiveBlocksMessage struct { - Blocks []coin.SignedBlock `enc:",maxlen=128"` - c *gnet.MessageContext `enc:"-"` -} - -// NewGiveBlocksMessage creates GiveBlocksMessage. -// If the size of message would exceed maxMsgLength, the block slice is truncated. 
-func NewGiveBlocksMessage(blocks []coin.SignedBlock, maxMsgLength uint64) *GiveBlocksMessage { - if len(blocks) > 128 { - blocks = blocks[:128] - } - m := &GiveBlocksMessage{ - Blocks: blocks, - } - truncateGiveBlocksMessage(m, maxMsgLength) - return m -} - -// truncateGiveBlocksMessage truncates the blocks in GiveBlocksMessage to fit inside of MaxOutgoingMessageLength -func truncateGiveBlocksMessage(m *GiveBlocksMessage, maxMsgLength uint64) { - // The message length will include a 4 byte message type prefix. - // Panic if the prefix can't fit, otherwise we can't adjust the uint64 safely - if maxMsgLength < 4 { - logger.Panic("maxMsgLength must be >= 4") - } - - maxMsgLength -= 4 - - // Measure the current message size, if it fits, return - n := m.EncodeSize() - if n <= maxMsgLength { - return - } - - // Measure the size of an empty message - var mm GiveBlocksMessage - size := mm.EncodeSize() - - // Measure the size of the blocks, advancing the slice index until it reaches capacity - index := -1 - for i, b := range m.Blocks { - x := encodeSizeSignedBlock(&b) - if size+x > maxMsgLength { - break - } - size += x - index = i - } - - m.Blocks = m.Blocks[:index+1] - - if len(m.Blocks) == 0 { - logger.Critical().Error("truncateGiveBlocksMessage truncated blocks to an empty slice") - } -} - -// EncodeSize implements gnet.Serializer -func (m *GiveBlocksMessage) EncodeSize() uint64 { - return encodeSizeGiveBlocksMessage(m) -} - -// Encode implements gnet.Serializer -func (m *GiveBlocksMessage) Encode(buf []byte) error { - return encodeGiveBlocksMessageToBuffer(buf, m) -} - -// Decode implements gnet.Serializer -func (m *GiveBlocksMessage) Decode(buf []byte) (uint64, error) { - return decodeGiveBlocksMessage(buf, m) -} - -// Handle handle message -func (m *GiveBlocksMessage) Handle(mc *gnet.MessageContext, daemon interface{}) error { - m.c = mc - return daemon.(daemoner).recordMessageEvent(m, mc) -} - -// process process message -func (m *GiveBlocksMessage) process(d 
daemoner) { - if d.DaemonConfig().DisableNetworking { - logger.Critical().Info("Visor disabled, ignoring GiveBlocksMessage") - return - } - - // These DB queries are not performed in a transaction for performance reasons. - // It is not necessary that the blocks be executed together in a single transaction. - - processed := 0 - maxSeq, ok, err := d.headBkSeq() - if err != nil { - logger.WithError(err).Error("d.headBkSeq failed") - return - } - if !ok { - logger.Error("No HeadBkSeq found, cannot execute blocks") - return - } - - for _, b := range m.Blocks { - // To minimize waste when receiving multiple responses from peers - // we only break out of the loop if the block itself is invalid. - // E.g. if we request 20 blocks since 0 from 2 peers, and one peer - // replies with 15 and the other 20, if we did not do this check and - // the reply with 15 was received first, we would toss the one with 20 - // even though we could process it at the time. - if b.Seq() <= maxSeq { - continue - } - - err := d.executeSignedBlock(b) - if err == nil { - logger.Critical().WithField("seq", b.Block.Head.BkSeq).Info("Added new block") - processed++ - } else { - logger.Critical().WithError(err).WithField("seq", b.Block.Head.BkSeq).Error("Failed to execute received block") - // Blocks must be received in order, so if one fails its assumed - // the rest are failing - break - } - } - if processed == 0 { - return - } - - headBkSeq, ok, err := d.headBkSeq() - if err != nil { - logger.WithError(err).Error("d.headBkSeq failed") - return - } - if !ok { - logger.Error("No HeadBkSeq found after executing blocks, will not announce blocks") - return - } - - if headBkSeq < maxSeq { - logger.Critical().Warning("HeadBkSeq decreased after executing blocks") - } else if headBkSeq-maxSeq != uint64(processed) { - logger.Critical().Warning("HeadBkSeq increased by %d but we processed %s blocks", headBkSeq-maxSeq, processed) - } - - // Announce our new blocks to peers - abm := 
NewAnnounceBlocksMessage(headBkSeq) - if _, err := d.broadcastMessage(abm); err != nil { - logger.WithError(err).Warning("Broadcast AnnounceBlocksMessage failed") - } - - // Request more blocks - gbm := NewGetBlocksMessage(headBkSeq, d.DaemonConfig().GetBlocksRequestCount) - if _, err := d.broadcastMessage(gbm); err != nil { - logger.WithError(err).Warning("Broadcast GetBlocksMessage failed") - } -} - -// AnnounceBlocksMessage tells a peer our highest known BkSeq. The receiving peer can choose -// to send GetBlocksMessage in response -type AnnounceBlocksMessage struct { - MaxBkSeq uint64 - c *gnet.MessageContext `enc:"-"` -} - -// NewAnnounceBlocksMessage creates message -func NewAnnounceBlocksMessage(seq uint64) *AnnounceBlocksMessage { - return &AnnounceBlocksMessage{ - MaxBkSeq: seq, - } -} - -// EncodeSize implements gnet.Serializer -func (abm *AnnounceBlocksMessage) EncodeSize() uint64 { - return encodeSizeAnnounceBlocksMessage(abm) -} - -// Encode implements gnet.Serializer -func (abm *AnnounceBlocksMessage) Encode(buf []byte) error { - return encodeAnnounceBlocksMessageToBuffer(buf, abm) -} - -// Decode implements gnet.Serializer -func (abm *AnnounceBlocksMessage) Decode(buf []byte) (uint64, error) { - return decodeAnnounceBlocksMessage(buf, abm) -} - -// Handle handles message -func (abm *AnnounceBlocksMessage) Handle(mc *gnet.MessageContext, daemon interface{}) error { - abm.c = mc - return daemon.(daemoner).recordMessageEvent(abm, mc) -} - -// process process message -func (abm *AnnounceBlocksMessage) process(d daemoner) { - if d.DaemonConfig().DisableNetworking { - return - } - - fields := logrus.Fields{ - "addr": abm.c.Addr, - "gnetID": abm.c.ConnID, - } - - headBkSeq, ok, err := d.headBkSeq() - if err != nil { - logger.WithError(err).Error("AnnounceBlocksMessage d.headBkSeq failed") - return - } - if !ok { - logger.Error("AnnounceBlocksMessage no head block, cannot process AnnounceBlocksMessage") - return - } - - if headBkSeq >= abm.MaxBkSeq { - return 
- } - - // TODO: Should this be block get request for current sequence? - // If client is not caught up, won't attempt to get block - m := NewGetBlocksMessage(headBkSeq, d.DaemonConfig().GetBlocksRequestCount) - if err := d.sendMessage(abm.c.Addr, m); err != nil { - logger.WithError(err).WithFields(fields).Error("Send GetBlocksMessage") - } -} - -// SendingTxnsMessage send transaction message interface -type SendingTxnsMessage interface { - GetFiltered() []cipher.SHA256 -} - -// AnnounceTxnsMessage tells a peer that we have these transactions -type AnnounceTxnsMessage struct { - Transactions []cipher.SHA256 `enc:",maxlen=256"` - c *gnet.MessageContext `enc:"-"` -} - -// NewAnnounceTxnsMessage creates announce txns message. -// If the size of the message would exceed maxMsgLength, the hashes slice is truncated. -func NewAnnounceTxnsMessage(txns []cipher.SHA256, maxMsgLength uint64) *AnnounceTxnsMessage { - if len(txns) > 256 { - txns = txns[:256] - } - m := &AnnounceTxnsMessage{ - Transactions: txns, - } - hashes := truncateAnnounceTxnsHashes(m, maxMsgLength) - m.Transactions = hashes - return m -} - -// truncateAnnounceTxnsHashes truncates the hashes in AnnounceTxnsMessage to fit inside of MaxOutgoingMessageLength -func truncateAnnounceTxnsHashes(m *AnnounceTxnsMessage, maxMsgLength uint64) []cipher.SHA256 { - // The message length will include a 4 byte message type prefix. 
- // Panic if the prefix can't fit, otherwise we can't adjust the uint64 safely - if maxMsgLength < 4 { - logger.Panic("maxMsgLength must be >= 4") - } - - maxMsgLength -= 4 - - // Measure the current message size, if it fits, return - n := m.EncodeSize() - if n <= maxMsgLength { - return m.Transactions - } - - // Measure the size of an empty message - var mm AnnounceTxnsMessage - size := mm.EncodeSize() - - if maxMsgLength < size { - logger.Panic("maxMsgLength must be <= 4 + sizeof(empty AnnounceTxnsMessage)") - } - - maxMsgLength -= size - - hashes := truncateSHA256Slice(m.Transactions, maxMsgLength) - - if len(hashes) == 0 { - logger.Critical().Error("truncateAnnounceTxnsHashes truncated hashes to an empty slice") - } - - return hashes -} - -func truncateSHA256Slice(hashes []cipher.SHA256, maxLength uint64) []cipher.SHA256 { - if len(hashes) == 0 { - return hashes - } - - size := len(hashes[0]) - - n := maxLength / uint64(size) - - if n > uint64(len(hashes)) { - return hashes - } - - return hashes[:n] -} - -// EncodeSize implements gnet.Serializer -func (atm *AnnounceTxnsMessage) EncodeSize() uint64 { - return encodeSizeAnnounceTxnsMessage(atm) -} - -// Encode implements gnet.Serializer -func (atm *AnnounceTxnsMessage) Encode(buf []byte) error { - return encodeAnnounceTxnsMessageToBuffer(buf, atm) -} - -// Decode implements gnet.Serializer -func (atm *AnnounceTxnsMessage) Decode(buf []byte) (uint64, error) { - return decodeAnnounceTxnsMessage(buf, atm) -} - -// GetFiltered returns txns -func (atm *AnnounceTxnsMessage) GetFiltered() []cipher.SHA256 { - return atm.Transactions -} - -// Handle handle message -func (atm *AnnounceTxnsMessage) Handle(mc *gnet.MessageContext, daemon interface{}) error { - atm.c = mc - return daemon.(daemoner).recordMessageEvent(atm, mc) -} - -// process process message -func (atm *AnnounceTxnsMessage) process(d daemoner) { - dc := d.DaemonConfig() - if dc.DisableNetworking { - return - } - - fields := logrus.Fields{ - "addr": 
atm.c.Addr, - "gnetID": atm.c.ConnID, - } - - unknown, err := d.filterKnownUnconfirmed(atm.Transactions) - if err != nil { - logger.WithError(err).Error("AnnounceTxnsMessage d.filterKnownUnconfirmed failed") - return - } - - if len(unknown) == 0 { - return - } - - m := NewGetTxnsMessage(unknown, dc.MaxOutgoingMessageLength) - if len(m.Transactions) != len(unknown) { - logger.Warningf("NewGetTxnsMessage truncated %d hashes to %d hashes", len(unknown), len(m.Transactions)) - } - - if err := d.sendMessage(atm.c.Addr, m); err != nil { - logger.WithFields(fields).WithError(err).Error("Send GetTxnsMessage failed") - } -} - -// GetTxnsMessage request transactions of given hash -type GetTxnsMessage struct { - Transactions []cipher.SHA256 `enc:",maxlen=256"` - c *gnet.MessageContext `enc:"-"` -} - -// NewGetTxnsMessage creates GetTxnsMessage. -// If the size of the message would exceed maxMsgLength, the hashes slice is truncated. -func NewGetTxnsMessage(txns []cipher.SHA256, maxMsgLength uint64) *GetTxnsMessage { - if len(txns) > 256 { - txns = txns[:256] - } - m := &GetTxnsMessage{ - Transactions: txns, - } - hashes := truncateGetTxnsHashes(m, maxMsgLength) - m.Transactions = hashes - return m -} - -// truncateGetTxnsHashes truncates the hashes in GetTxnsMessage to fit inside of MaxOutgoingMessageLength -func truncateGetTxnsHashes(m *GetTxnsMessage, maxMsgLength uint64) []cipher.SHA256 { - // The message length will include a 4 byte message type prefix. 
- // Panic if the prefix can't fit, otherwise we can't adjust the uint64 safely - if maxMsgLength < 4 { - logger.Panic("maxMsgLength must be >= 4") - } - - maxMsgLength -= 4 - - // Measure the current message size, if it fits, return - n := m.EncodeSize() - if n <= maxMsgLength { - return m.Transactions - } - - // Measure the size of an empty message - var mm GetTxnsMessage - size := mm.EncodeSize() - - if maxMsgLength < size { - logger.Panic("maxMsgLength must be <= 4 + sizeof(empty GetTxnsMessage)") - } - - maxMsgLength -= size - - hashes := truncateSHA256Slice(m.Transactions, maxMsgLength) - - if len(hashes) == 0 { - logger.Critical().Error("truncateGetTxnsHashes truncated hashes to an empty slice") - } - - return hashes -} - -// EncodeSize implements gnet.Serializer -func (gtm *GetTxnsMessage) EncodeSize() uint64 { - return encodeSizeGetTxnsMessage(gtm) -} - -// Encode implements gnet.Serializer -func (gtm *GetTxnsMessage) Encode(buf []byte) error { - return encodeGetTxnsMessageToBuffer(buf, gtm) -} - -// Decode implements gnet.Serializer -func (gtm *GetTxnsMessage) Decode(buf []byte) (uint64, error) { - return decodeGetTxnsMessage(buf, gtm) -} - -// Handle handle message -func (gtm *GetTxnsMessage) Handle(mc *gnet.MessageContext, daemon interface{}) error { - gtm.c = mc - return daemon.(daemoner).recordMessageEvent(gtm, mc) -} - -// process process message -func (gtm *GetTxnsMessage) process(d daemoner) { - dc := d.DaemonConfig() - if dc.DisableNetworking { - return - } - - fields := logrus.Fields{ - "addr": gtm.c.Addr, - "gnetID": gtm.c.ConnID, - } - - // Locate all txns from the unconfirmed pool - known, err := d.getKnownUnconfirmed(gtm.Transactions) - if err != nil { - logger.WithError(err).Error("GetTxnsMessage d.getKnownUnconfirmed failed") - return - } - if len(known) == 0 { - return - } - - // Reply to sender with GiveTxnsMessage - m := NewGiveTxnsMessage(known, dc.MaxOutgoingMessageLength) - if len(m.Transactions) != len(known) { - 
logger.Warningf("NewGiveTxnsMessage truncated %d hashes to %d hashes", len(known), len(m.Transactions)) - } - - if err := d.sendMessage(gtm.c.Addr, m); err != nil { - logger.WithError(err).WithFields(fields).Error("Send GiveTxnsMessage") - } -} - -// GiveTxnsMessage tells the transaction of given hashes -type GiveTxnsMessage struct { - Transactions []coin.Transaction `enc:",maxlen=256"` - c *gnet.MessageContext `enc:"-"` -} - -// NewGiveTxnsMessage creates GiveTxnsMessage. -// If the size of the message would exceed maxMsgLength, the transactions slice is truncated. -func NewGiveTxnsMessage(txns []coin.Transaction, maxMsgLength uint64) *GiveTxnsMessage { - if len(txns) > 256 { - txns = txns[:256] - } - m := &GiveTxnsMessage{ - Transactions: txns, - } - truncateGiveTxnsMessage(m, maxMsgLength) - return m -} - -// truncateGiveTxnsMessage truncates the transactions in GiveTxnsMessage to fit inside of MaxOutgoingMessageLength -func truncateGiveTxnsMessage(m *GiveTxnsMessage, maxMsgLength uint64) { - // The message length will include a 4 byte message type prefix. 
- // Panic if the prefix can't fit, otherwise we can't adjust the uint64 safely - if maxMsgLength < 4 { - logger.Panic("maxMsgLength must be >= 4") - } - - maxMsgLength -= 4 - - // Measure the current message size, if it fits, return - n := m.EncodeSize() - if n <= maxMsgLength { - return - } - - // Measure the size of an empty message - var mm GiveTxnsMessage - size := mm.EncodeSize() - - // Measure the size of the txns, advancing the slice index until it reaches capacity - index := -1 - for i, txn := range m.Transactions { - x := encodeSizeTransaction(&txn) - if size+x > maxMsgLength { - break - } - size += x - index = i - } - - m.Transactions = m.Transactions[:index+1] - - if len(m.Transactions) == 0 { - logger.Critical().Error("truncateGiveTxnsMessage truncated txns to an empty slice") - } -} - -// EncodeSize implements gnet.Serializer -func (gtm *GiveTxnsMessage) EncodeSize() uint64 { - return encodeSizeGiveTxnsMessage(gtm) -} - -// Encode implements gnet.Serializer -func (gtm *GiveTxnsMessage) Encode(buf []byte) error { - return encodeGiveTxnsMessageToBuffer(buf, gtm) -} - -// Decode implements gnet.Serializer -func (gtm *GiveTxnsMessage) Decode(buf []byte) (uint64, error) { - return decodeGiveTxnsMessage(buf, gtm) -} - -// GetFiltered returns transactions hashes -func (gtm *GiveTxnsMessage) GetFiltered() []cipher.SHA256 { - return coin.Transactions(gtm.Transactions).Hashes() -} - -// Handle handle message -func (gtm *GiveTxnsMessage) Handle(mc *gnet.MessageContext, daemon interface{}) error { - gtm.c = mc - return daemon.(daemoner).recordMessageEvent(gtm, mc) -} - -// process process message -func (gtm *GiveTxnsMessage) process(d daemoner) { - dc := d.DaemonConfig() - if dc.DisableNetworking { - return - } - - hashes := make([]cipher.SHA256, 0, len(gtm.Transactions)) - // Update unconfirmed pool with these transactions - for _, txn := range gtm.Transactions { - // Only announce transactions that are new to us, so that peers can't spam relays - // It is not 
necessary to inject all of the transactions inside a database transaction, - // since each is independent - known, softErr, err := d.injectTransaction(txn) - if err != nil { - logger.WithError(err).WithField("txid", txn.Hash().Hex()).Warning("Failed to record transaction") - continue - } else if softErr != nil { - logger.WithError(softErr).WithField("txid", txn.Hash().Hex()).Warning("Transaction soft violation") - // Allow soft txn violations to rebroadcast - } else if known { - logger.WithField("txid", txn.Hash().Hex()).Debug("Duplicate transaction") - continue - } - - hashes = append(hashes, txn.Hash()) - } - - if len(hashes) == 0 { - return - } - - // Announce these transactions to peers - m := NewAnnounceTxnsMessage(hashes, dc.MaxOutgoingMessageLength) - if len(m.Transactions) != len(hashes) { - logger.Warningf("NewAnnounceTxnsMessage truncated %d hashes to %d hashes", len(hashes), len(m.Transactions)) - } - - if ids, err := d.broadcastMessage(m); err != nil { - logger.WithError(err).Warning("Broadcast AnnounceTxnsMessage failed") - } else { - logger.Debugf("Announced %d transactions to %d peers", len(hashes), len(ids)) - } -} diff --git a/vendor/github.com/SkycoinProject/skycoin/src/daemon/pex/README.md b/vendor/github.com/SkycoinProject/skycoin/src/daemon/pex/README.md deleted file mode 100644 index 2dc9172..0000000 --- a/vendor/github.com/SkycoinProject/skycoin/src/daemon/pex/README.md +++ /dev/null @@ -1,8 +0,0 @@ -pex -=== - -Tools for implementing peer exchange (PEX) with Go - -[![GoDoc](http://godoc.org/github.com//skycoin/pex?status.png)](http://godoc.org/github.com/SkycoinProject/skycoin/src/daemon/pex) - -[Godoc generated documentation](http://godoc.org/github.com/SkycoinProject/skycoin/src/daemon/pex) diff --git a/vendor/github.com/SkycoinProject/skycoin/src/daemon/pex/peerlist.go b/vendor/github.com/SkycoinProject/skycoin/src/daemon/pex/peerlist.go deleted file mode 100644 index bcea692..0000000 --- 
a/vendor/github.com/SkycoinProject/skycoin/src/daemon/pex/peerlist.go +++ /dev/null @@ -1,417 +0,0 @@ -package pex - -import ( - "encoding/json" - "fmt" - "io" - "math/rand" - "os" - "time" - - "github.com/sirupsen/logrus" - - "github.com/SkycoinProject/skycoin/src/util/file" - "github.com/SkycoinProject/skycoin/src/util/useragent" -) - -// Peers peer list -type Peers []Peer - -// ToAddrs returns the address list -func (ps Peers) ToAddrs() []string { - addrs := make([]string, 0, len(ps)) - for _, p := range ps { - addrs = append(addrs, p.Addr) - } - return addrs -} - -// peerlist is a map of addresses to *PeerStates -type peerlist struct { - peers map[string]*Peer -} - -func newPeerlist() peerlist { - return peerlist{ - peers: make(map[string]*Peer), - } -} - -// Filter peers filter -type Filter func(peer Peer) bool - -// loadCachedPeersFile loads peers from the cached peers.json file -func loadCachedPeersFile(path string) (map[string]*Peer, error) { - peersJSON := make(map[string]PeerJSON) - err := file.LoadJSON(path, &peersJSON) - - if os.IsNotExist(err) { - logger.WithField("path", path).Info("File does not exist") - return nil, nil - } else if err == io.EOF { - logger.WithField("path", path).Error("Corrupt or empty file") - return nil, nil - } - - if err != nil { - logger.WithField("path", path).WithError(err).Error("Failed to load peers file") - return nil, err - } - - peers := make(map[string]*Peer, len(peersJSON)) - for addr, peerJSON := range peersJSON { - fields := logrus.Fields{ - "addr": addr, - "path": path, - } - - a, err := validateAddress(addr, true) - - if err != nil { - logger.WithError(err).WithFields(fields).Error("Invalid address in peers JSON file") - continue - } - - peer, err := newPeerFromJSON(peerJSON) - if err != nil { - logger.WithError(err).WithFields(fields).Error("newPeerFromJSON failed") - continue - } - - if a != peer.Addr { - fields["peerAddr"] = peer.Addr - logger.WithFields(fields).Error("Address key does not match Peer.Addr") - 
continue - } - - peers[a] = peer - } - - return peers, nil -} - -func (pl *peerlist) setPeers(peers []Peer) { - for _, p := range peers { - np := p - pl.peers[p.Addr] = &np - } -} - -func (pl *peerlist) hasPeer(addr string) bool { - p, ok := pl.peers[addr] - return ok && p != nil -} - -func (pl *peerlist) addPeer(addr string) { - if p, ok := pl.peers[addr]; ok && p != nil { - p.Seen() - return - } - - peer := NewPeer(addr) - pl.peers[addr] = peer -} - -func (pl *peerlist) addPeers(addrs []string) { - for _, addr := range addrs { - pl.addPeer(addr) - } -} - -func (pl *peerlist) seen(addr string) { - if p, ok := pl.peers[addr]; ok && p != nil { - p.Seen() - } -} - -// getCanTryPeers returns all peers that are triable(retried times blew exponential backoff times) -// and are able to pass the filters. -func (pl *peerlist) getCanTryPeers(flts []Filter) Peers { - ps := make(Peers, 0) - flts = append([]Filter{canTry}, flts...) -loop: - for _, p := range pl.peers { - for i := range flts { - if !flts[i](*p) { - continue loop - } - } - - ps = append(ps, *p) - } - - return ps -} - -// getPeers returns all peers that can pass the filters. 
-func (pl *peerlist) getPeers(flts []Filter) Peers { - ps := make(Peers, 0) -loop: - for _, p := range pl.peers { - for i := range flts { - if !flts[i](*p) { - continue loop - } - } - - ps = append(ps, *p) - } - - return ps -} - -// filters -func isPrivate(p Peer) bool { - return p.Private -} - -func isPublic(p Peer) bool { - return !p.Private -} - -func isTrusted(p Peer) bool { - return p.Trusted -} - -func hasIncomingPort(p Peer) bool { - return p.HasIncomingPort -} - -func canTry(p Peer) bool { - return p.CanTry() -} - -// isExchangeable filters exchangeable peers -var isExchangeable = []Filter{hasIncomingPort, isPublic} - -// removePeer removes peer -func (pl *peerlist) removePeer(addr string) { - delete(pl.peers, addr) -} - -// SetPrivate sets specific peer as private -func (pl *peerlist) setPrivate(addr string, private bool) error { - if p, ok := pl.peers[addr]; ok { - p.Private = private - return nil - } - - return fmt.Errorf("set peer.Private failed: %v does not exist in peer list", addr) -} - -// setTrusted sets peer as trusted peer -func (pl *peerlist) setTrusted(addr string, trusted bool) error { - if p, ok := pl.peers[addr]; ok { - p.Trusted = trusted - return nil - } - - return fmt.Errorf("set peer.Trusted failed: %v does not exist in peer list", addr) -} - -// setAllUntrusted unsets the trusted field on all peers -func (pl *peerlist) setAllUntrusted() { - for _, p := range pl.peers { - p.Trusted = false - } -} - -// setHasIncomingPort marks the peer's port as being publicly accessible -func (pl *peerlist) setHasIncomingPort(addr string, hasIncomingPort bool) error { - if p, ok := pl.peers[addr]; ok { - p.HasIncomingPort = hasIncomingPort - p.Seen() - return nil - } - - return fmt.Errorf("set peer.HasIncomingPort failed: %v does not exist in peer list", addr) -} - -// setUserAgent sets a peer's user agent -func (pl *peerlist) setUserAgent(addr string, userAgent useragent.Data) error { - if p, ok := pl.peers[addr]; ok { - p.UserAgent = userAgent - 
p.Seen() - return nil - } - - return fmt.Errorf("set peer.UserAgent failed: %v does not exist in peer list", addr) -} - -// len returns number of peers -func (pl *peerlist) len() int { - return len(pl.peers) -} - -// getPeer returns peer for a given address -func (pl *peerlist) getPeer(addr string) (Peer, bool) { - p, ok := pl.peers[addr] - if ok { - return *p, true - } - return Peer{}, false -} - -// clearOld removes public, untrusted peers that haven't been seen in timeAgo seconds -func (pl *peerlist) clearOld(timeAgo time.Duration) { - t := time.Now().UTC() - for addr, peer := range pl.peers { - lastSeen := time.Unix(peer.LastSeen, 0) - if !peer.Private && !peer.Trusted && t.Sub(lastSeen) > timeAgo { - delete(pl.peers, addr) - } - } -} - -// Returns n random peers, or all of the peers, whichever is lower. -// If count is 0, all of the peers are returned, shuffled. -func (pl *peerlist) random(count int, flts []Filter) Peers { - keys := pl.getCanTryPeers(flts).ToAddrs() - if len(keys) == 0 { - return Peers{} - } - - max := count - if max == 0 || max > len(keys) { - max = len(keys) - } - - ps := make(Peers, max) - perm := rand.Perm(len(keys)) - for i, j := range perm[:max] { - ps[i] = *pl.peers[keys[j]] - } - return ps -} - -// save saves known peers to disk as a newline delimited list of addresses to -// -func (pl *peerlist) save(fn string) error { - // filter the peers that has retrytime > MaxPeerRetryTimes - peers := make(map[string]PeerJSON) - for k, p := range pl.peers { - if p.RetryTimes <= MaxPeerRetryTimes { - peers[k] = newPeerJSON(*p) - } - } - - if err := file.SaveJSON(fn, peers, 0600); err != nil { - return fmt.Errorf("save peer list failed: %s", err) - } - return nil -} - -// increaseRetryTimes increases retry times -func (pl *peerlist) increaseRetryTimes(addr string) { - if p, ok := pl.peers[addr]; ok { - p.IncreaseRetryTimes() - p.Seen() - } -} - -// resetRetryTimes reset retry times -func (pl *peerlist) resetRetryTimes(addr string) { - if p, ok := 
pl.peers[addr]; ok { - p.ResetRetryTimes() - p.Seen() - } -} - -// resetAllRetryTimes reset all peers' retry times -func (pl *peerlist) resetAllRetryTimes() { - logger.Info("Reset all peer's retry times") - for _, p := range pl.peers { - p.ResetRetryTimes() - } -} - -func (pl *peerlist) findOldestUntrustedPeer() *Peer { - var oldest *Peer - - for _, p := range pl.peers { - if p.Trusted || p.Private { - continue - } - - if oldest == nil || p.LastSeen < oldest.LastSeen { - oldest = p - } - } - - if oldest != nil { - p := *oldest - return &p - } - - return nil -} - -// PeerJSON is for saving and loading peers to disk. Some fields are strange, -// to be backwards compatible due to variable name changes -type PeerJSON struct { - Addr string // An address of the form ip:port - // Unix timestamp when this peer was last seen. - // This could be a time.Time string or an int64 timestamp - LastSeen interface{} - Private bool // Whether it should omitted from public requests - Trusted bool // Whether this peer is trusted - HasIncomePort *bool `json:"HasIncomePort,omitempty"` // Whether this peer has incoming port [DEPRECATED] - HasIncomingPort *bool // Whether this peer has incoming port - UserAgent useragent.Data -} - -// newPeerJSON returns a PeerJSON from a Peer -func newPeerJSON(p Peer) PeerJSON { - return PeerJSON{ - Addr: p.Addr, - LastSeen: p.LastSeen, - Private: p.Private, - Trusted: p.Trusted, - HasIncomingPort: &p.HasIncomingPort, - UserAgent: p.UserAgent, - } -} - -// newPeerFromJSON converts a PeerJSON to a Peer -func newPeerFromJSON(p PeerJSON) (*Peer, error) { - hasIncomingPort := false - if p.HasIncomingPort != nil { - hasIncomingPort = *p.HasIncomingPort - } else if p.HasIncomePort != nil { - hasIncomingPort = *p.HasIncomePort - } - - // LastSeen could be a RFC3339Nano timestamp or an int64 unix timestamp - var lastSeen int64 - switch p.LastSeen.(type) { - case string: - lastSeenTime, err := time.Parse(time.RFC3339Nano, p.LastSeen.(string)) - if err != nil { - 
return nil, err - } - lastSeen = lastSeenTime.Unix() - case json.Number: - lastSeenNum := p.LastSeen.(json.Number) - var err error - lastSeen, err = lastSeenNum.Int64() - if err != nil { - return nil, err - } - default: - return nil, fmt.Errorf("Invalid type %T for LastSeen field", p.LastSeen) - } - - addr, err := validateAddress(p.Addr, true) - if err != nil { - return nil, err - } - - return &Peer{ - Addr: addr, - LastSeen: lastSeen, - Private: p.Private, - Trusted: p.Trusted, - HasIncomingPort: hasIncomingPort, - UserAgent: p.UserAgent, - }, nil -} diff --git a/vendor/github.com/SkycoinProject/skycoin/src/daemon/pex/pex.go b/vendor/github.com/SkycoinProject/skycoin/src/daemon/pex/pex.go deleted file mode 100644 index 5e267e3..0000000 --- a/vendor/github.com/SkycoinProject/skycoin/src/daemon/pex/pex.go +++ /dev/null @@ -1,761 +0,0 @@ -// Package pex is a toolkit for implementing a peer exchange system -package pex - -import ( - "errors" - "fmt" - "io/ioutil" - "math" - "math/rand" - "net" - "net/http" - "os" - "path/filepath" - "regexp" - "strconv" - "strings" - "sync" - "time" - - "github.com/cenkalti/backoff" - "github.com/sirupsen/logrus" - - "github.com/SkycoinProject/skycoin/src/util/logging" - "github.com/SkycoinProject/skycoin/src/util/useragent" -) - -//TODO: -// - keep track of last time the peer was connected to -// - last time peer was connected to is more important than "seen" -// - peer "seen" means something else than use here -// - save last time connected to, use 0 for never -// - only transmit peers that have active or recent connections - -const ( - // DefaultPeerListURL is the default URL to download remote peers list from, if enabled - DefaultPeerListURL = "https://downloads.skycoin.com/blockchain/peers.txt" - // PeerCacheFilename filename for disk-cached peers - PeerCacheFilename = "peers.json" - // oldPeerCacheFilename previous filename for disk-cached peers. 
The cache loader will fall back onto this filename if it can't load peers.json - oldPeerCacheFilename = "peers.txt" - // MaxPeerRetryTimes is the maximum number of times to retry a peer - MaxPeerRetryTimes = 10 -) - -var ( - // ErrPeerlistFull is returned when the Pex is at a maximum - ErrPeerlistFull = errors.New("Peer list full") - // ErrInvalidAddress is returned when an address appears malformed - ErrInvalidAddress = errors.New("Invalid address") - // ErrNoLocalhost is returned if a localhost addresses are not allowed - ErrNoLocalhost = errors.New("Localhost address is not allowed") - // ErrNotExternalIP is returned if an IP address is not a global unicast address - ErrNotExternalIP = errors.New("IP is not a valid external IP") - // ErrPortTooLow is returned if a port is less than 1024 - ErrPortTooLow = errors.New("Port must be >= 1024") - // ErrBlacklistedAddress returned when attempting to add a blacklisted peer - ErrBlacklistedAddress = errors.New("Blacklisted address") - - // Logging. 
See http://godoc.org/github.com/op/go-logging for - // instructions on how to include this log's output - logger = logging.MustGetLogger("pex") - // Default rng - rnum = rand.New(rand.NewSource(time.Now().Unix())) - // For removing inadvertent whitespace from addresses - whitespaceFilter = regexp.MustCompile(`\s`) -) - -// validateAddress returns a sanitized address if valid, otherwise an error -func validateAddress(ipPort string, allowLocalhost bool) (string, error) { - ipPort = whitespaceFilter.ReplaceAllString(ipPort, "") - pts := strings.Split(ipPort, ":") - if len(pts) != 2 { - return "", ErrInvalidAddress - } - - ip := net.ParseIP(pts[0]) - if ip == nil { - return "", ErrInvalidAddress - } else if ip.IsLoopback() { - if !allowLocalhost { - return "", ErrNoLocalhost - } - } else if !ip.IsGlobalUnicast() { - return "", ErrNotExternalIP - } - - port, err := strconv.ParseUint(pts[1], 10, 16) - if err != nil { - return "", ErrInvalidAddress - } - - if port < 1024 { - return "", ErrPortTooLow - } - - return ipPort, nil -} - -// Peer represents a known peer -type Peer struct { - Addr string // An address of the form ip:port - LastSeen int64 // Unix timestamp when this peer was last seen - Private bool // Whether it should omitted from public requests - Trusted bool // Whether this peer is trusted - HasIncomingPort bool // Whether this peer has accessible public port - UserAgent useragent.Data // Peer's last reported user agent - RetryTimes int `json:"-"` // records the retry times -} - -// NewPeer returns a *Peer initialized by an address string of the form ip:port -func NewPeer(address string) *Peer { - p := &Peer{ - Addr: address, - Private: false, - Trusted: false, - } - p.Seen() - return p -} - -// Seen marks the peer as seen -func (peer *Peer) Seen() { - peer.LastSeen = time.Now().UTC().Unix() -} - -// IncreaseRetryTimes adds the retry times -func (peer *Peer) IncreaseRetryTimes() { - peer.RetryTimes++ - logger.WithFields(logrus.Fields{ - "addr": peer.Addr, - 
"retryTimes": peer.RetryTimes, - }).Debug("Increase retry times") -} - -// ResetRetryTimes resets the retry time -func (peer *Peer) ResetRetryTimes() { - peer.RetryTimes = 0 -} - -// CanTry returns whether this peer is tryable base on the exponential backoff algorithm -func (peer *Peer) CanTry() bool { - // Exponential backoff - mod := (math.Exp2(float64(peer.RetryTimes)) - 1) * 5 - if mod == 0 { - return true - } - - // Random time elapsed - now := time.Now().UTC().Unix() - t := rnum.Int63n(int64(mod)) - return now-peer.LastSeen > t -} - -// String returns the peer address -func (peer *Peer) String() string { - return peer.Addr -} - -// Config pex config -type Config struct { - // Folder where peers database should be saved - DataDirectory string - // Maximum number of peers to keep account of in the PeerList - Max int - // Cull peers after they havent been seen in this much time - Expiration time.Duration - // Cull expired peers on this interval - CullRate time.Duration - // clear old peers on this interval - ClearOldRate time.Duration - // How often to clear expired blacklist entries - UpdateBlacklistRate time.Duration - // How often to request peers via PEX - RequestRate time.Duration - // How many peers to send back in response to a peers request - ReplyCount int - // Localhost peers are allowed in the peerlist - AllowLocalhost bool - // Disable exchanging of peers. Peers are still loaded from disk - Disabled bool - // Whether the network is disabled - NetworkDisabled bool - // Download peers list from remote host - DownloadPeerList bool - // Download peers list from this URL - PeerListURL string - // Set all peers as untrusted (even if loaded from DefaultConnections) - DisableTrustedPeers bool - // Load peers from this file on disk. NOTE: this is different from the peers file cache in the data directory - CustomPeersFile string - // Default "trusted" connections - DefaultConnections []string -} - -// NewConfig creates default pex config. 
-func NewConfig() Config { - return Config{ - DataDirectory: "./", - Max: 65535, - Expiration: time.Hour * 24 * 7, - CullRate: time.Minute * 10, - ClearOldRate: time.Minute * 10, - UpdateBlacklistRate: time.Minute, - RequestRate: time.Minute, - ReplyCount: 30, - AllowLocalhost: false, - Disabled: false, - NetworkDisabled: false, - DownloadPeerList: false, - PeerListURL: DefaultPeerListURL, - DisableTrustedPeers: false, - CustomPeersFile: "", - } -} - -// Pex manages a set of known peers and controls peer acquisition -type Pex struct { - sync.RWMutex - // All known peers - peerlist peerlist - Config Config - quit chan struct{} - done chan struct{} -} - -// New creates pex -func New(cfg Config) (*Pex, error) { - pex := &Pex{ - Config: cfg, - peerlist: newPeerlist(), - quit: make(chan struct{}), - done: make(chan struct{}), - } - - // Load peers from disk - if err := pex.loadCache(); err != nil { - logger.Critical().WithError(err).Error("pex.loadCache failed") - return nil, err - } - - // Unset trusted status from any existing peers, regenerate - // them from the DefaultConnections - pex.setAllUntrusted() - - // Load default hardcoded peers, mark them as trusted - for _, addr := range cfg.DefaultConnections { - // Default peers will mark as trusted peers. 
- if err := pex.AddPeer(addr); err != nil { - logger.Critical().WithError(err).Error("Add default peer failed") - return nil, err - } - if err := pex.setTrusted(addr); err != nil { - logger.Critical().WithError(err).Error("pex.setTrusted for default peer failed") - return nil, err - } - } - - if cfg.DisableTrustedPeers { - // Unset trusted status from any existing peers - pex.setAllUntrusted() - } - - // Add custom peers - if cfg.CustomPeersFile != "" { - if err := pex.loadCustom(cfg.CustomPeersFile); err != nil { - logger.Critical().WithError(err).WithField("file", cfg.CustomPeersFile).Error("Failed to load custom peers file") - return nil, err - } - } - - // Save peers to disk - if err := pex.save(); err != nil { - return nil, err - } - - // Download peers from remote peers list if networking is enabled - if pex.Config.DownloadPeerList && !pex.Config.NetworkDisabled { - go func() { - if err := pex.downloadPeers(); err != nil { - logger.WithError(err).Error("Failed to download peers list") - } - }() - } - - return pex, nil -} - -// Run starts the pex service -func (px *Pex) Run() error { - logger.Info("Pex.Run started") - defer logger.Info("Pex.Run stopped") - defer close(px.done) - - defer func() { - // Save the peerlist - logger.Info("Save peerlist") - if err := px.save(); err != nil { - logger.WithError(err).Error("Save peerlist failed") - } - }() - - clearOldTicker := time.NewTicker(px.Config.ClearOldRate) - - for { - select { - case <-clearOldTicker.C: - // Remove peers we haven't seen in a while - if !px.Config.Disabled && !px.Config.NetworkDisabled { - func() { - px.Lock() - defer px.Unlock() - px.peerlist.clearOld(px.Config.Expiration) - }() - } - case <-px.quit: - return nil - } - } -} - -// Shutdown notifies the pex service to exist -func (px *Pex) Shutdown() { - logger.Info("Shutting down pex") - defer logger.Info("Pex shutdown") - close(px.quit) - <-px.done -} - -func (px *Pex) downloadPeers() error { - body, err := 
backoffDownloadText(px.Config.PeerListURL) - if err != nil { - logger.WithError(err).WithField("url", px.Config.PeerListURL).Error("Failed to download peers") - return err - } - - peers := parseRemotePeerList(body) - logger.WithField("url", px.Config.PeerListURL).Infof("Downloaded peers list, got %d peers", len(peers)) - - n := px.AddPeers(peers) - logger.WithField("url", px.Config.PeerListURL).Infof("Added %d/%d peers from downloaded peers list", n, len(peers)) - - return nil -} - -func (px *Pex) loadCache() error { - px.Lock() - defer px.Unlock() - - fp := filepath.Join(px.Config.DataDirectory, PeerCacheFilename) - peers, err := loadCachedPeersFile(fp) - - if err != nil { - return err - } - - // If the PeerCacheFilename peers.json file does not exist, try to load the old peers.txt file - if peers == nil { - logger.Infof("Peer cache %s not found, falling back on %s", PeerCacheFilename, oldPeerCacheFilename) - - fp := filepath.Join(px.Config.DataDirectory, oldPeerCacheFilename) - peers, err = loadCachedPeersFile(fp) - if err != nil { - return err - } - - if peers == nil { - logger.Infof("Fallback peer cache %s not found", oldPeerCacheFilename) - return nil - } - } - - // remove invalid peers and limit the max number of peers to pex.Config.Max - var validPeers []Peer - for addr, p := range peers { - if _, err := validateAddress(addr, px.Config.AllowLocalhost); err != nil { - logger.WithError(err).Error("Invalid peer address") - continue - } - - validPeers = append(validPeers, *p) - if px.Config.Max > 0 && len(validPeers) >= px.Config.Max { - break - } - } - - px.peerlist.setPeers(validPeers) - return nil -} - -func (px *Pex) loadCustom(fn string) error { - px.Lock() - defer px.Unlock() - - f, err := os.Open(fn) - if err != nil { - return err - } - - defer f.Close() - - data, err := ioutil.ReadAll(f) - if err != nil { - return err - } - - peers, err := parseLocalPeerList(string(data), px.Config.AllowLocalhost) - if err != nil { - return err - } - - 
logger.Infof("Loaded %d peers from %s", len(peers), fn) - - px.peerlist.addPeers(peers) - return nil -} - -// SavePeers persists the peerlist -func (px *Pex) save() error { - px.Lock() - defer px.Unlock() - - fn := filepath.Join(px.Config.DataDirectory, PeerCacheFilename) - return px.peerlist.save(fn) -} - -// AddPeer adds a peer to the peer list, given an address. If the peer list is -// full, it will try to remove an old peer to make room. -// If no room can be made, ErrPeerlistFull is returned -func (px *Pex) AddPeer(addr string) error { - px.Lock() - defer px.Unlock() - - cleanAddr, err := validateAddress(addr, px.Config.AllowLocalhost) - if err != nil { - logger.WithError(err).WithField("addr", addr).Error("Invalid address") - return ErrInvalidAddress - } - - if px.peerlist.hasPeer(cleanAddr) { - px.peerlist.seen(cleanAddr) - return nil - } - - if px.isFull() { - oldestPeer := px.peerlist.findOldestUntrustedPeer() - if oldestPeer == nil || time.Now().UTC().Unix()-oldestPeer.LastSeen < 60*60*24 { - return ErrPeerlistFull - } - - px.peerlist.removePeer(oldestPeer.Addr) - - if px.isFull() { - // This can happen if the node is run with a peers.json file that has more peers - // than the max peerlist size, then the peers.json file isn't truncated to the max peerlist size. - // It is not an error. - // The max is a soft limit; exceeding the max will not crash the program. - logger.Critical().Error("AddPeer: after removing the worst peer, the peerlist was still full") - } - } - - px.peerlist.addPeer(cleanAddr) - return nil -} - -// AddPeers add multiple peers at once. Any errors will be logged, but not returned -// Returns the number of peers that were added without error. Note that -// adding a duplicate peer will not cause an error. 
-func (px *Pex) AddPeers(addrs []string) int { - px.Lock() - defer px.Unlock() - - if px.Config.Max > 0 && px.peerlist.len() >= px.Config.Max { - logger.Warning("Add peers failed, peer list is full") - return 0 - } - - // validate the addresses - var validAddrs []string - for _, addr := range addrs { - a, err := validateAddress(addr, px.Config.AllowLocalhost) - if err != nil { - logger.WithField("addr", addr).WithError(err).Info("Add peers sees an invalid address") - continue - } - validAddrs = append(validAddrs, a) - } - addrs = validAddrs - - // Shuffle the addresses before capping them - rand.Shuffle(len(addrs), func(i, j int) { - addrs[i], addrs[j] = addrs[j], addrs[i] - }) - - if px.Config.Max > 0 { - rcap := px.Config.Max - px.peerlist.len() - if len(addrs) > rcap { - addrs = addrs[:rcap] - } - } - - px.peerlist.addPeers(addrs) - return len(addrs) -} - -// SetPrivate updates peer's private value -func (px *Pex) SetPrivate(addr string, private bool) error { - px.Lock() - defer px.Unlock() - - cleanAddr, err := validateAddress(addr, px.Config.AllowLocalhost) - if err != nil { - logger.WithError(err).WithField("addr", addr).Error("Invalid address") - return ErrInvalidAddress - } - - return px.peerlist.setPrivate(cleanAddr, private) -} - -// setTrusted marks a peer as a default peer by setting its trusted flag to true -func (px *Pex) setTrusted(addr string) error { - px.Lock() - defer px.Unlock() - - cleanAddr, err := validateAddress(addr, px.Config.AllowLocalhost) - if err != nil { - logger.WithError(err).WithField("addr", addr).Error("Invalid address") - return ErrInvalidAddress - } - - return px.peerlist.setTrusted(cleanAddr, true) -} - -// setAllUntrusted unsets the trusted field on all peers -func (px *Pex) setAllUntrusted() { - px.Lock() - defer px.Unlock() - - px.peerlist.setAllUntrusted() -} - -// SetHasIncomingPort sets if the peer has public port -func (px *Pex) SetHasIncomingPort(addr string, hasPublicPort bool) error { - px.Lock() - defer px.Unlock() 
- - cleanAddr, err := validateAddress(addr, px.Config.AllowLocalhost) - if err != nil { - logger.WithError(err).WithField("addr", addr).Error("Invalid address") - return ErrInvalidAddress - } - - return px.peerlist.setHasIncomingPort(cleanAddr, hasPublicPort) -} - -// SetUserAgent sets the peer's user agent -func (px *Pex) SetUserAgent(addr string, userAgent useragent.Data) error { - px.Lock() - defer px.Unlock() - - if !userAgent.Empty() { - if _, err := userAgent.Build(); err != nil { - return err - } - } - - cleanAddr, err := validateAddress(addr, px.Config.AllowLocalhost) - if err != nil { - logger.WithError(err).WithField("addr", addr).Error("Invalid address") - return ErrInvalidAddress - } - - return px.peerlist.setUserAgent(cleanAddr, userAgent) -} - -// RemovePeer removes peer -func (px *Pex) RemovePeer(addr string) { - px.Lock() - defer px.Unlock() - px.peerlist.removePeer(addr) -} - -// GetPeer returns peer of given address -func (px *Pex) GetPeer(addr string) (Peer, bool) { - px.RLock() - defer px.RUnlock() - return px.peerlist.getPeer(addr) -} - -// Trusted returns trusted peers -func (px *Pex) Trusted() Peers { - px.RLock() - defer px.RUnlock() - return px.peerlist.getPeers([]Filter{isTrusted}) -} - -// Private returns private peers -func (px *Pex) Private() Peers { - px.RLock() - defer px.RUnlock() - return px.peerlist.getCanTryPeers([]Filter{isPrivate}) -} - -// TrustedPublic returns trusted public peers -func (px *Pex) TrustedPublic() Peers { - px.RLock() - defer px.RUnlock() - return px.peerlist.getCanTryPeers([]Filter{isPublic, isTrusted}) -} - -// RandomPublic returns N random public untrusted peers -func (px *Pex) RandomPublic(n int) Peers { - px.RLock() - defer px.RUnlock() - return px.peerlist.random(n, []Filter{func(p Peer) bool { - return !p.Private - }}) -} - -// RandomExchangeable returns N random exchangeable peers -func (px *Pex) RandomExchangeable(n int) Peers { - px.RLock() - defer px.RUnlock() - return px.peerlist.random(n, 
isExchangeable) -} - -// IncreaseRetryTimes increases retry times -func (px *Pex) IncreaseRetryTimes(addr string) { - px.Lock() - defer px.Unlock() - px.peerlist.increaseRetryTimes(addr) -} - -// ResetRetryTimes reset retry times -func (px *Pex) ResetRetryTimes(addr string) { - px.Lock() - defer px.Unlock() - px.peerlist.resetRetryTimes(addr) -} - -// ResetAllRetryTimes reset all peers' retry times -func (px *Pex) ResetAllRetryTimes() { - px.Lock() - defer px.Unlock() - px.peerlist.resetAllRetryTimes() -} - -// IsFull returns whether the peer list is full -func (px *Pex) IsFull() bool { - px.RLock() - defer px.RUnlock() - return px.isFull() -} - -func (px *Pex) isFull() bool { - return px.Config.Max > 0 && px.peerlist.len() >= px.Config.Max -} - -// downloadText downloads a text format file from url. -// Returns the raw response body as a string. -// TODO -- move to util, add backoff options -func downloadText(url string) (string, error) { - resp, err := http.Get(url) //nolint:gosec - if err != nil { - return "", err - } - defer resp.Body.Close() - - body, err := ioutil.ReadAll(resp.Body) - if err != nil { - return "", err - } - - return string(body), nil -} - -func backoffDownloadText(url string) (string, error) { - var body string - - b := backoff.NewExponentialBackOff() - - notify := func(err error, wait time.Duration) { - logger.WithError(err).WithField("waitTime", wait).Error("waiting to retry downloadText") - } - - operation := func() error { - logger.WithField("url", url).Info("Trying to download peers list") - var err error - body, err = downloadText(url) - return err - } - - if err := backoff.RetryNotify(operation, b, notify); err != nil { - logger.WithField("url", url).WithError(err).Info("Gave up dowloading peers list") - return "", err - } - - logger.WithField("url", url).Info("Peers list downloaded") - - return body, nil -} - -// parseRemotePeerList parses a remote peers.txt file -// The peers list format is newline separated list of ip:port strings 
-// Any lines that don't parse to an ip:port are skipped, otherwise they return an error -// Localhost ip:port addresses are ignored -// NOTE: this does not parse the cached peers.json file in the data directory, which is a JSON file -// and is loaded by loadCachedPeersFile -func parseRemotePeerList(body string) []string { - var peers []string - for _, addr := range strings.Split(body, "\n") { - addr = whitespaceFilter.ReplaceAllString(addr, "") - if addr == "" { - continue - } - - // Never allow localhost addresses from the remote peers list - a, err := validateAddress(addr, false) - if err != nil { - err = fmt.Errorf("Peers list has invalid address %s: %v", addr, err) - logger.WithError(err).Error() - continue - } - - peers = append(peers, a) - } - - return peers -} - -// parseLocalPeerList parses a local peers.txt file -// The peers list format is newline separated list of ip:port strings -// Empty lines and lines that begin with # are treated as comment lines -// Otherwise, the line is parsed as an ip:port -// If the line fails to parse, an error is returned -// Localhost addresses are allowed if allowLocalhost is true -// NOTE: this does not parse the cached peers.json file in the data directory, which is a JSON file -// and is loaded by loadCachedPeersFile -func parseLocalPeerList(body string, allowLocalhost bool) ([]string, error) { - var peers []string - for _, addr := range strings.Split(body, "\n") { - addr = whitespaceFilter.ReplaceAllString(addr, "") - if addr == "" { - continue - } - - if strings.HasPrefix(addr, "#") { - continue - } - - a, err := validateAddress(addr, allowLocalhost) - if err != nil { - err = fmt.Errorf("Peers list has invalid address %s: %v", addr, err) - logger.WithError(err).Error() - return nil, err - } - - peers = append(peers, a) - } - - return peers, nil -} diff --git a/vendor/github.com/SkycoinProject/skycoin/src/daemon/pool.go b/vendor/github.com/SkycoinProject/skycoin/src/daemon/pool.go deleted file mode 100644 index 
9ecb9c7..0000000 --- a/vendor/github.com/SkycoinProject/skycoin/src/daemon/pool.go +++ /dev/null @@ -1,124 +0,0 @@ -package daemon - -import ( - "time" - - "github.com/SkycoinProject/skycoin/src/daemon/gnet" -) - -// PoolConfig pool config -type PoolConfig struct { - // Timeout when trying to connect to new peers through the pool - DialTimeout time.Duration - // How often to process message buffers and generate events - MessageHandlingRate time.Duration - // How long to wait before sending another ping - PingRate time.Duration - // How long a connection can idle before considered stale - IdleLimit time.Duration - // How often to check for needed pings - IdleCheckRate time.Duration - // How often to check for stale connections - ClearStaleRate time.Duration - // Buffer size for gnet.ConnectionPool's network Read events - EventChannelSize int - // Maximum number of connections - MaxConnections int - // Maximum number of outgoing connections - MaxOutgoingConnections int - // Maximum number of outgoing connections to peers in the DefaultConnections list to maintain - MaxDefaultPeerOutgoingConnections int - // Default "trusted" peers - DefaultConnections []string - // Maximum length of incoming messages in bytes - MaxIncomingMessageLength int - // Maximum length of outgoing messages in bytes - MaxOutgoingMessageLength int - // These should be assigned by the controlling daemon - address string - port int -} - -// NewPoolConfig creates pool config -func NewPoolConfig() PoolConfig { - return PoolConfig{ - port: 6677, - address: "", - DialTimeout: time.Second * 30, - MessageHandlingRate: time.Millisecond * 50, - PingRate: 5 * time.Second, - IdleLimit: 60 * time.Second, - IdleCheckRate: 1 * time.Second, - ClearStaleRate: 1 * time.Second, - EventChannelSize: 4096, - MaxConnections: 128, - MaxOutgoingConnections: 8, - MaxDefaultPeerOutgoingConnections: 1, - MaxOutgoingMessageLength: 256 * 1024, - MaxIncomingMessageLength: 1024 * 1024, - } -} - -// Pool maintains config and 
pool -type Pool struct { - Config PoolConfig - Pool *gnet.ConnectionPool -} - -// NewPool creates pool -func NewPool(cfg PoolConfig, d *Daemon) (*Pool, error) { - gnetCfg := gnet.NewConfig() - gnetCfg.DialTimeout = cfg.DialTimeout - gnetCfg.Port = uint16(cfg.port) - gnetCfg.Address = cfg.address - gnetCfg.ConnectCallback = d.onGnetConnect - gnetCfg.DisconnectCallback = d.onGnetDisconnect - gnetCfg.ConnectFailureCallback = d.onGnetConnectFailure - gnetCfg.MaxConnections = cfg.MaxConnections - gnetCfg.MaxOutgoingConnections = cfg.MaxOutgoingConnections - gnetCfg.MaxDefaultPeerOutgoingConnections = cfg.MaxDefaultPeerOutgoingConnections - gnetCfg.DefaultConnections = cfg.DefaultConnections - gnetCfg.MaxIncomingMessageLength = cfg.MaxIncomingMessageLength - gnetCfg.MaxOutgoingMessageLength = cfg.MaxOutgoingMessageLength - - pool, err := gnet.NewConnectionPool(gnetCfg, d) - if err != nil { - return nil, err - } - - return &Pool{ - Config: cfg, - Pool: pool, - }, nil -} - -// Shutdown closes all connections and stops listening -func (pool *Pool) Shutdown() { - if pool == nil { - return - } - pool.Pool.Shutdown() -} - -// Run starts listening on the configured Port -func (pool *Pool) Run() error { - logger.Infof("daemon.Pool listening on port %d", pool.Config.port) - return pool.Pool.Run() -} - -// RunOffline runs the pool without a listener. This is necessary to process strand requests. 
-func (pool *Pool) RunOffline() error { - return pool.Pool.RunOffline() -} - -// sendPings send a ping if our last message sent was over pingRate ago -func (pool *Pool) sendPings() { - if err := pool.Pool.SendPings(pool.Config.PingRate, &PingMessage{}); err != nil { - logger.WithError(err).Error("sendPings failed") - } -} - -// getStaleConnections returns connections that have been idle for longer than idleLimit -func (pool *Pool) getStaleConnections() ([]string, error) { - return pool.Pool.GetStaleConnections(pool.Config.IdleLimit) -} diff --git a/vendor/github.com/SkycoinProject/skycoin/src/daemon/signed_block_skyencoder.go b/vendor/github.com/SkycoinProject/skycoin/src/daemon/signed_block_skyencoder.go deleted file mode 100644 index 57e768b..0000000 --- a/vendor/github.com/SkycoinProject/skycoin/src/daemon/signed_block_skyencoder.go +++ /dev/null @@ -1,528 +0,0 @@ -// Code generated by github.com/SkycoinProject/skyencoder. DO NOT EDIT. - -package daemon - -import ( - "errors" - "math" - - "github.com/SkycoinProject/skycoin/src/cipher" - "github.com/SkycoinProject/skycoin/src/cipher/encoder" - "github.com/SkycoinProject/skycoin/src/coin" -) - -// encodeSizeSignedBlock computes the size of an encoded object of type SignedBlock -func encodeSizeSignedBlock(obj *coin.SignedBlock) uint64 { - i0 := uint64(0) - - // obj.Block.Head.Version - i0 += 4 - - // obj.Block.Head.Time - i0 += 8 - - // obj.Block.Head.BkSeq - i0 += 8 - - // obj.Block.Head.Fee - i0 += 8 - - // obj.Block.Head.PrevHash - i0 += 32 - - // obj.Block.Head.BodyHash - i0 += 32 - - // obj.Block.Head.UxHash - i0 += 32 - - // obj.Block.Body.Transactions - i0 += 4 - for _, x1 := range obj.Block.Body.Transactions { - i1 := uint64(0) - - // x1.Length - i1 += 4 - - // x1.Type - i1++ - - // x1.InnerHash - i1 += 32 - - // x1.Sigs - i1 += 4 - { - i2 := uint64(0) - - // x2 - i2 += 65 - - i1 += uint64(len(x1.Sigs)) * i2 - } - - // x1.In - i1 += 4 - { - i2 := uint64(0) - - // x2 - i2 += 32 - - i1 += uint64(len(x1.In)) * 
i2 - } - - // x1.Out - i1 += 4 - { - i2 := uint64(0) - - // x2.Address.Version - i2++ - - // x2.Address.Key - i2 += 20 - - // x2.Coins - i2 += 8 - - // x2.Hours - i2 += 8 - - i1 += uint64(len(x1.Out)) * i2 - } - - i0 += i1 - } - - // obj.Sig - i0 += 65 - - return i0 -} - -// encodeSignedBlock encodes an object of type SignedBlock to a buffer allocated to the exact size -// required to encode the object. -func encodeSignedBlock(obj *coin.SignedBlock) ([]byte, error) { - n := encodeSizeSignedBlock(obj) - buf := make([]byte, n) - - if err := encodeSignedBlockToBuffer(buf, obj); err != nil { - return nil, err - } - - return buf, nil -} - -// encodeSignedBlockToBuffer encodes an object of type SignedBlock to a []byte buffer. -// The buffer must be large enough to encode the object, otherwise an error is returned. -func encodeSignedBlockToBuffer(buf []byte, obj *coin.SignedBlock) error { - if uint64(len(buf)) < encodeSizeSignedBlock(obj) { - return encoder.ErrBufferUnderflow - } - - e := &encoder.Encoder{ - Buffer: buf[:], - } - - // obj.Block.Head.Version - e.Uint32(obj.Block.Head.Version) - - // obj.Block.Head.Time - e.Uint64(obj.Block.Head.Time) - - // obj.Block.Head.BkSeq - e.Uint64(obj.Block.Head.BkSeq) - - // obj.Block.Head.Fee - e.Uint64(obj.Block.Head.Fee) - - // obj.Block.Head.PrevHash - e.CopyBytes(obj.Block.Head.PrevHash[:]) - - // obj.Block.Head.BodyHash - e.CopyBytes(obj.Block.Head.BodyHash[:]) - - // obj.Block.Head.UxHash - e.CopyBytes(obj.Block.Head.UxHash[:]) - - // obj.Block.Body.Transactions maxlen check - if len(obj.Block.Body.Transactions) > 65535 { - return encoder.ErrMaxLenExceeded - } - - // obj.Block.Body.Transactions length check - if uint64(len(obj.Block.Body.Transactions)) > math.MaxUint32 { - return errors.New("obj.Block.Body.Transactions length exceeds math.MaxUint32") - } - - // obj.Block.Body.Transactions length - e.Uint32(uint32(len(obj.Block.Body.Transactions))) - - // obj.Block.Body.Transactions - for _, x := range 
obj.Block.Body.Transactions { - - // x.Length - e.Uint32(x.Length) - - // x.Type - e.Uint8(x.Type) - - // x.InnerHash - e.CopyBytes(x.InnerHash[:]) - - // x.Sigs maxlen check - if len(x.Sigs) > 65535 { - return encoder.ErrMaxLenExceeded - } - - // x.Sigs length check - if uint64(len(x.Sigs)) > math.MaxUint32 { - return errors.New("x.Sigs length exceeds math.MaxUint32") - } - - // x.Sigs length - e.Uint32(uint32(len(x.Sigs))) - - // x.Sigs - for _, x := range x.Sigs { - - // x - e.CopyBytes(x[:]) - - } - - // x.In maxlen check - if len(x.In) > 65535 { - return encoder.ErrMaxLenExceeded - } - - // x.In length check - if uint64(len(x.In)) > math.MaxUint32 { - return errors.New("x.In length exceeds math.MaxUint32") - } - - // x.In length - e.Uint32(uint32(len(x.In))) - - // x.In - for _, x := range x.In { - - // x - e.CopyBytes(x[:]) - - } - - // x.Out maxlen check - if len(x.Out) > 65535 { - return encoder.ErrMaxLenExceeded - } - - // x.Out length check - if uint64(len(x.Out)) > math.MaxUint32 { - return errors.New("x.Out length exceeds math.MaxUint32") - } - - // x.Out length - e.Uint32(uint32(len(x.Out))) - - // x.Out - for _, x := range x.Out { - - // x.Address.Version - e.Uint8(x.Address.Version) - - // x.Address.Key - e.CopyBytes(x.Address.Key[:]) - - // x.Coins - e.Uint64(x.Coins) - - // x.Hours - e.Uint64(x.Hours) - - } - - } - - // obj.Sig - e.CopyBytes(obj.Sig[:]) - - return nil -} - -// decodeSignedBlock decodes an object of type SignedBlock from a buffer. -// Returns the number of bytes used from the buffer to decode the object. -// If the buffer not long enough to decode the object, returns encoder.ErrBufferUnderflow. 
-func decodeSignedBlock(buf []byte, obj *coin.SignedBlock) (uint64, error) { - d := &encoder.Decoder{ - Buffer: buf[:], - } - - { - // obj.Block.Head.Version - i, err := d.Uint32() - if err != nil { - return 0, err - } - obj.Block.Head.Version = i - } - - { - // obj.Block.Head.Time - i, err := d.Uint64() - if err != nil { - return 0, err - } - obj.Block.Head.Time = i - } - - { - // obj.Block.Head.BkSeq - i, err := d.Uint64() - if err != nil { - return 0, err - } - obj.Block.Head.BkSeq = i - } - - { - // obj.Block.Head.Fee - i, err := d.Uint64() - if err != nil { - return 0, err - } - obj.Block.Head.Fee = i - } - - { - // obj.Block.Head.PrevHash - if len(d.Buffer) < len(obj.Block.Head.PrevHash) { - return 0, encoder.ErrBufferUnderflow - } - copy(obj.Block.Head.PrevHash[:], d.Buffer[:len(obj.Block.Head.PrevHash)]) - d.Buffer = d.Buffer[len(obj.Block.Head.PrevHash):] - } - - { - // obj.Block.Head.BodyHash - if len(d.Buffer) < len(obj.Block.Head.BodyHash) { - return 0, encoder.ErrBufferUnderflow - } - copy(obj.Block.Head.BodyHash[:], d.Buffer[:len(obj.Block.Head.BodyHash)]) - d.Buffer = d.Buffer[len(obj.Block.Head.BodyHash):] - } - - { - // obj.Block.Head.UxHash - if len(d.Buffer) < len(obj.Block.Head.UxHash) { - return 0, encoder.ErrBufferUnderflow - } - copy(obj.Block.Head.UxHash[:], d.Buffer[:len(obj.Block.Head.UxHash)]) - d.Buffer = d.Buffer[len(obj.Block.Head.UxHash):] - } - - { - // obj.Block.Body.Transactions - - ul, err := d.Uint32() - if err != nil { - return 0, err - } - - length := int(ul) - if length < 0 || length > len(d.Buffer) { - return 0, encoder.ErrBufferUnderflow - } - - if length > 65535 { - return 0, encoder.ErrMaxLenExceeded - } - - if length != 0 { - obj.Block.Body.Transactions = make([]coin.Transaction, length) - - for z3 := range obj.Block.Body.Transactions { - { - // obj.Block.Body.Transactions[z3].Length - i, err := d.Uint32() - if err != nil { - return 0, err - } - obj.Block.Body.Transactions[z3].Length = i - } - - { - // 
obj.Block.Body.Transactions[z3].Type - i, err := d.Uint8() - if err != nil { - return 0, err - } - obj.Block.Body.Transactions[z3].Type = i - } - - { - // obj.Block.Body.Transactions[z3].InnerHash - if len(d.Buffer) < len(obj.Block.Body.Transactions[z3].InnerHash) { - return 0, encoder.ErrBufferUnderflow - } - copy(obj.Block.Body.Transactions[z3].InnerHash[:], d.Buffer[:len(obj.Block.Body.Transactions[z3].InnerHash)]) - d.Buffer = d.Buffer[len(obj.Block.Body.Transactions[z3].InnerHash):] - } - - { - // obj.Block.Body.Transactions[z3].Sigs - - ul, err := d.Uint32() - if err != nil { - return 0, err - } - - length := int(ul) - if length < 0 || length > len(d.Buffer) { - return 0, encoder.ErrBufferUnderflow - } - - if length > 65535 { - return 0, encoder.ErrMaxLenExceeded - } - - if length != 0 { - obj.Block.Body.Transactions[z3].Sigs = make([]cipher.Sig, length) - - for z5 := range obj.Block.Body.Transactions[z3].Sigs { - { - // obj.Block.Body.Transactions[z3].Sigs[z5] - if len(d.Buffer) < len(obj.Block.Body.Transactions[z3].Sigs[z5]) { - return 0, encoder.ErrBufferUnderflow - } - copy(obj.Block.Body.Transactions[z3].Sigs[z5][:], d.Buffer[:len(obj.Block.Body.Transactions[z3].Sigs[z5])]) - d.Buffer = d.Buffer[len(obj.Block.Body.Transactions[z3].Sigs[z5]):] - } - - } - } - } - - { - // obj.Block.Body.Transactions[z3].In - - ul, err := d.Uint32() - if err != nil { - return 0, err - } - - length := int(ul) - if length < 0 || length > len(d.Buffer) { - return 0, encoder.ErrBufferUnderflow - } - - if length > 65535 { - return 0, encoder.ErrMaxLenExceeded - } - - if length != 0 { - obj.Block.Body.Transactions[z3].In = make([]cipher.SHA256, length) - - for z5 := range obj.Block.Body.Transactions[z3].In { - { - // obj.Block.Body.Transactions[z3].In[z5] - if len(d.Buffer) < len(obj.Block.Body.Transactions[z3].In[z5]) { - return 0, encoder.ErrBufferUnderflow - } - copy(obj.Block.Body.Transactions[z3].In[z5][:], d.Buffer[:len(obj.Block.Body.Transactions[z3].In[z5])]) - d.Buffer 
= d.Buffer[len(obj.Block.Body.Transactions[z3].In[z5]):] - } - - } - } - } - - { - // obj.Block.Body.Transactions[z3].Out - - ul, err := d.Uint32() - if err != nil { - return 0, err - } - - length := int(ul) - if length < 0 || length > len(d.Buffer) { - return 0, encoder.ErrBufferUnderflow - } - - if length > 65535 { - return 0, encoder.ErrMaxLenExceeded - } - - if length != 0 { - obj.Block.Body.Transactions[z3].Out = make([]coin.TransactionOutput, length) - - for z5 := range obj.Block.Body.Transactions[z3].Out { - { - // obj.Block.Body.Transactions[z3].Out[z5].Address.Version - i, err := d.Uint8() - if err != nil { - return 0, err - } - obj.Block.Body.Transactions[z3].Out[z5].Address.Version = i - } - - { - // obj.Block.Body.Transactions[z3].Out[z5].Address.Key - if len(d.Buffer) < len(obj.Block.Body.Transactions[z3].Out[z5].Address.Key) { - return 0, encoder.ErrBufferUnderflow - } - copy(obj.Block.Body.Transactions[z3].Out[z5].Address.Key[:], d.Buffer[:len(obj.Block.Body.Transactions[z3].Out[z5].Address.Key)]) - d.Buffer = d.Buffer[len(obj.Block.Body.Transactions[z3].Out[z5].Address.Key):] - } - - { - // obj.Block.Body.Transactions[z3].Out[z5].Coins - i, err := d.Uint64() - if err != nil { - return 0, err - } - obj.Block.Body.Transactions[z3].Out[z5].Coins = i - } - - { - // obj.Block.Body.Transactions[z3].Out[z5].Hours - i, err := d.Uint64() - if err != nil { - return 0, err - } - obj.Block.Body.Transactions[z3].Out[z5].Hours = i - } - - } - } - } - } - } - } - - { - // obj.Sig - if len(d.Buffer) < len(obj.Sig) { - return 0, encoder.ErrBufferUnderflow - } - copy(obj.Sig[:], d.Buffer[:len(obj.Sig)]) - d.Buffer = d.Buffer[len(obj.Sig):] - } - - return uint64(len(buf) - len(d.Buffer)), nil -} - -// decodeSignedBlockExact decodes an object of type SignedBlock from a buffer. -// If the buffer not long enough to decode the object, returns encoder.ErrBufferUnderflow. -// If the buffer is longer than required to decode the object, returns encoder.ErrRemainingBytes. 
-func decodeSignedBlockExact(buf []byte, obj *coin.SignedBlock) error { - if n, err := decodeSignedBlock(buf, obj); err != nil { - return err - } else if n != uint64(len(buf)) { - return encoder.ErrRemainingBytes - } - - return nil -} diff --git a/vendor/github.com/SkycoinProject/skycoin/src/daemon/strand/strand.go b/vendor/github.com/SkycoinProject/skycoin/src/daemon/strand/strand.go deleted file mode 100644 index 5bdfbc6..0000000 --- a/vendor/github.com/SkycoinProject/skycoin/src/daemon/strand/strand.go +++ /dev/null @@ -1,135 +0,0 @@ -/* -Package strand is a utility for linearizing method calls, similar to locking. - -The strand method is functionally similar to a lock, but operates on a queue -of method calls. -*/ -package strand - -import ( - "time" - - "github.com/sirupsen/logrus" - - "github.com/SkycoinProject/skycoin/src/util/logging" -) - -const ( - // logDurationThreshold is how long to wait before reporting a function call's time - logDurationThreshold = time.Millisecond * 100 - // writeWait is how long to wait to write to a request channel before logging the delay - logQueueRequestWaitThreshold = time.Second * 1 -) - -var ( - // Debug enables debug logging - Debug = false -) - -// Request is sent to the channel provided to Strand -type Request struct { - Name string - Func func() error -} - -// Strand linearizes concurrent method calls through a single channel, -// to avoid concurrency issues when conflicting methods are called from -// multiple goroutines. -// Methods passed to Strand() will block until completed. -// Strand accepts a quit channel and will return quitErr if the quit -// channel closes. 
-func Strand(logger *logging.Logger, c chan Request, name string, f func() error, quit chan struct{}, quitErr error) error { - if Debug { - logger.WithField("operation", name).Debug("Strand precall") - } - - done := make(chan struct{}) - var err error - - req := Request{ - Name: name, - Func: func() error { - defer close(done) - - // TODO: record time statistics in a data structure and expose stats via an API - // logger.Debugf("%s begin", name) - - t := time.Now() - - // Log function duration at an exponential time interval, - // this will notify us of any long running functions to look at. - go func() { - threshold := logDurationThreshold - t := time.NewTimer(threshold) - defer t.Stop() - - for { - t0 := time.Now() - select { - case <-quit: - return - case <-done: - return - case <-t.C: - logger.WithFields(logrus.Fields{ - "operation": name, - "threshold": threshold, - }).Warning("Strand operation exceeded threshold") - threshold *= 10 - t.Reset(threshold) - } - t1 := time.Now() - logger.WithField("elapsed", t1.Sub(t0)).Info() - } - }() - - if Debug { - logger.WithField("operation", name).Debug("Stranding") - } - - err = f() - - // Notify us if the function call took too long - elapsed := time.Since(t) - if elapsed > logDurationThreshold { - logger.WithFields(logrus.Fields{ - "operation": name, - "elapsed": elapsed, - }).Warning() - } else if Debug { - logger.WithFields(logrus.Fields{ - "operation": name, - "elapsed": elapsed, - }).Debug() - } - - return err - }, - } - - // Log a message if waiting too long to write due to a full queue - t := time.Now() -loop: - for { - select { - case <-quit: - return quitErr - case c <- req: - break loop - case <-time.After(logQueueRequestWaitThreshold): - logger.Warningf("Waited %s while trying to write %s to the strand request channel", time.Since(t), req.Name) - } - } - - t = time.Now() - for { - select { - case <-quit: - return quitErr - case <-done: - return err - case <-time.After(logQueueRequestWaitThreshold): - 
logger.Warningf("Waited %s while waiting for %s to be done or quit", time.Since(t), req.Name) - } - } -} diff --git a/vendor/github.com/SkycoinProject/skycoin/src/daemon/transaction_skyencoder.go b/vendor/github.com/SkycoinProject/skycoin/src/daemon/transaction_skyencoder.go deleted file mode 100644 index 1db55f8..0000000 --- a/vendor/github.com/SkycoinProject/skycoin/src/daemon/transaction_skyencoder.go +++ /dev/null @@ -1,358 +0,0 @@ -// Code generated by github.com/SkycoinProject/skyencoder. DO NOT EDIT. - -package daemon - -import ( - "errors" - "math" - - "github.com/SkycoinProject/skycoin/src/cipher" - "github.com/SkycoinProject/skycoin/src/cipher/encoder" - "github.com/SkycoinProject/skycoin/src/coin" -) - -// encodeSizeTransaction computes the size of an encoded object of type Transaction -func encodeSizeTransaction(obj *coin.Transaction) uint64 { - i0 := uint64(0) - - // obj.Length - i0 += 4 - - // obj.Type - i0++ - - // obj.InnerHash - i0 += 32 - - // obj.Sigs - i0 += 4 - { - i1 := uint64(0) - - // x1 - i1 += 65 - - i0 += uint64(len(obj.Sigs)) * i1 - } - - // obj.In - i0 += 4 - { - i1 := uint64(0) - - // x1 - i1 += 32 - - i0 += uint64(len(obj.In)) * i1 - } - - // obj.Out - i0 += 4 - { - i1 := uint64(0) - - // x1.Address.Version - i1++ - - // x1.Address.Key - i1 += 20 - - // x1.Coins - i1 += 8 - - // x1.Hours - i1 += 8 - - i0 += uint64(len(obj.Out)) * i1 - } - - return i0 -} - -// encodeTransaction encodes an object of type Transaction to a buffer allocated to the exact size -// required to encode the object. -func encodeTransaction(obj *coin.Transaction) ([]byte, error) { - n := encodeSizeTransaction(obj) - buf := make([]byte, n) - - if err := encodeTransactionToBuffer(buf, obj); err != nil { - return nil, err - } - - return buf, nil -} - -// encodeTransactionToBuffer encodes an object of type Transaction to a []byte buffer. -// The buffer must be large enough to encode the object, otherwise an error is returned. 
-func encodeTransactionToBuffer(buf []byte, obj *coin.Transaction) error { - if uint64(len(buf)) < encodeSizeTransaction(obj) { - return encoder.ErrBufferUnderflow - } - - e := &encoder.Encoder{ - Buffer: buf[:], - } - - // obj.Length - e.Uint32(obj.Length) - - // obj.Type - e.Uint8(obj.Type) - - // obj.InnerHash - e.CopyBytes(obj.InnerHash[:]) - - // obj.Sigs maxlen check - if len(obj.Sigs) > 65535 { - return encoder.ErrMaxLenExceeded - } - - // obj.Sigs length check - if uint64(len(obj.Sigs)) > math.MaxUint32 { - return errors.New("obj.Sigs length exceeds math.MaxUint32") - } - - // obj.Sigs length - e.Uint32(uint32(len(obj.Sigs))) - - // obj.Sigs - for _, x := range obj.Sigs { - - // x - e.CopyBytes(x[:]) - - } - - // obj.In maxlen check - if len(obj.In) > 65535 { - return encoder.ErrMaxLenExceeded - } - - // obj.In length check - if uint64(len(obj.In)) > math.MaxUint32 { - return errors.New("obj.In length exceeds math.MaxUint32") - } - - // obj.In length - e.Uint32(uint32(len(obj.In))) - - // obj.In - for _, x := range obj.In { - - // x - e.CopyBytes(x[:]) - - } - - // obj.Out maxlen check - if len(obj.Out) > 65535 { - return encoder.ErrMaxLenExceeded - } - - // obj.Out length check - if uint64(len(obj.Out)) > math.MaxUint32 { - return errors.New("obj.Out length exceeds math.MaxUint32") - } - - // obj.Out length - e.Uint32(uint32(len(obj.Out))) - - // obj.Out - for _, x := range obj.Out { - - // x.Address.Version - e.Uint8(x.Address.Version) - - // x.Address.Key - e.CopyBytes(x.Address.Key[:]) - - // x.Coins - e.Uint64(x.Coins) - - // x.Hours - e.Uint64(x.Hours) - - } - - return nil -} - -// decodeTransaction decodes an object of type Transaction from a buffer. -// Returns the number of bytes used from the buffer to decode the object. -// If the buffer not long enough to decode the object, returns encoder.ErrBufferUnderflow. 
-func decodeTransaction(buf []byte, obj *coin.Transaction) (uint64, error) { - d := &encoder.Decoder{ - Buffer: buf[:], - } - - { - // obj.Length - i, err := d.Uint32() - if err != nil { - return 0, err - } - obj.Length = i - } - - { - // obj.Type - i, err := d.Uint8() - if err != nil { - return 0, err - } - obj.Type = i - } - - { - // obj.InnerHash - if len(d.Buffer) < len(obj.InnerHash) { - return 0, encoder.ErrBufferUnderflow - } - copy(obj.InnerHash[:], d.Buffer[:len(obj.InnerHash)]) - d.Buffer = d.Buffer[len(obj.InnerHash):] - } - - { - // obj.Sigs - - ul, err := d.Uint32() - if err != nil { - return 0, err - } - - length := int(ul) - if length < 0 || length > len(d.Buffer) { - return 0, encoder.ErrBufferUnderflow - } - - if length > 65535 { - return 0, encoder.ErrMaxLenExceeded - } - - if length != 0 { - obj.Sigs = make([]cipher.Sig, length) - - for z1 := range obj.Sigs { - { - // obj.Sigs[z1] - if len(d.Buffer) < len(obj.Sigs[z1]) { - return 0, encoder.ErrBufferUnderflow - } - copy(obj.Sigs[z1][:], d.Buffer[:len(obj.Sigs[z1])]) - d.Buffer = d.Buffer[len(obj.Sigs[z1]):] - } - - } - } - } - - { - // obj.In - - ul, err := d.Uint32() - if err != nil { - return 0, err - } - - length := int(ul) - if length < 0 || length > len(d.Buffer) { - return 0, encoder.ErrBufferUnderflow - } - - if length > 65535 { - return 0, encoder.ErrMaxLenExceeded - } - - if length != 0 { - obj.In = make([]cipher.SHA256, length) - - for z1 := range obj.In { - { - // obj.In[z1] - if len(d.Buffer) < len(obj.In[z1]) { - return 0, encoder.ErrBufferUnderflow - } - copy(obj.In[z1][:], d.Buffer[:len(obj.In[z1])]) - d.Buffer = d.Buffer[len(obj.In[z1]):] - } - - } - } - } - - { - // obj.Out - - ul, err := d.Uint32() - if err != nil { - return 0, err - } - - length := int(ul) - if length < 0 || length > len(d.Buffer) { - return 0, encoder.ErrBufferUnderflow - } - - if length > 65535 { - return 0, encoder.ErrMaxLenExceeded - } - - if length != 0 { - obj.Out = make([]coin.TransactionOutput, length) 
- - for z1 := range obj.Out { - { - // obj.Out[z1].Address.Version - i, err := d.Uint8() - if err != nil { - return 0, err - } - obj.Out[z1].Address.Version = i - } - - { - // obj.Out[z1].Address.Key - if len(d.Buffer) < len(obj.Out[z1].Address.Key) { - return 0, encoder.ErrBufferUnderflow - } - copy(obj.Out[z1].Address.Key[:], d.Buffer[:len(obj.Out[z1].Address.Key)]) - d.Buffer = d.Buffer[len(obj.Out[z1].Address.Key):] - } - - { - // obj.Out[z1].Coins - i, err := d.Uint64() - if err != nil { - return 0, err - } - obj.Out[z1].Coins = i - } - - { - // obj.Out[z1].Hours - i, err := d.Uint64() - if err != nil { - return 0, err - } - obj.Out[z1].Hours = i - } - - } - } - } - - return uint64(len(buf) - len(d.Buffer)), nil -} - -// decodeTransactionExact decodes an object of type Transaction from a buffer. -// If the buffer not long enough to decode the object, returns encoder.ErrBufferUnderflow. -// If the buffer is longer than required to decode the object, returns encoder.ErrRemainingBytes. -func decodeTransactionExact(buf []byte, obj *coin.Transaction) error { - if n, err := decodeTransaction(buf, obj); err != nil { - return err - } else if n != uint64(len(buf)) { - return encoder.ErrRemainingBytes - } - - return nil -} diff --git a/vendor/github.com/SkycoinProject/skycoin/src/kvstorage/empty.go b/vendor/github.com/SkycoinProject/skycoin/src/kvstorage/empty.go deleted file mode 100644 index d93f2e3..0000000 --- a/vendor/github.com/SkycoinProject/skycoin/src/kvstorage/empty.go +++ /dev/null @@ -1,5 +0,0 @@ -// Package kvstorage contains the implementation for a -// key-value storage of arbitrary data. `Manager` is used to -// access the storage contents. 
Each storage is presented by its own `KVStorageType` -// and each type has its own associated file to persist data -package kvstorage diff --git a/vendor/github.com/SkycoinProject/skycoin/src/kvstorage/error.go b/vendor/github.com/SkycoinProject/skycoin/src/kvstorage/error.go deleted file mode 100644 index ef634db..0000000 --- a/vendor/github.com/SkycoinProject/skycoin/src/kvstorage/error.go +++ /dev/null @@ -1,16 +0,0 @@ -package kvstorage - -// Error wraps key-value storage related errors. -// It wraps errors caused by user input, but not errors caused by -// programmer input or internal issues. -type Error struct { - error -} - -// NewError creates an Error -func NewError(err error) error { - if err == nil { - return nil - } - return Error{err} -} diff --git a/vendor/github.com/SkycoinProject/skycoin/src/kvstorage/kvstorage.go b/vendor/github.com/SkycoinProject/skycoin/src/kvstorage/kvstorage.go deleted file mode 100644 index dd9caa0..0000000 --- a/vendor/github.com/SkycoinProject/skycoin/src/kvstorage/kvstorage.go +++ /dev/null @@ -1,162 +0,0 @@ -package kvstorage - -import ( - "crypto/sha256" - "encoding/base64" - "errors" - "fmt" - "io" - "os" - "path/filepath" - "sync" - - "github.com/SkycoinProject/skycoin/src/util/file" -) - -var ( - // ErrNoSuchKey is returned when the specified key does not exist - // in the storage instance - ErrNoSuchKey = NewError(errors.New("no such key exists in the storage")) -) - -// kvStorage is a key-value storage for storing arbitrary data -type kvStorage struct { - fn string - data map[string]string - sync.RWMutex -} - -// newKVStorage constructs new storage instance using the file with the filename -// to persist data -func newKVStorage(fn string) (*kvStorage, error) { - storage := kvStorage{ - fn: fn, - } - - if err := file.LoadJSON(fn, &storage.data); err != nil { - logger.Warningf("newKVStorage LoadJSON(%s) failed: %v", fn, err) - cfp, err := makeCorruptFilePath(fn) - if err != nil { - return nil, fmt.Errorf("Failed to 
make corrupt file path: %v", err) - } - if err := os.Rename(fn, cfp); err != nil { - return nil, fmt.Errorf("Rename %s to %s failed: %v", fn, cfp, err) - } - logger.Infof("Backup the corrupted file from: %s to %s", fn, cfp) - if err := initEmptyStorage(fn); err != nil { - return nil, err - } - storage.data = make(map[string]string) - } - - return &storage, nil -} - -// makeCorruptFilePath creates a $FILE.corrupt.$HASH string based on file path, -// where $HASH is truncated SHA1 of $FILE. -func makeCorruptFilePath(path string) (string, error) { - fileHash, err := shaFileID(path) - if err != nil { - return "", err - } - - dir, file := filepath.Split(path) - newFile := fmt.Sprintf("%s.corrupt.%s", file, fileHash) - newPath := filepath.Join(dir, newFile) - - return newPath, nil -} - -// shaFileID return the first 8 bytes of the SHA1 hash of the file, -// hex-encoded -func shaFileID(path string) (string, error) { - fi, err := os.Open(path) - if err != nil { - return "", err - } - defer fi.Close() - - h := sha256.New() - if _, err := io.Copy(h, fi); err != nil { - return "", err - } - - sum := h.Sum(nil) - encodedSum := base64.RawURLEncoding.EncodeToString(sum[:8]) - return encodedSum, nil -} - -// get gets the value associated with the `key`. Returns `ErrNoSuchKey` -func (s *kvStorage) get(key string) (string, error) { - s.RLock() - defer s.RUnlock() - - val, ok := s.data[key] - if !ok { - return "", ErrNoSuchKey - } - - return val, nil -} - -// getAll gets the snapshot of the current storage contents -func (s *kvStorage) getAll() map[string]string { - s.RLock() - defer s.RUnlock() - - return copyMap(s.data) -} - -// add adds the `val` value to the storage with the specified `key`. 
Replaces the -// original value if `key` already exists -func (s *kvStorage) add(key, val string) error { - s.Lock() - defer s.Unlock() - - // save original data - oldVal, oldOk := s.data[key] - - s.data[key] = val - - // try to persist data, fall back to original data on error - if err := s.flush(); err != nil { - if !oldOk { - delete(s.data, key) - } else { - s.data[key] = oldVal - } - - return err - } - - return nil -} - -// remove removes the value associated with the `key`. Returns `ErrNoSuchKey` -func (s *kvStorage) remove(key string) error { - s.Lock() - defer s.Unlock() - - if _, ok := s.data[key]; !ok { - return ErrNoSuchKey - } - - // save original data - oldVal := s.data[key] - - delete(s.data, key) - - // try to persist data, fall back to original data on error - if err := s.flush(); err != nil { - s.data[key] = oldVal - - return err - } - - return nil -} - -// flush persists data to file -func (s *kvStorage) flush() error { - return file.SaveJSON(s.fn, s.data, 0600) -} diff --git a/vendor/github.com/SkycoinProject/skycoin/src/kvstorage/manager.go b/vendor/github.com/SkycoinProject/skycoin/src/kvstorage/manager.go deleted file mode 100644 index f673259..0000000 --- a/vendor/github.com/SkycoinProject/skycoin/src/kvstorage/manager.go +++ /dev/null @@ -1,252 +0,0 @@ -package kvstorage - -import ( - "errors" - "fmt" - "os" - "path/filepath" - "strings" - "sync" - - "github.com/SkycoinProject/skycoin/src/util/file" - "github.com/SkycoinProject/skycoin/src/util/logging" -) - -// Type is a type of a key-value storage -type Type string - -const ( - // TypeTxIDNotes is a type of storage containing transaction notes - TypeTxIDNotes Type = "txid" - // TypeGeneral is a type of storage for general user data - TypeGeneral Type = "client" -) - -const storageFileExtension = ".json" - -var ( - // ErrStorageAPIDisabled is returned while trying to do storage actions while - // the EnableStorageAPI option is false - ErrStorageAPIDisabled = NewError(errors.New("Storage API 
is disabled")) - // ErrNoSuchStorage is returned if no storage with the specified storage type loaded - ErrNoSuchStorage = NewError(errors.New("Storage with such type is not loaded")) - // ErrStorageAlreadyLoaded is returned while trying to load already loaded storage - ErrStorageAlreadyLoaded = NewError(errors.New("Storage with such type is already loaded")) - // ErrUnknownKVStorageType is returned while trying to access the storage of the unknown type - ErrUnknownKVStorageType = NewError(errors.New("Unknown storage type")) - - logger = logging.MustGetLogger("kvstorage") -) - -// Manager is a manager for key-value storage instances -type Manager struct { - config Config - storages map[Type]*kvStorage - sync.Mutex -} - -// NewManager constructs new manager according to the config -func NewManager(c Config) (*Manager, error) { - logger.Info("Creating new KVStorage manager") - - m := &Manager{ - config: c, - storages: make(map[Type]*kvStorage), - } - - if !strings.HasSuffix(m.config.StorageDir, "/") { - m.config.StorageDir += "/" - } - - if !m.config.EnableStorageAPI { - logger.Info("KVStorage is disabled") - return m, nil - } - - if err := os.MkdirAll(m.config.StorageDir, os.FileMode(0700)); err != nil { - return nil, fmt.Errorf("failed to create kvstorage directory %s: %v", m.config.StorageDir, err) - } - - for _, t := range m.config.EnabledStorages { - if err := m.LoadStorage(t); err != nil { - return nil, err - } - } - - return m, nil -} - -// LoadStorage loads a new storage instance for the `storageType` -// into the manager. 
Returns `ErrStorageAlreadyLoaded`, `ErrStorageAPIDisabled`, -// `ErrUnknownKVStorageType` -func (m *Manager) LoadStorage(storageType Type) error { - if !isStorageTypeValid(storageType) { - return ErrUnknownKVStorageType - } - - m.Lock() - defer m.Unlock() - - if !m.config.EnableStorageAPI { - return ErrStorageAPIDisabled - } - - if m.storageExists(storageType) { - return ErrStorageAlreadyLoaded - } - - fn := m.getStorageFilePath(storageType) - - exists, err := file.Exists(fn) - if err != nil { - return fmt.Errorf("Manager.LoadStorage file.Exists failed: %v", err) - } - if !exists { - if err := initEmptyStorage(fn); err != nil { - return fmt.Errorf("Manager.LoadStorage initEmptyStorage failed: %v", err) - } - } - - storage, err := newKVStorage(fn) - if err != nil { - return err - } - - m.storages[storageType] = storage - - return nil -} - -// UnloadStorage unloads the storage instance for the given `storageType` from the manager. -// Returns `ErrNoSuchStorage`, `ErrStorageAPIDisabled`, `ErrUnknownKVStorageType` -func (m *Manager) UnloadStorage(storageType Type) error { - if !isStorageTypeValid(storageType) { - return ErrUnknownKVStorageType - } - - m.Lock() - defer m.Unlock() - - if !m.config.EnableStorageAPI { - return ErrStorageAPIDisabled - } - - if !m.storageExists(storageType) { - return ErrNoSuchStorage - } - - delete(m.storages, storageType) - - return nil -} - -// GetStorageValue gets the value associated with the `key` from the storage of `storageType. 
-// Returns `ErrNoSuchStorage`, `ErrStorageAPIDisabled`, `ErrUnknownKVStorageType` -func (m *Manager) GetStorageValue(storageType Type, key string) (string, error) { - if !isStorageTypeValid(storageType) { - return "", ErrUnknownKVStorageType - } - - m.Lock() - defer m.Unlock() - - if !m.config.EnableStorageAPI { - return "", ErrStorageAPIDisabled - } - - if !m.storageExists(storageType) { - return "", ErrNoSuchStorage - } - - return m.storages[storageType].get(key) -} - -// GetAllStorageValues gets the snapshot of the current contents from storage of `storageType`. -// Returns `ErrNoSuchStorage`, `ErrStorageAPIDisabled`, `ErrUnknownKVStorageType` -func (m *Manager) GetAllStorageValues(storageType Type) (map[string]string, error) { - if !isStorageTypeValid(storageType) { - return nil, ErrUnknownKVStorageType - } - - m.Lock() - defer m.Unlock() - - if !m.config.EnableStorageAPI { - return nil, ErrStorageAPIDisabled - } - - if !m.storageExists(storageType) { - return nil, ErrNoSuchStorage - } - - return m.storages[storageType].getAll(), nil -} - -// AddStorageValue adds the `val` with the associated `key` to the storage of `storageType`. -// Returns `ErrNoSuchStorage`, `ErrStorageAPIDisabled`, `ErrUnknownKVStorageType` -func (m *Manager) AddStorageValue(storageType Type, key, val string) error { - if !isStorageTypeValid(storageType) { - return ErrUnknownKVStorageType - } - - m.Lock() - defer m.Unlock() - - if !m.config.EnableStorageAPI { - return ErrStorageAPIDisabled - } - - if !m.storageExists(storageType) { - return ErrNoSuchStorage - } - - return m.storages[storageType].add(key, val) -} - -// RemoveStorageValue removes the value with the associated `key` from the storage of `storageType`. 
-// Returns `ErrNoSuchStorage`, `ErrStorageAPIDisabled`, `ErrUnknownKVStorageType` -func (m *Manager) RemoveStorageValue(storageType Type, key string) error { - if !isStorageTypeValid(storageType) { - return ErrUnknownKVStorageType - } - - m.Lock() - defer m.Unlock() - - if !m.config.EnableStorageAPI { - return ErrStorageAPIDisabled - } - - if !m.storageExists(storageType) { - return ErrNoSuchStorage - } - - return m.storages[storageType].remove(key) -} - -// storageExists checks whether the storage of `storageType` exists in the manager -func (m *Manager) storageExists(storageType Type) bool { - _, ok := m.storages[storageType] - - return ok -} - -// getStorageFilePath creates the path to the storage of `storageType` in file system -func (m *Manager) getStorageFilePath(storageType Type) string { - return filepath.Join(m.config.StorageDir, fmt.Sprintf("%s%s", storageType, storageFileExtension)) -} - -// isStorageTypeValid validates the given `storageType` against the predefined available types -func isStorageTypeValid(storageType Type) bool { - switch storageType { - case TypeTxIDNotes, TypeGeneral: - return true - } - - return false -} - -// initEmptyStorage creates a file to persist data -func initEmptyStorage(fn string) error { - return file.SaveJSON(fn, map[string]string{}, 0600) -} diff --git a/vendor/github.com/SkycoinProject/skycoin/src/kvstorage/manager_config.go b/vendor/github.com/SkycoinProject/skycoin/src/kvstorage/manager_config.go deleted file mode 100644 index 82c68a9..0000000 --- a/vendor/github.com/SkycoinProject/skycoin/src/kvstorage/manager_config.go +++ /dev/null @@ -1,15 +0,0 @@ -package kvstorage - -// Config is a configuration for storage manager -type Config struct { - StorageDir string - EnabledStorages []Type - EnableStorageAPI bool -} - -// NewConfig creates a default config. 
-func NewConfig() Config { - return Config{ - StorageDir: "./data/", - } -} diff --git a/vendor/github.com/SkycoinProject/skycoin/src/kvstorage/map.go b/vendor/github.com/SkycoinProject/skycoin/src/kvstorage/map.go deleted file mode 100644 index 3df5277..0000000 --- a/vendor/github.com/SkycoinProject/skycoin/src/kvstorage/map.go +++ /dev/null @@ -1,12 +0,0 @@ -package kvstorage - -// Copy copies the map contents to the new map -func copyMap(data map[string]string) map[string]string { - copied := make(map[string]string, len(data)) - - for k, v := range data { - copied[k] = v - } - - return copied -} diff --git a/vendor/github.com/SkycoinProject/skycoin/src/params/distribution.go b/vendor/github.com/SkycoinProject/skycoin/src/params/distribution.go deleted file mode 100644 index b1fe2d1..0000000 --- a/vendor/github.com/SkycoinProject/skycoin/src/params/distribution.go +++ /dev/null @@ -1,153 +0,0 @@ -package params - -import ( - "errors" - - "github.com/SkycoinProject/skycoin/src/cipher" -) - -// Distribution parameters define the initial coin distribution and unlocking schedule -type Distribution struct { - // MaxCoinSupply is the maximum supply of coins - MaxCoinSupply uint64 - // InitialUnlockedCount is the initial number of unlocked addresses - InitialUnlockedCount uint64 - // UnlockAddressRate is the number of addresses to unlock per unlock time interval - UnlockAddressRate uint64 - // UnlockTimeInterval is the distribution address unlock time interval, measured in seconds - // Once the InitialUnlockedCount is exhausted, - // UnlockAddressRate addresses will be unlocked per UnlockTimeInterval - UnlockTimeInterval uint64 - - // Addresses are the distribution addresses that received coins in the - // first block after the genesis block - Addresses []string - addressesDecoded []cipher.Address -} - -// MustValidate validates Distribution parameters, panics on error -func (d *Distribution) MustValidate() { - if err := d.Validate(); err != nil { - panic(err) - } -} - 
-// Validate validates Distribution parameters -func (d *Distribution) Validate() error { - if d.InitialUnlockedCount > uint64(len(d.Addresses)) { - return errors.New("unlocked addresses > total distribution addresses") - } - - if d.MaxCoinSupply%uint64(len(d.Addresses)) != 0 { - return errors.New("MaxCoinSupply should be perfectly divisible by len(addresses)") - } - - if err := d.decodeAddresses(); err != nil { - return err - } - - return nil -} - -// AddressInitialBalance is the initial balance of each distribution address -func (d *Distribution) AddressInitialBalance() uint64 { - return d.MaxCoinSupply / uint64(len(d.Addresses)) -} - -// UnlockedAddresses returns distribution addresses that are unlocked, i.e. they have spendable outputs -func (d *Distribution) UnlockedAddresses() []string { - // The first InitialUnlockedCount (25) addresses are unlocked by default. - // Subsequent addresses will be unlocked at a rate of UnlockAddressRate (5) per year, - // after the InitialUnlockedCount (25) addresses have no remaining balance. - // The unlock timer will be enabled manually once the - // InitialUnlockedCount (25) addresses are distributed. - - // NOTE: To have automatic unlocking, transaction verification would have - // to be handled in visor rather than in coin.Transactions.Visor(), because - // the coin package is agnostic to the state of the blockchain and cannot reference it. - // Instead of automatic unlocking, we can hardcode the timestamp at which the first 30% - // is distributed, then compute the unlocked addresses easily here. - - addrs := make([]string, d.InitialUnlockedCount) - copy(addrs[:], d.Addresses[:d.InitialUnlockedCount]) - return addrs -} - -// LockedAddresses returns distribution addresses that are locked, i.e. 
they have unspendable outputs -func (d *Distribution) LockedAddresses() []string { - // TODO -- once we reach 30% distribution, we can hardcode the - // initial timestamp for releasing more coins - addrs := make([]string, d.numLocked()) - copy(addrs, d.Addresses[d.InitialUnlockedCount:]) - return addrs -} - -// AddressesDecoded returns a copy of the hardcoded distribution addresses array. -// Each address has 1,000,000 coins. There are 100 addresses. -func (d *Distribution) AddressesDecoded() []cipher.Address { - d.mustDecodeAddresses() - addrs := make([]cipher.Address, len(d.addressesDecoded)) - copy(addrs, d.addressesDecoded) - return addrs -} - -// UnlockedAddressesDecoded returns distribution addresses that are unlocked, i.e. they have spendable outputs -func (d *Distribution) UnlockedAddressesDecoded() []cipher.Address { - // The first d.InitialUnlockedCount (25) addresses are unlocked by default. - // Subsequent addresses will be unlocked at a rate of UnlockAddressRate (5) per year, - // after the d.InitialUnlockedCount (25) addresses have no remaining balance. - // The unlock timer will be enabled manually once the - // d.InitialUnlockedCount (25) addresses are distributed. - - // NOTE: To have automatic unlocking, transaction verification would have - // to be handled in visor rather than in coin.Transactions.Visor(), because - // the coin package is agnostic to the state of the blockchain and cannot reference it. - // Instead of automatic unlocking, we can hardcode the timestamp at which the first 30% - // is distributed, then compute the unlocked addresses easily here. - d.mustDecodeAddresses() - addrs := make([]cipher.Address, d.InitialUnlockedCount) - copy(addrs[:], d.addressesDecoded[:d.InitialUnlockedCount]) - return addrs -} - -// LockedAddressesDecoded returns distribution addresses that are locked, i.e. 
they have unspendable outputs -func (d *Distribution) LockedAddressesDecoded() []cipher.Address { - // TODO -- once we reach 30% distribution, we can hardcode the - // initial timestamp for releasing more coins - d.mustDecodeAddresses() - addrs := make([]cipher.Address, d.numLocked()) - copy(addrs, d.addressesDecoded[d.InitialUnlockedCount:]) - return addrs -} - -func (d *Distribution) numLocked() uint64 { - n := uint64(len(d.Addresses)) - if n < d.InitialUnlockedCount { - panic("number of distribution addresses is less than InitialUnlockedCount") - } - return n - d.InitialUnlockedCount -} - -func (d *Distribution) decodeAddresses() error { - if len(d.addressesDecoded) == len(d.Addresses) { - return nil - } - - decodedAddrs := make([]cipher.Address, len(d.Addresses)) - for i, a := range d.Addresses { - var err error - decodedAddrs[i], err = cipher.DecodeBase58Address(a) - if err != nil { - return err - } - } - - d.addressesDecoded = decodedAddrs - return nil -} - -func (d *Distribution) mustDecodeAddresses() { - if err := d.decodeAddresses(); err != nil { - panic(err) - } -} diff --git a/vendor/github.com/SkycoinProject/skycoin/src/params/droplet.go b/vendor/github.com/SkycoinProject/skycoin/src/params/droplet.go deleted file mode 100644 index fb0c57a..0000000 --- a/vendor/github.com/SkycoinProject/skycoin/src/params/droplet.go +++ /dev/null @@ -1,34 +0,0 @@ -package params - -import ( - "errors" - - "github.com/SkycoinProject/skycoin/src/util/droplet" -) - -var ( - // ErrInvalidDecimals is returned by DropletPrecisionCheck if a coin amount has an invalid number of decimal places - ErrInvalidDecimals = errors.New("invalid amount, too many decimal places") -) - -// DropletPrecisionToDivisor converts number of allowed decimal places to the modulus divisor used when checking droplet precision rules -func DropletPrecisionToDivisor(precision uint8) uint64 { - if precision > droplet.Exponent { - panic("precision must be <= droplet.Exponent") - } - - n := droplet.Exponent 
- precision - var i uint64 = 1 - for k := uint8(0); k < n; k++ { - i = i * 10 - } - return i -} - -// DropletPrecisionCheck checks if an amount of coins is valid given decimal place restrictions -func DropletPrecisionCheck(precision uint8, amount uint64) error { - if amount%DropletPrecisionToDivisor(precision) != 0 { - return ErrInvalidDecimals - } - return nil -} diff --git a/vendor/github.com/SkycoinProject/skycoin/src/params/init.go b/vendor/github.com/SkycoinProject/skycoin/src/params/init.go deleted file mode 100644 index a6a6422..0000000 --- a/vendor/github.com/SkycoinProject/skycoin/src/params/init.go +++ /dev/null @@ -1,78 +0,0 @@ -package params - -import ( - "fmt" - "os" - "strconv" - - "github.com/SkycoinProject/skycoin/src/util/droplet" -) - -func init() { - loadUserBurnFactor() - loadUserMaxTransactionSize() - loadUserMaxDecimals() - sanityCheck() -} - -func sanityCheck() { - if err := UserVerifyTxn.Validate(); err != nil { - panic(err) - } - - MainNetDistribution.MustValidate() -} - -func loadUserBurnFactor() { - xs := os.Getenv("USER_BURN_FACTOR") - if xs == "" { - return - } - - x, err := strconv.ParseUint(xs, 10, 32) - if err != nil { - panic(fmt.Sprintf("Invalid USER_BURN_FACTOR %q: %v", xs, err)) - } - - if x < uint64(MinBurnFactor) { - panic(fmt.Sprintf("USER_BURN_FACTOR must be >= %d", MinBurnFactor)) - } - - UserVerifyTxn.BurnFactor = uint32(x) -} - -func loadUserMaxTransactionSize() { - xs := os.Getenv("USER_MAX_TXN_SIZE") - if xs == "" { - return - } - - x, err := strconv.ParseUint(xs, 10, 32) - if err != nil { - panic(fmt.Sprintf("Invalid USER_MAX_TXN_SIZE %q: %v", xs, err)) - } - - if x < uint64(MinTransactionSize) { - panic(fmt.Sprintf("USER_MAX_TXN_SIZE must be >= %d", MinTransactionSize)) - } - - UserVerifyTxn.MaxTransactionSize = uint32(x) -} - -func loadUserMaxDecimals() { - xs := os.Getenv("USER_MAX_DECIMALS") - if xs == "" { - return - } - - x, err := strconv.ParseUint(xs, 10, 8) - if err != nil { - panic(fmt.Sprintf("Invalid 
USER_MAX_DECIMALS %q: %v", xs, err)) - } - - if x > uint64(droplet.Exponent) { - panic(fmt.Sprintf("USER_MAX_DECIMALS must be <= %d", droplet.Exponent)) - } - - UserVerifyTxn.MaxDropletPrecision = uint8(x) -} diff --git a/vendor/github.com/SkycoinProject/skycoin/src/params/params.go b/vendor/github.com/SkycoinProject/skycoin/src/params/params.go deleted file mode 100644 index 7ae26c1..0000000 --- a/vendor/github.com/SkycoinProject/skycoin/src/params/params.go +++ /dev/null @@ -1,128 +0,0 @@ -package params - -/* -CODE GENERATED AUTOMATICALLY WITH FIBER COIN CREATOR -AVOID EDITING THIS MANUALLY -*/ - -var ( - // MainNetDistribution Skycoin mainnet coin distribution parameters - MainNetDistribution = Distribution{ - MaxCoinSupply: 100000000, - InitialUnlockedCount: 25, - UnlockAddressRate: 5, - UnlockTimeInterval: 31536000, - Addresses: []string{ - "R6aHqKWSQfvpdo2fGSrq4F1RYXkBWR9HHJ", - "2EYM4WFHe4Dgz6kjAdUkM6Etep7ruz2ia6h", - "25aGyzypSA3T9K6rgPUv1ouR13efNPtWP5m", - "ix44h3cojvN6nqGcdpy62X7Rw6Ahnr3Thk", - "AYV8KEBEAPCg8a59cHgqHMqYHP9nVgQDyW", - "2Nu5Jv5Wp3RYGJU1EkjWFFHnebxMx1GjfkF", - "2THDupTBEo7UqB6dsVizkYUvkKq82Qn4gjf", - "tWZ11Nvor9parjg4FkwxNVcby59WVTw2iL", - "m2joQiJRZnj3jN6NsoKNxaxzUTijkdRoSR", - "8yf8PAQqU2cDj8Yzgz3LgBEyDqjvCh2xR7", - "sgB3n11ZPUYHToju6TWMpUZTUcKvQnoFMJ", - "2UYPbDBnHUEc67e7qD4eXtQQ6zfU2cyvAvk", - "wybwGC9rhm8ZssBuzpy5goXrAdE31MPdsj", - "JbM25o7kY7hqJZt3WGYu9pHZFCpA9TCR6t", - "2efrft5Lnwjtk7F1p9d7BnPd72zko2hQWNi", - "Syzmb3MiMoiNVpqFdQ38hWgffHg86D2J4e", - "2g3GUmTQooLrNHaRDhKtLU8rWLz36Beow7F", - "D3phtGr9iv6238b3zYXq6VgwrzwvfRzWZQ", - "gpqsFSuMCZmsjPc6Rtgy1FmLx424tH86My", - "2EUF3GPEUmfocnUc1w6YPtqXVCy3UZA4rAq", - "TtAaxB3qGz5zEAhhiGkBY9VPV7cekhvRYS", - "2fM5gVpi7XaiMPm4i29zddTNkmrKe6TzhVZ", - "ix3NDKgxfYYANKAb5kbmwBYXPrkAsha7uG", - "2RkPshpFFrkuaP98GprLtgHFTGvPY5e6wCK", - "Ak1qCDNudRxZVvcW6YDAdD9jpYNNStAVqm", - "2eZYSbzBKJ7QCL4kd5LSqV478rJQGb4UNkf", - "KPfqM6S96WtRLMuSy4XLfVwymVqivdcDoM", - "5B98bU1nsedGJBdRD5wLtq7Z8t8ZXio8u5", - 
"2iZWk5tmBynWxj2PpAFyiZzEws9qSnG3a6n", - "XUGdPaVnMh7jtzPe3zkrf9FKh5nztFnQU5", - "hSNgHgewJme8uaHrEuKubHYtYSDckD6hpf", - "2DeK765jLgnMweYrMp1NaYHfzxumfR1PaQN", - "orrAssY5V2HuQAbW9K6WktFrGieq2m23pr", - "4Ebf4PkG9QEnQTm4MVvaZvJV6Y9av3jhgb", - "7Uf5xJ3GkiEKaLxC2WmJ1t6SeekJeBdJfu", - "oz4ytDKbCqpgjW3LPc52pW2CaK2gxCcWmL", - "2ex5Z7TufQ5Z8xv5mXe53fSQRfUr35SSo7Q", - "WV2ap7ZubTxeDdmEZ1Xo7ufGMkekLWikJu", - "ckCTV4r1pNuz6j2VBRHhaJN9HsCLY7muLV", - "MXJx96ZJVSjktgeYZpVK8vn1H3xWP8ooq5", - "wyQVmno9aBJZmQ99nDSLoYWwp7YDJCWsrH", - "2cc9wKxCsFNRkoAQDAoHke3ZoyL1mSV14cj", - "29k9g3F5AYfVaa1joE1PpZjBED6hQXes8Mm", - "2XPLzz4ZLf1A9ykyTCjW5gEmVjnWa8CuatH", - "iH7DqqojTgUn2JxmY9hgFp165Nk7wKfan9", - "RJzzwUs3c9C8Y7NFYzNfFoqiUKeBhBfPki", - "2W2cGyiCRM4nwmmiGPgMuGaPGeBzEm7VZPn", - "ALJVNKYL7WGxFBSriiZuwZKWD4b7fbV1od", - "tBaeg9zE2sgmw5ZQENaPPYd6jfwpVpGTzS", - "2hdTw5Hk3rsgpZjvk8TyKcCZoRVXU5QVrUt", - "A1QU6jKq8YgTP79M8fwZNHUZc7hConFKmy", - "q9RkXoty3X1fuaypDDRUi78rWgJWYJMmpJ", - "2Xvm6is5cAPA85xnSYXDuAqiRyoXiky5RaD", - "4CW2CPJEzxhn2PS4JoSLoWGL5QQ7dL2eji", - "24EG6uTzL7DHNzcwsygYGRR1nfu5kco7AZ1", - "KghGnWw5fppTrqHSERXZf61yf7GkuQdCnV", - "2WojewRA3LbpyXTP9ANy8CZqJMgmyNm3MDr", - "2BsMfywmGV3M2CoDA112Rs7ZBkiMHfy9X11", - "kK1Q4gPyYfVVMzQtAPRzL8qXMqJ67Y7tKs", - "28J4mx8xfUtM92DbQ6i2Jmqw5J7dNivfroN", - "gQvgyG1djgtftoCVrSZmsRxr7okD4LheKw", - "3iFGBKapAWWzbiGFSr5ScbhrEPm6Esyvia", - "NFW2akQH2vu7AqkQXxFz2P5vkXTWkSqrSm", - "2MQJjLnWRp9eHh6MpCwpiUeshhtmri12mci", - "2QjRQUMyL6iodtHP9zKmxCNYZ7k3jxtk49C", - "USdfKy7B6oFNoauHWMmoCA7ND9rHqYw2Mf", - "cA49et9WtptYHf6wA1F8qqVgH3kS5jJ9vK", - "qaJT9TjcMi46sTKcgwRQU8o5Lw2Ea1gC4N", - "22pyn5RyhqtTQu4obYjuWYRNNw4i54L8xVr", - "22dkmukC6iH4FFLBmHne6modJZZQ3MC9BAT", - "z6CJZfYLvmd41GRVE8HASjRcy5hqbpHZvE", - "GEBWJ2KpRQDBTCCtvnaAJV2cYurgXS8pta", - "oS8fbEm82cprmAeineBeDkaKd7QownDZQh", - "rQpAs1LVQdphyj9ipEAuukAoj9kNpSP8cM", - "6NSJKsPxmqipGAfFFhUKbkopjrvEESTX3j", - "cuC68ycVXmD2EBzYFNYQ6akhKGrh3FGjSf", - "bw4wtYU8toepomrhWP2p8UFYfHBbvEV425", - 
"HvgNmDz5jD39Gwmi9VfDY1iYMhZUpZ8GKz", - "SbApuZAYquWP3Q6iD51BcMBQjuApYEkRVf", - "2Ugii5yxJgLzC59jV1vF8GK7UBZdvxwobeJ", - "21N2iJ1qnQRiJWcEqNRxXwfNp8QcmiyhtPy", - "9TC4RGs6AtFUsbcVWnSoCdoCpSfM66ALAc", - "oQzn55UWG4iMcY9bTNb27aTnRdfiGHAwbD", - "2GCdwsRpQhcf8SQcynFrMVDM26Bbj6sgv9M", - "2NRFe7REtSmaM2qAgZeG45hC8EtVGV2QjeB", - "25RGnhN7VojHUTvQBJA9nBT5y1qTQGULMzR", - "26uCBDfF8E2PJU2Dzz2ysgKwv9m4BhodTz9", - "Wkvima5cF7DDFdmJQqcdq8Syaq9DuAJJRD", - "286hSoJYxvENFSHwG51ZbmKaochLJyq4ERQ", - "FEGxF3HPoM2HCWHn82tyeh9o7vEQq5ySGE", - "h38DxNxGhWGTq9p5tJnN5r4Fwnn85Krrb6", - "2c1UU8J6Y3kL4cmQh21Tj8wkzidCiZxwdwd", - "2bJ32KuGmjmwKyAtzWdLFpXNM6t83CCPLq5", - "2fi8oLC9zfVVGnzzQtu3Y3rffS65Hiz6QHo", - "TKD93RxFr2Am44TntLiJQus4qcEwTtvEEQ", - "zMDywYdGEDtTSvWnCyc3qsYHWwj9ogws74", - "25NbotTka7TwtbXUpSCQD8RMgHKspyDubXJ", - "2ayCELBERubQWH5QxUr3cTxrYpidvUAzsSw", - "RMTCwLiYDKEAiJu5ekHL1NQ8UKHi5ozCPg", - "ejJjiCwp86ykmFr5iTJ8LxQXJ2wJPTYmkm", - }, - } - - // UserVerifyTxn transaction verification parameters for user-created transactions - UserVerifyTxn = VerifyTxn{ - // BurnFactor can be overriden with `USER_BURN_FACTOR` env var - BurnFactor: 10, - // MaxTransactionSize can be overriden with `USER_MAX_TXN_SIZE` env var - MaxTransactionSize: 32768, // in bytes - // MaxDropletPrecision can be overriden with `USER_MAX_DECIMALS` env var - MaxDropletPrecision: 3, - } -) diff --git a/vendor/github.com/SkycoinProject/skycoin/src/params/verify_txn.go b/vendor/github.com/SkycoinProject/skycoin/src/params/verify_txn.go deleted file mode 100644 index 0296c1f..0000000 --- a/vendor/github.com/SkycoinProject/skycoin/src/params/verify_txn.go +++ /dev/null @@ -1,55 +0,0 @@ -package params - -import ( - "errors" - - "github.com/SkycoinProject/skycoin/src/util/droplet" -) - -const ( - // MinBurnFactor minimum value for BurnFactor - MinBurnFactor uint32 = 2 - // MinTransactionSize minimum value for MaxTransactionSize - MinTransactionSize uint32 = 1024 -) - -var ( - // ErrInvalidBurnFactor BurnFactor value is 
out of range - ErrInvalidBurnFactor = errors.New("BurnFactor value is out of range") - // ErrInvalidMaxTransactionSize MaxTransactionSize value is out of range - ErrInvalidMaxTransactionSize = errors.New("MaxTransactionSize value is out of range") - // ErrInvalidMaxDropletPrecision MaxDropletPrecision value is out of range - ErrInvalidMaxDropletPrecision = errors.New("MaxDropletPrecision value is out of range") -) - -// VerifyTxn are parameters for verifying a transaction -type VerifyTxn struct { - // BurnFactor inverse fraction of coinhours that must be burned - BurnFactor uint32 - // MaxTransactionSize maximum size of a transaction in bytes - MaxTransactionSize uint32 - // MaxDropletPrecision maximum decimal precision of droplets - MaxDropletPrecision uint8 -} - -// MaxDropletDivisor return the modulus divisor used when checking droplet precision rules -func (v VerifyTxn) MaxDropletDivisor() uint64 { - return DropletPrecisionToDivisor(v.MaxDropletPrecision) -} - -// Validate validates the configured parameters -func (v VerifyTxn) Validate() error { - if v.BurnFactor < MinBurnFactor { - return ErrInvalidBurnFactor - } - - if v.MaxTransactionSize < MinTransactionSize { - return ErrInvalidMaxTransactionSize - } - - if v.MaxDropletPrecision > droplet.Exponent { - return ErrInvalidMaxDropletPrecision - } - - return nil -} diff --git a/vendor/github.com/SkycoinProject/skycoin/src/readable/block.go b/vendor/github.com/SkycoinProject/skycoin/src/readable/block.go deleted file mode 100644 index 211d625..0000000 --- a/vendor/github.com/SkycoinProject/skycoin/src/readable/block.go +++ /dev/null @@ -1,145 +0,0 @@ -/* -Package readable defines JSON-tagged struct representations of internal binary data structures, -for use by the API and CLI. 
-*/ -package readable - -import ( - "errors" - - "github.com/SkycoinProject/skycoin/src/cipher" - "github.com/SkycoinProject/skycoin/src/coin" -) - -// BlockHeader represents the readable block header -type BlockHeader struct { - BkSeq uint64 `json:"seq"` - Hash string `json:"block_hash"` - PreviousHash string `json:"previous_block_hash"` - Time uint64 `json:"timestamp"` - Fee uint64 `json:"fee"` - Version uint32 `json:"version"` - BodyHash string `json:"tx_body_hash"` - UxHash string `json:"ux_hash"` -} - -// NewBlockHeader creates a readable block header -func NewBlockHeader(b coin.BlockHeader) BlockHeader { - return BlockHeader{ - BkSeq: b.BkSeq, - Hash: b.Hash().Hex(), - PreviousHash: b.PrevHash.Hex(), - Time: b.Time, - Fee: b.Fee, - Version: b.Version, - BodyHash: b.BodyHash.Hex(), - UxHash: b.UxHash.Hex(), - } -} - -// ToCoinBlockHeader converts BlockHeader back to coin.BlockHeader -func (bh BlockHeader) ToCoinBlockHeader() (coin.BlockHeader, error) { - prevHash, err := cipher.SHA256FromHex(bh.PreviousHash) - if err != nil { - return coin.BlockHeader{}, err - } - - bodyHash, err := cipher.SHA256FromHex(bh.BodyHash) - if err != nil { - return coin.BlockHeader{}, err - } - - uxHash, err := cipher.SHA256FromHex(bh.UxHash) - if err != nil { - return coin.BlockHeader{}, err - } - - headHash, err := cipher.SHA256FromHex(bh.Hash) - if err != nil { - return coin.BlockHeader{}, err - } - - cbh := coin.BlockHeader{ - Version: bh.Version, - Time: bh.Time, - BkSeq: bh.BkSeq, - Fee: bh.Fee, - PrevHash: prevHash, - BodyHash: bodyHash, - UxHash: uxHash, - } - - if cbh.Hash() != headHash { - return coin.BlockHeader{}, errors.New("readable.BlockHeader.Hash != recovered coin.BlockHeader.Hash()") - } - - return cbh, nil -} - -// BlockBody represents a readable block body -type BlockBody struct { - Transactions []Transaction `json:"txns"` -} - -// NewBlockBody creates a readable block body -func NewBlockBody(b coin.Block) (*BlockBody, error) { - txns := make([]Transaction, 
len(b.Body.Transactions)) - isGenesis := b.Head.BkSeq == 0 - for i := range b.Body.Transactions { - txn, err := NewTransaction(b.Body.Transactions[i], isGenesis) - if err != nil { - return nil, err - } - txns[i] = *txn - } - - return &BlockBody{ - Transactions: txns, - }, nil -} - -// Block represents a readable block -type Block struct { - Head BlockHeader `json:"header"` - Body BlockBody `json:"body"` - Size uint32 `json:"size"` -} - -// NewBlock creates a readable block -func NewBlock(b coin.Block) (*Block, error) { - body, err := NewBlockBody(b) - if err != nil { - return nil, err - } - - size, err := b.Size() - if err != nil { - return nil, err - } - - return &Block{ - Head: NewBlockHeader(b.Head), - Body: *body, - Size: size, - }, nil -} - -// Blocks an array of readable blocks. -type Blocks struct { - Blocks []Block `json:"blocks"` -} - -// NewBlocks converts []coin.SignedBlock to Blocks -func NewBlocks(blocks []coin.SignedBlock) (*Blocks, error) { - rbs := make([]Block, 0, len(blocks)) - for _, b := range blocks { - rb, err := NewBlock(b.Block) - if err != nil { - return nil, err - } - rbs = append(rbs, *rb) - } - return &Blocks{ - Blocks: rbs, - }, nil -} diff --git a/vendor/github.com/SkycoinProject/skycoin/src/readable/blockchain.go b/vendor/github.com/SkycoinProject/skycoin/src/readable/blockchain.go deleted file mode 100644 index cd55ccd..0000000 --- a/vendor/github.com/SkycoinProject/skycoin/src/readable/blockchain.go +++ /dev/null @@ -1,58 +0,0 @@ -package readable - -import ( - "github.com/SkycoinProject/skycoin/src/daemon" - "github.com/SkycoinProject/skycoin/src/visor" -) - -// BlockchainMetadata encapsulates useful information from the coin.Blockchain -type BlockchainMetadata struct { - // Most recent block's header - Head BlockHeader `json:"head"` - // Number of unspent outputs in the coin.Blockchain - Unspents uint64 `json:"unspents"` - // Number of known unconfirmed txns - Unconfirmed uint64 `json:"unconfirmed"` -} - -// NewBlockchainMetadata 
creates blockchain metadata -func NewBlockchainMetadata(bm visor.BlockchainMetadata) BlockchainMetadata { - return BlockchainMetadata{ - Head: NewBlockHeader(bm.HeadBlock.Head), - Unspents: bm.Unspents, - Unconfirmed: bm.Unconfirmed, - } -} - -// BlockchainProgress is the current blockchain syncing status -type BlockchainProgress struct { - // Our current blockchain length - Current uint64 `json:"current"` - // Our best guess at true blockchain length - Highest uint64 `json:"highest"` - // Individual blockchain length reports from peers - Peers []PeerBlockchainHeight `json:"peers"` -} - -// PeerBlockchainHeight is a peer's IP address with their reported blockchain height -type PeerBlockchainHeight struct { - Address string `json:"address"` - Height uint64 `json:"height"` -} - -// NewBlockchainProgress copies daemon.BlockchainProgress to a struct with json tags -func NewBlockchainProgress(bp *daemon.BlockchainProgress) BlockchainProgress { - peers := make([]PeerBlockchainHeight, len(bp.Peers)) - for i, p := range bp.Peers { - peers[i] = PeerBlockchainHeight{ - Address: p.Address, - Height: p.Height, - } - } - - return BlockchainProgress{ - Current: bp.Current, - Highest: bp.Highest, - Peers: peers, - } -} diff --git a/vendor/github.com/SkycoinProject/skycoin/src/readable/fiber.go b/vendor/github.com/SkycoinProject/skycoin/src/readable/fiber.go deleted file mode 100644 index 3c5bc92..0000000 --- a/vendor/github.com/SkycoinProject/skycoin/src/readable/fiber.go +++ /dev/null @@ -1,16 +0,0 @@ -package readable - -import "github.com/SkycoinProject/skycoin/src/cipher/bip44" - -// FiberConfig is fiber configuration parameters -type FiberConfig struct { - Name string `json:"name"` - DisplayName string `json:"display_name"` - Ticker string `json:"ticker"` - CoinHoursName string `json:"coin_hours_display_name"` - CoinHoursNameSingular string `json:"coin_hours_display_name_singular"` - CoinHoursTicker string `json:"coin_hours_ticker"` - ExplorerURL string `json:"explorer_url"` 
- VersionURL string `json:"version_url"` - Bip44Coin bip44.CoinType `json:"bip44_coin"` -} diff --git a/vendor/github.com/SkycoinProject/skycoin/src/readable/network.go b/vendor/github.com/SkycoinProject/skycoin/src/readable/network.go deleted file mode 100644 index 777730b..0000000 --- a/vendor/github.com/SkycoinProject/skycoin/src/readable/network.go +++ /dev/null @@ -1,73 +0,0 @@ -package readable - -import ( - "github.com/SkycoinProject/skycoin/src/daemon" - "github.com/SkycoinProject/skycoin/src/params" - "github.com/SkycoinProject/skycoin/src/util/useragent" -) - -// Connection a connection's state within the daemon -type Connection struct { - GnetID uint64 `json:"id"` - Addr string `json:"address"` - LastSent int64 `json:"last_sent"` - LastReceived int64 `json:"last_received"` - ConnectedAt int64 `json:"connected_at"` - Outgoing bool `json:"outgoing"` - State daemon.ConnectionState `json:"state"` - Mirror uint32 `json:"mirror"` - ListenPort uint16 `json:"listen_port"` - Height uint64 `json:"height"` - UserAgent useragent.Data `json:"user_agent"` - IsTrustedPeer bool `json:"is_trusted_peer"` - UnconfirmedVerifyTxn VerifyTxn `json:"unconfirmed_verify_transaction"` -} - -// NewConnection copies daemon.Connection to a struct with json tags -func NewConnection(c *daemon.Connection) Connection { - var lastSent int64 - var lastReceived int64 - var connectedAt int64 - - if !c.Gnet.LastSent.IsZero() { - lastSent = c.Gnet.LastSent.Unix() - } - if !c.Gnet.LastReceived.IsZero() { - lastReceived = c.Gnet.LastReceived.Unix() - } - if !c.ConnectedAt.IsZero() { - connectedAt = c.ConnectedAt.Unix() - } - - return Connection{ - GnetID: c.Gnet.ID, - Addr: c.Addr, - LastSent: lastSent, - LastReceived: lastReceived, - ConnectedAt: connectedAt, - Outgoing: c.Outgoing, - State: c.State, - Mirror: c.Mirror, - ListenPort: c.ListenPort, - Height: c.Height, - UserAgent: c.UserAgent, - IsTrustedPeer: c.Pex.Trusted, - UnconfirmedVerifyTxn: NewVerifyTxn(c.UnconfirmedVerifyTxn), - } -} - 
-// VerifyTxn transaction verification parameters -type VerifyTxn struct { - BurnFactor uint32 `json:"burn_factor"` - MaxTransactionSize uint32 `json:"max_transaction_size"` - MaxDropletPrecision uint8 `json:"max_decimals"` -} - -// NewVerifyTxn converts params.VerifyTxn to VerifyTxn -func NewVerifyTxn(p params.VerifyTxn) VerifyTxn { - return VerifyTxn{ - BurnFactor: p.BurnFactor, - MaxTransactionSize: p.MaxTransactionSize, - MaxDropletPrecision: p.MaxDropletPrecision, - } -} diff --git a/vendor/github.com/SkycoinProject/skycoin/src/readable/output.go b/vendor/github.com/SkycoinProject/skycoin/src/readable/output.go deleted file mode 100644 index a06fc0c..0000000 --- a/vendor/github.com/SkycoinProject/skycoin/src/readable/output.go +++ /dev/null @@ -1,276 +0,0 @@ -package readable - -import ( - "errors" - "fmt" - "sort" - "strings" - - "github.com/SkycoinProject/skycoin/src/cipher" - "github.com/SkycoinProject/skycoin/src/coin" - "github.com/SkycoinProject/skycoin/src/transaction" - "github.com/SkycoinProject/skycoin/src/util/droplet" - "github.com/SkycoinProject/skycoin/src/util/mathutil" - "github.com/SkycoinProject/skycoin/src/visor" - "github.com/SkycoinProject/skycoin/src/visor/historydb" - "github.com/SkycoinProject/skycoin/src/wallet" -) - -// UnspentOutput represents a readable output -type UnspentOutput struct { - Hash string `json:"hash"` - Time uint64 `json:"time"` - BkSeq uint64 `json:"block_seq"` - SourceTransaction string `json:"src_tx"` - Address string `json:"address"` - Coins string `json:"coins"` - Hours uint64 `json:"hours"` - CalculatedHours uint64 `json:"calculated_hours"` -} - -// NewUnspentOutput creates a readable output -func NewUnspentOutput(uxOut visor.UnspentOutput) (UnspentOutput, error) { - coinStr, err := droplet.ToString(uxOut.Body.Coins) - if err != nil { - return UnspentOutput{}, err - } - - return UnspentOutput{ - Hash: uxOut.Hash().Hex(), - Time: uxOut.Head.Time, - BkSeq: uxOut.Head.BkSeq, - SourceTransaction: 
uxOut.Body.SrcTransaction.Hex(), - Address: uxOut.Body.Address.String(), - Coins: coinStr, - Hours: uxOut.Body.Hours, - CalculatedHours: uxOut.CalculatedHours, - }, nil -} - -// UnspentOutputs slice of UnspentOutput -type UnspentOutputs []UnspentOutput - -// NewUnspentOutputs converts unspent outputs to a readable output -func NewUnspentOutputs(uxs []visor.UnspentOutput) (UnspentOutputs, error) { - rxReadables := make(UnspentOutputs, len(uxs)) - for i, ux := range uxs { - out, err := NewUnspentOutput(ux) - if err != nil { - return UnspentOutputs{}, err - } - - rxReadables[i] = out - } - - // Sort UnspentOutputs newest to oldest, using hash to break ties - sort.Slice(rxReadables, func(i, j int) bool { - if rxReadables[i].Time == rxReadables[j].Time { - return strings.Compare(rxReadables[i].Hash, rxReadables[j].Hash) < 0 - } - return rxReadables[i].Time > rxReadables[j].Time - }) - - return rxReadables, nil -} - -// Balance returns the balance in droplets -func (ros UnspentOutputs) Balance() (wallet.Balance, error) { - var bal wallet.Balance - for _, out := range ros { - coins, err := droplet.FromString(out.Coins) - if err != nil { - return wallet.Balance{}, err - } - - bal.Coins, err = mathutil.AddUint64(bal.Coins, coins) - if err != nil { - return wallet.Balance{}, err - } - - bal.Hours, err = mathutil.AddUint64(bal.Hours, out.CalculatedHours) - if err != nil { - return wallet.Balance{}, err - } - } - - return bal, nil -} - -// ToUxArray converts UnspentOutputs to coin.UxArray -func (ros UnspentOutputs) ToUxArray() (coin.UxArray, error) { - var uxs coin.UxArray - for _, o := range ros { - coins, err := droplet.FromString(o.Coins) - if err != nil { - return nil, err - } - - addr, err := cipher.DecodeBase58Address(o.Address) - if err != nil { - return nil, err - } - - srcTx, err := cipher.SHA256FromHex(o.SourceTransaction) - if err != nil { - return nil, err - } - - uxs = append(uxs, coin.UxOut{ - Head: coin.UxHead{ - Time: o.Time, - BkSeq: o.BkSeq, - }, - Body: 
coin.UxBody{ - SrcTransaction: srcTx, - Address: addr, - Coins: coins, - Hours: o.Hours, - }, - }) - } - - return uxs, nil -} - -// OutputsToUxBalances converts UnspentOutputs to []transaction.UxBalance -func OutputsToUxBalances(ros UnspentOutputs) ([]transaction.UxBalance, error) { - uxb := make([]transaction.UxBalance, len(ros)) - for i, ro := range ros { - if ro.Hash == "" { - return nil, errors.New("UnspentOutput missing hash") - } - - hash, err := cipher.SHA256FromHex(ro.Hash) - if err != nil { - return nil, fmt.Errorf("UnspentOutput hash is invalid: %v", err) - } - - coins, err := droplet.FromString(ro.Coins) - if err != nil { - return nil, fmt.Errorf("UnspentOutput coins is invalid: %v", err) - } - - addr, err := cipher.DecodeBase58Address(ro.Address) - if err != nil { - return nil, fmt.Errorf("UnspentOutput address is invalid: %v", err) - } - - srcTx, err := cipher.SHA256FromHex(ro.SourceTransaction) - if err != nil { - return nil, fmt.Errorf("UnspentOutput src_tx is invalid: %v", err) - } - - b := transaction.UxBalance{ - Hash: hash, - Time: ro.Time, - BkSeq: ro.BkSeq, - SrcTransaction: srcTx, - Address: addr, - Coins: coins, - Hours: ro.CalculatedHours, - InitialHours: ro.Hours, - } - - uxb[i] = b - } - - return uxb, nil -} - -// UnspentOutputsSummary records unspent outputs in different status. 
-type UnspentOutputsSummary struct { - Head BlockHeader `json:"head"` - // HeadOutputs are unspent outputs confirmed in the blockchain - HeadOutputs UnspentOutputs `json:"head_outputs"` - // OutgoingOutputs are unspent outputs being spent in unconfirmed transactions - OutgoingOutputs UnspentOutputs `json:"outgoing_outputs"` - // IncomingOutputs are unspent outputs being created by unconfirmed transactions - IncomingOutputs UnspentOutputs `json:"incoming_outputs"` -} - -// NewUnspentOutputsSummary creates an UnspentOutputsSummary from visor.UnspentOutputsSummary -func NewUnspentOutputsSummary(summary *visor.UnspentOutputsSummary) (*UnspentOutputsSummary, error) { - headOutputs, err := NewUnspentOutputs(summary.Confirmed) - if err != nil { - return nil, err - } - - outgoingOutputs, err := NewUnspentOutputs(summary.Outgoing) - if err != nil { - return nil, err - } - - incomingOutputs, err := NewUnspentOutputs(summary.Incoming) - if err != nil { - return nil, err - } - - return &UnspentOutputsSummary{ - Head: NewBlockHeader(summary.HeadBlock.Head), - HeadOutputs: headOutputs, - OutgoingOutputs: outgoingOutputs, - IncomingOutputs: incomingOutputs, - }, nil -} - -// SpendableOutputs subtracts OutgoingOutputs from HeadOutputs -func (os UnspentOutputsSummary) SpendableOutputs() UnspentOutputs { - if len(os.OutgoingOutputs) == 0 { - return os.HeadOutputs - } - - spending := make(map[string]struct{}, len(os.OutgoingOutputs)) - for _, u := range os.OutgoingOutputs { - spending[u.Hash] = struct{}{} - } - - var outs UnspentOutputs - for i := range os.HeadOutputs { - if _, ok := spending[os.HeadOutputs[i].Hash]; !ok { - outs = append(outs, os.HeadOutputs[i]) - } - } - return outs -} - -// ExpectedOutputs adds IncomingOutputs to SpendableOutputs -func (os UnspentOutputsSummary) ExpectedOutputs() UnspentOutputs { - return append(os.SpendableOutputs(), os.IncomingOutputs...) 
-} - -// SpentOutput is an unspent output that was spent -type SpentOutput struct { - Uxid string `json:"uxid"` - Time uint64 `json:"time"` - SrcBkSeq uint64 `json:"src_block_seq"` - SrcTx string `json:"src_tx"` - OwnerAddress string `json:"owner_address"` - Coins uint64 `json:"coins"` - Hours uint64 `json:"hours"` - SpentBlockSeq uint64 `json:"spent_block_seq"` // block seq that spent the output. - SpentTxnID string `json:"spent_tx"` // id of tx which spent this output. -} - -// NewSpentOutput creates a SpentOutput from historydb.UxOut -func NewSpentOutput(out *historydb.UxOut) SpentOutput { - return SpentOutput{ - Uxid: out.Hash().Hex(), - Time: out.Out.Head.Time, - SrcBkSeq: out.Out.Head.BkSeq, - SrcTx: out.Out.Body.SrcTransaction.Hex(), - OwnerAddress: out.Out.Body.Address.String(), - Coins: out.Out.Body.Coins, - Hours: out.Out.Body.Hours, - SpentBlockSeq: out.SpentBlockSeq, - SpentTxnID: out.SpentTxnID.Hex(), - } -} - -// NewSpentOutputs creates []SpentOutput from []historydb.UxOut -func NewSpentOutputs(outs []historydb.UxOut) []SpentOutput { - spents := make([]SpentOutput, len(outs)) - for i, o := range outs { - spents[i] = NewSpentOutput(&o) - } - return spents -} diff --git a/vendor/github.com/SkycoinProject/skycoin/src/readable/richlist.go b/vendor/github.com/SkycoinProject/skycoin/src/readable/richlist.go deleted file mode 100644 index 87aa061..0000000 --- a/vendor/github.com/SkycoinProject/skycoin/src/readable/richlist.go +++ /dev/null @@ -1,32 +0,0 @@ -package readable - -import ( - "github.com/SkycoinProject/skycoin/src/util/droplet" - "github.com/SkycoinProject/skycoin/src/visor" -) - -// RichlistBalance holds info an address balance holder -type RichlistBalance struct { - Address string `json:"address"` - Coins string `json:"coins"` - Locked bool `json:"locked"` -} - -// NewRichlistBalances copies from visor.Richlist -func NewRichlistBalances(visorRichlist visor.Richlist) ([]RichlistBalance, error) { - richlist := make([]RichlistBalance, 
len(visorRichlist)) - for i, v := range visorRichlist { - coins, err := droplet.ToString(v.Coins) - if err != nil { - return nil, err - } - - richlist[i] = RichlistBalance{ - Address: v.Address.String(), - Coins: coins, - Locked: v.Locked, - } - } - - return richlist, nil -} diff --git a/vendor/github.com/SkycoinProject/skycoin/src/readable/transaction.go b/vendor/github.com/SkycoinProject/skycoin/src/readable/transaction.go deleted file mode 100644 index 2932671..0000000 --- a/vendor/github.com/SkycoinProject/skycoin/src/readable/transaction.go +++ /dev/null @@ -1,252 +0,0 @@ -package readable - -import ( - "errors" - "fmt" - "time" - - "github.com/SkycoinProject/skycoin/src/cipher" - "github.com/SkycoinProject/skycoin/src/coin" - "github.com/SkycoinProject/skycoin/src/util/droplet" - "github.com/SkycoinProject/skycoin/src/util/logging" - "github.com/SkycoinProject/skycoin/src/util/timeutil" - "github.com/SkycoinProject/skycoin/src/visor" -) - -var logger = logging.MustGetLogger("readable") - -// TransactionStatus represents the transaction status -type TransactionStatus struct { - Confirmed bool `json:"confirmed"` - Unconfirmed bool `json:"unconfirmed"` - // If confirmed, how many blocks deep in the chain it is. 
Will be at least 1 if confirmed - Height uint64 `json:"height"` - // If confirmed, the sequence of the block in which the transaction was executed - BlockSeq uint64 `json:"block_seq"` -} - -// NewTransactionStatus creates TransactionStatus from visor.TransactionStatus -func NewTransactionStatus(status visor.TransactionStatus) TransactionStatus { - return TransactionStatus{ - Unconfirmed: !status.Confirmed, - Confirmed: status.Confirmed, - Height: status.Height, - BlockSeq: status.BlockSeq, - } -} - -// TransactionOutput readable transaction output -type TransactionOutput struct { - Hash string `json:"uxid"` - Address string `json:"dst"` - Coins string `json:"coins"` - Hours uint64 `json:"hours"` -} - -// TransactionInput readable transaction input -type TransactionInput struct { - Hash string `json:"uxid"` - Address string `json:"owner"` - Coins string `json:"coins"` - Hours uint64 `json:"hours"` - CalculatedHours uint64 `json:"calculated_hours"` -} - -// NewTransactionOutput creates a TransactionOutput -func NewTransactionOutput(txn *coin.TransactionOutput, txid cipher.SHA256) (*TransactionOutput, error) { - coinStr, err := droplet.ToString(txn.Coins) - if err != nil { - return nil, err - } - - return &TransactionOutput{ - Hash: txn.UxID(txid).Hex(), - Address: txn.Address.String(), - Coins: coinStr, - Hours: txn.Hours, - }, nil -} - -// NewTransactionInput creates a TransactionInput from a visor.TransactionInput -func NewTransactionInput(input visor.TransactionInput) (TransactionInput, error) { - coinStr, err := droplet.ToString(input.UxOut.Body.Coins) - if err != nil { - logger.Errorf("Failed to convert coins to string: %v", err) - return TransactionInput{}, err - } - - return TransactionInput{ - Hash: input.UxOut.Hash().Hex(), - Address: input.UxOut.Body.Address.String(), - Coins: coinStr, - Hours: input.UxOut.Body.Hours, - CalculatedHours: input.CalculatedHours, - }, nil -} - -// Transaction represents a readable transaction -type Transaction struct { - 
Timestamp uint64 `json:"timestamp,omitempty"` - Length uint32 `json:"length"` - Type uint8 `json:"type"` - Hash string `json:"txid"` - InnerHash string `json:"inner_hash"` - - Sigs []string `json:"sigs"` - In []string `json:"inputs"` - Out []TransactionOutput `json:"outputs"` -} - -// NewTransaction creates a readable transaction -func NewTransaction(txn coin.Transaction, isGenesis bool) (*Transaction, error) { - if isGenesis && len(txn.In) != 0 { - return nil, errors.New("NewTransaction: isGenesis=true but Transaction.In is not empty") - } - if !isGenesis && len(txn.In) == 0 { - return nil, errors.New("NewTransaction: isGenesis=false but Transaction.In is empty") - } - - // Genesis transaction uses empty SHA256 as the txid for its outputs [FIXME: requires hardfork] - txID := txn.Hash() - txnOutputTxID := cipher.SHA256{} - if !isGenesis { - txnOutputTxID = txID - } - - sigs := make([]string, len(txn.Sigs)) - for i := range txn.Sigs { - sigs[i] = txn.Sigs[i].Hex() - } - - in := make([]string, len(txn.In)) - for i := range txn.In { - in[i] = txn.In[i].Hex() - } - - out := make([]TransactionOutput, len(txn.Out)) - for i := range txn.Out { - o, err := NewTransactionOutput(&txn.Out[i], txnOutputTxID) - if err != nil { - return nil, err - } - - out[i] = *o - } - - return &Transaction{ - Length: txn.Length, - Type: txn.Type, - Hash: txID.Hex(), - InnerHash: txn.InnerHash.Hex(), - - Sigs: sigs, - In: in, - Out: out, - }, nil -} - -// NewTransactionWithTimestamp creates a readable transaction with its timestamp set -func NewTransactionWithTimestamp(txn coin.Transaction, isGenesis bool, timestamp uint64) (*Transaction, error) { - newTxn, err := NewTransaction(txn, isGenesis) - if err != nil { - return nil, err - } - newTxn.Timestamp = timestamp - return newTxn, nil -} - -// UnconfirmedTransactions represents a readable unconfirmed transaction -type UnconfirmedTransactions struct { - Transaction Transaction `json:"transaction"` - Received time.Time `json:"received"` - Checked 
time.Time `json:"checked"` - Announced time.Time `json:"announced"` - IsValid bool `json:"is_valid"` -} - -// NewUnconfirmedTransaction creates a readable unconfirmed transaction -func NewUnconfirmedTransaction(unconfirmed *visor.UnconfirmedTransaction) (*UnconfirmedTransactions, error) { - isGenesis := false // unconfirmed transactions are never the genesis transaction - txn, err := NewTransaction(unconfirmed.Transaction, isGenesis) - if err != nil { - return nil, err - } - return &UnconfirmedTransactions{ - Transaction: *txn, - Received: timeutil.NanoToTime(unconfirmed.Received), - Checked: timeutil.NanoToTime(unconfirmed.Checked), - Announced: timeutil.NanoToTime(unconfirmed.Announced), - IsValid: unconfirmed.IsValid == 1, - }, nil -} - -// NewUnconfirmedTransactions converts []visor.UnconfirmedTransaction to []UnconfirmedTransactions -func NewUnconfirmedTransactions(txns []visor.UnconfirmedTransaction) ([]UnconfirmedTransactions, error) { - rut := make([]UnconfirmedTransactions, len(txns)) - for i := range txns { - txn, err := NewUnconfirmedTransaction(&txns[i]) - if err != nil { - return []UnconfirmedTransactions{}, err - } - rut[i] = *txn - } - return rut, nil -} - -// TransactionWithStatus represents transaction result -type TransactionWithStatus struct { - Status TransactionStatus `json:"status"` - Time uint64 `json:"time"` - Transaction Transaction `json:"txn"` -} - -// NewTransactionWithStatus converts visor.Transaction to TransactionWithStatus -func NewTransactionWithStatus(txn *visor.Transaction) (*TransactionWithStatus, error) { - if txn == nil { - return nil, nil - } - - isGenesis := txn.Status.BlockSeq == 0 && txn.Status.Confirmed - rbTxn, err := NewTransactionWithTimestamp(txn.Transaction, isGenesis, txn.Time) - if err != nil { - return nil, err - } - - return &TransactionWithStatus{ - Transaction: *rbTxn, - Status: NewTransactionStatus(txn.Status), - Time: txn.Time, - }, nil -} - -// TransactionWithStatusVerbose represents verbose transaction 
result -type TransactionWithStatusVerbose struct { - Status TransactionStatus `json:"status"` - Time uint64 `json:"time"` - Transaction TransactionVerbose `json:"txn"` -} - -// NewTransactionWithStatusVerbose converts visor.Transaction to TransactionWithStatusVerbose -func NewTransactionWithStatusVerbose(txn *visor.Transaction, inputs []visor.TransactionInput) (*TransactionWithStatusVerbose, error) { - if txn == nil { - return nil, nil - } - - if len(txn.Transaction.In) != len(inputs) { - return nil, fmt.Errorf("NewTransactionWithStatusVerbose: len(txn.In) != len(inputs) [%d != %d]", len(txn.Transaction.In), len(inputs)) - } - - rbTxn, err := NewTransactionVerbose(*txn, inputs) - if err != nil { - return nil, err - } - - // Force the Status field to be hidden on the inner transaction, to maintain API compatibility - rbTxn.Status = nil - - return &TransactionWithStatusVerbose{ - Transaction: rbTxn, - Status: NewTransactionStatus(txn.Status), - Time: txn.Time, - }, nil -} diff --git a/vendor/github.com/SkycoinProject/skycoin/src/readable/verbose.go b/vendor/github.com/SkycoinProject/skycoin/src/readable/verbose.go deleted file mode 100644 index 1ad8279..0000000 --- a/vendor/github.com/SkycoinProject/skycoin/src/readable/verbose.go +++ /dev/null @@ -1,258 +0,0 @@ -package readable - -import ( - "errors" - "fmt" - "time" - - "github.com/SkycoinProject/skycoin/src/cipher" - "github.com/SkycoinProject/skycoin/src/coin" - "github.com/SkycoinProject/skycoin/src/util/mathutil" - "github.com/SkycoinProject/skycoin/src/util/timeutil" - "github.com/SkycoinProject/skycoin/src/visor" -) - -// BlockBodyVerbose represents a verbose readable block body -type BlockBodyVerbose struct { - Transactions []BlockTransactionVerbose `json:"txns"` -} - -// BlockVerbose represents a readable block with verbose data -type BlockVerbose struct { - Head BlockHeader `json:"header"` - Body BlockBodyVerbose `json:"body"` - Size uint32 `json:"size"` -} - -// NewBlockBodyVerbose creates a verbose 
readable block body -func NewBlockBodyVerbose(b coin.Block, inputs [][]visor.TransactionInput) (*BlockBodyVerbose, error) { - if len(inputs) != len(b.Body.Transactions) { - return nil, fmt.Errorf("NewBlockBodyVerbose: len(inputs) != len(b.Body.Transactions) (seq=%d)", b.Head.BkSeq) - } - - txns := make([]BlockTransactionVerbose, len(b.Body.Transactions)) - for i := range b.Body.Transactions { - t := b.Body.Transactions[i] - - txn, err := NewBlockTransactionVerbose(t, inputs[i], b.Head.BkSeq == 0) - if err != nil { - return nil, err - } - txns[i] = txn - } - - return &BlockBodyVerbose{ - Transactions: txns, - }, nil -} - -// NewBlockVerbose creates a verbose readable block -func NewBlockVerbose(b coin.Block, inputs [][]visor.TransactionInput) (*BlockVerbose, error) { - body, err := NewBlockBodyVerbose(b, inputs) - if err != nil { - return nil, err - } - - size, err := b.Size() - if err != nil { - return nil, err - } - - return &BlockVerbose{ - Head: NewBlockHeader(b.Head), - Body: *body, - Size: size, - }, nil -} - -// BlocksVerbose an array of verbose readable blocks. -type BlocksVerbose struct { - Blocks []BlockVerbose `json:"blocks"` -} - -// NewBlocksVerbose creates BlocksVerbose from []BlockVerbose -func NewBlocksVerbose(blocks []coin.SignedBlock, inputs [][][]visor.TransactionInput) (*BlocksVerbose, error) { - bs := make([]BlockVerbose, len(blocks)) - for i := range blocks { - if i >= len(inputs) { - return nil, errors.New("NewBlocksVerbose: not enough inputs for blocks") - } - - b, err := NewBlockVerbose(blocks[i].Block, inputs[i]) - if err != nil { - return nil, err - } - - bs[i] = *b - } - - return &BlocksVerbose{ - Blocks: bs, - }, nil -} - -// BlockTransactionVerbose has readable transaction data for transactions inside a block. 
It differs from Transaction -// in that it includes metadata for transaction inputs and the calculated coinhour fee spent by the block -type BlockTransactionVerbose struct { - Length uint32 `json:"length"` - Type uint8 `json:"type"` - Hash string `json:"txid"` - InnerHash string `json:"inner_hash"` - Fee uint64 `json:"fee"` - - Sigs []string `json:"sigs"` - In []TransactionInput `json:"inputs"` - Out []TransactionOutput `json:"outputs"` -} - -// NewBlockTransactionVerbose creates BlockTransactionVerbose -func NewBlockTransactionVerbose(txn coin.Transaction, inputs []visor.TransactionInput, isGenesis bool) (BlockTransactionVerbose, error) { - if len(inputs) != len(txn.In) { - return BlockTransactionVerbose{}, errors.New("NewBlockTransactionVerbose: len(inputs) != len(txn.In)") - } - - // Genesis transaction uses empty SHA256 as txid - // FIXME: If/when the blockchain is regenerated, use a real hash as the txID for the genesis block. The bkSeq argument can be removed then. - txID := cipher.SHA256{} - if !isGenesis { - txID = txn.Hash() - } - - sigs := make([]string, len(txn.Sigs)) - for i, s := range txn.Sigs { - sigs[i] = s.Hex() - } - - out := make([]TransactionOutput, len(txn.Out)) - for i := range txn.Out { - o, err := NewTransactionOutput(&txn.Out[i], txID) - if err != nil { - return BlockTransactionVerbose{}, err - } - - out[i] = *o - } - - var hoursIn uint64 - for _, i := range inputs { - if _, err := mathutil.AddUint64(hoursIn, i.CalculatedHours); err != nil { - logger.Critical().Warningf("Ignoring NewBlockTransactionVerbose summing txn %s input hours error: %v", txID.Hex(), err) - } - hoursIn += i.CalculatedHours - } - - var hoursOut uint64 - for _, o := range txn.Out { - if _, err := mathutil.AddUint64(hoursOut, o.Hours); err != nil { - logger.Critical().Warningf("Ignoring NewBlockTransactionVerbose summing txn %s outputs hours error: %v", txID.Hex(), err) - } - - hoursOut += o.Hours - } - - var fee uint64 - if isGenesis { - if hoursIn != 0 { - err := 
errors.New("NewBlockTransactionVerbose genesis block should have 0 input hours") - return BlockTransactionVerbose{}, err - } - - fee = 0 - } else { - if hoursIn < hoursOut { - err := fmt.Errorf("NewBlockTransactionVerbose input hours is less than output hours, txid=%s", txID.Hex()) - return BlockTransactionVerbose{}, err - } - - fee = hoursIn - hoursOut - } - - txnInputs := make([]TransactionInput, len(inputs)) - for i, input := range inputs { - var err error - txnInputs[i], err = NewTransactionInput(input) - if err != nil { - return BlockTransactionVerbose{}, err - } - } - - return BlockTransactionVerbose{ - Length: txn.Length, - Type: txn.Type, - Hash: txn.Hash().Hex(), - InnerHash: txn.InnerHash.Hex(), - Fee: fee, - - Sigs: sigs, - In: txnInputs, - Out: out, - }, nil -} - -// TransactionVerbose has readable transaction data. It adds TransactionStatus to a BlockTransactionVerbose -type TransactionVerbose struct { - Status *TransactionStatus `json:"status,omitempty"` - Timestamp uint64 `json:"timestamp,omitempty"` - BlockTransactionVerbose -} - -// NewTransactionVerbose creates TransactionVerbose -func NewTransactionVerbose(txn visor.Transaction, inputs []visor.TransactionInput) (TransactionVerbose, error) { - rb, err := NewBlockTransactionVerbose(txn.Transaction, inputs, txn.Status.BlockSeq == 0 && txn.Status.Confirmed) - if err != nil { - return TransactionVerbose{}, err - } - - status := NewTransactionStatus(txn.Status) - - return TransactionVerbose{ - Status: &status, - Timestamp: txn.Time, - BlockTransactionVerbose: rb, - }, nil -} - -// UnconfirmedTransactionVerbose represents a verbose readable unconfirmed transaction -type UnconfirmedTransactionVerbose struct { - Transaction BlockTransactionVerbose `json:"transaction"` - Received time.Time `json:"received"` - Checked time.Time `json:"checked"` - Announced time.Time `json:"announced"` - IsValid bool `json:"is_valid"` -} - -// NewUnconfirmedTransactionVerbose creates a verbose readable unconfirmed 
transaction -func NewUnconfirmedTransactionVerbose(unconfirmed *visor.UnconfirmedTransaction, inputs []visor.TransactionInput) (*UnconfirmedTransactionVerbose, error) { - isGenesis := false // The genesis transaction is never unconfirmed - txn, err := NewBlockTransactionVerbose(unconfirmed.Transaction, inputs, isGenesis) - if err != nil { - return nil, err - } - - return &UnconfirmedTransactionVerbose{ - Transaction: txn, - Received: timeutil.NanoToTime(unconfirmed.Received), - Checked: timeutil.NanoToTime(unconfirmed.Checked), - Announced: timeutil.NanoToTime(unconfirmed.Announced), - IsValid: unconfirmed.IsValid == 1, - }, nil -} - -// NewUnconfirmedTransactionsVerbose creates []UnconfirmedTransactions from []UnconfirmedTransaction and their readable transaction inputs -func NewUnconfirmedTransactionsVerbose(txns []visor.UnconfirmedTransaction, inputs [][]visor.TransactionInput) ([]UnconfirmedTransactionVerbose, error) { - if len(inputs) != len(txns) { - return nil, fmt.Errorf("NewUnconfirmedTransactionsVerbose: len(inputs) != len(txns)") - } - - rTxns := make([]UnconfirmedTransactionVerbose, len(txns)) - for i, txn := range txns { - rTxn, err := NewUnconfirmedTransactionVerbose(&txn, inputs[i]) - if err != nil { - return nil, err - } - - rTxns[i] = *rTxn - } - - return rTxns, nil -} diff --git a/vendor/github.com/SkycoinProject/skycoin/src/readable/version.go b/vendor/github.com/SkycoinProject/skycoin/src/readable/version.go deleted file mode 100644 index d85b197..0000000 --- a/vendor/github.com/SkycoinProject/skycoin/src/readable/version.go +++ /dev/null @@ -1,20 +0,0 @@ -package readable - -import "github.com/blang/semver" - -// BuildInfo represents the build info -type BuildInfo struct { - Version string `json:"version"` // version number - Commit string `json:"commit"` // git commit id - Branch string `json:"branch"` // git branch name -} - -// Semver returns the parsed semver.Version of the configured Version string -func (b BuildInfo) Semver() 
(*semver.Version, error) { - sv, err := semver.Make(b.Version) - if err != nil { - return nil, err - } - - return &sv, nil -} diff --git a/vendor/github.com/SkycoinProject/skycoin/src/readable/wallet.go b/vendor/github.com/SkycoinProject/skycoin/src/readable/wallet.go deleted file mode 100644 index 174c658..0000000 --- a/vendor/github.com/SkycoinProject/skycoin/src/readable/wallet.go +++ /dev/null @@ -1,68 +0,0 @@ -package readable - -import ( - "github.com/SkycoinProject/skycoin/src/cipher/bip44" - "github.com/SkycoinProject/skycoin/src/wallet" -) - -// Balance has coins and hours -type Balance struct { - Coins uint64 `json:"coins"` - Hours uint64 `json:"hours"` -} - -// NewBalance copies from wallet.Balance -func NewBalance(b wallet.Balance) Balance { - return Balance{ - Coins: b.Coins, - Hours: b.Hours, - } -} - -// BalancePair records the confirmed and predicted balance of an address -type BalancePair struct { - Confirmed Balance `json:"confirmed"` - Predicted Balance `json:"predicted"` // TODO rename "pending" -} - -// NewBalancePair copies from wallet.BalancePair -func NewBalancePair(bp wallet.BalancePair) BalancePair { - return BalancePair{ - Confirmed: NewBalance(bp.Confirmed), - Predicted: NewBalance(bp.Predicted), - } -} - -// AddressBalances represents a map of address balances -type AddressBalances map[string]BalancePair - -// NewAddressBalances copies from wallet.AddressBalances -func NewAddressBalances(wab wallet.AddressBalances) AddressBalances { - ab := make(AddressBalances, len(wab)) - for k, v := range wab { - ab[k] = NewBalancePair(v) - } - return ab -} - -// WalletEntry the wallet entry struct -type WalletEntry struct { - Address string `json:"address"` - Public string `json:"public_key"` - ChildNumber *uint32 `json:"child_number,omitempty"` // For bip32/44 - Change *uint32 `json:"change,omitempty"` // For bip44 -} - -// WalletMeta the wallet meta struct -type WalletMeta struct { - Coin wallet.CoinType `json:"coin"` - Filename string 
`json:"filename"` - Label string `json:"label"` - Type string `json:"type"` - Version string `json:"version"` - CryptoType wallet.CryptoType `json:"crypto_type"` - Timestamp int64 `json:"timestamp"` - Encrypted bool `json:"encrypted"` - Bip44Coin *bip44.CoinType `json:"bip44_coin,omitempty"` // For bip44 - XPub string `json:"xpub,omitempty"` // For xpub -} diff --git a/vendor/github.com/SkycoinProject/skycoin/src/testutil/testutil.go b/vendor/github.com/SkycoinProject/skycoin/src/testutil/testutil.go deleted file mode 100644 index 586be5b..0000000 --- a/vendor/github.com/SkycoinProject/skycoin/src/testutil/testutil.go +++ /dev/null @@ -1,120 +0,0 @@ -/* -Package testutil provides utility methods for testing -*/ -package testutil - -import ( - "crypto/rand" - "io/ioutil" - "os" - "testing" - - "github.com/boltdb/bolt" - "github.com/stretchr/testify/require" - - "github.com/SkycoinProject/skycoin/src/cipher" - "github.com/SkycoinProject/skycoin/src/cipher/bip32" - "github.com/SkycoinProject/skycoin/src/cipher/bip39" - "github.com/SkycoinProject/skycoin/src/cipher/bip44" - "github.com/SkycoinProject/skycoin/src/visor/dbutil" -) - -// PrepareDB creates and opens a temporary test DB and returns it with a cleanup callback -func PrepareDB(t *testing.T) (*dbutil.DB, func()) { - f, err := ioutil.TempFile("", "testdb") - require.NoError(t, err) - - db, err := bolt.Open(f.Name(), 0700, nil) - require.NoError(t, err) - - return dbutil.WrapDB(db), func() { - err := db.Close() - if err != nil { - t.Logf("Failed to close database: %v", err) - } - - err = f.Close() - if err != nil { - t.Logf("Failed to close file: %v", err) - } - - err = os.Remove(f.Name()) - if err != nil { - t.Logf("Failed to remove temp file %s: %v", f.Name(), err) - } - } -} - -// RequireError requires that an error is not nil and that its message matches -func RequireError(t *testing.T, err error, msg string) { - t.Helper() - require.Error(t, err) - require.NotNil(t, err) - require.Equal(t, msg, err.Error()) 
-} - -// MakeAddress creates a cipher.Address -func MakeAddress() cipher.Address { - p, _ := cipher.GenerateKeyPair() - return cipher.AddressFromPubKey(p) -} - -// MakePubKey creates a cipher.PubKey -func MakePubKey() cipher.PubKey { - p, _ := cipher.GenerateKeyPair() - return p -} - -// RandBytes returns n random bytes -func RandBytes(t *testing.T, n int) []byte { - b := make([]byte, n) - _, err := rand.Read(b) - require.NoError(t, err) - return b -} - -// RandSHA256 returns a random SHA256 hash -func RandSHA256(t *testing.T) cipher.SHA256 { - return cipher.SumSHA256(RandBytes(t, 128)) -} - -// SHA256FromHex converts an SHA256 hex string to a cipher.SHA256 -func SHA256FromHex(t *testing.T, hex string) cipher.SHA256 { - sha, err := cipher.SHA256FromHex(hex) - require.NoError(t, err) - return sha -} - -// RandSig returns a random cipher.Sig -func RandSig(t *testing.T) cipher.Sig { - s, err := cipher.NewSig(RandBytes(t, 65)) - require.NoError(t, err) - return s -} - -// RequireFileExists requires that a file exists -func RequireFileExists(t *testing.T, fn string) os.FileInfo { - stat, err := os.Stat(fn) - require.NoError(t, err) - return stat -} - -// RequireFileNotExists requires that a file doesn't exist -func RequireFileNotExists(t *testing.T, fn string) { - _, err := os.Stat(fn) - require.True(t, os.IsNotExist(err)) -} - -// RandXPub creates a random xpub key -func RandXPub(t *testing.T) *bip32.PublicKey { - m := bip39.MustNewDefaultMnemonic() - s, err := bip39.NewSeed(m, "") - require.NoError(t, err) - c, err := bip44.NewCoin(s, bip44.CoinTypeSkycoin) - require.NoError(t, err) - x, err := c.Account(0) - require.NoError(t, err) - e, err := x.External() - require.NoError(t, err) - return e.PublicKey() -} diff --git a/vendor/github.com/SkycoinProject/skycoin/src/transaction/choose.go b/vendor/github.com/SkycoinProject/skycoin/src/transaction/choose.go deleted file mode 100644 index f090d0b..0000000 --- 
a/vendor/github.com/SkycoinProject/skycoin/src/transaction/choose.go +++ /dev/null @@ -1,278 +0,0 @@ -package transaction - -import ( - "bytes" - "errors" - "sort" - - "github.com/SkycoinProject/skycoin/src/cipher" - "github.com/SkycoinProject/skycoin/src/coin" - "github.com/SkycoinProject/skycoin/src/params" - "github.com/SkycoinProject/skycoin/src/util/fee" -) - -var ( - // ErrInsufficientBalance is returned if a wallet does not have enough balance for a spend - ErrInsufficientBalance = NewError(errors.New("balance is not sufficient")) - // ErrInsufficientHours is returned if a wallet does not have enough hours for a spend with requested hours - ErrInsufficientHours = NewError(errors.New("hours are not sufficient")) - // ErrZeroSpend is returned if a transaction is trying to spend 0 coins - ErrZeroSpend = NewError(errors.New("zero spend amount")) - // ErrNoUnspents is returned if a Create is called with no unspent outputs - ErrNoUnspents = NewError(errors.New("no unspents to spend")) -) - -// UxBalance is an intermediate representation of a UxOut for sorting and spend choosing -type UxBalance struct { - Hash cipher.SHA256 - BkSeq uint64 - Time uint64 - Address cipher.Address - Coins uint64 - InitialHours uint64 - Hours uint64 - SrcTransaction cipher.SHA256 -} - -// NewUxBalances converts coin.UxArray to []UxBalance. headTime is required to calculate coin hours. -func NewUxBalances(uxa coin.UxArray, headTime uint64) ([]UxBalance, error) { - uxb := make([]UxBalance, len(uxa)) - for i, ux := range uxa { - b, err := NewUxBalance(headTime, ux) - if err != nil { - return nil, err - } - uxb[i] = b - } - - return uxb, nil -} - -// NewUxBalance converts coin.UxOut to UxBalance. headTime is required to calculate coin hours. 
-func NewUxBalance(headTime uint64, ux coin.UxOut) (UxBalance, error) { - hours, err := ux.CoinHours(headTime) - if err != nil { - return UxBalance{}, err - } - - return UxBalance{ - Hash: ux.Hash(), - BkSeq: ux.Head.BkSeq, - Time: ux.Head.Time, - Address: ux.Body.Address, - Coins: ux.Body.Coins, - InitialHours: ux.Body.Hours, - Hours: hours, - SrcTransaction: ux.Body.SrcTransaction, - }, nil -} - -func uxBalancesSub(a, b []UxBalance) []UxBalance { - var x []UxBalance - - bMap := make(map[cipher.SHA256]struct{}, len(b)) - for _, i := range b { - bMap[i.Hash] = struct{}{} - } - - for _, i := range a { - if _, ok := bMap[i.Hash]; !ok { - x = append(x, i) - } - } - - return x -} - -// ChooseSpendsMinimizeUxOuts chooses uxout spends to satisfy an amount, using the least number of uxouts -// -- PRO: Allows more frequent spending, less waiting for confirmations, useful for exchanges. -// -- PRO: When transaction is volume is higher, transactions are prioritized by fee/size. Minimizing uxouts minimizes size. -// -- CON: Would make the unconfirmed pool grow larger. -// Users with high transaction frequency will want to use this so that they will not need to wait as frequently -// for unconfirmed spends to complete before sending more. -// Alternatively, or in addition to this, they should batch sends into single transactions. -func ChooseSpendsMinimizeUxOuts(uxa []UxBalance, coins, hours uint64) ([]UxBalance, error) { - return ChooseSpends(uxa, coins, hours, sortSpendsCoinsHighToLow) -} - -// sortSpendsCoinsHighToLow sorts uxout spends with highest balance to lowest -func sortSpendsCoinsHighToLow(uxa []UxBalance) { - sort.Slice(uxa, makeCmpUxOutByCoins(uxa, func(a, b uint64) bool { - return a > b - })) -} - -// ChooseSpendsMaximizeUxOuts chooses uxout spends to satisfy an amount, using the most number of uxouts -// See the pros and cons of ChooseSpendsMinimizeUxOuts. 
-// This should be the default mode, because this keeps the unconfirmed pool smaller which will allow -// the network to scale better. -func ChooseSpendsMaximizeUxOuts(uxa []UxBalance, coins, hours uint64) ([]UxBalance, error) { - return ChooseSpends(uxa, coins, hours, sortSpendsCoinsLowToHigh) -} - -// sortSpendsCoinsLowToHigh sorts uxout spends with lowest balance to highest -func sortSpendsCoinsLowToHigh(uxa []UxBalance) { - sort.Slice(uxa, makeCmpUxOutByCoins(uxa, func(a, b uint64) bool { - return a < b - })) -} - -// sortSpendsHoursLowToHigh sorts uxout spends with lowest hours to highest -func sortSpendsHoursLowToHigh(uxa []UxBalance) { - sort.Slice(uxa, makeCmpUxOutByHours(uxa, func(a, b uint64) bool { - return a < b - })) -} - -func makeCmpUxOutByCoins(uxa []UxBalance, coinsCmp func(a, b uint64) bool) func(i, j int) bool { - // Sort by: - // coins highest or lowest depending on coinsCmp - // hours lowest - // oldest first - // tie break with hash comparison - return func(i, j int) bool { - a := uxa[i] - b := uxa[j] - - if a.Coins == b.Coins { - if a.Hours == b.Hours { - if a.BkSeq == b.BkSeq { - return cmpUxBalanceByUxID(a, b) - } - return a.BkSeq < b.BkSeq - } - return a.Hours < b.Hours - } - return coinsCmp(a.Coins, b.Coins) - } -} - -func makeCmpUxOutByHours(uxa []UxBalance, hoursCmp func(a, b uint64) bool) func(i, j int) bool { - // Sort by: - // hours highest or lowest depending on hoursCmp - // coins lowest - // oldest first - // tie break with hash comparison - return func(i, j int) bool { - a := uxa[i] - b := uxa[j] - - if a.Hours == b.Hours { - if a.Coins == b.Coins { - if a.BkSeq == b.BkSeq { - return cmpUxBalanceByUxID(a, b) - } - return a.BkSeq < b.BkSeq - } - return a.Coins < b.Coins - } - return hoursCmp(a.Hours, b.Hours) - } -} - -func cmpUxBalanceByUxID(a, b UxBalance) bool { - cmp := bytes.Compare(a.Hash[:], b.Hash[:]) - if cmp == 0 { - logger.Panic("Duplicate UxOut when sorting") - } - return cmp < 0 -} - -// ChooseSpends chooses uxouts 
from a list of uxouts. -// It first chooses the uxout with the most number of coins that has nonzero coinhours. -// It then chooses uxouts with zero coinhours, ordered by sortStrategy -// It then chooses remaining uxouts with nonzero coinhours, ordered by sortStrategy -func ChooseSpends(uxa []UxBalance, coins, hours uint64, sortStrategy func([]UxBalance)) ([]UxBalance, error) { - if coins == 0 { - return nil, ErrZeroSpend - } - - if len(uxa) == 0 { - return nil, ErrNoUnspents - } - - for _, ux := range uxa { - if ux.Coins == 0 { - logger.Panic("UxOut coins are 0, can't spend") - return nil, errors.New("UxOut coins are 0, can't spend") - } - } - - // Split UxBalances into those with and without hours - var nonzero, zero []UxBalance - for _, ux := range uxa { - if ux.Hours == 0 { - zero = append(zero, ux) - } else { - nonzero = append(nonzero, ux) - } - } - - // Abort if there are no uxouts with non-zero coinhours, they can't be spent yet - if len(nonzero) == 0 { - return nil, fee.ErrTxnNoFee - } - - // Sort uxouts with hours lowest to highest and coins highest to lowest - sortSpendsCoinsHighToLow(nonzero) - - var haveCoins uint64 - var haveHours uint64 - var spending []UxBalance - - // Use the first nonzero output. 
This output will have the least hours possible - firstNonzero := nonzero[0] - if firstNonzero.Hours == 0 { - logger.Panic("balance has zero hours unexpectedly") - return nil, errors.New("balance has zero hours unexpectedly") - } - - nonzero = nonzero[1:] - - spending = append(spending, firstNonzero) - - haveCoins += firstNonzero.Coins - haveHours += firstNonzero.Hours - - if haveCoins >= coins && fee.RemainingHours(haveHours, params.UserVerifyTxn.BurnFactor) >= hours { - return spending, nil - } - - // Sort uxouts without hours according to the sorting strategy - sortStrategy(zero) - - for _, ux := range zero { - spending = append(spending, ux) - - haveCoins += ux.Coins - haveHours += ux.Hours - - if haveCoins >= coins { - break - } - } - - if haveCoins >= coins && fee.RemainingHours(haveHours, params.UserVerifyTxn.BurnFactor) >= hours { - return spending, nil - } - - // Sort remaining uxouts with hours according to the sorting strategy - sortStrategy(nonzero) - - for _, ux := range nonzero { - spending = append(spending, ux) - - haveCoins += ux.Coins - haveHours += ux.Hours - - if haveCoins >= coins && fee.RemainingHours(haveHours, params.UserVerifyTxn.BurnFactor) >= hours { - return spending, nil - } - } - - if haveCoins < coins { - return nil, ErrInsufficientBalance - } - - return nil, ErrInsufficientHours -} diff --git a/vendor/github.com/SkycoinProject/skycoin/src/transaction/create.go b/vendor/github.com/SkycoinProject/skycoin/src/transaction/create.go deleted file mode 100644 index 795bb67..0000000 --- a/vendor/github.com/SkycoinProject/skycoin/src/transaction/create.go +++ /dev/null @@ -1,504 +0,0 @@ -/* -Package transaction provides methods for creating transactions - -See package coin for the Transaction object itself -*/ -package transaction - -import ( - "bytes" - "errors" - "fmt" - "sort" - - "github.com/shopspring/decimal" - "github.com/sirupsen/logrus" - - "github.com/SkycoinProject/skycoin/src/cipher" - "github.com/SkycoinProject/skycoin/src/coin" - 
"github.com/SkycoinProject/skycoin/src/params" - "github.com/SkycoinProject/skycoin/src/util/fee" - "github.com/SkycoinProject/skycoin/src/util/logging" - "github.com/SkycoinProject/skycoin/src/util/mathutil" -) - -var ( - logger = logging.MustGetLogger("txn") -) - -// Create creates an unsigned transaction based upon Params. -// NOTE: Caller must ensure that auxs correspond to params.UxOuts options -// Outputs to spend are chosen from the pool of outputs provided. -// The outputs are chosen by the following procedure: -// - All outputs are merged into one list and are sorted coins highest, hours lowest, with the hash as a tiebreaker -// - Outputs are chosen from the beginning of this list, until the requested amount of coins is met. -// If hours are also specified, selection continues until the requested amount of hours are met. -// - If the total amount of coins in the chosen outputs is exactly equal to the requested amount of coins, -// such that there would be no change output but hours remain as change, another output will be chosen to create change, -// if the coinhour cost of adding that output is less than the coinhours that would be lost as change -// If receiving hours are not explicitly specified, hours are allocated amongst the receiving outputs proportional to the number of coins being sent to them. -// If the change address is not specified, the address whose bytes are lexically sorted first is chosen from the owners of the outputs being spent. 
-func Create(p Params, auxs coin.AddressUxOuts, headTime uint64) (*coin.Transaction, []UxBalance, error) { - return create(p, auxs, headTime, 0) -} - -func create(p Params, auxs coin.AddressUxOuts, headTime uint64, callCount int) (*coin.Transaction, []UxBalance, error) { - logger.WithFields(logrus.Fields{ - "params": p, - "nAuxs": len(auxs), - "headTime": headTime, - "callCount": callCount, - }).Info("create requested") - - if err := p.Validate(); err != nil { - return nil, nil, err - } - - txn := &coin.Transaction{} - - // Determine which unspents to spend - uxa := auxs.Flatten() - - uxb, err := NewUxBalances(uxa, headTime) - if err != nil { - return nil, nil, err - } - - // Reverse lookup set to recover the inputs - uxbMap := make(map[cipher.SHA256]UxBalance, len(uxb)) - for _, u := range uxb { - if _, ok := uxbMap[u.Hash]; ok { - return nil, nil, errors.New("Duplicate UxBalance in array") - } - uxbMap[u.Hash] = u - } - - // Calculate total coins and minimum hours to send - var totalOutCoins uint64 - var requestedHours uint64 - for _, to := range p.To { - totalOutCoins, err = mathutil.AddUint64(totalOutCoins, to.Coins) - if err != nil { - return nil, nil, NewError(fmt.Errorf("total output coins error: %v", err)) - } - - requestedHours, err = mathutil.AddUint64(requestedHours, to.Hours) - if err != nil { - return nil, nil, NewError(fmt.Errorf("total output hours error: %v", err)) - } - } - - // Use the MinimizeUxOuts strategy, to use least possible uxouts - // this will allow more frequent spending - // we don't need to check whether we have sufficient balance beforehand as ChooseSpends already checks that - spends, err := ChooseSpendsMinimizeUxOuts(uxb, totalOutCoins, requestedHours) - if err != nil { - return nil, nil, err - } - - // Calculate total coins and hours in spends - var totalInputCoins uint64 - var totalInputHours uint64 - for _, spend := range spends { - totalInputCoins, err = mathutil.AddUint64(totalInputCoins, spend.Coins) - if err != nil { - 
return nil, nil, err - } - - totalInputHours, err = mathutil.AddUint64(totalInputHours, spend.Hours) - if err != nil { - return nil, nil, err - } - - if err := txn.PushInput(spend.Hash); err != nil { - logger.Critical().WithError(err).Error("PushInput failed") - return nil, nil, err - } - } - - feeHours := fee.RequiredFee(totalInputHours, params.UserVerifyTxn.BurnFactor) - if feeHours == 0 { - // feeHours can only be 0 if totalInputHours is 0, and if totalInputHours was 0 - // then ChooseSpendsMinimizeUxOuts should have already returned an error - err := errors.New("Chosen spends have no coin hours, unexpectedly") - logger.Critical().WithError(err).WithField("totalInputHours", totalInputHours).Error() - return nil, nil, err - } - remainingHours := totalInputHours - feeHours - - switch p.HoursSelection.Type { - case HoursSelectionTypeManual: - for _, o := range p.To { - if err := txn.PushOutput(o.Address, o.Coins, o.Hours); err != nil { - logger.Critical().WithError(err).WithField("selectionType", HoursSelectionTypeManual).Error("PushOutput failed") - return nil, nil, err - } - } - - case HoursSelectionTypeAuto: - var addrHours []uint64 - - switch p.HoursSelection.Mode { - case HoursSelectionModeShare: - // multiply remaining hours after fee burn with share factor - hours, err := mathutil.Uint64ToInt64(remainingHours) - if err != nil { - return nil, nil, err - } - - allocatedHoursInt := p.HoursSelection.ShareFactor.Mul(decimal.New(hours, 0)).IntPart() - allocatedHours, err := mathutil.Int64ToUint64(allocatedHoursInt) - if err != nil { - return nil, nil, err - } - - toCoins := make([]uint64, len(p.To)) - for i, to := range p.To { - toCoins[i] = to.Coins - } - - addrHours, err = DistributeCoinHoursProportional(toCoins, allocatedHours) - if err != nil { - return nil, nil, err - } - default: - // This should have been caught by params.Validate() - logger.Panic("Invalid HoursSelection.Mode") - return nil, nil, errors.New("Invalid HoursSelection.Type") - } - - for i, out 
:= range p.To { - out.Hours = addrHours[i] - if err := txn.PushOutput(out.Address, out.Coins, addrHours[i]); err != nil { - logger.Critical().WithError(err).WithField("selectionType", HoursSelectionTypeAuto).Error("PushOutput failed") - return nil, nil, err - } - } - - default: - // This should have been caught by params.Validate() - logger.Panic("Invalid HoursSelection.Type") - return nil, nil, errors.New("Invalid HoursSelection.Type") - } - - totalOutHours, err := txn.OutputHours() - if err != nil { - return nil, nil, err - } - - // Make sure we have enough coins and coin hours - // If we don't, and we called ChooseSpends, then ChooseSpends has a bug, as it should have returned this error already - if totalOutCoins > totalInputCoins { - logger.Critical().WithError(ErrInsufficientBalance).Error("Insufficient coins after choosing spends, this should not occur") - return nil, nil, ErrInsufficientBalance - } - - if totalOutHours > remainingHours { - logger.Critical().WithError(fee.ErrTxnInsufficientCoinHours).Error("Insufficient hours after choosing spends or distributing hours, this should not occur") - return nil, nil, fee.ErrTxnInsufficientCoinHours - } - - // Create change output - changeCoins := totalInputCoins - totalOutCoins - changeHours := remainingHours - totalOutHours - - logger.WithFields(logrus.Fields{ - "totalOutCoins": totalOutCoins, - "totalOutHours": totalOutHours, - "requestedHours": requestedHours, - "nUnspents": len(uxb), - "totalInputCoins": totalInputCoins, - "totalInputHours": totalInputHours, - "feeHours": feeHours, - "remainingHours": remainingHours, - "changeCoins": changeCoins, - "changeHours": changeHours, - "nSpends": len(spends), - "nInputs": len(txn.In), - }).Info("Calculated spend parameters") - - // If there are no change coins but there are change hours, try to add another - // input to save the change hours. 
- // This chooses an available input with the least number of coin hours; - // if the extra coin hour fee incurred by this additional input is less than - // the remaining coin hours, the input is added. - if changeCoins == 0 && changeHours > 0 { - logger.Info("Trying to recover change hours by forcing an extra input") - // Find the output with the least coin hours - // If size of the fee for this output is less than the changeHours, add it - // Update changeCoins and changeHours - z := uxBalancesSub(uxb, spends) - sortSpendsHoursLowToHigh(z) - if len(z) > 0 { - logger.Info("Extra input found, evaluating if it can recover change hours") - extra := z[0] - - // Calculate the new hours being spent - newTotalHours, err := mathutil.AddUint64(totalInputHours, extra.Hours) - if err != nil { - return nil, nil, err - } - - // Calculate the new fee for this new amount of hours - newFee := fee.RequiredFee(newTotalHours, params.UserVerifyTxn.BurnFactor) - if newFee < feeHours { - err := errors.New("updated fee after adding extra input for change is unexpectedly less than it was initially") - logger.WithError(err).Error() - return nil, nil, err - } - - // If the cost of adding this extra input is less than the amount of change hours we - // can save, use the input - additionalFee := newFee - feeHours - if additionalFee < changeHours { - logger.Info("Change hours can be recovered by forcing an extra input") - changeCoins = extra.Coins - - if extra.Hours < additionalFee { - err := errors.New("calculated additional fee is unexpectedly higher than the extra input's hours") - logger.WithError(err).Error() - return nil, nil, err - } - - additionalHours := extra.Hours - additionalFee - changeHours, err = mathutil.AddUint64(changeHours, additionalHours) - if err != nil { - return nil, nil, err - } - - spends = append(spends, extra) - - if err := txn.PushInput(extra.Hash); err != nil { - logger.Critical().WithError(err).Error("PushInput failed") - return nil, nil, err - } - - 
logger.WithFields(logrus.Fields{ - "changeCoins": changeCoins, - "changeHours": changeHours, - "nSpends": len(spends), - "nInputs": len(txn.In), - "newTotalHours": newTotalHours, - "newFee": "newFee", - "additionalFee": additionalFee, - "additionalHours": additionalHours, - }).Info("Recalculated spend parameters after forcing a change output") - } else { - logger.Info("Unable to recover change hours by forcing an extra input") - } - } else { - logger.Info("No more inputs left to use to recover change hours") - } - } - - // With auto share mode, if there are leftover hours and change couldn't be force-added, - // recalculate that share ratio at 100% - if changeCoins == 0 && changeHours > 0 && p.HoursSelection.Type == HoursSelectionTypeAuto && p.HoursSelection.Mode == HoursSelectionModeShare { - logger.Info("Recalculating share factor at 1.0 to avoid burning change hours") - oneDecimal := decimal.New(1, 0) - - if p.HoursSelection.ShareFactor.Equal(oneDecimal) { - err := errors.New("share factor is 1.0 but changeHours > 0 unexpectedly") - logger.Critical().WithError(err).Error() - return nil, nil, err - } - - // Double-check that we haven't already called create() once already - - // if for some reason the previous check fails, we'll end up in an infinite loop - if callCount > 0 { - err := errors.New("transaction.Create already fell back to share ratio 1.0") - logger.Critical().WithError(err).Error() - return nil, nil, err - } - - p.HoursSelection.ShareFactor = &oneDecimal - return create(p, auxs, headTime, 1) - } - - if changeCoins > 0 { - var changeAddress cipher.Address - if p.ChangeAddress != nil { - changeAddress = *p.ChangeAddress - } else { - // Choose a change address from the unspent outputs - // Sort spends by address, comparing bytes, and use the first - // This provides deterministic change address selection from a set of unspent outputs - if len(spends) == 0 { - return nil, nil, errors.New("spends is unexpectedly empty when choosing an automatic change 
address") - } - - addressBytes := make([][]byte, len(spends)) - for i, s := range spends { - addressBytes[i] = s.Address.Bytes() - } - - sort.Slice(addressBytes, func(i, j int) bool { - return bytes.Compare(addressBytes[i], addressBytes[j]) < 0 - }) - - var err error - changeAddress, err = cipher.AddressFromBytes(addressBytes[0]) - if err != nil { - logger.Critical().WithError(err).Error("cipher.AddressFromBytes failed for change address converted to bytes") - return nil, nil, err - } - - logger.WithField("addr", changeAddress).Info("Automatically selected a change address") - } - - logger.WithFields(logrus.Fields{ - "changeAddress": changeAddress, - "changeCoins": changeCoins, - "changeHours": changeHours, - }).Info("Adding a change output") - - if err := txn.PushOutput(changeAddress, changeCoins, changeHours); err != nil { - logger.Critical().WithError(err).Error("PushOutput failed") - return nil, nil, err - } - } - - // Initialize unsigned transaction - txn.Sigs = make([]cipher.Sig, len(txn.In)) - - if err := txn.UpdateHeader(); err != nil { - logger.Critical().WithError(err).Error("txn.UpdateHeader failed") - return nil, nil, err - } - - inputs := make([]UxBalance, len(txn.In)) - for i, h := range txn.In { - uxBalance, ok := uxbMap[h] - if !ok { - err := errors.New("Created transaction's input is not in the UxBalanceSet, this should not occur") - logger.Critical().WithError(err).Error() - return nil, nil, err - } - inputs[i] = uxBalance - } - - if err := verifyCreatedUnignedInvariants(p, txn, inputs); err != nil { - logger.Critical().WithError(err).Error("CreateTransaction created transaction that violates invariants, aborting") - return nil, nil, fmt.Errorf("Created transaction that violates invariants, this is a bug: %v", err) - } - - return txn, inputs, nil -} - -func verifyCreatedUnignedInvariants(p Params, txn *coin.Transaction, inputs []UxBalance) error { - if !txn.IsFullyUnsigned() { - return errors.New("Transaction is not fully unsigned") - } - - if err 
:= VerifyCreatedInvariants(p, txn, inputs); err != nil { - return err - } - - return nil -} - -// VerifyCreatedInvariants checks that the transaction that was created matches expectations. -// Does not call visor verification methods because that causes import cycle due to the wallet package. -// daemon.Gateway checks that the transaction passes additional visor verification methods. -// TODO -- could fix the import cycle by having visor create the transaction, passing it to the wallet for verifying params and signing -// This method still compares some values of Params against the created txn and doesn't only verify that the txn is well formed -func VerifyCreatedInvariants(p Params, txn *coin.Transaction, inputs []UxBalance) error { - for _, o := range txn.Out { - // No outputs should be sent to the null address - if o.Address.Null() { - return errors.New("Output address is null") - } - - if o.Coins == 0 { - return errors.New("Output coins is 0") - } - } - - if len(txn.Out) != len(p.To) && len(txn.Out) != len(p.To)+1 { - return errors.New("Transaction has unexpected number of outputs") - } - - for i, o := range txn.Out[:len(p.To)] { - if o.Address != p.To[i].Address { - return errors.New("Output address does not match requested address") - } - - if o.Coins != p.To[i].Coins { - return errors.New("Output coins does not match requested coins") - } - - if p.To[i].Hours != 0 && o.Hours != p.To[i].Hours { - return errors.New("Output hours does not match requested hours") - } - } - - if len(txn.Sigs) != len(txn.In) { - return errors.New("Number of signatures does not match number of inputs") - } - - if len(txn.In) != len(inputs) { - return errors.New("Number of UxOut inputs does not match number of transaction inputs") - } - - for i, h := range txn.In { - if inputs[i].Hash != h { - return errors.New("Transaction input hash does not match UxOut inputs hash") - } - } - - inputsMap := make(map[cipher.SHA256]struct{}, len(inputs)) - - for _, i := range inputs { - if i.Hours 
< i.InitialHours { - return errors.New("Calculated input hours are unexpectedly less than the initial hours") - } - - if i.BkSeq == 0 { - if !i.SrcTransaction.Null() { - return errors.New("Input is the genesis UTXO but its source transaction hash is not null") - } - } else { - if i.SrcTransaction.Null() { - return errors.New("Input's source transaction hash is null") - } - } - - if i.Hash.Null() { - return errors.New("Input's hash is null") - } - - if _, ok := inputsMap[i.Hash]; ok { - return errors.New("Duplicate input in array") - } - - inputsMap[i.Hash] = struct{}{} - } - - var inputHours uint64 - for _, i := range inputs { - var err error - inputHours, err = mathutil.AddUint64(inputHours, i.Hours) - if err != nil { - return err - } - } - - var outputHours uint64 - for _, i := range txn.Out { - var err error - outputHours, err = mathutil.AddUint64(outputHours, i.Hours) - if err != nil { - return err - } - } - - if inputHours < outputHours { - return errors.New("Total input hours is less than the output hours") - } - - if inputHours-outputHours < fee.RequiredFee(inputHours, params.UserVerifyTxn.BurnFactor) { - return errors.New("Transaction will not satisfy required fee") - } - - return nil -} diff --git a/vendor/github.com/SkycoinProject/skycoin/src/transaction/hours.go b/vendor/github.com/SkycoinProject/skycoin/src/transaction/hours.go deleted file mode 100644 index 93997ca..0000000 --- a/vendor/github.com/SkycoinProject/skycoin/src/transaction/hours.go +++ /dev/null @@ -1,165 +0,0 @@ -package transaction - -import ( - "errors" - "math/big" - - "github.com/SkycoinProject/skycoin/src/params" - "github.com/SkycoinProject/skycoin/src/util/fee" - "github.com/SkycoinProject/skycoin/src/util/mathutil" -) - -// DistributeSpendHours calculates how many coin hours to transfer to the change address and how -// many to transfer to each of the other destination addresses. -// Input hours are split by BurnFactor (rounded down) to meet the fee requirement. 
-// The remaining hours are split in half, one half goes to the change address -// and the other half goes to the destination addresses. -// If the remaining hours are an odd number, the change address gets the extra hour. -// If the amount assigned to the destination addresses is not perfectly divisible by the -// number of destination addresses, the extra hours are distributed to some of these addresses. -// Returns the number of hours to send to the change address, -// an array of length nAddrs with the hours to give to each destination address, -// and a sum of these values. -func DistributeSpendHours(inputHours, nAddrs uint64, haveChange bool) (uint64, []uint64, uint64) { - feeHours := fee.RequiredFee(inputHours, params.UserVerifyTxn.BurnFactor) - remainingHours := inputHours - feeHours - - var changeHours uint64 - if haveChange { - // Split the remaining hours between the change output and the other outputs - changeHours = remainingHours / 2 - - // If remainingHours is an odd number, give the extra hour to the change output - if remainingHours%2 == 1 { - changeHours++ - } - } - - // Distribute the remaining hours equally amongst the destination outputs - remainingAddrHours := remainingHours - changeHours - addrHoursShare := remainingAddrHours / nAddrs - - // Due to integer division, extra coin hours might remain after dividing by len(toAddrs) - // Allocate these extra hours to the toAddrs - addrHours := make([]uint64, nAddrs) - for i := range addrHours { - addrHours[i] = addrHoursShare - } - - extraHours := remainingAddrHours - (addrHoursShare * nAddrs) - i := 0 - for extraHours > 0 { - addrHours[i] = addrHours[i] + 1 - i++ - extraHours-- - } - - // Assert that the hour calculation is correct - var spendHours uint64 - for _, h := range addrHours { - spendHours += h - } - spendHours += changeHours - if spendHours != remainingHours { - logger.Panicf("spendHours != remainingHours (%d != %d), calculation error", spendHours, remainingHours) - } - - return 
changeHours, addrHours, spendHours -} - -// DistributeCoinHoursProportional distributes hours amongst coins proportional to the coins amount -func DistributeCoinHoursProportional(coins []uint64, hours uint64) ([]uint64, error) { - if len(coins) == 0 { - return nil, errors.New("DistributeCoinHoursProportional coins array must not be empty") - } - - coinsInt := make([]*big.Int, len(coins)) - - var total uint64 - for i, c := range coins { - if c == 0 { - return nil, errors.New("DistributeCoinHoursProportional coins array has a zero value") - } - - var err error - total, err = mathutil.AddUint64(total, c) - if err != nil { - return nil, err - } - - cInt64, err := mathutil.Uint64ToInt64(c) - if err != nil { - return nil, err - } - - coinsInt[i] = big.NewInt(cInt64) - } - - totalInt64, err := mathutil.Uint64ToInt64(total) - if err != nil { - return nil, err - } - totalInt := big.NewInt(totalInt64) - - hoursInt64, err := mathutil.Uint64ToInt64(hours) - if err != nil { - return nil, err - } - hoursInt := big.NewInt(hoursInt64) - - var assignedHours uint64 - addrHours := make([]uint64, len(coins)) - for i, c := range coinsInt { - // Scale the ratio of coins to total coins proportionally by calculating - // (coins * totalHours) / totalCoins - // The remainder is truncated, remaining hours are appended after this - num := &big.Int{} - num.Mul(c, hoursInt) - - fracInt := big.Int{} - fracInt.Div(num, totalInt) - - if !fracInt.IsUint64() { - return nil, errors.New("DistributeCoinHoursProportional calculated fractional hours is not representable as a uint64") - } - - fracHours := fracInt.Uint64() - - addrHours[i] = fracHours - assignedHours, err = mathutil.AddUint64(assignedHours, fracHours) - if err != nil { - return nil, err - } - } - - if hours < assignedHours { - return nil, errors.New("DistributeCoinHoursProportional assigned hours exceeding input hours, this is a bug") - } - - remainingHours := hours - assignedHours - - if remainingHours > uint64(len(coins)) { - return nil, 
errors.New("DistributeCoinHoursProportional remaining hours exceed len(coins), this is a bug") - } - - // For remaining hours lost due to fractional cutoff when scaling, - // first provide at least 1 coin hour to coins that were assigned 0. - i := 0 - for remainingHours > 0 && i < len(coins) { - if addrHours[i] == 0 { - addrHours[i] = 1 - remainingHours-- - } - i++ - } - - // Then, assign the extra coin hours - i = 0 - for remainingHours > 0 { - addrHours[i] = addrHours[i] + 1 - remainingHours-- - i++ - } - - return addrHours, nil -} diff --git a/vendor/github.com/SkycoinProject/skycoin/src/transaction/params.go b/vendor/github.com/SkycoinProject/skycoin/src/transaction/params.go deleted file mode 100644 index 4285e6b..0000000 --- a/vendor/github.com/SkycoinProject/skycoin/src/transaction/params.go +++ /dev/null @@ -1,155 +0,0 @@ -package transaction - -import ( - "errors" - - "github.com/shopspring/decimal" - - "github.com/SkycoinProject/skycoin/src/cipher" - "github.com/SkycoinProject/skycoin/src/coin" -) - -// Error wraps transaction creation-related errors. -// It wraps errors caused by user input, but not errors caused by programmer input or internal issues. 
-type Error struct { - error -} - -// NewError creates an Error -func NewError(err error) error { - if err == nil { - return nil - } - return Error{err} -} - -const ( - // HoursSelectionTypeManual is used to specify manual hours selection in advanced spend - HoursSelectionTypeManual = "manual" - // HoursSelectionTypeAuto is used to specify automatic hours selection in advanced spend - HoursSelectionTypeAuto = "auto" - - // HoursSelectionModeShare will distribute coin hours equally amongst destinations - HoursSelectionModeShare = "share" -) - -var ( - // ErrNullChangeAddress ChangeAddress must not be the null address - ErrNullChangeAddress = NewError(errors.New("ChangeAddress must not be the null address")) - // ErrMissingReceivers To is required - ErrMissingReceivers = NewError(errors.New("To is required")) - // ErrZeroCoinsReceiver To.Coins must not be zero - ErrZeroCoinsReceiver = NewError(errors.New("To.Coins must not be zero")) - // ErrNullAddressReceiver To.Address must not be the null address - ErrNullAddressReceiver = NewError(errors.New("To.Address must not be the null address")) - // ErrDuplicateReceiver To contains duplicate values - ErrDuplicateReceiver = NewError(errors.New("To contains duplicate values")) - // ErrReceiverZeroHoursAuto To.Hours must be zero for auto type hours selection - ErrReceiverZeroHoursAuto = NewError(errors.New("To.Hours must be zero for auto type hours selection")) - // ErrMissingHoursSelectionModeAuto HoursSelection.Mode is required for auto type hours selection - ErrMissingHoursSelectionModeAuto = NewError(errors.New("HoursSelection.Mode is required for auto type hours selection")) - // ErrInvalidHoursSelelectionMode Invalid HoursSelection.Mode - ErrInvalidHoursSelelectionMode = NewError(errors.New("Invalid HoursSelection.Mode")) - // ErrInvalidHoursSelectionModeManual HoursSelection.Mode cannot be used for manual type hours selection - ErrInvalidHoursSelectionModeManual = NewError(errors.New("HoursSelection.Mode cannot be 
used for manual type hours selection")) - // ErrInvalidHoursSelectionType Invalid HoursSelection.Type - ErrInvalidHoursSelectionType = NewError(errors.New("Invalid HoursSelection.Type")) - // ErrMissingShareFactor HoursSelection.ShareFactor must be set for share mode - ErrMissingShareFactor = NewError(errors.New("HoursSelection.ShareFactor must be set for share mode")) - // ErrInvalidShareFactor HoursSelection.ShareFactor can only be used for share mode - ErrInvalidShareFactor = NewError(errors.New("HoursSelection.ShareFactor can only be used for share mode")) - // ErrShareFactorOutOfRange HoursSelection.ShareFactor must be >= 0 and <= 1 - ErrShareFactorOutOfRange = NewError(errors.New("HoursSelection.ShareFactor must be >= 0 and <= 1")) -) - -// HoursSelection defines options for hours distribution -type HoursSelection struct { - Type string - Mode string - ShareFactor *decimal.Decimal -} - -// Params defines control parameters for transaction construction -type Params struct { - HoursSelection HoursSelection - To []coin.TransactionOutput - ChangeAddress *cipher.Address -} - -// Validate validates Params -func (c Params) Validate() error { - if c.ChangeAddress != nil && c.ChangeAddress.Null() { - return ErrNullChangeAddress - } - - if len(c.To) == 0 { - return ErrMissingReceivers - } - - for _, to := range c.To { - if to.Coins == 0 { - return ErrZeroCoinsReceiver - } - - if to.Address.Null() { - return ErrNullAddressReceiver - } - } - - // Check for duplicate outputs, a transaction can't have outputs with - // the same (address, coins, hours) - // Auto mode would distribute hours to the outputs and could hypothetically - // avoid assigning duplicate hours in many cases, but the complexity for doing - // so is very high, so also reject duplicate (address, coins) for auto mode. 
- outputs := make(map[coin.TransactionOutput]struct{}, len(c.To)) - for _, to := range c.To { - outputs[to] = struct{}{} - } - - if len(outputs) != len(c.To) { - return ErrDuplicateReceiver - } - - switch c.HoursSelection.Type { - case HoursSelectionTypeAuto: - for _, to := range c.To { - if to.Hours != 0 { - return ErrReceiverZeroHoursAuto - } - } - - switch c.HoursSelection.Mode { - case HoursSelectionModeShare: - case "": - return ErrMissingHoursSelectionModeAuto - default: - return ErrInvalidHoursSelelectionMode - } - - case HoursSelectionTypeManual: - if c.HoursSelection.Mode != "" { - return ErrInvalidHoursSelectionModeManual - } - - default: - return ErrInvalidHoursSelectionType - } - - if c.HoursSelection.ShareFactor == nil { - if c.HoursSelection.Mode == HoursSelectionModeShare { - return ErrMissingShareFactor - } - } else { - if c.HoursSelection.Mode != HoursSelectionModeShare { - return ErrInvalidShareFactor - } - - zero := decimal.New(0, 0) - one := decimal.New(1, 0) - if c.HoursSelection.ShareFactor.LessThan(zero) || c.HoursSelection.ShareFactor.GreaterThan(one) { - return ErrShareFactorOutOfRange - } - } - - return nil -} diff --git a/vendor/github.com/SkycoinProject/skycoin/src/util/apputil/apputil.go b/vendor/github.com/SkycoinProject/skycoin/src/util/apputil/apputil.go new file mode 100644 index 0000000..caf088d --- /dev/null +++ b/vendor/github.com/SkycoinProject/skycoin/src/util/apputil/apputil.go @@ -0,0 +1,56 @@ +/* +Package apputil provides utility methods for cmd applications +*/ +package apputil + +import ( + "fmt" + "os" + "os/signal" + "runtime/pprof" + "syscall" +) + +// CatchInterrupt catches CTRL-C and closes the quit channel if it occurs. +// If CTRL-C is called again, the program stack is dumped and the process panics, +// so that shutdown hangs can be diagnosed. 
+func CatchInterrupt(quit chan<- struct{}) { + sigchan := make(chan os.Signal, 1) + signal.Notify(sigchan, os.Interrupt) + <-sigchan + signal.Stop(sigchan) + close(quit) + + // If ctrl-c is called again, panic so that the program state can be examined. + // Ctrl-c would be called again if program shutdown was stuck. + go CatchInterruptPanic() +} + +// CatchInterruptPanic catches os.Interrupt and panics +func CatchInterruptPanic() { + sigchan := make(chan os.Signal, 1) + signal.Notify(sigchan, os.Interrupt) + <-sigchan + signal.Stop(sigchan) + PrintProgramStatus() + panic("SIGINT") +} + +// CatchDebug catches SIGUSR1 and prints internal program state +func CatchDebug() { + sigchan := make(chan os.Signal, 1) + //signal.Notify(sigchan, syscall.SIGUSR1) + signal.Notify(sigchan, syscall.Signal(0xa)) // SIGUSR1 = Signal(0xa) + for range sigchan { + PrintProgramStatus() + } +} + +// PrintProgramStatus prints all goroutine data to stdout +func PrintProgramStatus() { + p := pprof.Lookup("goroutine") + if err := p.WriteTo(os.Stdout, 2); err != nil { + fmt.Println("ERROR:", err) + return + } +} diff --git a/vendor/github.com/SkycoinProject/skycoin/src/util/elapse/elapser.go b/vendor/github.com/SkycoinProject/skycoin/src/util/elapse/elapser.go deleted file mode 100644 index 2f692f3..0000000 --- a/vendor/github.com/SkycoinProject/skycoin/src/util/elapse/elapser.go +++ /dev/null @@ -1,72 +0,0 @@ -/* -Package elapse provides time measuring instruments -*/ -package elapse - -import ( - "time" - - "github.com/SkycoinProject/skycoin/src/util/logging" -) - -// Elapser measures time elapsed for an operation. It is not thread-safe, use a different elapser per thread. 
-type Elapser struct { - name *string - startTime time.Time - elapsedThreshold time.Duration - Done chan bool - logger *logging.Logger -} - -// NewElapser creates an Elapser -func NewElapser(elapsedThreshold time.Duration, logger *logging.Logger) *Elapser { - elapser := &Elapser{ - elapsedThreshold: elapsedThreshold, - Done: make(chan bool, 100), - logger: logger, - } - return elapser -} - -// CheckForDone checks if the elapser has triggered and records the elapsed time -func (e *Elapser) CheckForDone() { - select { - case <-e.Done: - e.Elapsed() - default: - } -} - -// Register begins an operation to measure -func (e *Elapser) Register(name string) { - e.CheckForDone() - e.name = &name - e.startTime = time.Now() - e.Done <- true -} - -// ShowCurrentTime logs the elapsed time so far -func (e *Elapser) ShowCurrentTime(step string) { - stopTime := time.Now() - if e.name == nil { - e.logger.Warning("no registered events for elapsing, but found Elapser.ShowCurrentTime calling") - return - } - elapsed := stopTime.Sub(e.startTime) - e.logger.Infof("%s[%s] elapsed %s", *e.name, step, elapsed) - -} - -// Elapsed stops measuring an operation and logs the elapsed time if it exceeds the configured threshold -func (e *Elapser) Elapsed() { - stopTime := time.Now() - if e.name == nil { - e.logger.Warning("no registered events for elapsing, but found Elapser.Elapsed calling") - return - } - elapsed := stopTime.Sub(e.startTime) - if elapsed >= e.elapsedThreshold { - e.logger.Warningf("%s elapsed %s", *e.name, elapsed) - } - e.name = nil -} diff --git a/vendor/github.com/SkycoinProject/skycoin/src/util/fee/fee.go b/vendor/github.com/SkycoinProject/skycoin/src/util/fee/fee.go deleted file mode 100644 index 3d26873..0000000 --- a/vendor/github.com/SkycoinProject/skycoin/src/util/fee/fee.go +++ /dev/null @@ -1,99 +0,0 @@ -/* -Package fee provides methods to calculate and verify transaction fees -*/ -package fee - -import ( - "errors" - - "github.com/SkycoinProject/skycoin/src/coin" - 
"github.com/SkycoinProject/skycoin/src/util/mathutil" -) - -var ( - // ErrTxnNoFee is returned if a transaction has no coinhour fee - ErrTxnNoFee = errors.New("Transaction has zero coinhour fee") - - // ErrTxnInsufficientFee is returned if a transaction's coinhour burn fee is not enough - ErrTxnInsufficientFee = errors.New("Transaction coinhour fee minimum not met") - - // ErrTxnInsufficientCoinHours is returned if a transaction has more coinhours in its outputs than its inputs - ErrTxnInsufficientCoinHours = errors.New("Insufficient coinhours for transaction outputs") -) - -// VerifyTransactionFee performs additional transaction verification at the unconfirmed pool level. -// This checks tunable params that should prevent the transaction from -// entering the blockchain, but cannot be done at the blockchain level because -// they may be changed. -func VerifyTransactionFee(t *coin.Transaction, fee uint64, burnFactor uint32) error { - hours, err := t.OutputHours() - if err != nil { - return err - } - return VerifyTransactionFeeForHours(hours, fee, burnFactor) -} - -// VerifyTransactionFeeForHours verifies the fee given fee and hours, -// where hours is the number of hours in a transaction's outputs, -// and hours+fee is the number of hours in a transaction's inputs -func VerifyTransactionFeeForHours(hours, fee uint64, burnFactor uint32) error { - // Require non-zero coinhour fee - if fee == 0 { - return ErrTxnNoFee - } - - // Calculate total number of coinhours - total, err := mathutil.AddUint64(hours, fee) - if err != nil { - return errors.New("Hours and fee overflow") - } - - // Calculate the required fee - requiredFee := RequiredFee(total, burnFactor) - - // Ensure that the required fee is met - if fee < requiredFee { - return ErrTxnInsufficientFee - } - - return nil -} - -// RequiredFee returns the coinhours fee required for an amount of hours -// The required fee is calculated as hours/burnFactor, rounded up. 
-func RequiredFee(hours uint64, burnFactor uint32) uint64 { - feeHours := hours / uint64(burnFactor) - if hours%uint64(burnFactor) != 0 { - feeHours++ - } - - return feeHours -} - -// RemainingHours returns the amount of coinhours leftover after paying the fee for the input. -func RemainingHours(hours uint64, burnFactor uint32) uint64 { - fee := RequiredFee(hours, burnFactor) - return hours - fee -} - -// TransactionFee calculates the current transaction fee in coinhours of a Transaction. -// Returns ErrTxnInsufficientCoinHours if input hours is less than output hours. -func TransactionFee(tx *coin.Transaction, headTime uint64, inUxs coin.UxArray) (uint64, error) { - // Compute input hours - inHours, err := inUxs.CoinHours(headTime) - if err != nil { - return 0, err - } - - // Compute output hours - outHours, err := tx.OutputHours() - if err != nil { - return 0, err - } - - if inHours < outHours { - return 0, ErrTxnInsufficientCoinHours - } - - return inHours - outHours, nil -} diff --git a/vendor/github.com/SkycoinProject/skycoin/src/util/iputil/iputil.go b/vendor/github.com/SkycoinProject/skycoin/src/util/iputil/iputil.go deleted file mode 100644 index 1dccbd9..0000000 --- a/vendor/github.com/SkycoinProject/skycoin/src/util/iputil/iputil.go +++ /dev/null @@ -1,66 +0,0 @@ -/* -Package iputil provides IP address related utility methods -*/ -package iputil - -import ( - "errors" - "net" - "strconv" -) - -var ( - // ErrMissingIP IP missing from ip:port string - ErrMissingIP = errors.New("IP missing from ip:port address") - // ErrInvalidPort port invalid in ip:port string - ErrInvalidPort = errors.New("Port invalid in ip:port address") - // ErrNoLocalIP no localhost IP found in system net interfaces - ErrNoLocalIP = errors.New("No local IP found") -) - -// LocalhostIP returns the address for localhost on the machine -func LocalhostIP() (string, error) { - tt, err := net.Interfaces() - if err != nil { - return "", err - } - for _, t := range tt { - aa, err := t.Addrs() 
- if err != nil { - return "", err - } - for _, a := range aa { - if ipnet, ok := a.(*net.IPNet); ok && ipnet.IP.IsLoopback() { - return ipnet.IP.String(), nil - } - } - } - return "", ErrNoLocalIP -} - -// IsLocalhost returns true if addr is a localhost address -// Works for both ipv4 and ipv6 addresses. -func IsLocalhost(addr string) bool { - return net.ParseIP(addr).IsLoopback() || addr == "localhost" -} - -// SplitAddr splits an ip:port string to ip, port. -// Works for both ipv4 and ipv6 addresses. -// If the IP is not specified, returns an error. -func SplitAddr(addr string) (string, uint16, error) { - ip, port, err := net.SplitHostPort(addr) - if err != nil { - return "", 0, err - } - - if ip == "" { - return "", 0, ErrMissingIP - } - - port64, err := strconv.ParseUint(port, 10, 16) - if err != nil { - return "", 0, ErrInvalidPort - } - - return ip, uint16(port64), nil -} diff --git a/vendor/github.com/SkycoinProject/skycoin/src/util/timeutil/timeutil.go b/vendor/github.com/SkycoinProject/skycoin/src/util/timeutil/timeutil.go deleted file mode 100644 index 7548684..0000000 --- a/vendor/github.com/SkycoinProject/skycoin/src/util/timeutil/timeutil.go +++ /dev/null @@ -1,15 +0,0 @@ -/* -Package timeutil provides time related utility methods -*/ -package timeutil - -import "time" - -// NanoToTime converts nanoseconds to time.Time -func NanoToTime(n int64) time.Time { - zeroTime := time.Time{} - if n == zeroTime.UnixNano() { - return zeroTime - } - return time.Unix(n/int64(time.Second), n%int64(time.Second)) -} diff --git a/vendor/github.com/SkycoinProject/skycoin/src/util/useragent/useragent.go b/vendor/github.com/SkycoinProject/skycoin/src/util/useragent/useragent.go deleted file mode 100644 index b338aed..0000000 --- a/vendor/github.com/SkycoinProject/skycoin/src/util/useragent/useragent.go +++ /dev/null @@ -1,222 +0,0 @@ -// Package useragent implements methods for managing Skycoin user agents. 
-// -// A skycoin user agent has the following format: -// -// `$NAME:$VERSION[$GIT_HASH]($REMARK)` -// -// `$NAME` and `$VERSION` are required. -// -// * `$NAME` is the coin or application's name, e.g. `Skycoin`. It can contain the following characters: `A-Za-z0-9\-_+`. -// * `$VERSION` must be a valid [semver](http://semver.org/) version, e.g. `1.2.3` or `1.2.3-rc1`. -// Semver has the option of including build metadata such as the git commit hash, but this is not included by the default client. -// * `$REMARK` is optional. If not present, the enclosing brackets `()` should be omitted. -// It can contain the following characters: `A-Za-z0-9\-_+;:!$%,.=?~ ` (includes the space character). -package useragent - -import ( - "encoding/json" - "errors" - "fmt" - "regexp" - "strings" - - "github.com/blang/semver" -) - -const ( - // IllegalChars are printable ascii characters forbidden from a user agent string. All other ascii or bytes are also forbidden. - IllegalChars = `<>&"'#@|{}` + "`" - // MaxLen the maximum length of a user agent - MaxLen = 256 - - // NamePattern is the regex pattern for the name portion of the user agent - NamePattern = `[A-Za-z0-9\-_+]+` - // VersionPattern is the regex pattern for the version portion of the user agent - VersionPattern = `[0-9]+\.[0-9]+\.[0-9][A-Za-z0-9\-.+]*` - // RemarkPattern is the regex pattern for the remark portion of the user agent - RemarkPattern = `[A-Za-z0-9\-_+;:!$%,.=?~ ]+` - - // Pattern is the regex pattern for the user agent in entirety - Pattern = `^(` + NamePattern + `):(` + VersionPattern + `)(\(` + RemarkPattern + `\))?$` -) - -var ( - illegalCharsSanitizeRe *regexp.Regexp - illegalCharsCheckRe *regexp.Regexp - re *regexp.Regexp - - // ErrIllegalChars user agent contains illegal characters - ErrIllegalChars = errors.New("User agent has invalid character(s)") - // ErrTooLong user agent exceeds a certain max length - ErrTooLong = errors.New("User agent is too long") - // ErrMalformed user agent does not match 
the user agent pattern - ErrMalformed = errors.New("User agent is malformed") - // ErrEmpty user agent is an empty string - ErrEmpty = errors.New("User agent is an empty string") -) - -func init() { - illegalCharsSanitizeRe = regexp.MustCompile(fmt.Sprintf("([^[:print:]]|[%s])+", IllegalChars)) - illegalCharsCheckRe = regexp.MustCompile(fmt.Sprintf("[^[:print:]]|[%s]", IllegalChars)) - re = regexp.MustCompile(Pattern) -} - -// Data holds parsed user agent data -type Data struct { - Coin string - Version string - Remark string -} - -// Empty returns true if the Data is empty -func (d Data) Empty() bool { - return d == (Data{}) -} - -// Build builds a user agent string. Returns an error if the user agent would be invalid. -func (d Data) Build() (string, error) { - if d.Coin == "" { - return "", errors.New("missing coin name") - } - if d.Version == "" { - return "", errors.New("missing version") - } - - _, err := semver.Parse(d.Version) - if err != nil { - return "", err - } - - s := d.build() - - if err := validate(s); err != nil { - return "", err - } - - d2, err := Parse(s) - if err != nil { - return "", fmt.Errorf("Built a user agent that fails to parse: %q %v", s, err) - } - - if d2 != d { - return "", errors.New("Built a user agent that does not parse to the original format") - } - - return s, nil -} - -// MustBuild calls Build and panics on error -func (d Data) MustBuild() string { - s, err := d.Build() - if err != nil { - panic(err) - } - return s -} - -func (d Data) build() string { - if d.Coin == "" || d.Version == "" { - return "" - } - - remark := d.Remark - if remark != "" { - remark = fmt.Sprintf("(%s)", remark) - } - - return fmt.Sprintf("%s:%s%s", d.Coin, d.Version, remark) -} - -// MarshalJSON marshals Data as JSON -func (d Data) MarshalJSON() ([]byte, error) { - return []byte(fmt.Sprintf(`"%s"`, d.build())), nil -} - -// UnmarshalJSON unmarshals []byte to Data -func (d *Data) UnmarshalJSON(v []byte) error { - var s string - if err := 
json.Unmarshal(v, &s); err != nil { - return err - } - - if s == "" { - return nil - } - - parsed, err := Parse(s) - if err != nil { - return err - } - - *d = parsed - return nil -} - -// Parse parses a user agent string to Data -func Parse(userAgent string) (Data, error) { - if len(userAgent) == 0 { - return Data{}, ErrEmpty - } - - if err := validate(userAgent); err != nil { - return Data{}, err - } - - subs := re.FindAllStringSubmatch(userAgent, -1) - - if len(subs) == 0 { - return Data{}, ErrMalformed - } - - m := subs[0] - - if m[0] != userAgent { - // This should not occur since the pattern has ^$ boundaries applied, but just in case - return Data{}, errors.New("User agent did not match pattern completely") - } - - coin := m[1] - version := m[2] - remark := m[3] - - if _, err := semver.Parse(version); err != nil { - return Data{}, fmt.Errorf("User agent version is not a valid semver: %v", err) - } - - remark = strings.TrimPrefix(remark, "(") - remark = strings.TrimSuffix(remark, ")") - - return Data{ - Coin: coin, - Version: version, - Remark: remark, - }, nil -} - -// MustParse parses and panics on error -func MustParse(userAgent string) Data { - d, err := Parse(userAgent) - if err != nil { - panic(err) - } - - return d -} - -// validate validates a user agent string. The user agent must not contain illegal characters. 
-func validate(userAgent string) error { - if len(userAgent) > MaxLen { - return ErrTooLong - } - - if illegalCharsCheckRe.MatchString(userAgent) { - return ErrIllegalChars - } - - return nil -} - -// Sanitize removes illegal characters from a user agent string -func Sanitize(userAgent string) string { - return illegalCharsSanitizeRe.ReplaceAllLiteralString(userAgent, "") -} diff --git a/vendor/github.com/SkycoinProject/skycoin/src/visor/blockchain.go b/vendor/github.com/SkycoinProject/skycoin/src/visor/blockchain.go deleted file mode 100644 index 0d251b8..0000000 --- a/vendor/github.com/SkycoinProject/skycoin/src/visor/blockchain.go +++ /dev/null @@ -1,852 +0,0 @@ -package visor - -import ( - "bytes" - "errors" - "fmt" - "sync" - - "github.com/SkycoinProject/skycoin/src/cipher" - "github.com/SkycoinProject/skycoin/src/coin" - "github.com/SkycoinProject/skycoin/src/params" - "github.com/SkycoinProject/skycoin/src/util/fee" - "github.com/SkycoinProject/skycoin/src/visor/blockdb" - "github.com/SkycoinProject/skycoin/src/visor/dbutil" - "github.com/SkycoinProject/skycoin/src/visor/historydb" -) - -const ( - // DebugLevel1 checks for extremely unlikely conditions (10e-40) - DebugLevel1 = true - // DebugLevel2 enable checks for impossible conditions - DebugLevel2 = true -) - -var ( - // ErrVerifyStopped is returned when database verification is interrupted - ErrVerifyStopped = errors.New("database verification stopped") -) - -// ErrBlockNotExist may be returned if a block is not found -type ErrBlockNotExist struct { - Seq uint64 -} - -// NewErrBlockNotExist creates an ErrBlockNotExist based on an unknown block sequence -func NewErrBlockNotExist(seq uint64) ErrBlockNotExist { - return ErrBlockNotExist{ - Seq: seq, - } -} - -func (e ErrBlockNotExist) Error() string { - return fmt.Sprintf("block does not exist seq=%d", e.Seq) -} - -//Warning: 10e6 is 10 million, 1e6 is 1 million - -// Note: DebugLevel1 adds additional checks for hash collisions that -// are unlikely to 
occur. DebugLevel2 adds checks for conditions that -// can only occur through programmer error and malice. - -// Note: a droplet is the base coin unit. Each Skycoin is one million droplets - -//Termonology: -// UXTO - unspent transaction outputs -// UX - outputs10 -// TX - transactions - -//Notes: -// transactions (TX) consume outputs (UX) and produce new outputs (UX) -// Tx.Uxi() - set of outputs consumed by transaction -// Tx.Uxo() - set of outputs created by transaction - -// chainStore -type chainStore interface { - Head(*dbutil.Tx) (*coin.SignedBlock, error) - HeadSeq(*dbutil.Tx) (uint64, bool, error) - Len(*dbutil.Tx) (uint64, error) - AddBlock(*dbutil.Tx, *coin.SignedBlock) error - GetBlockByHash(*dbutil.Tx, cipher.SHA256) (*coin.Block, error) - GetSignedBlockByHash(*dbutil.Tx, cipher.SHA256) (*coin.SignedBlock, error) - GetSignedBlockBySeq(*dbutil.Tx, uint64) (*coin.SignedBlock, error) - UnspentPool() blockdb.UnspentPooler - GetGenesisBlock(*dbutil.Tx) (*coin.SignedBlock, error) - GetBlockSignature(*dbutil.Tx, *coin.Block) (cipher.Sig, bool, error) - ForEachBlock(*dbutil.Tx, func(*coin.Block) error) error -} - -// DefaultWalker default blockchain walker -func DefaultWalker(tx *dbutil.Tx, hps []coin.HashPair) (cipher.SHA256, bool) { - if len(hps) == 0 { - return cipher.SHA256{}, false - } - return hps[0].Hash, true -} - -// CreateBuckets creates the buckets used by the blockdb -func CreateBuckets(db *dbutil.DB) error { - return db.Update("CreateBuckets", func(tx *dbutil.Tx) error { - if err := historydb.CreateBuckets(tx); err != nil { - return err - } - - if err := blockdb.CreateBuckets(tx); err != nil { - return err - } - - return dbutil.CreateBuckets(tx, [][]byte{ - UnconfirmedTxnsBkt, - UnconfirmedUnspentsBkt, - }) - }) -} - -// BlockchainConfig configures Blockchain options -type BlockchainConfig struct { - // Arbitrating mode: if in arbitrating mode, when block publishing node execute blocks, - // the invalid transaction will be skipped and continue the 
next; otherwise, - // node will throw the error and return. - Arbitrating bool - Pubkey cipher.PubKey -} - -// Blockchain maintains blockchain and provides apis for accessing the chain. -type Blockchain struct { - db *dbutil.DB - cfg BlockchainConfig - store chainStore -} - -// NewBlockchain creates a Blockchain -func NewBlockchain(db *dbutil.DB, cfg BlockchainConfig) (*Blockchain, error) { - chainstore, err := blockdb.NewBlockchain(db, DefaultWalker) - if err != nil { - return nil, err - } - - return &Blockchain{ - cfg: cfg, - db: db, - store: chainstore, - }, nil -} - -// GetGenesisBlock returns genesis block -func (bc *Blockchain) GetGenesisBlock(tx *dbutil.Tx) (*coin.SignedBlock, error) { - return bc.store.GetGenesisBlock(tx) -} - -// GetSignedBlockByHash returns block of given hash -func (bc *Blockchain) GetSignedBlockByHash(tx *dbutil.Tx, hash cipher.SHA256) (*coin.SignedBlock, error) { - return bc.store.GetSignedBlockByHash(tx, hash) -} - -// GetSignedBlockBySeq returns block of given seq -func (bc *Blockchain) GetSignedBlockBySeq(tx *dbutil.Tx, seq uint64) (*coin.SignedBlock, error) { - return bc.store.GetSignedBlockBySeq(tx, seq) -} - -// Head returns the most recent confirmed block -func (bc Blockchain) Head(tx *dbutil.Tx) (*coin.SignedBlock, error) { - return bc.store.Head(tx) -} - -// Unspent returns the unspent outputs pool -func (bc *Blockchain) Unspent() blockdb.UnspentPooler { - return bc.store.UnspentPool() -} - -// Len returns the length of current blockchain. 
-func (bc Blockchain) Len(tx *dbutil.Tx) (uint64, error) { - return bc.store.Len(tx) -} - -// HeadSeq returns the sequence of head block -func (bc *Blockchain) HeadSeq(tx *dbutil.Tx) (uint64, bool, error) { - return bc.store.HeadSeq(tx) -} - -// Time returns time of last block -// used as system clock indepedent clock for coin hour calculations -// TODO: Deprecate -func (bc *Blockchain) Time(tx *dbutil.Tx) (uint64, error) { - b, err := bc.Head(tx) - if err != nil { - if err == blockdb.ErrNoHeadBlock { - return 0, nil - } - return 0, err - } - - return b.Time(), nil -} - -// NewBlock creates a Block given an array of Transactions. -// Only hard constraints are applied to transactions in the block. -// The caller of this function should apply any additional soft constraints, -// and choose which transactions to place into the block. -func (bc Blockchain) NewBlock(tx *dbutil.Tx, txns coin.Transactions, currentTime uint64) (*coin.Block, error) { - if len(txns) == 0 { - return nil, errors.New("No transactions") - } - - head, err := bc.store.Head(tx) - if err != nil { - return nil, err - } - - if currentTime <= head.Time() { - return nil, errors.New("Time can only move forward") - } - - txns, err = bc.processTransactions(tx, txns) - if err != nil { - return nil, err - } - - uxHash, err := bc.Unspent().GetUxHash(tx) - if err != nil { - return nil, err - } - - feeCalc := bc.TransactionFee(tx, head.Time()) - - b, err := coin.NewBlock(head.Block, currentTime, uxHash, txns, feeCalc) - if err != nil { - return nil, err - } - - // make sure block is valid - if DebugLevel2 { - if err := bc.verifyBlockHeader(tx, *b); err != nil { - return nil, err - } - txns, err := bc.processTransactions(tx, b.Body.Transactions) - if err != nil { - logger.Panicf("bc.processTransactions second verification call failed: %v", err) - } - b.Body.Transactions = txns - } - return b, nil -} - -func (bc *Blockchain) processBlock(tx *dbutil.Tx, b coin.SignedBlock) (coin.SignedBlock, error) { - length, err 
:= bc.Len(tx) - if err != nil { - return coin.SignedBlock{}, err - } - - if length > 0 { - if isGenesis, err := bc.isGenesisBlock(tx, b.Block); err != nil { - return coin.SignedBlock{}, err - } else if isGenesis { - err := errors.New("Attempted to process genesis block after blockchain has genesis block") - logger.Warning(err.Error()) - return coin.SignedBlock{}, err - } else { - if err := bc.verifyBlockHeader(tx, b.Block); err != nil { - return coin.SignedBlock{}, err - } - - txns, err := bc.processTransactions(tx, b.Body.Transactions) - if err != nil { - return coin.SignedBlock{}, err - } - b.Body.Transactions = txns - - if err := bc.verifyUxHash(tx, b.Block); err != nil { - return coin.SignedBlock{}, err - } - - } - } - - return b, nil -} - -// ExecuteBlock attempts to append block to blockchain with *dbutil.Tx -func (bc *Blockchain) ExecuteBlock(tx *dbutil.Tx, sb *coin.SignedBlock) error { - length, err := bc.Len(tx) - if err != nil { - return err - } - - if length > 0 { - head, err := bc.Head(tx) - if err != nil { - return err - } - - // TODO -- why do we modify the block here? - sb.Head.PrevHash = head.HashHeader() - } - - nb, err := bc.processBlock(tx, *sb) - if err != nil { - return err - } - - if err := bc.store.AddBlock(tx, &nb); err != nil { - return err - } - - return nil -} - -// VerifyBlock verifies specified block against current state of blockchain. -func (bc *Blockchain) VerifyBlock(tx *dbutil.Tx, sb *coin.SignedBlock) error { - _, err := bc.processBlock(tx, *sb) - - return err -} - -// isGenesisBlock checks if the block is genesis block -func (bc Blockchain) isGenesisBlock(tx *dbutil.Tx, b coin.Block) (bool, error) { - gb, err := bc.store.GetGenesisBlock(tx) - if err != nil { - return false, err - } - if gb == nil { - return false, nil - } - - return gb.HashHeader() == b.HashHeader(), nil -} - -// Compares the state of the current UxHash hash to state of unspent -// output pool. 
-func (bc Blockchain) verifyUxHash(tx *dbutil.Tx, b coin.Block) error { - uxHash, err := bc.Unspent().GetUxHash(tx) - if err != nil { - return err - } - - if !bytes.Equal(b.Head.UxHash[:], uxHash[:]) { - return errors.New("UxHash does not match") - } - - return nil -} - -// VerifyBlockTxnConstraints checks that the transaction does not violate hard constraints, -// for transactions that are already included in a block. -func (bc Blockchain) VerifyBlockTxnConstraints(tx *dbutil.Tx, txn coin.Transaction) error { - // NOTE: Unspent().GetArray() returns an error if not all txn.In can be found - // This prevents double spends - uxIn, err := bc.Unspent().GetArray(tx, txn.In) - if err != nil { - switch err.(type) { - case blockdb.ErrUnspentNotExist: - return NewErrTxnViolatesHardConstraint(err) - default: - return err - } - } - - head, err := bc.Head(tx) - if err != nil { - return err - } - - return bc.verifyBlockTxnHardConstraints(tx, txn, head, uxIn) -} - -func (bc Blockchain) verifyBlockTxnHardConstraints(tx *dbutil.Tx, txn coin.Transaction, head *coin.SignedBlock, uxIn coin.UxArray) error { - if err := VerifyBlockTxnConstraints(txn, head.Head, uxIn); err != nil { - return err - } - - if DebugLevel1 { - // Check that new unspents don't collide with existing. - // This should not occur but is a sanity check. - // NOTE: this is not in the top-level VerifyBlockTxnConstraints - // because it relies on the unspent pool to check for existence. - // For remote callers such as the CLI, they'd need to download the whole - // unspent pool or make a separate API call to check for duplicate unspents. 
- uxOuts := coin.CreateUnspents(head.Head, txn) - for i := range uxOuts { - if contains, err := bc.Unspent().Contains(tx, uxOuts[i].Hash()); err != nil { - return err - } else if contains { - err := errors.New("New unspent collides with existing unspent") - return NewErrTxnViolatesHardConstraint(err) - } - } - } - - return nil -} - -// VerifySingleTxnHardConstraints checks that the transaction does not violate hard constraints. -// for transactions that are not included in a block. -func (bc Blockchain) VerifySingleTxnHardConstraints(tx *dbutil.Tx, txn coin.Transaction, signed TxnSignedFlag) error { - // NOTE: Unspent().GetArray() returns an error if not all txn.In can be found - // This prevents double spends - uxIn, err := bc.Unspent().GetArray(tx, txn.In) - if err != nil { - switch err.(type) { - case blockdb.ErrUnspentNotExist: - return NewErrTxnViolatesHardConstraint(err) - default: - return err - } - } - - head, err := bc.Head(tx) - if err != nil { - return err - } - - return bc.verifySingleTxnHardConstraints(tx, txn, head, uxIn, signed) -} - -// VerifySingleTxnSoftHardConstraints checks that the transaction does not violate hard or soft constraints, -// for transactions that are not included in a block. -// Hard constraints are checked before soft constraints. 
-func (bc Blockchain) VerifySingleTxnSoftHardConstraints(tx *dbutil.Tx, txn coin.Transaction, distParams params.Distribution, verifyParams params.VerifyTxn, signed TxnSignedFlag) (*coin.SignedBlock, coin.UxArray, error) { - // NOTE: Unspent().GetArray() returns an error if not all txn.In can be found - // This prevents double spends - uxIn, err := bc.Unspent().GetArray(tx, txn.In) - if err != nil { - return nil, nil, NewErrTxnViolatesHardConstraint(err) - } - - head, err := bc.Head(tx) - if err != nil { - return nil, nil, err - } - - // Hard constraints must be checked before soft constraints - if err := bc.verifySingleTxnHardConstraints(tx, txn, head, uxIn, signed); err != nil { - return nil, nil, err - } - - if err := VerifySingleTxnSoftConstraints(txn, head.Time(), uxIn, distParams, verifyParams); err != nil { - return nil, nil, err - } - - return head, uxIn, nil -} - -func (bc Blockchain) verifySingleTxnHardConstraints(tx *dbutil.Tx, txn coin.Transaction, head *coin.SignedBlock, uxIn coin.UxArray, signed TxnSignedFlag) error { - if err := VerifySingleTxnHardConstraints(txn, head.Head, uxIn, signed); err != nil { - return err - } - - if DebugLevel1 { - // Check that new unspents don't collide with existing. - // This should not occur but is a sanity check. - // NOTE: this is not in the top-level VerifySingleTxnHardConstraints - // because it relies on the unspent pool to check for existence. - // For remote callers such as the CLI, they'd need to download the whole - // unspent pool or make a separate API call to check for duplicate unspents. - uxOuts := coin.CreateUnspents(head.Head, txn) - for i := range uxOuts { - if contains, err := bc.Unspent().Contains(tx, uxOuts[i].Hash()); err != nil { - return err - } else if contains { - err := errors.New("New unspent collides with existing unspent") - return NewErrTxnViolatesHardConstraint(err) - } - } - } - - return nil -} - -// GetBlocks returns blocks matching seqs. If any block is not found, returns an error. 
-func (bc Blockchain) GetBlocks(tx *dbutil.Tx, seqs []uint64) ([]coin.SignedBlock, error) { - blocks := make([]coin.SignedBlock, len(seqs)) - - for i, s := range seqs { - b, err := bc.store.GetSignedBlockBySeq(tx, s) - if err != nil { - return nil, err - } - - if b == nil { - return nil, NewErrBlockNotExist(s) - } - - blocks[i] = *b - } - - return blocks, nil -} - -// GetBlocksInRange return blocks whose seq are in the range of start and end. -func (bc Blockchain) GetBlocksInRange(tx *dbutil.Tx, start, end uint64) ([]coin.SignedBlock, error) { - if start > end { - return nil, nil - } - - var blocks []coin.SignedBlock - for i := start; i <= end; i++ { - b, err := bc.store.GetSignedBlockBySeq(tx, i) - if err != nil { - logger.WithError(err).Error("bc.store.GetSignedBlockBySeq failed") - return nil, err - } - - if b == nil { - break - } - - blocks = append(blocks, *b) - } - - return blocks, nil -} - -// GetLastBlocks return the latest N blocks. -func (bc Blockchain) GetLastBlocks(tx *dbutil.Tx, num uint64) ([]coin.SignedBlock, error) { - if num == 0 { - return nil, nil - } - - end, ok, err := bc.HeadSeq(tx) - if err != nil { - return nil, err - } - if !ok { - return nil, nil - } - - start := int(end-num) + 1 - if start < 0 { - start = 0 - } - - return bc.GetBlocksInRange(tx, uint64(start), end) -} - -/* Private */ - -// Validates a set of Transactions, individually, against each other and -// against the Blockchain. If firstFail is true, it will return an error -// as soon as it encounters one. Else, it will return an array of -// Transactions that are valid as a whole. It may return an error if -// firstFalse is false, if there is no way to filter the txns into a valid -// array, i.e. processTransactions(processTransactions(txn, false), true) -// should not result in an error, unless all txns are invalid. 
-// TODO: -// - move arbitration to visor -// - blockchain should have strict checking -func (bc Blockchain) processTransactions(tx *dbutil.Tx, txs coin.Transactions) (coin.Transactions, error) { - // copy txs so that the following code won't modify the original txns - txns := make(coin.Transactions, len(txs)) - copy(txns, txs) - - head, err := bc.store.Head(tx) - if err != nil { - return nil, err - } - - // Transactions need to be sorted by fee and hash before arbitrating - if bc.cfg.Arbitrating { - txns, err = coin.SortTransactions(txns, bc.TransactionFee(tx, head.Time())) - if err != nil { - logger.Critical().WithError(err).Error("processTransactions: coin.SortTransactions failed") - return nil, err - } - } - - //TODO: audit - if len(txns) == 0 { - if bc.cfg.Arbitrating { - return txns, nil - } - - // If there are no transactions, a block should not be made - return nil, errors.New("No transactions") - } - - skip := make(map[int]struct{}) - uxHashes := make(coin.UxHashSet, len(txns)) - for i, txn := range txns { - // Check the transaction against itself. 
This covers the hash, - // signature indices and duplicate spends within itself - if err := bc.VerifyBlockTxnConstraints(tx, txn); err != nil { - switch err.(type) { - case ErrTxnViolatesSoftConstraint: - logger.Critical().WithError(err).Panic("bc.VerifyBlockTxnConstraints should not return a ErrTxnViolatesSoftConstraint error") - case ErrTxnViolatesHardConstraint: - if bc.cfg.Arbitrating { - skip[i] = struct{}{} - continue - } - } - - return nil, err - } - - // Check that each pending unspent will be unique - uxb := coin.UxBody{ - SrcTransaction: txn.Hash(), - } - - for _, to := range txn.Out { - uxb.Coins = to.Coins - uxb.Hours = to.Hours - uxb.Address = to.Address - - h := uxb.Hash() - _, exists := uxHashes[h] - if exists { - if bc.cfg.Arbitrating { - skip[i] = struct{}{} - continue - } else { - return nil, errors.New("Duplicate unspent output across transactions") - } - } - - if DebugLevel1 { - // Check that the expected unspent is not already in the pool. - // This should never happen because its a hash collision - if contains, err := bc.Unspent().Contains(tx, h); err != nil { - return nil, err - } else if contains { - if bc.cfg.Arbitrating { - skip[i] = struct{}{} - continue - } else { - return nil, errors.New("Output hash is in the UnspentPool") - } - } - } - - uxHashes[h] = struct{}{} - } - } - - // Filter invalid transactions before arbitrating between colliding ones - if len(skip) > 0 { - newtxns := make(coin.Transactions, len(txns)-len(skip)) - j := 0 - for i := range txns { - if _, shouldSkip := skip[i]; !shouldSkip { - newtxns[j] = txns[i] - j++ - } - } - txns = newtxns - skip = make(map[int]struct{}) - } - - // Check to ensure that there are no duplicate spends in the entire block, - // and that we aren't creating duplicate outputs. 
Duplicate outputs - // within a single Transaction are already checked by VerifyBlockTxnConstraints - hashes := txns.Hashes() - for i := 0; i < len(txns)-1; i++ { - s := txns[i] - for j := i + 1; j < len(txns); j++ { - t := txns[j] - if DebugLevel1 { - if hashes[i] == hashes[j] { - // This is a non-recoverable error for filtering, and - // should never occur. It indicates a hash collision - // amongst different txns. Duplicate transactions are - // caught earlier, when duplicate expected outputs are - // checked for, and will not trigger this. - return nil, errors.New("Unexpected duplicate transaction") - } - } - for a := range s.In { - for b := range t.In { - if s.In[a] == t.In[b] { - if bc.cfg.Arbitrating { - // The txn with the highest fee and lowest hash - // is chosen when attempting a double spend. - // Since the txns are sorted, we skip the 2nd - // iterable - skip[j] = struct{}{} - } else { - return nil, errors.New("Cannot spend output twice in the same block") - } - } - } - } - } - } - - // Filter the final results, if necessary - if len(skip) > 0 { - newtxns := make(coin.Transactions, 0, len(txns)-len(skip)) - for i := range txns { - if _, shouldSkip := skip[i]; !shouldSkip { - newtxns = append(newtxns, txns[i]) - } - } - return newtxns, nil - } - - return txns, nil -} - -// TransactionFee calculates the current transaction fee in coinhours of a Transaction -func (bc Blockchain) TransactionFee(tx *dbutil.Tx, headTime uint64) coin.FeeCalculator { - return func(txn *coin.Transaction) (uint64, error) { - inUxs, err := bc.Unspent().GetArray(tx, txn.In) - if err != nil { - return 0, err - } - - return fee.TransactionFee(txn, headTime, inUxs) - } -} - -// VerifySignature checks that BlockSigs state correspond with coin.Blockchain state -// and that all signatures are valid. 
-func (bc *Blockchain) VerifySignature(block *coin.SignedBlock) error { - err := block.VerifySignature(bc.cfg.Pubkey) - if err != nil { - logger.Errorf("Blockchain signature verification failed for block %d: %v", block.Head.BkSeq, err) - } - return err -} - -// WalkChain walk through the blockchain concurrently -// The quit channel is optional and if closed, this method still stop. -func (bc *Blockchain) WalkChain(workers int, f func(*dbutil.Tx, *coin.SignedBlock) error, quit chan struct{}) error { - if quit == nil { - quit = make(chan struct{}) - } - - signedBlockC := make(chan *coin.SignedBlock, 100) - errC := make(chan error, 100) - interrupt := make(chan struct{}) - verifyDone := make(chan struct{}) - - // Verify block signatures in a worker pool - var workerWg sync.WaitGroup - workerWg.Add(workers) - for i := 0; i < workers; i++ { - go func() { - defer workerWg.Done() - if err := bc.db.View("WalkChain verify blocks", func(tx *dbutil.Tx) error { - for b := range signedBlockC { - if err := f(tx, b); err != nil { - // if err := cipher.VerifyPubKeySignedHash(bc.cfg.Pubkey, sh.sig, sh.hash); err != nil { - // logger.Errorf("Signature verification failed: %v", err) - select { - case errC <- err: - default: - } - } - } - return nil - }); err != nil { - logger.WithError(err).Error("WalkChain verify blocks db transaction failed") - } - }() - } - - // Wait for verification worker goroutines to finish - var wg sync.WaitGroup - wg.Add(1) - go func() { - defer wg.Done() - workerWg.Wait() - close(verifyDone) - }() - - // Iterate all blocks stored in the "blocks" bucket - // * Detect if a corresponding signature is missing from the signatures bucket - // * Verify the signature for the block - wg.Add(1) - go func() { - if err := bc.db.View("WalkChain get blocks", func(tx *dbutil.Tx) error { - if length, err := bc.Len(tx); err != nil { - return err - } else if length == 0 { - return nil - } - defer wg.Done() - defer close(signedBlockC) - - errInterrupted := 
errors.New("goroutine was stopped") - - if err := bc.store.ForEachBlock(tx, func(block *coin.Block) error { - sig, ok, err := bc.store.GetBlockSignature(tx, block) - if err != nil { - return err - } - if !ok { - return blockdb.NewErrMissingSignature(block) - } - - signedBlock := &coin.SignedBlock{ - Sig: sig, - Block: *block, - } - - select { - case signedBlockC <- signedBlock: - return nil - case <-quit: - return errInterrupted - case <-interrupt: - return errInterrupted - } - }); err != nil && err != errInterrupted { - switch err.(type) { - case blockdb.ErrMissingSignature: - default: - logger.Errorf("bc.store.ForEachBlock failed: %v", err) - } - select { - case errC <- err: - default: - } - } - return nil - }); err != nil { - logger.WithError(err).Error("WalkChain get blocks db transaction failed") - } - }() - - var err error - select { - case err = <-errC: - if err != nil { - break - } - case <-quit: - err = ErrVerifyStopped - break - case <-verifyDone: - break - } - - close(interrupt) - wg.Wait() - return err -} - -// VerifyBlockHeader Returns error if the BlockHeader is not valid -func (bc Blockchain) verifyBlockHeader(tx *dbutil.Tx, b coin.Block) error { - head, err := bc.Head(tx) - if err != nil { - return err - } - - //check BkSeq - if b.Head.BkSeq != head.Head.BkSeq+1 { - return errors.New("BkSeq invalid") - } - //check Time, only requirement is that its monotonely increasing - if b.Head.Time <= head.Head.Time { - return errors.New("Block time must be > head time") - } - // Check block hash against previous head - if b.Head.PrevHash != head.HashHeader() { - return errors.New("PrevHash does not match current head") - } - - if b.Body.Hash() != b.Head.BodyHash { - return errors.New("Computed body hash does not match") - } - return nil -} diff --git a/vendor/github.com/SkycoinProject/skycoin/src/visor/blockdb/block_skyencoder.go b/vendor/github.com/SkycoinProject/skycoin/src/visor/blockdb/block_skyencoder.go deleted file mode 100644 index 313dbff..0000000 --- 
a/vendor/github.com/SkycoinProject/skycoin/src/visor/blockdb/block_skyencoder.go +++ /dev/null @@ -1,513 +0,0 @@ -// Code generated by github.com/SkycoinProject/skyencoder. DO NOT EDIT. - -package blockdb - -import ( - "errors" - "math" - - "github.com/SkycoinProject/skycoin/src/cipher" - "github.com/SkycoinProject/skycoin/src/cipher/encoder" - "github.com/SkycoinProject/skycoin/src/coin" -) - -// encodeSizeBlock computes the size of an encoded object of type Block -func encodeSizeBlock(obj *coin.Block) uint64 { - i0 := uint64(0) - - // obj.Head.Version - i0 += 4 - - // obj.Head.Time - i0 += 8 - - // obj.Head.BkSeq - i0 += 8 - - // obj.Head.Fee - i0 += 8 - - // obj.Head.PrevHash - i0 += 32 - - // obj.Head.BodyHash - i0 += 32 - - // obj.Head.UxHash - i0 += 32 - - // obj.Body.Transactions - i0 += 4 - for _, x1 := range obj.Body.Transactions { - i1 := uint64(0) - - // x1.Length - i1 += 4 - - // x1.Type - i1++ - - // x1.InnerHash - i1 += 32 - - // x1.Sigs - i1 += 4 - { - i2 := uint64(0) - - // x2 - i2 += 65 - - i1 += uint64(len(x1.Sigs)) * i2 - } - - // x1.In - i1 += 4 - { - i2 := uint64(0) - - // x2 - i2 += 32 - - i1 += uint64(len(x1.In)) * i2 - } - - // x1.Out - i1 += 4 - { - i2 := uint64(0) - - // x2.Address.Version - i2++ - - // x2.Address.Key - i2 += 20 - - // x2.Coins - i2 += 8 - - // x2.Hours - i2 += 8 - - i1 += uint64(len(x1.Out)) * i2 - } - - i0 += i1 - } - - return i0 -} - -// encodeBlock encodes an object of type Block to a buffer allocated to the exact size -// required to encode the object. -func encodeBlock(obj *coin.Block) ([]byte, error) { - n := encodeSizeBlock(obj) - buf := make([]byte, n) - - if err := encodeBlockToBuffer(buf, obj); err != nil { - return nil, err - } - - return buf, nil -} - -// encodeBlockToBuffer encodes an object of type Block to a []byte buffer. -// The buffer must be large enough to encode the object, otherwise an error is returned. 
-func encodeBlockToBuffer(buf []byte, obj *coin.Block) error { - if uint64(len(buf)) < encodeSizeBlock(obj) { - return encoder.ErrBufferUnderflow - } - - e := &encoder.Encoder{ - Buffer: buf[:], - } - - // obj.Head.Version - e.Uint32(obj.Head.Version) - - // obj.Head.Time - e.Uint64(obj.Head.Time) - - // obj.Head.BkSeq - e.Uint64(obj.Head.BkSeq) - - // obj.Head.Fee - e.Uint64(obj.Head.Fee) - - // obj.Head.PrevHash - e.CopyBytes(obj.Head.PrevHash[:]) - - // obj.Head.BodyHash - e.CopyBytes(obj.Head.BodyHash[:]) - - // obj.Head.UxHash - e.CopyBytes(obj.Head.UxHash[:]) - - // obj.Body.Transactions maxlen check - if len(obj.Body.Transactions) > 65535 { - return encoder.ErrMaxLenExceeded - } - - // obj.Body.Transactions length check - if uint64(len(obj.Body.Transactions)) > math.MaxUint32 { - return errors.New("obj.Body.Transactions length exceeds math.MaxUint32") - } - - // obj.Body.Transactions length - e.Uint32(uint32(len(obj.Body.Transactions))) - - // obj.Body.Transactions - for _, x := range obj.Body.Transactions { - - // x.Length - e.Uint32(x.Length) - - // x.Type - e.Uint8(x.Type) - - // x.InnerHash - e.CopyBytes(x.InnerHash[:]) - - // x.Sigs maxlen check - if len(x.Sigs) > 65535 { - return encoder.ErrMaxLenExceeded - } - - // x.Sigs length check - if uint64(len(x.Sigs)) > math.MaxUint32 { - return errors.New("x.Sigs length exceeds math.MaxUint32") - } - - // x.Sigs length - e.Uint32(uint32(len(x.Sigs))) - - // x.Sigs - for _, x := range x.Sigs { - - // x - e.CopyBytes(x[:]) - - } - - // x.In maxlen check - if len(x.In) > 65535 { - return encoder.ErrMaxLenExceeded - } - - // x.In length check - if uint64(len(x.In)) > math.MaxUint32 { - return errors.New("x.In length exceeds math.MaxUint32") - } - - // x.In length - e.Uint32(uint32(len(x.In))) - - // x.In - for _, x := range x.In { - - // x - e.CopyBytes(x[:]) - - } - - // x.Out maxlen check - if len(x.Out) > 65535 { - return encoder.ErrMaxLenExceeded - } - - // x.Out length check - if uint64(len(x.Out)) > 
math.MaxUint32 { - return errors.New("x.Out length exceeds math.MaxUint32") - } - - // x.Out length - e.Uint32(uint32(len(x.Out))) - - // x.Out - for _, x := range x.Out { - - // x.Address.Version - e.Uint8(x.Address.Version) - - // x.Address.Key - e.CopyBytes(x.Address.Key[:]) - - // x.Coins - e.Uint64(x.Coins) - - // x.Hours - e.Uint64(x.Hours) - - } - - } - - return nil -} - -// decodeBlock decodes an object of type Block from a buffer. -// Returns the number of bytes used from the buffer to decode the object. -// If the buffer not long enough to decode the object, returns encoder.ErrBufferUnderflow. -func decodeBlock(buf []byte, obj *coin.Block) (uint64, error) { - d := &encoder.Decoder{ - Buffer: buf[:], - } - - { - // obj.Head.Version - i, err := d.Uint32() - if err != nil { - return 0, err - } - obj.Head.Version = i - } - - { - // obj.Head.Time - i, err := d.Uint64() - if err != nil { - return 0, err - } - obj.Head.Time = i - } - - { - // obj.Head.BkSeq - i, err := d.Uint64() - if err != nil { - return 0, err - } - obj.Head.BkSeq = i - } - - { - // obj.Head.Fee - i, err := d.Uint64() - if err != nil { - return 0, err - } - obj.Head.Fee = i - } - - { - // obj.Head.PrevHash - if len(d.Buffer) < len(obj.Head.PrevHash) { - return 0, encoder.ErrBufferUnderflow - } - copy(obj.Head.PrevHash[:], d.Buffer[:len(obj.Head.PrevHash)]) - d.Buffer = d.Buffer[len(obj.Head.PrevHash):] - } - - { - // obj.Head.BodyHash - if len(d.Buffer) < len(obj.Head.BodyHash) { - return 0, encoder.ErrBufferUnderflow - } - copy(obj.Head.BodyHash[:], d.Buffer[:len(obj.Head.BodyHash)]) - d.Buffer = d.Buffer[len(obj.Head.BodyHash):] - } - - { - // obj.Head.UxHash - if len(d.Buffer) < len(obj.Head.UxHash) { - return 0, encoder.ErrBufferUnderflow - } - copy(obj.Head.UxHash[:], d.Buffer[:len(obj.Head.UxHash)]) - d.Buffer = d.Buffer[len(obj.Head.UxHash):] - } - - { - // obj.Body.Transactions - - ul, err := d.Uint32() - if err != nil { - return 0, err - } - - length := int(ul) - if length < 0 || 
length > len(d.Buffer) { - return 0, encoder.ErrBufferUnderflow - } - - if length > 65535 { - return 0, encoder.ErrMaxLenExceeded - } - - if length != 0 { - obj.Body.Transactions = make([]coin.Transaction, length) - - for z2 := range obj.Body.Transactions { - { - // obj.Body.Transactions[z2].Length - i, err := d.Uint32() - if err != nil { - return 0, err - } - obj.Body.Transactions[z2].Length = i - } - - { - // obj.Body.Transactions[z2].Type - i, err := d.Uint8() - if err != nil { - return 0, err - } - obj.Body.Transactions[z2].Type = i - } - - { - // obj.Body.Transactions[z2].InnerHash - if len(d.Buffer) < len(obj.Body.Transactions[z2].InnerHash) { - return 0, encoder.ErrBufferUnderflow - } - copy(obj.Body.Transactions[z2].InnerHash[:], d.Buffer[:len(obj.Body.Transactions[z2].InnerHash)]) - d.Buffer = d.Buffer[len(obj.Body.Transactions[z2].InnerHash):] - } - - { - // obj.Body.Transactions[z2].Sigs - - ul, err := d.Uint32() - if err != nil { - return 0, err - } - - length := int(ul) - if length < 0 || length > len(d.Buffer) { - return 0, encoder.ErrBufferUnderflow - } - - if length > 65535 { - return 0, encoder.ErrMaxLenExceeded - } - - if length != 0 { - obj.Body.Transactions[z2].Sigs = make([]cipher.Sig, length) - - for z4 := range obj.Body.Transactions[z2].Sigs { - { - // obj.Body.Transactions[z2].Sigs[z4] - if len(d.Buffer) < len(obj.Body.Transactions[z2].Sigs[z4]) { - return 0, encoder.ErrBufferUnderflow - } - copy(obj.Body.Transactions[z2].Sigs[z4][:], d.Buffer[:len(obj.Body.Transactions[z2].Sigs[z4])]) - d.Buffer = d.Buffer[len(obj.Body.Transactions[z2].Sigs[z4]):] - } - - } - } - } - - { - // obj.Body.Transactions[z2].In - - ul, err := d.Uint32() - if err != nil { - return 0, err - } - - length := int(ul) - if length < 0 || length > len(d.Buffer) { - return 0, encoder.ErrBufferUnderflow - } - - if length > 65535 { - return 0, encoder.ErrMaxLenExceeded - } - - if length != 0 { - obj.Body.Transactions[z2].In = make([]cipher.SHA256, length) - - for z4 := range 
obj.Body.Transactions[z2].In { - { - // obj.Body.Transactions[z2].In[z4] - if len(d.Buffer) < len(obj.Body.Transactions[z2].In[z4]) { - return 0, encoder.ErrBufferUnderflow - } - copy(obj.Body.Transactions[z2].In[z4][:], d.Buffer[:len(obj.Body.Transactions[z2].In[z4])]) - d.Buffer = d.Buffer[len(obj.Body.Transactions[z2].In[z4]):] - } - - } - } - } - - { - // obj.Body.Transactions[z2].Out - - ul, err := d.Uint32() - if err != nil { - return 0, err - } - - length := int(ul) - if length < 0 || length > len(d.Buffer) { - return 0, encoder.ErrBufferUnderflow - } - - if length > 65535 { - return 0, encoder.ErrMaxLenExceeded - } - - if length != 0 { - obj.Body.Transactions[z2].Out = make([]coin.TransactionOutput, length) - - for z4 := range obj.Body.Transactions[z2].Out { - { - // obj.Body.Transactions[z2].Out[z4].Address.Version - i, err := d.Uint8() - if err != nil { - return 0, err - } - obj.Body.Transactions[z2].Out[z4].Address.Version = i - } - - { - // obj.Body.Transactions[z2].Out[z4].Address.Key - if len(d.Buffer) < len(obj.Body.Transactions[z2].Out[z4].Address.Key) { - return 0, encoder.ErrBufferUnderflow - } - copy(obj.Body.Transactions[z2].Out[z4].Address.Key[:], d.Buffer[:len(obj.Body.Transactions[z2].Out[z4].Address.Key)]) - d.Buffer = d.Buffer[len(obj.Body.Transactions[z2].Out[z4].Address.Key):] - } - - { - // obj.Body.Transactions[z2].Out[z4].Coins - i, err := d.Uint64() - if err != nil { - return 0, err - } - obj.Body.Transactions[z2].Out[z4].Coins = i - } - - { - // obj.Body.Transactions[z2].Out[z4].Hours - i, err := d.Uint64() - if err != nil { - return 0, err - } - obj.Body.Transactions[z2].Out[z4].Hours = i - } - - } - } - } - } - } - } - - return uint64(len(buf) - len(d.Buffer)), nil -} - -// decodeBlockExact decodes an object of type Block from a buffer. -// If the buffer not long enough to decode the object, returns encoder.ErrBufferUnderflow. -// If the buffer is longer than required to decode the object, returns encoder.ErrRemainingBytes. 
-func decodeBlockExact(buf []byte, obj *coin.Block) error { - if n, err := decodeBlock(buf, obj); err != nil { - return err - } else if n != uint64(len(buf)) { - return encoder.ErrRemainingBytes - } - - return nil -} diff --git a/vendor/github.com/SkycoinProject/skycoin/src/visor/blockdb/block_tree.go b/vendor/github.com/SkycoinProject/skycoin/src/visor/blockdb/block_tree.go deleted file mode 100644 index e9cd246..0000000 --- a/vendor/github.com/SkycoinProject/skycoin/src/visor/blockdb/block_tree.go +++ /dev/null @@ -1,269 +0,0 @@ -package blockdb - -import ( - "errors" - "fmt" - - "github.com/SkycoinProject/skycoin/src/cipher" - "github.com/SkycoinProject/skycoin/src/coin" - "github.com/SkycoinProject/skycoin/src/visor/dbutil" -) - -var ( - errBlockExist = errors.New("block already exists") - errNoParent = errors.New("block is not genesis and has no parent") - errWrongParent = errors.New("wrong parent") - errHasChild = errors.New("remove block failed, it has children") - - // BlocksBkt holds coin.Blocks - BlocksBkt = []byte("blocks") - // TreeBkt maps block height to a (prev, hash) pair for a block - TreeBkt = []byte("block_tree") -) - -// Walker function for go through blockchain -type Walker func(*dbutil.Tx, []coin.HashPair) (cipher.SHA256, bool) - -// blockTree use the blockdb store all blocks and maintains the block tree struct. -type blockTree struct{} - -// AddBlock adds block with *dbutil.Tx -func (bt *blockTree) AddBlock(tx *dbutil.Tx, b *coin.Block) error { - // can't store block if it's not genesis block and has no parent. - if b.Seq() > 0 && b.Head.PrevHash.Null() { - return errNoParent - } - - // check if the block already exists. - hash := b.HashHeader() - if ok, err := dbutil.BucketHasKey(tx, BlocksBkt, hash[:]); err != nil { - return err - } else if ok { - return errBlockExist - } - - // write block into blocks bucket. 
- buf, err := encodeBlock(b) - if err != nil { - return err - } - - if err := dbutil.PutBucketValue(tx, BlocksBkt, hash[:], buf); err != nil { - return err - } - - // the pre hash must be in depth - 1. - if b.Seq() > 0 { - parentHashPair, err := getHashPairInDepth(tx, b.Seq()-1, func(hp coin.HashPair) bool { - return hp.Hash == b.Head.PrevHash - }) - if err != nil { - return err - } - if len(parentHashPair) == 0 { - return errWrongParent - } - } - - hp := coin.HashPair{ - Hash: hash, - PrevHash: b.Head.PrevHash, - } - - // get block pairs in the depth - hashPairs, err := getHashPairInDepth(tx, b.Seq(), allPairs) - if err != nil { - return err - } - - if len(hashPairs) == 0 { - // no hash pair exist in the depth. - // write the hash pair into tree. - return setHashPairInDepth(tx, b.Seq(), []coin.HashPair{hp}) - } - - // check dup block - if containHash(hashPairs, hp) { - return errBlockExist - } - - hashPairs = append(hashPairs, hp) - return setHashPairInDepth(tx, b.Seq(), hashPairs) -} - -// RemoveBlock remove block from blocks bucket and tree bucket. -// can't remove block if it has children. -func (bt *blockTree) RemoveBlock(tx *dbutil.Tx, b *coin.Block) error { - // delete block in blocks bucket. - hash := b.HashHeader() - if err := dbutil.Delete(tx, BlocksBkt, hash[:]); err != nil { - return err - } - - // check if this block has children - if has, err := hasChild(tx, *b); err != nil { - return err - } else if has { - return errHasChild - } - - // get block hash pairs in depth - hashPairs, err := getHashPairInDepth(tx, b.Seq(), allPairs) - if err != nil { - return err - } - - // remove block hash pair in tree. - ps := removePairs(hashPairs, coin.HashPair{ - Hash: hash, - PrevHash: b.Head.PrevHash, - }) - - if len(ps) == 0 { - return dbutil.Delete(tx, TreeBkt, dbutil.Itob(b.Seq())) - } - - // update the hash pairs in tree. 
- return setHashPairInDepth(tx, b.Seq(), ps) -} - -// GetBlock get block by hash, return nil on not found -func (bt *blockTree) GetBlock(tx *dbutil.Tx, hash cipher.SHA256) (*coin.Block, error) { - var b coin.Block - - v, err := dbutil.GetBucketValueNoCopy(tx, BlocksBkt, hash[:]) - if err != nil { - return nil, err - } else if v == nil { - return nil, nil - } - - if err := decodeBlockExact(v, &b); err != nil { - return nil, err - } - - if hash != b.HashHeader() { - return nil, fmt.Errorf("DB key %s does not match block hash header %s", hash, b.HashHeader()) - } - - return &b, nil -} - -// GetBlockInDepth get block in depth, return nil on not found, -// the filter is used to choose the appropriate block. -func (bt *blockTree) GetBlockInDepth(tx *dbutil.Tx, depth uint64, filter Walker) (*coin.Block, error) { - hash, ok, err := bt.getHashInDepth(tx, depth, filter) - if err != nil { - return nil, fmt.Errorf("BlockTree.getHashInDepth failed: %v", err) - } else if !ok { - return nil, nil - } - - return bt.GetBlock(tx, hash) -} - -// ForEachBlock iterates all blocks and calls f on them -func (bt *blockTree) ForEachBlock(tx *dbutil.Tx, f func(b *coin.Block) error) error { - return dbutil.ForEach(tx, BlocksBkt, func(_, v []byte) error { - var b coin.Block - if err := decodeBlockExact(v, &b); err != nil { - return err - } - - return f(&b) - }) -} - -func (bt *blockTree) getHashInDepth(tx *dbutil.Tx, depth uint64, filter Walker) (cipher.SHA256, bool, error) { - var pairs hashPairsWrapper - - v, err := dbutil.GetBucketValueNoCopy(tx, TreeBkt, dbutil.Itob(depth)) - if err != nil { - return cipher.SHA256{}, false, err - } else if v == nil { - return cipher.SHA256{}, false, nil - } - - if err := decodeHashPairsWrapperExact(v, &pairs); err != nil { - return cipher.SHA256{}, false, err - } - - hash, ok := filter(tx, pairs.HashPairs) - if !ok { - return cipher.SHA256{}, false, errors.New("No hash found in depth") - } - - return hash, true, nil -} - -func containHash(hashPairs 
[]coin.HashPair, pair coin.HashPair) bool { - for _, p := range hashPairs { - if p.Hash == pair.Hash { - return true - } - } - return false -} - -func removePairs(hps []coin.HashPair, pair coin.HashPair) []coin.HashPair { - pairs := []coin.HashPair{} - for _, p := range hps { - if p.Hash == pair.Hash && p.PrevHash == pair.PrevHash { - continue - } - pairs = append(pairs, p) - } - return pairs -} - -func getHashPairInDepth(tx *dbutil.Tx, depth uint64, fn func(hp coin.HashPair) bool) ([]coin.HashPair, error) { - var hps hashPairsWrapper - - v, err := dbutil.GetBucketValueNoCopy(tx, TreeBkt, dbutil.Itob(depth)) - if err != nil { - return nil, err - } else if v == nil { - return nil, nil - } - - if err := decodeHashPairsWrapperExact(v, &hps); err != nil { - return nil, err - } - - var pairs []coin.HashPair - for _, ps := range hps.HashPairs { - if fn(ps) { - pairs = append(pairs, ps) - } - } - return pairs, nil -} - -// check if this block has children -func hasChild(tx *dbutil.Tx, b coin.Block) (bool, error) { - // get the child block hash pair, whose pre hash point to current block. 
- childHashPair, err := getHashPairInDepth(tx, b.Head.BkSeq+1, func(hp coin.HashPair) bool { - return hp.PrevHash == b.HashHeader() - }) - - if err != nil { - return false, err - } - - return len(childHashPair) > 0, nil -} - -func setHashPairInDepth(tx *dbutil.Tx, depth uint64, hps []coin.HashPair) error { - buf, err := encodeHashPairsWrapper(&hashPairsWrapper{ - HashPairs: hps, - }) - if err != nil { - return err - } - - return dbutil.PutBucketValue(tx, TreeBkt, dbutil.Itob(depth), buf) -} - -func allPairs(hp coin.HashPair) bool { - return true -} diff --git a/vendor/github.com/SkycoinProject/skycoin/src/visor/blockdb/blockchain.go b/vendor/github.com/SkycoinProject/skycoin/src/visor/blockdb/blockchain.go deleted file mode 100644 index c111080..0000000 --- a/vendor/github.com/SkycoinProject/skycoin/src/visor/blockdb/blockchain.go +++ /dev/null @@ -1,285 +0,0 @@ -/* -Package blockdb is the core blockchain database wrapper -*/ -package blockdb - -import ( - "errors" - "fmt" - - "github.com/SkycoinProject/skycoin/src/cipher" - "github.com/SkycoinProject/skycoin/src/coin" - "github.com/SkycoinProject/skycoin/src/util/logging" - "github.com/SkycoinProject/skycoin/src/visor/dbutil" -) - -var ( - logger = logging.MustGetLogger("blockdb") - - // ErrNoHeadBlock is returned when calling Blockchain.Head() when no head block exists - ErrNoHeadBlock = fmt.Errorf("found no head block") -) - -//go:generate skyencoder -unexported -struct Block -output-path . -package blockdb github.com/SkycoinProject/skycoin/src/coin -//go:generate skyencoder -unexported -struct UxOut -output-path . 
-package blockdb github.com/SkycoinProject/skycoin/src/coin -//go:generate skyencoder -unexported -struct hashPairsWrapper -//go:generate skyencoder -unexported -struct hashesWrapper -//go:generate skyencoder -unexported -struct sigWrapper - -// hashesWrapper wraps []cipher.SHA256 so it can be used by skyencoder -type hashesWrapper struct { - Hashes []cipher.SHA256 -} - -// sigWrapper wraps cipher.Sig in struct so it can be used by skyencoder -type sigWrapper struct { - Sig cipher.Sig -} - -// hashPairsWrapper wraps []coin.HashPair so it can be used by skyencoder -type hashPairsWrapper struct { - HashPairs []coin.HashPair -} - -// ErrMissingSignature is returned if a block in the db does not have a corresponding signature in the db -type ErrMissingSignature struct { - b *coin.Block -} - -// NewErrMissingSignature creates ErrMissingSignature from *coin.Block -func NewErrMissingSignature(b *coin.Block) error { - return ErrMissingSignature{ - b: b, - } -} - -func (e ErrMissingSignature) Error() string { - return fmt.Sprintf("Signature not found for block seq=%d hash=%s", e.b.Head.BkSeq, e.b.HashHeader().Hex()) -} - -// CreateBuckets creates bolt.DB buckets used by the blockdb -func CreateBuckets(tx *dbutil.Tx) error { - return dbutil.CreateBuckets(tx, [][]byte{ - BlockSigsBkt, - BlocksBkt, - TreeBkt, - BlockchainMetaBkt, - UnspentPoolBkt, - UnspentPoolAddrIndexBkt, - UnspentMetaBkt, - }) -} - -// BlockTree block storage -type BlockTree interface { - AddBlock(*dbutil.Tx, *coin.Block) error - GetBlock(*dbutil.Tx, cipher.SHA256) (*coin.Block, error) - GetBlockInDepth(*dbutil.Tx, uint64, Walker) (*coin.Block, error) - ForEachBlock(*dbutil.Tx, func(*coin.Block) error) error -} - -// BlockSigs block signature storage -type BlockSigs interface { - Add(*dbutil.Tx, cipher.SHA256, cipher.Sig) error - Get(*dbutil.Tx, cipher.SHA256) (cipher.Sig, bool, error) - ForEach(*dbutil.Tx, func(cipher.SHA256, cipher.Sig) error) error -} - -//go:generate mockery -name UnspentPooler -case 
underscore -testonly -inpkg - -// UnspentPooler unspent outputs pool -type UnspentPooler interface { - MaybeBuildIndexes(*dbutil.Tx, uint64) error - Len(*dbutil.Tx) (uint64, error) - Contains(*dbutil.Tx, cipher.SHA256) (bool, error) - Get(*dbutil.Tx, cipher.SHA256) (*coin.UxOut, error) - GetAll(*dbutil.Tx) (coin.UxArray, error) - GetArray(*dbutil.Tx, []cipher.SHA256) (coin.UxArray, error) - GetUxHash(*dbutil.Tx) (cipher.SHA256, error) - GetUnspentsOfAddrs(*dbutil.Tx, []cipher.Address) (coin.AddressUxOuts, error) - GetUnspentHashesOfAddrs(*dbutil.Tx, []cipher.Address) (AddressHashes, error) - ProcessBlock(*dbutil.Tx, *coin.SignedBlock) error - AddressCount(*dbutil.Tx) (uint64, error) -} - -// ChainMeta blockchain metadata -type ChainMeta interface { - GetHeadSeq(*dbutil.Tx) (uint64, bool, error) - SetHeadSeq(*dbutil.Tx, uint64) error -} - -// Blockchain maintain the buckets for blockchain -type Blockchain struct { - db *dbutil.DB - meta ChainMeta - unspent UnspentPooler - tree BlockTree - sigs BlockSigs - walker Walker -} - -// NewBlockchain creates a new blockchain instance -func NewBlockchain(db *dbutil.DB, walker Walker) (*Blockchain, error) { - if db == nil { - return nil, errors.New("db is nil") - } - - if walker == nil { - return nil, errors.New("blockchain walker is nil") - } - - return &Blockchain{ - db: db, - unspent: NewUnspentPool(), - meta: &chainMeta{}, - tree: &blockTree{}, - sigs: &blockSigs{}, - walker: walker, - }, nil -} - -// UnspentPool returns the unspent pool -func (bc *Blockchain) UnspentPool() UnspentPooler { - return bc.unspent -} - -// AddBlock adds signed block -func (bc *Blockchain) AddBlock(tx *dbutil.Tx, sb *coin.SignedBlock) error { - if err := bc.sigs.Add(tx, sb.HashHeader(), sb.Sig); err != nil { - return fmt.Errorf("save signature failed: %v", err) - } - - if err := bc.tree.AddBlock(tx, &sb.Block); err != nil { - return fmt.Errorf("save block failed: %v", err) - } - - // update block head seq and unspent pool - if err := 
bc.processBlock(tx, sb); err != nil { - return err - } - - return nil -} - -// processBlock processes a block and updates the db -func (bc *Blockchain) processBlock(tx *dbutil.Tx, b *coin.SignedBlock) error { - if err := bc.unspent.ProcessBlock(tx, b); err != nil { - return err - } - - return bc.meta.SetHeadSeq(tx, b.Seq()) -} - -// Head returns head block, returns error if no head block exists -func (bc *Blockchain) Head(tx *dbutil.Tx) (*coin.SignedBlock, error) { - seq, ok, err := bc.HeadSeq(tx) - if err != nil { - return nil, err - } else if !ok { - return nil, ErrNoHeadBlock - } - - b, err := bc.GetSignedBlockBySeq(tx, seq) - if err != nil { - return nil, err - } - - if b == nil { - return nil, ErrNoHeadBlock - } - - return b, nil -} - -// HeadSeq returns the head block sequence -func (bc *Blockchain) HeadSeq(tx *dbutil.Tx) (uint64, bool, error) { - return bc.meta.GetHeadSeq(tx) -} - -// Len returns blockchain length -func (bc *Blockchain) Len(tx *dbutil.Tx) (uint64, error) { - seq, ok, err := bc.meta.GetHeadSeq(tx) - if err != nil { - return 0, err - } else if !ok { - return 0, nil - } - - return seq + 1, nil -} - -// GetBlockSignature returns the signature of a block -func (bc *Blockchain) GetBlockSignature(tx *dbutil.Tx, b *coin.Block) (cipher.Sig, bool, error) { - return bc.sigs.Get(tx, b.HashHeader()) -} - -// GetBlockByHash returns block of given hash -func (bc *Blockchain) GetBlockByHash(tx *dbutil.Tx, hash cipher.SHA256) (*coin.Block, error) { - b, err := bc.tree.GetBlock(tx, hash) - if err != nil { - return nil, err - } - - return b, nil -} - -// GetSignedBlockByHash returns signed block of given hash -func (bc *Blockchain) GetSignedBlockByHash(tx *dbutil.Tx, hash cipher.SHA256) (*coin.SignedBlock, error) { - b, err := bc.tree.GetBlock(tx, hash) - if err != nil { - return nil, err - } - if b == nil { - return nil, nil - } - - // get signature - sig, ok, err := bc.sigs.Get(tx, hash) - if err != nil { - return nil, fmt.Errorf("find signature of block: %v 
failed: %v", hash.Hex(), err) - } - - if !ok { - return nil, NewErrMissingSignature(b) - } - - return &coin.SignedBlock{ - Block: *b, - Sig: sig, - }, nil -} - -// GetSignedBlockBySeq returns signed block of given seq -func (bc *Blockchain) GetSignedBlockBySeq(tx *dbutil.Tx, seq uint64) (*coin.SignedBlock, error) { - b, err := bc.tree.GetBlockInDepth(tx, seq, bc.walker) - if err != nil { - return nil, fmt.Errorf("bc.tree.GetBlockInDepth failed: %v", err) - } - if b == nil { - return nil, nil - } - - sig, ok, err := bc.sigs.Get(tx, b.HashHeader()) - if err != nil { - return nil, fmt.Errorf("find signature of block: %v failed: %v", seq, err) - } - - if !ok { - return nil, NewErrMissingSignature(b) - } - - return &coin.SignedBlock{ - Block: *b, - Sig: sig, - }, nil -} - -// GetGenesisBlock returns genesis block -func (bc *Blockchain) GetGenesisBlock(tx *dbutil.Tx) (*coin.SignedBlock, error) { - return bc.GetSignedBlockBySeq(tx, 0) -} - -// ForEachBlock iterates all blocks and calls f on them -func (bc *Blockchain) ForEachBlock(tx *dbutil.Tx, f func(b *coin.Block) error) error { - return bc.tree.ForEachBlock(tx, f) -} diff --git a/vendor/github.com/SkycoinProject/skycoin/src/visor/blockdb/blocksigs.go b/vendor/github.com/SkycoinProject/skycoin/src/visor/blockdb/blocksigs.go deleted file mode 100644 index 16b5cda..0000000 --- a/vendor/github.com/SkycoinProject/skycoin/src/visor/blockdb/blocksigs.go +++ /dev/null @@ -1,69 +0,0 @@ -package blockdb - -import ( - "github.com/SkycoinProject/skycoin/src/cipher" - "github.com/SkycoinProject/skycoin/src/visor/dbutil" -) - -var ( - // BlockSigsBkt holds block signatures - BlockSigsBkt = []byte("block_sigs") -) - -// blockSigs manages known blockSigs as received. -// TODO -- support out of order blocks. This requires a change to the -// message protocol to support ranges similar to bitcoin's locator hashes. 
-// We also need to keep track of whether a block has been executed so that -// as continuity is established we can execute chains of blocks. -// TODO -- Since we will need to hold blocks that cannot be verified -// immediately against the blockchain, we need to be able to hold multiple -// blockSigs per BkSeq, or use hashes as keys. For now, this is not a -// problem assuming the signed blocks created by a block publisher are valid blocks, -// because we can check the signature independently of the blockchain. -type blockSigs struct{} - -// Get returns the signature of a specific block -func (bs *blockSigs) Get(tx *dbutil.Tx, hash cipher.SHA256) (cipher.Sig, bool, error) { - var sig sigWrapper - - v, err := dbutil.GetBucketValueNoCopy(tx, BlockSigsBkt, hash[:]) - if err != nil { - return cipher.Sig{}, false, err - } else if v == nil { - return cipher.Sig{}, false, nil - } - - if err := decodeSigWrapperExact(v, &sig); err != nil { - return cipher.Sig{}, false, err - } - - return sig.Sig, true, nil -} - -// Add adds a signed block to the db -func (bs *blockSigs) Add(tx *dbutil.Tx, hash cipher.SHA256, sig cipher.Sig) error { - buf, err := encodeSigWrapper(&sigWrapper{ - Sig: sig, - }) - if err != nil { - return err - } - return dbutil.PutBucketValue(tx, BlockSigsBkt, hash[:], buf) -} - -// ForEach iterates all signatures and calls f on them -func (bs *blockSigs) ForEach(tx *dbutil.Tx, f func(cipher.SHA256, cipher.Sig) error) error { - return dbutil.ForEach(tx, BlockSigsBkt, func(k, v []byte) error { - hash, err := cipher.SHA256FromBytes(k) - if err != nil { - return err - } - - var sig sigWrapper - if err := decodeSigWrapperExact(v, &sig); err != nil { - return err - } - - return f(hash, sig.Sig) - }) -} diff --git a/vendor/github.com/SkycoinProject/skycoin/src/visor/blockdb/chain_meta.go b/vendor/github.com/SkycoinProject/skycoin/src/visor/blockdb/chain_meta.go deleted file mode 100644 index b1a9d35..0000000 --- 
a/vendor/github.com/SkycoinProject/skycoin/src/visor/blockdb/chain_meta.go +++ /dev/null @@ -1,29 +0,0 @@ -package blockdb - -import ( - "github.com/SkycoinProject/skycoin/src/visor/dbutil" -) - -var ( - // BlockchainMetaBkt holds blockchain metadata - BlockchainMetaBkt = []byte("blockchain_meta") - // blockchain head sequence number - headSeqKey = []byte("head_seq") -) - -type chainMeta struct{} - -func (m chainMeta) SetHeadSeq(tx *dbutil.Tx, seq uint64) error { - return dbutil.PutBucketValue(tx, BlockchainMetaBkt, headSeqKey, dbutil.Itob(seq)) -} - -func (m chainMeta) GetHeadSeq(tx *dbutil.Tx) (uint64, bool, error) { - v, err := dbutil.GetBucketValue(tx, BlockchainMetaBkt, headSeqKey) - if err != nil { - return 0, false, err - } else if v == nil { - return 0, false, nil - } - - return dbutil.Btoi(v), true, nil -} diff --git a/vendor/github.com/SkycoinProject/skycoin/src/visor/blockdb/hash_pairs_wrapper_skyencoder.go b/vendor/github.com/SkycoinProject/skycoin/src/visor/blockdb/hash_pairs_wrapper_skyencoder.go deleted file mode 100644 index 9bc4b04..0000000 --- a/vendor/github.com/SkycoinProject/skycoin/src/visor/blockdb/hash_pairs_wrapper_skyencoder.go +++ /dev/null @@ -1,141 +0,0 @@ -// Code generated by github.com/SkycoinProject/skyencoder. DO NOT EDIT. - -package blockdb - -import ( - "errors" - "math" - - "github.com/SkycoinProject/skycoin/src/cipher/encoder" - "github.com/SkycoinProject/skycoin/src/coin" -) - -// encodeSizeHashPairsWrapper computes the size of an encoded object of type hashPairsWrapper -func encodeSizeHashPairsWrapper(obj *hashPairsWrapper) uint64 { - i0 := uint64(0) - - // obj.HashPairs - i0 += 4 - { - i1 := uint64(0) - - // x1.Hash - i1 += 32 - - // x1.PrevHash - i1 += 32 - - i0 += uint64(len(obj.HashPairs)) * i1 - } - - return i0 -} - -// encodeHashPairsWrapper encodes an object of type hashPairsWrapper to a buffer allocated to the exact size -// required to encode the object. 
-func encodeHashPairsWrapper(obj *hashPairsWrapper) ([]byte, error) { - n := encodeSizeHashPairsWrapper(obj) - buf := make([]byte, n) - - if err := encodeHashPairsWrapperToBuffer(buf, obj); err != nil { - return nil, err - } - - return buf, nil -} - -// encodeHashPairsWrapperToBuffer encodes an object of type hashPairsWrapper to a []byte buffer. -// The buffer must be large enough to encode the object, otherwise an error is returned. -func encodeHashPairsWrapperToBuffer(buf []byte, obj *hashPairsWrapper) error { - if uint64(len(buf)) < encodeSizeHashPairsWrapper(obj) { - return encoder.ErrBufferUnderflow - } - - e := &encoder.Encoder{ - Buffer: buf[:], - } - - // obj.HashPairs length check - if uint64(len(obj.HashPairs)) > math.MaxUint32 { - return errors.New("obj.HashPairs length exceeds math.MaxUint32") - } - - // obj.HashPairs length - e.Uint32(uint32(len(obj.HashPairs))) - - // obj.HashPairs - for _, x := range obj.HashPairs { - - // x.Hash - e.CopyBytes(x.Hash[:]) - - // x.PrevHash - e.CopyBytes(x.PrevHash[:]) - - } - - return nil -} - -// decodeHashPairsWrapper decodes an object of type hashPairsWrapper from a buffer. -// Returns the number of bytes used from the buffer to decode the object. -// If the buffer not long enough to decode the object, returns encoder.ErrBufferUnderflow. 
-func decodeHashPairsWrapper(buf []byte, obj *hashPairsWrapper) (uint64, error) { - d := &encoder.Decoder{ - Buffer: buf[:], - } - - { - // obj.HashPairs - - ul, err := d.Uint32() - if err != nil { - return 0, err - } - - length := int(ul) - if length < 0 || length > len(d.Buffer) { - return 0, encoder.ErrBufferUnderflow - } - - if length != 0 { - obj.HashPairs = make([]coin.HashPair, length) - - for z1 := range obj.HashPairs { - { - // obj.HashPairs[z1].Hash - if len(d.Buffer) < len(obj.HashPairs[z1].Hash) { - return 0, encoder.ErrBufferUnderflow - } - copy(obj.HashPairs[z1].Hash[:], d.Buffer[:len(obj.HashPairs[z1].Hash)]) - d.Buffer = d.Buffer[len(obj.HashPairs[z1].Hash):] - } - - { - // obj.HashPairs[z1].PrevHash - if len(d.Buffer) < len(obj.HashPairs[z1].PrevHash) { - return 0, encoder.ErrBufferUnderflow - } - copy(obj.HashPairs[z1].PrevHash[:], d.Buffer[:len(obj.HashPairs[z1].PrevHash)]) - d.Buffer = d.Buffer[len(obj.HashPairs[z1].PrevHash):] - } - - } - } - } - - return uint64(len(buf) - len(d.Buffer)), nil -} - -// decodeHashPairsWrapperExact decodes an object of type hashPairsWrapper from a buffer. -// If the buffer not long enough to decode the object, returns encoder.ErrBufferUnderflow. -// If the buffer is longer than required to decode the object, returns encoder.ErrRemainingBytes. -func decodeHashPairsWrapperExact(buf []byte, obj *hashPairsWrapper) error { - if n, err := decodeHashPairsWrapper(buf, obj); err != nil { - return err - } else if n != uint64(len(buf)) { - return encoder.ErrRemainingBytes - } - - return nil -} diff --git a/vendor/github.com/SkycoinProject/skycoin/src/visor/blockdb/hashes_wrapper_skyencoder.go b/vendor/github.com/SkycoinProject/skycoin/src/visor/blockdb/hashes_wrapper_skyencoder.go deleted file mode 100644 index f88f308..0000000 --- a/vendor/github.com/SkycoinProject/skycoin/src/visor/blockdb/hashes_wrapper_skyencoder.go +++ /dev/null @@ -1,126 +0,0 @@ -// Code generated by github.com/SkycoinProject/skyencoder. DO NOT EDIT. 
- -package blockdb - -import ( - "errors" - "math" - - "github.com/SkycoinProject/skycoin/src/cipher" - "github.com/SkycoinProject/skycoin/src/cipher/encoder" -) - -// encodeSizeHashesWrapper computes the size of an encoded object of type hashesWrapper -func encodeSizeHashesWrapper(obj *hashesWrapper) uint64 { - i0 := uint64(0) - - // obj.Hashes - i0 += 4 - { - i1 := uint64(0) - - // x1 - i1 += 32 - - i0 += uint64(len(obj.Hashes)) * i1 - } - - return i0 -} - -// encodeHashesWrapper encodes an object of type hashesWrapper to a buffer allocated to the exact size -// required to encode the object. -func encodeHashesWrapper(obj *hashesWrapper) ([]byte, error) { - n := encodeSizeHashesWrapper(obj) - buf := make([]byte, n) - - if err := encodeHashesWrapperToBuffer(buf, obj); err != nil { - return nil, err - } - - return buf, nil -} - -// encodeHashesWrapperToBuffer encodes an object of type hashesWrapper to a []byte buffer. -// The buffer must be large enough to encode the object, otherwise an error is returned. -func encodeHashesWrapperToBuffer(buf []byte, obj *hashesWrapper) error { - if uint64(len(buf)) < encodeSizeHashesWrapper(obj) { - return encoder.ErrBufferUnderflow - } - - e := &encoder.Encoder{ - Buffer: buf[:], - } - - // obj.Hashes length check - if uint64(len(obj.Hashes)) > math.MaxUint32 { - return errors.New("obj.Hashes length exceeds math.MaxUint32") - } - - // obj.Hashes length - e.Uint32(uint32(len(obj.Hashes))) - - // obj.Hashes - for _, x := range obj.Hashes { - - // x - e.CopyBytes(x[:]) - - } - - return nil -} - -// decodeHashesWrapper decodes an object of type hashesWrapper from a buffer. -// Returns the number of bytes used from the buffer to decode the object. -// If the buffer not long enough to decode the object, returns encoder.ErrBufferUnderflow. 
-func decodeHashesWrapper(buf []byte, obj *hashesWrapper) (uint64, error) { - d := &encoder.Decoder{ - Buffer: buf[:], - } - - { - // obj.Hashes - - ul, err := d.Uint32() - if err != nil { - return 0, err - } - - length := int(ul) - if length < 0 || length > len(d.Buffer) { - return 0, encoder.ErrBufferUnderflow - } - - if length != 0 { - obj.Hashes = make([]cipher.SHA256, length) - - for z1 := range obj.Hashes { - { - // obj.Hashes[z1] - if len(d.Buffer) < len(obj.Hashes[z1]) { - return 0, encoder.ErrBufferUnderflow - } - copy(obj.Hashes[z1][:], d.Buffer[:len(obj.Hashes[z1])]) - d.Buffer = d.Buffer[len(obj.Hashes[z1]):] - } - - } - } - } - - return uint64(len(buf) - len(d.Buffer)), nil -} - -// decodeHashesWrapperExact decodes an object of type hashesWrapper from a buffer. -// If the buffer not long enough to decode the object, returns encoder.ErrBufferUnderflow. -// If the buffer is longer than required to decode the object, returns encoder.ErrRemainingBytes. -func decodeHashesWrapperExact(buf []byte, obj *hashesWrapper) error { - if n, err := decodeHashesWrapper(buf, obj); err != nil { - return err - } else if n != uint64(len(buf)) { - return encoder.ErrRemainingBytes - } - - return nil -} diff --git a/vendor/github.com/SkycoinProject/skycoin/src/visor/blockdb/sig_wrapper_skyencoder.go b/vendor/github.com/SkycoinProject/skycoin/src/visor/blockdb/sig_wrapper_skyencoder.go deleted file mode 100644 index 87526f3..0000000 --- a/vendor/github.com/SkycoinProject/skycoin/src/visor/blockdb/sig_wrapper_skyencoder.go +++ /dev/null @@ -1,78 +0,0 @@ -// Code generated by github.com/SkycoinProject/skyencoder. DO NOT EDIT. 
- -package blockdb - -import "github.com/SkycoinProject/skycoin/src/cipher/encoder" - -// encodeSizeSigWrapper computes the size of an encoded object of type sigWrapper -func encodeSizeSigWrapper(obj *sigWrapper) uint64 { - i0 := uint64(0) - - // obj.Sig - i0 += 65 - - return i0 -} - -// encodeSigWrapper encodes an object of type sigWrapper to a buffer allocated to the exact size -// required to encode the object. -func encodeSigWrapper(obj *sigWrapper) ([]byte, error) { - n := encodeSizeSigWrapper(obj) - buf := make([]byte, n) - - if err := encodeSigWrapperToBuffer(buf, obj); err != nil { - return nil, err - } - - return buf, nil -} - -// encodeSigWrapperToBuffer encodes an object of type sigWrapper to a []byte buffer. -// The buffer must be large enough to encode the object, otherwise an error is returned. -func encodeSigWrapperToBuffer(buf []byte, obj *sigWrapper) error { - if uint64(len(buf)) < encodeSizeSigWrapper(obj) { - return encoder.ErrBufferUnderflow - } - - e := &encoder.Encoder{ - Buffer: buf[:], - } - - // obj.Sig - e.CopyBytes(obj.Sig[:]) - - return nil -} - -// decodeSigWrapper decodes an object of type sigWrapper from a buffer. -// Returns the number of bytes used from the buffer to decode the object. -// If the buffer not long enough to decode the object, returns encoder.ErrBufferUnderflow. -func decodeSigWrapper(buf []byte, obj *sigWrapper) (uint64, error) { - d := &encoder.Decoder{ - Buffer: buf[:], - } - - { - // obj.Sig - if len(d.Buffer) < len(obj.Sig) { - return 0, encoder.ErrBufferUnderflow - } - copy(obj.Sig[:], d.Buffer[:len(obj.Sig)]) - d.Buffer = d.Buffer[len(obj.Sig):] - } - - return uint64(len(buf) - len(d.Buffer)), nil -} - -// decodeSigWrapperExact decodes an object of type sigWrapper from a buffer. -// If the buffer not long enough to decode the object, returns encoder.ErrBufferUnderflow. -// If the buffer is longer than required to decode the object, returns encoder.ErrRemainingBytes. 
-func decodeSigWrapperExact(buf []byte, obj *sigWrapper) error { - if n, err := decodeSigWrapper(buf, obj); err != nil { - return err - } else if n != uint64(len(buf)) { - return encoder.ErrRemainingBytes - } - - return nil -} diff --git a/vendor/github.com/SkycoinProject/skycoin/src/visor/blockdb/unspent.go b/vendor/github.com/SkycoinProject/skycoin/src/visor/blockdb/unspent.go deleted file mode 100644 index 6e1c04d..0000000 --- a/vendor/github.com/SkycoinProject/skycoin/src/visor/blockdb/unspent.go +++ /dev/null @@ -1,541 +0,0 @@ -package blockdb - -import ( - "bytes" - "errors" - "fmt" - - "github.com/SkycoinProject/skycoin/src/cipher" - "github.com/SkycoinProject/skycoin/src/coin" - "github.com/SkycoinProject/skycoin/src/visor/dbutil" -) - -var ( - xorhashKey = []byte("xorhash") - addrIndexHeightKey = []byte("addr_index_height") - - // UnspentPoolBkt holds unspent outputs, indexed by unspent output hash - UnspentPoolBkt = []byte("unspent_pool") - // UnspentPoolAddrIndexBkt maps addresses to their unspent outputs - UnspentPoolAddrIndexBkt = []byte("unspent_pool_addr_index") - // UnspentMetaBkt holds unspent output metadata - UnspentMetaBkt = []byte("unspent_meta") -) - -// ErrUnspentNotExist is returned if an unspent is not found in the pool -type ErrUnspentNotExist struct { - UxID string -} - -// NewErrUnspentNotExist creates ErrUnspentNotExist from a UxID -func NewErrUnspentNotExist(uxID string) error { - return ErrUnspentNotExist{ - UxID: uxID, - } -} - -func (e ErrUnspentNotExist) Error() string { - return fmt.Sprintf("unspent output of %s does not exist", e.UxID) -} - -// AddressHashes maps addresses to a set of hashes -type AddressHashes map[cipher.Address][]cipher.SHA256 - -// Flatten flattens all hash sets from AddressHashes to one slice -func (a AddressHashes) Flatten() []cipher.SHA256 { - total := 0 - for _, h := range a { - total += len(h) - } - - hashes := make([]cipher.SHA256, total) - - i := 0 - for _, h := range a { - copy(hashes[i:], h) - i += 
len(h) - } - - return hashes -} - -type unspentMeta struct{} - -func (m unspentMeta) getXorHash(tx *dbutil.Tx) (cipher.SHA256, error) { - v, err := dbutil.GetBucketValue(tx, UnspentMetaBkt, xorhashKey) - if err != nil { - return cipher.SHA256{}, err - } else if v == nil { - return cipher.SHA256{}, nil - } - - return cipher.SHA256FromBytes(v) -} - -func (m *unspentMeta) setXorHash(tx *dbutil.Tx, hash cipher.SHA256) error { - return dbutil.PutBucketValue(tx, UnspentMetaBkt, xorhashKey, hash[:]) -} - -func (m *unspentMeta) getAddrIndexHeight(tx *dbutil.Tx) (uint64, bool, error) { - v, err := dbutil.GetBucketValue(tx, UnspentMetaBkt, addrIndexHeightKey) - if err != nil { - return 0, false, err - } else if v == nil { - return 0, false, nil - } - - return dbutil.Btoi(v), true, nil -} - -func (m *unspentMeta) setAddrIndexHeight(tx *dbutil.Tx, height uint64) error { - return dbutil.PutBucketValue(tx, UnspentMetaBkt, addrIndexHeightKey, dbutil.Itob(height)) -} - -type pool struct{} - -func (pl pool) get(tx *dbutil.Tx, hash cipher.SHA256) (*coin.UxOut, error) { - var out coin.UxOut - - v, err := dbutil.GetBucketValueNoCopy(tx, UnspentPoolBkt, hash[:]) - if err != nil { - return nil, err - } else if v == nil { - return nil, nil - } - - if err := decodeUxOutExact(v, &out); err != nil { - return nil, err - } - - return &out, nil -} - -func (pl pool) getAll(tx *dbutil.Tx) (coin.UxArray, error) { - var uxa coin.UxArray - - if err := dbutil.ForEach(tx, UnspentPoolBkt, func(_, v []byte) error { - var ux coin.UxOut - if err := decodeUxOutExact(v, &ux); err != nil { - return err - } - - uxa = append(uxa, ux) - return nil - }); err != nil { - return nil, err - } - - return uxa, nil -} - -func (pl pool) put(tx *dbutil.Tx, hash cipher.SHA256, ux coin.UxOut) error { - buf, err := encodeUxOut(&ux) - if err != nil { - return err - } - - return dbutil.PutBucketValue(tx, UnspentPoolBkt, hash[:], buf) -} - -func (pl *pool) delete(tx *dbutil.Tx, hash cipher.SHA256) error { - return 
dbutil.Delete(tx, UnspentPoolBkt, hash[:]) -} - -type poolAddrIndex struct{} - -func (p poolAddrIndex) get(tx *dbutil.Tx, addr cipher.Address) ([]cipher.SHA256, error) { - var hashes hashesWrapper - - v, err := dbutil.GetBucketValueNoCopy(tx, UnspentPoolAddrIndexBkt, addr.Bytes()) - if err != nil { - return nil, err - } else if v == nil { - return nil, nil - } - - if err := decodeHashesWrapperExact(v, &hashes); err != nil { - return nil, err - } - - return hashes.Hashes, nil -} - -func (p poolAddrIndex) put(tx *dbutil.Tx, addr cipher.Address, hashes []cipher.SHA256) error { - if len(hashes) == 0 { - return errors.New("poolAddrIndex.put cannot put empty hash array") - } - - hashesMap := make(map[cipher.SHA256]struct{}, len(hashes)) - for _, h := range hashes { - if _, ok := hashesMap[h]; ok { - return errors.New("poolAddrIndex.put: hashes array contains duplicate") - } - - hashesMap[h] = struct{}{} - } - - buf, err := encodeHashesWrapper(&hashesWrapper{ - Hashes: hashes, - }) - if err != nil { - return err - } - - return dbutil.PutBucketValue(tx, UnspentPoolAddrIndexBkt, addr.Bytes(), buf) -} - -// adjust adds and removes hashes from an address -> hashes index -// TODO -- if necessary, this can be optimized further to accept multiple addresses at once, -// so that all get queries can be performed before the set -func (p poolAddrIndex) adjust(tx *dbutil.Tx, addr cipher.Address, addHashes, rmHashes []cipher.SHA256) error { - if len(addHashes) == 0 && len(rmHashes) == 0 { - return nil - } - - existingHashes, err := p.get(tx, addr) - if err != nil { - return err - } - - rmHashesMap := make(map[cipher.SHA256]struct{}, len(rmHashes)) - for _, h := range rmHashes { - rmHashesMap[h] = struct{}{} - } - - if len(rmHashesMap) != len(rmHashes) { - return errors.New("poolAddrIndex.adjust: rmHashes contains duplicates") - } - - newHashesSize := len(existingHashes) - len(rmHashes) - if newHashesSize < 0 { - return errors.New("poolAddrIndex.adjust: rmHashes is longer than 
existingHashes") - } - - newHashes := make([]cipher.SHA256, 0, newHashesSize) - newHashesMap := make(map[cipher.SHA256]struct{}, newHashesSize) - - rmHashesCount := 0 - for _, h := range existingHashes { - if _, ok := rmHashesMap[h]; ok { - rmHashesCount++ - } else { - newHashes = append(newHashes, h) - newHashesMap[h] = struct{}{} - } - } - - if rmHashesCount != len(rmHashes) { - return fmt.Errorf("poolAddrIndex.adjust: rmHashes contains %d hashes not indexed for address %s", len(rmHashes)-rmHashesCount, addr.String()) - } - - for _, h := range addHashes { - if _, ok := rmHashesMap[h]; ok { - return errors.New("poolAddrIndex.adjust: hash appears in both addHashes and rmHashes") - } - - if _, ok := newHashesMap[h]; !ok { - newHashes = append(newHashes, h) - newHashesMap[h] = struct{}{} - } else { - return fmt.Errorf("poolAddrIndex.adjust: uxout hash %s is already indexed for address %s", h.Hex(), addr.String()) - } - } - - // Delete the row if hashes is empty, so that the length of the bucket can - // be used to determine the number of addresses with unspents - if len(newHashes) == 0 { - return dbutil.Delete(tx, UnspentPoolAddrIndexBkt, addr.Bytes()) - } - - return p.put(tx, addr, newHashes) -} - -// Unspents unspent outputs pool -type Unspents struct { - pool *pool - poolAddrIndex *poolAddrIndex - meta *unspentMeta -} - -// NewUnspentPool creates new unspent pool instance -func NewUnspentPool() *Unspents { - return &Unspents{ - pool: &pool{}, - poolAddrIndex: &poolAddrIndex{}, - meta: &unspentMeta{}, - } -} - -// MaybeBuildIndexes builds indexes if necessary -func (up *Unspents) MaybeBuildIndexes(tx *dbutil.Tx, headSeq uint64) error { - logger.Info("Unspents.MaybeBuildIndexes") - - // Compare the addrIndexHeight to the head block, - // if not equal, rebuild the address index - addrIndexHeight, ok, err := up.meta.getAddrIndexHeight(tx) - if err != nil { - return err - } - - if ok && addrIndexHeight == headSeq { - return nil - } - - if addrIndexHeight > headSeq { - 
logger.Critical().Warningf("addrIndexHeight > headSeq (%d > %d)", addrIndexHeight, headSeq) - } - - logger.Infof("Rebuilding unspent_pool_addr_index (addrHeightIndexExists=%v, addrIndexHeight=%d, headSeq=%d)", ok, addrIndexHeight, headSeq) - - return up.buildAddrIndex(tx) -} - -func (up *Unspents) buildAddrIndex(tx *dbutil.Tx) error { - logger.Info("Building unspent address index") - - if err := dbutil.Reset(tx, UnspentPoolAddrIndexBkt); err != nil { - return err - } - - addrHashes := make(map[cipher.Address][]cipher.SHA256) - - var maxBlockSeq uint64 - if err := dbutil.ForEach(tx, UnspentPoolBkt, func(k, v []byte) error { - var ux coin.UxOut - if err := decodeUxOutExact(v, &ux); err != nil { - return err - } - - if ux.Head.BkSeq > maxBlockSeq { - maxBlockSeq = ux.Head.BkSeq - } - - h := ux.Hash() - - if !bytes.Equal(k[:], h[:]) { - return errors.New("Unspent pool uxout.Hash() does not match its key") - } - - addrHashes[ux.Body.Address] = append(addrHashes[ux.Body.Address], h) - - return nil - }); err != nil { - return err - } - - if len(addrHashes) == 0 { - logger.Infof("No unspents to index") - return nil - } - - for addr, hashes := range addrHashes { - if err := up.poolAddrIndex.put(tx, addr, hashes); err != nil { - return err - } - } - - if err := up.meta.setAddrIndexHeight(tx, maxBlockSeq); err != nil { - return err - } - - logger.Infof("Indexed unspents for %d addresses", len(addrHashes)) - - return nil -} - -// ProcessBlock adds unspents from a block to the unspent pool -func (up *Unspents) ProcessBlock(tx *dbutil.Tx, b *coin.SignedBlock) error { - // Gather all transaction inputs - var inputs []cipher.SHA256 - var txnUxs coin.UxArray - for _, txn := range b.Body.Transactions { - inputs = append(inputs, txn.In...) - txnUxs = append(txnUxs, coin.CreateUnspents(b.Head, txn)...) 
- } - - uxs, err := up.GetArray(tx, inputs) - if err != nil { - return err - } - - xorHash, err := up.meta.getXorHash(tx) - if err != nil { - return err - } - - // Remove spent outputs - rmAddrHashes := make(map[cipher.Address][]cipher.SHA256) - for _, ux := range uxs { - xorHash = xorHash.Xor(ux.SnapshotHash()) - - h := ux.Hash() - - if err := up.pool.delete(tx, h); err != nil { - return err - } - - rmAddrHashes[ux.Body.Address] = append(rmAddrHashes[ux.Body.Address], h) - } - - // Create new outputs - txnUxHashes := make([]cipher.SHA256, len(txnUxs)) - addAddrHashes := make(map[cipher.Address][]cipher.SHA256) - for i, ux := range txnUxs { - h := ux.Hash() - txnUxHashes[i] = h - addAddrHashes[ux.Body.Address] = append(addAddrHashes[ux.Body.Address], h) - } - - // Check that the uxout exists in the pool already, otherwise xorHash will be calculated wrong - for _, h := range txnUxHashes { - if hasKey, err := up.Contains(tx, h); err != nil { - return err - } else if hasKey { - return fmt.Errorf("attempted to insert uxout:%v twice into the unspent pool", h.Hex()) - } - } - - for i, ux := range txnUxs { - // Add new outputs - if err := up.pool.put(tx, txnUxHashes[i], ux); err != nil { - return err - } - - // Recalculate xorHash - xorHash = xorHash.Xor(ux.SnapshotHash()) - } - - // Set xorHash - if err := up.meta.setXorHash(tx, xorHash); err != nil { - return err - } - - // Update indexes - for addr, rmHashes := range rmAddrHashes { - addHashes := addAddrHashes[addr] - - if err := up.poolAddrIndex.adjust(tx, addr, addHashes, rmHashes); err != nil { - return err - } - - delete(addAddrHashes, addr) - } - - for addr, addHashes := range addAddrHashes { - if err := up.poolAddrIndex.adjust(tx, addr, addHashes, nil); err != nil { - return err - } - } - - // Check that the addrIndexHeight is incremental - addrIndexHeight, ok, err := up.meta.getAddrIndexHeight(tx) - if err != nil { - return err - } - - if b.Block.Head.BkSeq == 0 { - if ok { - err := errors.New("addrIndexHeight 
is set but no block has been indexed yet") - logger.Critical().Error(err.Error()) - return err - } - } else if b.Block.Head.BkSeq != addrIndexHeight+1 { - err := errors.New("unspent pool processing blocks out of order") - logger.Critical().Error(err.Error()) - return err - } - - // Update the addrIndexHeight - return up.meta.setAddrIndexHeight(tx, b.Block.Head.BkSeq) -} - -// GetArray returns UxOut for a set of hashes, will return error if any of the hashes do not exist in the pool. -func (up *Unspents) GetArray(tx *dbutil.Tx, hashes []cipher.SHA256) (coin.UxArray, error) { - var uxa coin.UxArray - - for _, h := range hashes { - ux, err := up.pool.get(tx, h) - if err != nil { - return nil, err - } else if ux == nil { - return nil, NewErrUnspentNotExist(h.Hex()) - } - - uxa = append(uxa, *ux) - } - - return uxa, nil -} - -// Get returns the uxout value of given hash -func (up *Unspents) Get(tx *dbutil.Tx, h cipher.SHA256) (*coin.UxOut, error) { - return up.pool.get(tx, h) -} - -// GetAll returns Pool as an array. Note: they are not in any particular order. 
-func (up *Unspents) GetAll(tx *dbutil.Tx) (coin.UxArray, error) { - return up.pool.getAll(tx) -} - -// Len returns the unspent outputs num -func (up *Unspents) Len(tx *dbutil.Tx) (uint64, error) { - return dbutil.Len(tx, UnspentPoolBkt) -} - -// Contains check if the hash of uxout does exist in the pool -func (up *Unspents) Contains(tx *dbutil.Tx, h cipher.SHA256) (bool, error) { - return dbutil.BucketHasKey(tx, UnspentPoolBkt, h[:]) -} - -// GetUnspentHashesOfAddrs returns a map of addresses to their unspent output hashes -func (up *Unspents) GetUnspentHashesOfAddrs(tx *dbutil.Tx, addrs []cipher.Address) (AddressHashes, error) { - addrHashes := make(AddressHashes, len(addrs)) - - for _, addr := range addrs { - hashes, err := up.poolAddrIndex.get(tx, addr) - if err != nil { - return nil, err - } - - addrHashes[addr] = hashes - } - - return addrHashes, nil -} - -// GetUnspentsOfAddrs returns a map of addresses to their unspent outputs -func (up *Unspents) GetUnspentsOfAddrs(tx *dbutil.Tx, addrs []cipher.Address) (coin.AddressUxOuts, error) { - addrUxs := make(coin.AddressUxOuts, len(addrs)) - - for _, addr := range addrs { - hashes, err := up.poolAddrIndex.get(tx, addr) - if err != nil { - return nil, err - } - - uxa, err := up.GetArray(tx, hashes) - if err != nil { - switch e := err.(type) { - case ErrUnspentNotExist: - logger.Critical().Errorf("Unspent hash %s indexed under address %s does not exist in unspent pool", e.UxID, addr.String()) - } - return nil, err - } - - addrUxs[addr] = uxa - } - - return addrUxs, nil -} - -// GetUxHash returns unspent output checksum for the Block. 
-// Must be called after Block is fully initialized, -// and before its outputs are added to the unspent pool -func (up *Unspents) GetUxHash(tx *dbutil.Tx) (cipher.SHA256, error) { - return up.meta.getXorHash(tx) -} - -// AddressCount returns the total number of addresses with unspents -func (up *Unspents) AddressCount(tx *dbutil.Tx) (uint64, error) { - return dbutil.Len(tx, UnspentPoolAddrIndexBkt) -} diff --git a/vendor/github.com/SkycoinProject/skycoin/src/visor/blockdb/ux_out_skyencoder.go b/vendor/github.com/SkycoinProject/skycoin/src/visor/blockdb/ux_out_skyencoder.go deleted file mode 100644 index c18ddd6..0000000 --- a/vendor/github.com/SkycoinProject/skycoin/src/visor/blockdb/ux_out_skyencoder.go +++ /dev/null @@ -1,171 +0,0 @@ -// Code generated by github.com/SkycoinProject/skyencoder. DO NOT EDIT. - -package blockdb - -import ( - "github.com/SkycoinProject/skycoin/src/cipher/encoder" - "github.com/SkycoinProject/skycoin/src/coin" -) - -// encodeSizeUxOut computes the size of an encoded object of type UxOut -func encodeSizeUxOut(obj *coin.UxOut) uint64 { - i0 := uint64(0) - - // obj.Head.Time - i0 += 8 - - // obj.Head.BkSeq - i0 += 8 - - // obj.Body.SrcTransaction - i0 += 32 - - // obj.Body.Address.Version - i0++ - - // obj.Body.Address.Key - i0 += 20 - - // obj.Body.Coins - i0 += 8 - - // obj.Body.Hours - i0 += 8 - - return i0 -} - -// encodeUxOut encodes an object of type UxOut to a buffer allocated to the exact size -// required to encode the object. -func encodeUxOut(obj *coin.UxOut) ([]byte, error) { - n := encodeSizeUxOut(obj) - buf := make([]byte, n) - - if err := encodeUxOutToBuffer(buf, obj); err != nil { - return nil, err - } - - return buf, nil -} - -// encodeUxOutToBuffer encodes an object of type UxOut to a []byte buffer. -// The buffer must be large enough to encode the object, otherwise an error is returned. 
-func encodeUxOutToBuffer(buf []byte, obj *coin.UxOut) error { - if uint64(len(buf)) < encodeSizeUxOut(obj) { - return encoder.ErrBufferUnderflow - } - - e := &encoder.Encoder{ - Buffer: buf[:], - } - - // obj.Head.Time - e.Uint64(obj.Head.Time) - - // obj.Head.BkSeq - e.Uint64(obj.Head.BkSeq) - - // obj.Body.SrcTransaction - e.CopyBytes(obj.Body.SrcTransaction[:]) - - // obj.Body.Address.Version - e.Uint8(obj.Body.Address.Version) - - // obj.Body.Address.Key - e.CopyBytes(obj.Body.Address.Key[:]) - - // obj.Body.Coins - e.Uint64(obj.Body.Coins) - - // obj.Body.Hours - e.Uint64(obj.Body.Hours) - - return nil -} - -// decodeUxOut decodes an object of type UxOut from a buffer. -// Returns the number of bytes used from the buffer to decode the object. -// If the buffer not long enough to decode the object, returns encoder.ErrBufferUnderflow. -func decodeUxOut(buf []byte, obj *coin.UxOut) (uint64, error) { - d := &encoder.Decoder{ - Buffer: buf[:], - } - - { - // obj.Head.Time - i, err := d.Uint64() - if err != nil { - return 0, err - } - obj.Head.Time = i - } - - { - // obj.Head.BkSeq - i, err := d.Uint64() - if err != nil { - return 0, err - } - obj.Head.BkSeq = i - } - - { - // obj.Body.SrcTransaction - if len(d.Buffer) < len(obj.Body.SrcTransaction) { - return 0, encoder.ErrBufferUnderflow - } - copy(obj.Body.SrcTransaction[:], d.Buffer[:len(obj.Body.SrcTransaction)]) - d.Buffer = d.Buffer[len(obj.Body.SrcTransaction):] - } - - { - // obj.Body.Address.Version - i, err := d.Uint8() - if err != nil { - return 0, err - } - obj.Body.Address.Version = i - } - - { - // obj.Body.Address.Key - if len(d.Buffer) < len(obj.Body.Address.Key) { - return 0, encoder.ErrBufferUnderflow - } - copy(obj.Body.Address.Key[:], d.Buffer[:len(obj.Body.Address.Key)]) - d.Buffer = d.Buffer[len(obj.Body.Address.Key):] - } - - { - // obj.Body.Coins - i, err := d.Uint64() - if err != nil { - return 0, err - } - obj.Body.Coins = i - } - - { - // obj.Body.Hours - i, err := d.Uint64() - if err != 
nil { - return 0, err - } - obj.Body.Hours = i - } - - return uint64(len(buf) - len(d.Buffer)), nil -} - -// decodeUxOutExact decodes an object of type UxOut from a buffer. -// If the buffer not long enough to decode the object, returns encoder.ErrBufferUnderflow. -// If the buffer is longer than required to decode the object, returns encoder.ErrRemainingBytes. -func decodeUxOutExact(buf []byte, obj *coin.UxOut) error { - if n, err := decodeUxOut(buf, obj); err != nil { - return err - } else if n != uint64(len(buf)) { - return encoder.ErrRemainingBytes - } - - return nil -} diff --git a/vendor/github.com/SkycoinProject/skycoin/src/visor/blockdb/verify.go b/vendor/github.com/SkycoinProject/skycoin/src/visor/blockdb/verify.go deleted file mode 100644 index e130420..0000000 --- a/vendor/github.com/SkycoinProject/skycoin/src/visor/blockdb/verify.go +++ /dev/null @@ -1,156 +0,0 @@ -package blockdb - -import ( - "errors" - "reflect" - - "github.com/SkycoinProject/skycoin/src/cipher" - "github.com/SkycoinProject/skycoin/src/cipher/encoder" - "github.com/SkycoinProject/skycoin/src/coin" - "github.com/SkycoinProject/skycoin/src/visor/dbutil" -) - -var ( - // ErrVerifyStopped is returned when database verification is interrupted - ErrVerifyStopped = errors.New("database verification stopped") -) - -// VerifyDBSkyencoderSafe verifies that the skyencoder generated code has the same result as the encoder -// for all data in the blockchain -func VerifyDBSkyencoderSafe(tx *dbutil.Tx, quit <-chan struct{}) error { - if quit == nil { - quit = make(chan struct{}) - } - - if err := dbutil.ForEach(tx, BlockSigsBkt, func(_, v []byte) error { - select { - case <-quit: - return ErrVerifyStopped - default: - } - - var sig1 sigWrapper - if err := decodeSigWrapperExact(v, &sig1); err != nil { - return err - } - - var sig2 cipher.Sig - if err := encoder.DeserializeRawExact(v, &sig2); err != nil { - return err - } - - if sig1.Sig != sig2 { - return errors.New("BlockSigsBkt sig decode 
mismatch") - } - - return nil - }); err != nil { - return err - } - - if err := dbutil.ForEach(tx, BlocksBkt, func(_, v []byte) error { - select { - case <-quit: - return ErrVerifyStopped - default: - } - - var b1 coin.Block - if err := decodeBlockExact(v, &b1); err != nil { - return err - } - - var b2 coin.Block - if err := encoder.DeserializeRawExact(v, &b2); err != nil { - return err - } - - if !reflect.DeepEqual(b1, b2) { - return errors.New("BlocksBkt block mismatch") - } - - return nil - }); err != nil { - return err - } - - if err := dbutil.ForEach(tx, TreeBkt, func(_, v []byte) error { - select { - case <-quit: - return ErrVerifyStopped - default: - } - - var b1 hashPairsWrapper - if err := decodeHashPairsWrapperExact(v, &b1); err != nil { - return err - } - - var b2 []coin.HashPair - if err := encoder.DeserializeRawExact(v, &b2); err != nil { - return err - } - - if !reflect.DeepEqual(b1.HashPairs, b2) { - return errors.New("TreeBkt hash pairs mismatch") - } - - return nil - }); err != nil { - return err - } - - if err := dbutil.ForEach(tx, UnspentPoolBkt, func(_, v []byte) error { - select { - case <-quit: - return ErrVerifyStopped - default: - } - - var b1 coin.UxOut - if err := decodeUxOutExact(v, &b1); err != nil { - return err - } - - var b2 coin.UxOut - if err := encoder.DeserializeRawExact(v, &b2); err != nil { - return err - } - - if !reflect.DeepEqual(b1, b2) { - return errors.New("UnspentPoolBkt ux out mismatch") - } - - return nil - }); err != nil { - return err - } - - if err := dbutil.ForEach(tx, UnspentPoolAddrIndexBkt, func(_, v []byte) error { - select { - case <-quit: - return ErrVerifyStopped - default: - } - - var b1 hashesWrapper - if err := decodeHashesWrapperExact(v, &b1); err != nil { - return err - } - - var b2 []cipher.SHA256 - if err := encoder.DeserializeRawExact(v, &b2); err != nil { - return err - } - - if !reflect.DeepEqual(b1.Hashes, b2) { - return errors.New("UnspentPoolAddrIndexBkt sha256 hashes mismatch") - } - - return 
nil - }); err != nil { - return err - } - - return nil -} diff --git a/vendor/github.com/SkycoinProject/skycoin/src/visor/config.go b/vendor/github.com/SkycoinProject/skycoin/src/visor/config.go deleted file mode 100644 index f74a85a..0000000 --- a/vendor/github.com/SkycoinProject/skycoin/src/visor/config.go +++ /dev/null @@ -1,119 +0,0 @@ -package visor - -import ( - "errors" - "fmt" - - "github.com/SkycoinProject/skycoin/src/cipher" - "github.com/SkycoinProject/skycoin/src/params" -) - -// Config configuration parameters for the Visor -type Config struct { - // Is this a block publishing node - IsBlockPublisher bool - - // Public key of the blockchain - BlockchainPubkey cipher.PubKey - - // Secret key of the blockchain (required if block publisher) - BlockchainSeckey cipher.SecKey - - // Transaction verification parameters used for unconfirmed transactions - UnconfirmedVerifyTxn params.VerifyTxn - // Transaction verification parameters used when creating a block - CreateBlockVerifyTxn params.VerifyTxn - // Maximum size of a block, in bytes for creating blocks - MaxBlockTransactionsSize uint32 - - // Coin distribution parameters (necessary for txn verification) - Distribution params.Distribution - - // Where the blockchain is saved - BlockchainFile string - // Where the block signatures are saved - BlockSigsFile string - - //address for genesis - GenesisAddress cipher.Address - // Genesis block sig - GenesisSignature cipher.Sig - // Genesis block timestamp - GenesisTimestamp uint64 - // Number of coins in genesis block - GenesisCoinVolume uint64 - // enable arbitrating mode - Arbitrating bool -} - -// NewConfig creates Config -func NewConfig() Config { - c := Config{ - IsBlockPublisher: false, - - BlockchainPubkey: cipher.PubKey{}, - BlockchainSeckey: cipher.SecKey{}, - - UnconfirmedVerifyTxn: params.UserVerifyTxn, - CreateBlockVerifyTxn: params.UserVerifyTxn, - MaxBlockTransactionsSize: params.UserVerifyTxn.MaxTransactionSize, - - GenesisAddress: 
cipher.Address{}, - GenesisSignature: cipher.Sig{}, - GenesisTimestamp: 0, - GenesisCoinVolume: 0, //100e12, 100e6 * 10e6 - } - - return c -} - -// Verify verifies the configuration -func (c Config) Verify() error { - if c.IsBlockPublisher { - if c.BlockchainPubkey != cipher.MustPubKeyFromSecKey(c.BlockchainSeckey) { - return errors.New("Cannot run as block publisher: invalid seckey for pubkey") - } - } - - if err := c.UnconfirmedVerifyTxn.Validate(); err != nil { - return err - } - - if err := c.CreateBlockVerifyTxn.Validate(); err != nil { - return err - } - - if c.UnconfirmedVerifyTxn.BurnFactor < params.UserVerifyTxn.BurnFactor { - return fmt.Errorf("UnconfirmedVerifyTxn.BurnFactor must be >= params.UserVerifyTxn.BurnFactor (%d)", params.UserVerifyTxn.BurnFactor) - } - - if c.CreateBlockVerifyTxn.BurnFactor < params.UserVerifyTxn.BurnFactor { - return fmt.Errorf("CreateBlockVerifyTxn.BurnFactor must be >= params.UserVerifyTxn.BurnFactor (%d)", params.UserVerifyTxn.BurnFactor) - } - - if c.UnconfirmedVerifyTxn.MaxTransactionSize < params.UserVerifyTxn.MaxTransactionSize { - return fmt.Errorf("UnconfirmedVerifyTxn.MaxTransactionSize must be >= params.UserVerifyTxn.MaxTransactionSize (%d)", params.UserVerifyTxn.MaxTransactionSize) - } - - if c.CreateBlockVerifyTxn.MaxTransactionSize < params.UserVerifyTxn.MaxTransactionSize { - return fmt.Errorf("CreateBlockVerifyTxn.MaxTransactionSize must be >= params.UserVerifyTxn.MaxTransactionSize (%d)", params.UserVerifyTxn.MaxTransactionSize) - } - - if c.UnconfirmedVerifyTxn.MaxDropletPrecision < params.UserVerifyTxn.MaxDropletPrecision { - return fmt.Errorf("UnconfirmedVerifyTxn.MaxDropletPrecision must be >= params.UserVerifyTxn.MaxDropletPrecision (%d)", params.UserVerifyTxn.MaxDropletPrecision) - } - - if c.CreateBlockVerifyTxn.MaxDropletPrecision < params.UserVerifyTxn.MaxDropletPrecision { - return fmt.Errorf("CreateBlockVerifyTxn.MaxDropletPrecision must be >= params.UserVerifyTxn.MaxDropletPrecision (%d)", 
params.UserVerifyTxn.MaxDropletPrecision) - } - - if c.MaxBlockTransactionsSize < c.CreateBlockVerifyTxn.MaxTransactionSize { - return errors.New("MaxBlockTransactionsSize must be >= CreateBlockVerifyTxn.MaxTransactionSize") - } - - if err := c.Distribution.Validate(); err != nil { - return err - } - - return nil -} diff --git a/vendor/github.com/SkycoinProject/skycoin/src/visor/db.go b/vendor/github.com/SkycoinProject/skycoin/src/visor/db.go deleted file mode 100644 index c4a9c51..0000000 --- a/vendor/github.com/SkycoinProject/skycoin/src/visor/db.go +++ /dev/null @@ -1,401 +0,0 @@ -package visor - -import ( - "crypto/sha256" - "encoding/base64" - "errors" - "fmt" - "io" - "os" - "path/filepath" - "reflect" - "sync" - "time" - - "github.com/boltdb/bolt" - - "github.com/SkycoinProject/skycoin/src/cipher" - "github.com/SkycoinProject/skycoin/src/cipher/encoder" - "github.com/SkycoinProject/skycoin/src/coin" - "github.com/SkycoinProject/skycoin/src/util/elapse" - "github.com/SkycoinProject/skycoin/src/visor/blockdb" - "github.com/SkycoinProject/skycoin/src/visor/dbutil" - "github.com/SkycoinProject/skycoin/src/visor/historydb" -) - -var ( - // BlockchainVerifyTheadNum number of goroutines to use for signature and historydb verification - BlockchainVerifyTheadNum = 4 -) - -// ErrCorruptDB is returned if the database is corrupted -// The original corruption error is embedded -type ErrCorruptDB struct { - error -} - -// CheckDatabase checks the database for corruption, rebuild history if corrupted -func CheckDatabase(db *dbutil.DB, pubkey cipher.PubKey, quit chan struct{}) error { - elapser := elapse.NewElapser(time.Second*30, logger) - elapser.Register("CheckDatabase") - defer elapser.CheckForDone() - - var blocksBktExist bool - if err := db.View("CheckDatabase", func(tx *dbutil.Tx) error { - blocksBktExist = dbutil.Exists(tx, blockdb.BlocksBkt) - return nil - }); err != nil { - return err - } - - // Don't verify the db if the blocks bucket does not exist - if 
!blocksBktExist { - return nil - } - - bc, err := NewBlockchain(db, BlockchainConfig{Pubkey: pubkey}) - if err != nil { - return err - } - - history := historydb.New() - indexesMap := historydb.NewIndexesMap() - - var historyVerifyErr error - var lock sync.Mutex - verifyFunc := func(tx *dbutil.Tx, b *coin.SignedBlock) error { - // Verify signature - if err := bc.VerifySignature(b); err != nil { - return err - } - - // Verify historydb, we don't return the error of history.Verify here, - // as we have to check all signature, if we return error early here, the - // potential bad signature won't be detected. - lock.Lock() - defer lock.Unlock() - if historyVerifyErr == nil { - historyVerifyErr = history.Verify(tx, b, indexesMap) - } - return nil - } - - err = bc.WalkChain(BlockchainVerifyTheadNum, verifyFunc, quit) - switch err.(type) { - case nil: - lock.Lock() - err = historyVerifyErr - lock.Unlock() - return err - default: - return err - } -} - -// backup the corrypted db first, then rebuild the history DB. 
-func rebuildHistoryDB(db *dbutil.DB, history *historydb.HistoryDB, bc *Blockchain, quit chan struct{}) (*dbutil.DB, error) { //nolint:unused,megacheck - db, err := backupDB(db) - if err != nil { - return nil, err - } - - if err := db.Update("Rebuild history db", func(tx *dbutil.Tx) error { - if err := history.Erase(tx); err != nil { - return err - } - - headSeq, ok, err := bc.HeadSeq(tx) - if err != nil { - return err - } - - if !ok { - return errors.New("head block does not exist") - } - - for i := uint64(0); i <= headSeq; i++ { - select { - case <-quit: - return nil - default: - b, err := bc.GetSignedBlockBySeq(tx, i) - if err != nil { - return err - } - - if err := history.ParseBlock(tx, b.Block); err != nil { - return err - } - - if i%1000 == 0 { - logger.Critical().Infof("Parse block: %d", i) - } - } - } - return nil - }); err != nil { - return nil, err - } - return db, nil -} - -// backupDB makes a backup copy of the DB -func backupDB(db *dbutil.DB) (*dbutil.DB, error) { //nolint:unused,megacheck - // backup the corrupted database - dbReadOnly := db.IsReadOnly() - - dbPath := db.Path() - - if err := db.Close(); err != nil { - return nil, fmt.Errorf("Failed to close db: %v", err) - } - - corruptDBPath, err := copyCorruptDB(dbPath) - if err != nil { - return nil, fmt.Errorf("Failed to copy corrupted db: %v", err) - } - - logger.Critical().Infof("Copy corrupted db to %s", corruptDBPath) - - // Open the database again - return OpenDB(dbPath, dbReadOnly) -} - -// ResetCorruptDB checks the database for corruption and if one of the following -// error types is found, then the database is deemed to be corrupted: -// - blockdb.ErrMissingSignature, -// - historydb.ErrHistoryDBCorrupted -// - encoder.ErrBufferUnderflow -// - encoder.ErrMaxLenExceeded -// If the database is deemed to be corrupted then it is erased and the db starts over. -// A copy of the corrupted database is saved. 
-func ResetCorruptDB(db *dbutil.DB, pubkey cipher.PubKey, quit chan struct{}) (*dbutil.DB, error) { - err := CheckDatabase(db, pubkey, quit) - - // Check if an encoder error has been reported. - // These are not types like the errors below so cannot be included in the - // .(type) switch evaluation. - if err == encoder.ErrBufferUnderflow || err == encoder.ErrMaxLenExceeded { - logger.Critical().Errorf("Database is corrupted (encoder error), recreating db: %v", err) - return resetCorruptDB(db) - } - - switch err.(type) { - case nil: - return db, nil - case blockdb.ErrMissingSignature, - historydb.ErrHistoryDBCorrupted: - logger.Critical().Errorf("Database is corrupted, recreating db: %v", err) - return resetCorruptDB(db) - default: - return nil, err - } -} - -func rebuildCorruptDB(db *dbutil.DB, pubkey cipher.PubKey, quit chan struct{}) (*dbutil.DB, error) { //nolint:deadcode,unused,megacheck - history := historydb.New() - bc, err := NewBlockchain(db, BlockchainConfig{Pubkey: pubkey}) - if err != nil { - return nil, err - } - - return rebuildHistoryDB(db, history, bc, quit) -} - -// resetCorruptDB recreates the DB, making a backup copy marked as corrupted -func resetCorruptDB(db *dbutil.DB) (*dbutil.DB, error) { - dbReadOnly := db.IsReadOnly() - dbPath := db.Path() - - if err := db.Close(); err != nil { - return nil, fmt.Errorf("Failed to close db: %v", err) - } - - corruptDBPath, err := moveCorruptDB(dbPath) - if err != nil { - return nil, fmt.Errorf("Failed to copy corrupted db: %v", err) - } - - logger.Critical().Infof("Moved corrupted db to %s", corruptDBPath) - - return OpenDB(dbPath, dbReadOnly) -} - -// OpenDB opens the blockdb -func OpenDB(dbFile string, readOnly bool) (*dbutil.DB, error) { - db, err := bolt.Open(dbFile, 0600, &bolt.Options{ - Timeout: 5000 * time.Millisecond, - ReadOnly: readOnly, - }) - if err != nil { - return nil, fmt.Errorf("Open boltdb failed, %v", err) - } - - return dbutil.WrapDB(db), nil -} - -// moveCorruptDB moves a file to 
makeCorruptDBPath(dbPath) -func moveCorruptDB(dbPath string) (string, error) { - newDBPath, err := makeCorruptDBPath(dbPath) - if err != nil { - return "", err - } - - if err := os.Rename(dbPath, newDBPath); err != nil { - logger.Errorf("os.Rename(%s, %s) failed: %v", dbPath, newDBPath, err) - return "", err - } - - return newDBPath, nil -} - -// copyCorruptDB copy a file to makeCorruptDBPath(dbPath) -func copyCorruptDB(dbPath string) (string, error) { //nolint:unused,megacheck - newDBPath, err := makeCorruptDBPath(dbPath) - if err != nil { - return "", err - } - - in, err := os.Open(dbPath) - if err != nil { - return "", err - } - defer in.Close() - - out, err := os.Create(newDBPath) - if err != nil { - return "", err - } - defer out.Close() - logger.Critical().Info(out.Name()) - - _, err = io.Copy(in, out) - if err != nil { - return "", err - } - - if err := out.Close(); err != nil { - return "", err - } - - return newDBPath, nil -} - -// makeCorruptDBPath creates a $FILE.corrupt.$HASH string based on dbPath, -// where $HASH is truncated SHA1 of $FILE. 
-func makeCorruptDBPath(dbPath string) (string, error) { - dbFileHash, err := shaFileID(dbPath) - if err != nil { - return "", err - } - - dbDir, dbFile := filepath.Split(dbPath) - newDBFile := fmt.Sprintf("%s.corrupt.%s", dbFile, dbFileHash) - newDBPath := filepath.Join(dbDir, newDBFile) - - return newDBPath, nil -} - -// shaFileID return the first 8 bytes of the SHA1 hash of the file, -// hex-encoded -func shaFileID(dbPath string) (string, error) { - fi, err := os.Open(dbPath) - if err != nil { - return "", err - } - defer fi.Close() - - h := sha256.New() - if _, err := io.Copy(h, fi); err != nil { - return "", err - } - - sum := h.Sum(nil) - encodedSum := base64.RawURLEncoding.EncodeToString(sum[:8]) - return encodedSum, nil -} - -// VerifyDBSkyencoderSafe verifies that the skyencoder generated code has the same result as the encoder -// for all data in the blockchain -func VerifyDBSkyencoderSafe(db *dbutil.DB, quit <-chan struct{}) error { - return db.View("VerifyDBSkyencoderSafe", func(tx *dbutil.Tx) error { - return verifyDBSkyencoderSafe(tx, quit) - }) -} - -func verifyDBSkyencoderSafe(tx *dbutil.Tx, quit <-chan struct{}) error { - if quit == nil { - quit = make(chan struct{}) - } - - // blockdb - if err := blockdb.VerifyDBSkyencoderSafe(tx, quit); err != nil { - if err == blockdb.ErrVerifyStopped { - return ErrVerifyStopped - } - return err - } - - // historydb - if err := historydb.VerifyDBSkyencoderSafe(tx, quit); err != nil { - if err == historydb.ErrVerifyStopped { - return ErrVerifyStopped - } - return err - } - - // visor - if err := dbutil.ForEach(tx, UnconfirmedTxnsBkt, func(_, v []byte) error { - select { - case <-quit: - return ErrVerifyStopped - default: - } - - var b1 UnconfirmedTransaction - if err := decodeUnconfirmedTransactionExact(v, &b1); err != nil { - return err - } - - var b2 UnconfirmedTransaction - if err := encoder.DeserializeRawExact(v, &b2); err != nil { - return err - } - - if !reflect.DeepEqual(b1, b2) { - return 
errors.New("UnconfirmedTxnsBkt unconfirmed transaction mismatch") - } - - return nil - }); err != nil { - return err - } - - if err := dbutil.ForEach(tx, UnconfirmedUnspentsBkt, func(_, v []byte) error { - select { - case <-quit: - return ErrVerifyStopped - default: - } - - var b1 UxArray - if err := decodeUxArrayExact(v, &b1); err != nil { - return err - } - - var b2 coin.UxArray - if err := encoder.DeserializeRawExact(v, &b2); err != nil { - return err - } - - if !reflect.DeepEqual(b1.UxArray, b2) { - return errors.New("UnconfirmedUnspentsBkt ux out slice mismatch") - } - - return nil - }); err != nil { - return err - } - - return nil -} diff --git a/vendor/github.com/SkycoinProject/skycoin/src/visor/dbutil/dbutil.go b/vendor/github.com/SkycoinProject/skycoin/src/visor/dbutil/dbutil.go deleted file mode 100644 index f3ec633..0000000 --- a/vendor/github.com/SkycoinProject/skycoin/src/visor/dbutil/dbutil.go +++ /dev/null @@ -1,360 +0,0 @@ -/* -Package dbutil provides boltdb utility methods -*/ -package dbutil - -import ( - "encoding/binary" - "encoding/json" - "errors" - "fmt" - "runtime/debug" - "sync" - "time" - - "github.com/boltdb/bolt" - - "github.com/SkycoinProject/skycoin/src/cipher/encoder" - "github.com/SkycoinProject/skycoin/src/util/logging" -) - -var ( - logger = logging.MustGetLogger("dbutil") - txViewLog = false - txViewTrace = false - txUpdateLog = false - txUpdateTrace = false - txDurationLog = true - txDurationReportingThreshold = time.Millisecond * 100 -) - -// Tx wraps a Tx -type Tx struct { - *bolt.Tx -} - -// String is implemented to prevent a panic when mocking methods with *Tx arguments. -// The mock library forces arguments to be printed with %s which causes Tx to panic. 
-// See https://github.com/stretchr/testify/pull/596 -func (tx *Tx) String() string { - return fmt.Sprintf("%v", tx.Tx) -} - -// DB wraps a bolt.DB to add logging -type DB struct { - ViewLog bool - ViewTrace bool - UpdateLog bool - UpdateTrace bool - DurationLog bool - DurationReportingThreshold time.Duration - - *bolt.DB - - // shutdownLock is added to prevent closing the database while a View transaction is in progress - // bolt.DB will block for Update transactions but not for View transactions, and if - // the database is closed while in a View transaction, it will panic - // This will be fixed in coreos's bbolt after this PR is merged: - // https://github.com/coreos/bbolt/pull/91 - // When coreos has this feature, we can switch to coreos's bbolt and remove this lock - shutdownLock sync.RWMutex -} - -// WrapDB returns WrapDB -func WrapDB(db *bolt.DB) *DB { - return &DB{ - ViewLog: txViewLog, - UpdateLog: txUpdateLog, - ViewTrace: txViewTrace, - UpdateTrace: txUpdateTrace, - DurationLog: txDurationLog, - DurationReportingThreshold: txDurationReportingThreshold, - DB: db, - } -} - -// View wraps *bolt.DB.View to add logging -func (db *DB) View(name string, f func(*Tx) error) error { - db.shutdownLock.RLock() - defer db.shutdownLock.RUnlock() - - if db.ViewLog { - logger.Debug("db.View [%s] starting", name) - defer logger.Debug("db.View [%s] done", name) - } - if db.ViewTrace { - debug.PrintStack() - } - - t0 := time.Now() - - err := db.DB.View(func(tx *bolt.Tx) error { - return f(&Tx{tx}) - }) - - t1 := time.Now() - delta := t1.Sub(t0) - if db.DurationLog && delta > db.DurationReportingThreshold { - logger.Debugf("db.View [%s] elapsed %s", name, delta) - } - - return err -} - -// Update wraps *bolt.DB.Update to add logging -func (db *DB) Update(name string, f func(*Tx) error) error { - db.shutdownLock.RLock() - defer db.shutdownLock.RUnlock() - - if db.UpdateLog { - logger.Debug("db.Update [%s] starting", name) - defer logger.Debug("db.Update [%s] done", name) - 
} - if db.UpdateTrace { - debug.PrintStack() - } - - t0 := time.Now() - - err := db.DB.Update(func(tx *bolt.Tx) error { - return f(&Tx{tx}) - }) - - t1 := time.Now() - delta := t1.Sub(t0) - if db.DurationLog && delta > db.DurationReportingThreshold { - logger.Debugf("db.Update [%s] elapsed %s", name, delta) - } - - return err -} - -// Close closes the underlying *bolt.DB -func (db *DB) Close() error { - db.shutdownLock.Lock() - defer db.shutdownLock.Unlock() - - return db.DB.Close() -} - -// ErrCreateBucketFailed is returned if creating a bolt.DB bucket fails -type ErrCreateBucketFailed struct { - Bucket string - Err error -} - -func (e ErrCreateBucketFailed) Error() string { - return fmt.Sprintf("Create bucket \"%s\" failed: %v", e.Bucket, e.Err) -} - -// NewErrCreateBucketFailed returns an ErrCreateBucketFailed -func NewErrCreateBucketFailed(bucket []byte, err error) error { - return ErrCreateBucketFailed{ - Bucket: string(bucket), - Err: err, - } -} - -// ErrBucketNotExist is returned if a bolt.DB bucket does not exist -type ErrBucketNotExist struct { - Bucket string -} - -func (e ErrBucketNotExist) Error() string { - return fmt.Sprintf("Bucket \"%s\" doesn't exist", e.Bucket) -} - -// NewErrBucketNotExist returns an ErrBucketNotExist -func NewErrBucketNotExist(bucket []byte) error { - return ErrBucketNotExist{ - Bucket: string(bucket), - } -} - -// CreateBuckets creates multiple buckets -func CreateBuckets(tx *Tx, buckets [][]byte) error { - for _, b := range buckets { - if _, err := tx.CreateBucketIfNotExists(b); err != nil { - return NewErrCreateBucketFailed(b, err) - } - } - - return nil -} - -// GetBucketObjectDecoded returns an encoder-serialized value from a bucket, decoded to an object -func GetBucketObjectDecoded(tx *Tx, bktName, key []byte, obj interface{}) (bool, error) { - v, err := GetBucketValueNoCopy(tx, bktName, key) - if err != nil { - return false, err - } else if v == nil { - return false, nil - } - - if err := encoder.DeserializeRawExact(v, 
obj); err != nil { - return false, fmt.Errorf("encoder.DeserializeRawExact failed: %v", err) - } - - return true, nil -} - -// GetBucketObjectJSON returns a JSON value from a bucket, unmarshaled to an object -func GetBucketObjectJSON(tx *Tx, bktName, key []byte, obj interface{}) (bool, error) { - v, err := GetBucketValueNoCopy(tx, bktName, key) - if err != nil { - return false, err - } else if v == nil { - return false, nil - } - - if err := json.Unmarshal(v, obj); err != nil { - return false, fmt.Errorf("json.Unmarshal failed: %v", err) - } - - return true, nil -} - -// GetBucketString returns a string value from a bucket -func GetBucketString(tx *Tx, bktName, key []byte) (string, bool, error) { - v, err := GetBucketValueNoCopy(tx, bktName, key) - if err != nil { - return "", false, err - } else if v == nil { - return "", false, nil - } - - return string(v), true, nil -} - -// GetBucketValue returns a []byte value from a bucket. If the bucket does not exist, -// it returns an error of type ErrBucketNotExist -func GetBucketValue(tx *Tx, bktName, key []byte) ([]byte, error) { - v, err := GetBucketValueNoCopy(tx, bktName, key) - if err != nil { - return nil, err - } else if v == nil { - return nil, nil - } - - // Bytes returned from boltdb are not valid outside of the transaction - // they are called in, make a copy - w := make([]byte, len(v)) - copy(w[:], v[:]) - - return w, nil -} - -// GetBucketValueNoCopy returns a value from a bucket. If the bucket does not exist, -// it returns an error of type ErrBucketNotExist. The byte value is not copied so is not valid -// outside of the database transaction -func GetBucketValueNoCopy(tx *Tx, bktName, key []byte) ([]byte, error) { - bkt := tx.Bucket(bktName) - if bkt == nil { - return nil, NewErrBucketNotExist(bktName) - } - - return bkt.Get(key), nil -} - -// PutBucketValue puts a value into a bucket under key. 
-func PutBucketValue(tx *Tx, bktName, key, val []byte) error { - bkt := tx.Bucket(bktName) - if bkt == nil { - return NewErrBucketNotExist(bktName) - } - - return bkt.Put(key, val) -} - -// BucketHasKey returns true if a bucket has a non-nil value for a key -func BucketHasKey(tx *Tx, bktName, key []byte) (bool, error) { - bkt := tx.Bucket(bktName) - if bkt == nil { - return false, NewErrBucketNotExist(bktName) - } - - v := bkt.Get(key) - return v != nil, nil -} - -// NextSequence returns the NextSequence() from the bucket -func NextSequence(tx *Tx, bktName []byte) (uint64, error) { - bkt := tx.Bucket(bktName) - if bkt == nil { - return 0, NewErrBucketNotExist(bktName) - } - - return bkt.NextSequence() -} - -// ForEach calls ForEach on the bucket -func ForEach(tx *Tx, bktName []byte, f func(k, v []byte) error) error { - bkt := tx.Bucket(bktName) - if bkt == nil { - return NewErrBucketNotExist(bktName) - } - - return bkt.ForEach(f) -} - -// Delete deletes from a bucket -func Delete(tx *Tx, bktName, key []byte) error { - bkt := tx.Bucket(bktName) - if bkt == nil { - return NewErrBucketNotExist(bktName) - } - - return bkt.Delete(key) -} - -// Len returns the number of keys in a bucket -func Len(tx *Tx, bktName []byte) (uint64, error) { - bkt := tx.Bucket(bktName) - if bkt == nil { - return 0, NewErrBucketNotExist(bktName) - } - - bstats := bkt.Stats() - - if bstats.KeyN < 0 { - return 0, errors.New("Negative length queried from db stats") - } - - return uint64(bstats.KeyN), nil -} - -// IsEmpty returns true if the bucket is empty -func IsEmpty(tx *Tx, bktName []byte) (bool, error) { - length, err := Len(tx, bktName) - if err != nil { - return false, err - } - return length == 0, nil -} - -// Exists returns true if the bucket exists -func Exists(tx *Tx, bktName []byte) bool { - return tx.Bucket(bktName) != nil -} - -// Reset resets the bucket -func Reset(tx *Tx, bktName []byte) error { - if err := tx.DeleteBucket(bktName); err != nil { - return err - } - - _, err := 
tx.CreateBucket(bktName) - return err -} - -// Itob converts uint64 to bytes -func Itob(v uint64) []byte { - b := make([]byte, 8) - binary.BigEndian.PutUint64(b, v) - return b -} - -// Btoi converts bytes to uint64 -func Btoi(v []byte) uint64 { - return binary.BigEndian.Uint64(v) -} diff --git a/vendor/github.com/SkycoinProject/skycoin/src/visor/distribution.go b/vendor/github.com/SkycoinProject/skycoin/src/visor/distribution.go deleted file mode 100644 index a373a82..0000000 --- a/vendor/github.com/SkycoinProject/skycoin/src/visor/distribution.go +++ /dev/null @@ -1,24 +0,0 @@ -package visor - -import ( - "github.com/SkycoinProject/skycoin/src/coin" - "github.com/SkycoinProject/skycoin/src/params" -) - -// TransactionIsLocked returns true if the transaction spends locked outputs -func TransactionIsLocked(d params.Distribution, inUxs coin.UxArray) bool { - lockedAddrs := d.LockedAddresses() - lockedAddrsMap := make(map[string]struct{}) - for _, a := range lockedAddrs { - lockedAddrsMap[a] = struct{}{} - } - - for _, o := range inUxs { - uxAddr := o.Body.Address.String() - if _, ok := lockedAddrsMap[uxAddr]; ok { - return true - } - } - - return false -} diff --git a/vendor/github.com/SkycoinProject/skycoin/src/visor/historydb/address_txn.go b/vendor/github.com/SkycoinProject/skycoin/src/visor/historydb/address_txn.go deleted file mode 100644 index cc9c96f..0000000 --- a/vendor/github.com/SkycoinProject/skycoin/src/visor/historydb/address_txn.go +++ /dev/null @@ -1,79 +0,0 @@ -package historydb - -import ( - "github.com/SkycoinProject/skycoin/src/cipher" - "github.com/SkycoinProject/skycoin/src/visor/dbutil" -) - -//go:generate skyencoder -unexported -struct hashesWrapper - -// hashesWrapper wraps []cipher.SHA256 -type hashesWrapper struct { - Hashes []cipher.SHA256 -} - -// AddressTxnsBkt maps addresses to transaction hashes -var AddressTxnsBkt = []byte("address_txns") - -// addressTxn buckets for storing address related transactions -// address as key, transaction 
id slice as value -type addressTxns struct{} - -// get returns the transaction hashes of given address -func (atx *addressTxns) get(tx *dbutil.Tx, addr cipher.Address) ([]cipher.SHA256, error) { - var txnHashes hashesWrapper - - v, err := dbutil.GetBucketValueNoCopy(tx, AddressTxnsBkt, addr.Bytes()) - if err != nil { - return nil, err - } else if v == nil { - return nil, nil - } - - if err := decodeHashesWrapperExact(v, &txnHashes); err != nil { - return nil, err - } - - return txnHashes.Hashes, nil -} - -// add adds a hash to an address's hash list -func (atx *addressTxns) add(tx *dbutil.Tx, addr cipher.Address, hash cipher.SHA256) error { - hashes, err := atx.get(tx, addr) - if err != nil { - return err - } - - // check for duplicates - for _, u := range hashes { - if u == hash { - return nil - } - } - - hashes = append(hashes, hash) - - buf, err := encodeHashesWrapper(&hashesWrapper{ - Hashes: hashes, - }) - if err != nil { - return err - } - - return dbutil.PutBucketValue(tx, AddressTxnsBkt, addr.Bytes(), buf) -} - -// contains returns true if an address has transactions -func (atx *addressTxns) contains(tx *dbutil.Tx, addr cipher.Address) (bool, error) { - return dbutil.BucketHasKey(tx, AddressTxnsBkt, addr.Bytes()) -} - -// isEmpty checks if address transactions bucket is empty -func (atx *addressTxns) isEmpty(tx *dbutil.Tx) (bool, error) { - return dbutil.IsEmpty(tx, AddressTxnsBkt) -} - -// reset resets the bucket -func (atx *addressTxns) reset(tx *dbutil.Tx) error { - return dbutil.Reset(tx, AddressTxnsBkt) -} diff --git a/vendor/github.com/SkycoinProject/skycoin/src/visor/historydb/address_uxout.go b/vendor/github.com/SkycoinProject/skycoin/src/visor/historydb/address_uxout.go deleted file mode 100644 index f23185b..0000000 --- a/vendor/github.com/SkycoinProject/skycoin/src/visor/historydb/address_uxout.go +++ /dev/null @@ -1,66 +0,0 @@ -package historydb - -import ( - "github.com/SkycoinProject/skycoin/src/cipher" - 
"github.com/SkycoinProject/skycoin/src/visor/dbutil" -) - -// AddressUxBkt maps addresses to unspent outputs -var AddressUxBkt = []byte("address_in") - -// bucket for storing address with UxOut, key as address, value as UxOut. -type addressUx struct{} - -// get return nil on not found. -func (au *addressUx) get(tx *dbutil.Tx, addr cipher.Address) ([]cipher.SHA256, error) { - var uxHashes hashesWrapper - - v, err := dbutil.GetBucketValueNoCopy(tx, AddressUxBkt, addr.Bytes()) - if err != nil { - return nil, err - } else if v == nil { - return nil, nil - } - - if err := decodeHashesWrapperExact(v, &uxHashes); err != nil { - return nil, err - } - - return uxHashes.Hashes, nil -} - -// add adds a hash to an address's hash list -func (au *addressUx) add(tx *dbutil.Tx, address cipher.Address, uxHash cipher.SHA256) error { - hashes, err := au.get(tx, address) - if err != nil { - return err - } - - // check for duplicate hashes - for _, u := range hashes { - if u == uxHash { - return nil - } - } - - hashes = append(hashes, uxHash) - - buf, err := encodeHashesWrapper(&hashesWrapper{ - Hashes: hashes, - }) - if err != nil { - return err - } - - return dbutil.PutBucketValue(tx, AddressUxBkt, address.Bytes(), buf) -} - -// isEmpty checks if the addressUx bucket is empty -func (au *addressUx) isEmpty(tx *dbutil.Tx) (bool, error) { - return dbutil.IsEmpty(tx, AddressUxBkt) -} - -// reset resets the bucket -func (au *addressUx) reset(tx *dbutil.Tx) error { - return dbutil.Reset(tx, AddressUxBkt) -} diff --git a/vendor/github.com/SkycoinProject/skycoin/src/visor/historydb/hashes_wrapper_skyencoder.go b/vendor/github.com/SkycoinProject/skycoin/src/visor/historydb/hashes_wrapper_skyencoder.go deleted file mode 100644 index 36fc309..0000000 --- a/vendor/github.com/SkycoinProject/skycoin/src/visor/historydb/hashes_wrapper_skyencoder.go +++ /dev/null @@ -1,126 +0,0 @@ -// Code generated by github.com/SkycoinProject/skyencoder. DO NOT EDIT. 
- -package historydb - -import ( - "errors" - "math" - - "github.com/SkycoinProject/skycoin/src/cipher" - "github.com/SkycoinProject/skycoin/src/cipher/encoder" -) - -// encodeSizeHashesWrapper computes the size of an encoded object of type hashesWrapper -func encodeSizeHashesWrapper(obj *hashesWrapper) uint64 { - i0 := uint64(0) - - // obj.Hashes - i0 += 4 - { - i1 := uint64(0) - - // x1 - i1 += 32 - - i0 += uint64(len(obj.Hashes)) * i1 - } - - return i0 -} - -// encodeHashesWrapper encodes an object of type hashesWrapper to a buffer allocated to the exact size -// required to encode the object. -func encodeHashesWrapper(obj *hashesWrapper) ([]byte, error) { - n := encodeSizeHashesWrapper(obj) - buf := make([]byte, n) - - if err := encodeHashesWrapperToBuffer(buf, obj); err != nil { - return nil, err - } - - return buf, nil -} - -// encodeHashesWrapperToBuffer encodes an object of type hashesWrapper to a []byte buffer. -// The buffer must be large enough to encode the object, otherwise an error is returned. -func encodeHashesWrapperToBuffer(buf []byte, obj *hashesWrapper) error { - if uint64(len(buf)) < encodeSizeHashesWrapper(obj) { - return encoder.ErrBufferUnderflow - } - - e := &encoder.Encoder{ - Buffer: buf[:], - } - - // obj.Hashes length check - if uint64(len(obj.Hashes)) > math.MaxUint32 { - return errors.New("obj.Hashes length exceeds math.MaxUint32") - } - - // obj.Hashes length - e.Uint32(uint32(len(obj.Hashes))) - - // obj.Hashes - for _, x := range obj.Hashes { - - // x - e.CopyBytes(x[:]) - - } - - return nil -} - -// decodeHashesWrapper decodes an object of type hashesWrapper from a buffer. -// Returns the number of bytes used from the buffer to decode the object. -// If the buffer not long enough to decode the object, returns encoder.ErrBufferUnderflow. 
-func decodeHashesWrapper(buf []byte, obj *hashesWrapper) (uint64, error) { - d := &encoder.Decoder{ - Buffer: buf[:], - } - - { - // obj.Hashes - - ul, err := d.Uint32() - if err != nil { - return 0, err - } - - length := int(ul) - if length < 0 || length > len(d.Buffer) { - return 0, encoder.ErrBufferUnderflow - } - - if length != 0 { - obj.Hashes = make([]cipher.SHA256, length) - - for z1 := range obj.Hashes { - { - // obj.Hashes[z1] - if len(d.Buffer) < len(obj.Hashes[z1]) { - return 0, encoder.ErrBufferUnderflow - } - copy(obj.Hashes[z1][:], d.Buffer[:len(obj.Hashes[z1])]) - d.Buffer = d.Buffer[len(obj.Hashes[z1]):] - } - - } - } - } - - return uint64(len(buf) - len(d.Buffer)), nil -} - -// decodeHashesWrapperExact decodes an object of type hashesWrapper from a buffer. -// If the buffer not long enough to decode the object, returns encoder.ErrBufferUnderflow. -// If the buffer is longer than required to decode the object, returns encoder.ErrRemainingBytes. -func decodeHashesWrapperExact(buf []byte, obj *hashesWrapper) error { - if n, err := decodeHashesWrapper(buf, obj); err != nil { - return err - } else if n != uint64(len(buf)) { - return encoder.ErrRemainingBytes - } - - return nil -} diff --git a/vendor/github.com/SkycoinProject/skycoin/src/visor/historydb/history_meta.go b/vendor/github.com/SkycoinProject/skycoin/src/visor/historydb/history_meta.go deleted file mode 100644 index 1700185..0000000 --- a/vendor/github.com/SkycoinProject/skycoin/src/visor/historydb/history_meta.go +++ /dev/null @@ -1,36 +0,0 @@ -package historydb - -import ( - "github.com/SkycoinProject/skycoin/src/visor/dbutil" -) - -var ( - // HistoryMetaBkt holds history metadata - HistoryMetaBkt = []byte("history_meta") - parsedHeightKey = []byte("parsed_height") -) - -// historyMeta bucket for storing block history meta info -type historyMeta struct{} - -// parsedBlockSeq returns history parsed block seq -func (hm *historyMeta) parsedBlockSeq(tx *dbutil.Tx) (uint64, bool, error) { - v, 
err := dbutil.GetBucketValue(tx, HistoryMetaBkt, parsedHeightKey) - if err != nil { - return 0, false, err - } else if v == nil { - return 0, false, nil - } - - return dbutil.Btoi(v), true, nil -} - -// setParsedBlockSeq updates history parsed block seq -func (hm *historyMeta) setParsedBlockSeq(tx *dbutil.Tx, h uint64) error { - return dbutil.PutBucketValue(tx, HistoryMetaBkt, parsedHeightKey, dbutil.Itob(h)) -} - -// reset resets the bucket -func (hm *historyMeta) reset(tx *dbutil.Tx) error { - return dbutil.Reset(tx, HistoryMetaBkt) -} diff --git a/vendor/github.com/SkycoinProject/skycoin/src/visor/historydb/historydb.go b/vendor/github.com/SkycoinProject/skycoin/src/visor/historydb/historydb.go deleted file mode 100644 index 08b0818..0000000 --- a/vendor/github.com/SkycoinProject/skycoin/src/visor/historydb/historydb.go +++ /dev/null @@ -1,395 +0,0 @@ -/* -Package historydb stores historical blockchain data. -*/ -package historydb - -import ( - "errors" - "fmt" - "sync" - - "github.com/SkycoinProject/skycoin/src/cipher" - "github.com/SkycoinProject/skycoin/src/coin" - "github.com/SkycoinProject/skycoin/src/util/logging" - "github.com/SkycoinProject/skycoin/src/visor/dbutil" -) - -var logger = logging.MustGetLogger("historydb") - -// CreateBuckets creates bolt.DB buckets used by the historydb -func CreateBuckets(tx *dbutil.Tx) error { - return dbutil.CreateBuckets(tx, [][]byte{ - AddressTxnsBkt, - AddressUxBkt, - HistoryMetaBkt, - UxOutsBkt, - TransactionsBkt, - }) -} - -// HistoryDB provides APIs for blockchain explorer -type HistoryDB struct { - outputs *uxOuts // outputs bucket - txns *transactions // transactions bucket - addrUx *addressUx // bucket which stores all UxOuts that address received - addrTxns *addressTxns // address related transaction bucket - meta *historyMeta // stores history meta info -} - -// New create HistoryDB instance -func New() *HistoryDB { - return &HistoryDB{ - outputs: &uxOuts{}, - txns: &transactions{}, - addrUx: &addressUx{}, - 
addrTxns: &addressTxns{}, - meta: &historyMeta{}, - } -} - -// NeedsReset checks if need to reset the parsed block history, -// If we have a new added bucket, we need to reset to parse -// blockchain again to get the new bucket filled. -func (hd *HistoryDB) NeedsReset(tx *dbutil.Tx) (bool, error) { - _, ok, err := hd.meta.parsedBlockSeq(tx) - if err != nil { - return false, err - } else if !ok { - return true, nil - } - - // if any of the following buckets are empty, need to reset - addrTxnsEmpty, err := hd.addrTxns.isEmpty(tx) - if err != nil { - return false, err - } - - addrUxEmpty, err := hd.addrUx.isEmpty(tx) - if err != nil { - return false, err - } - - txnsEmpty, err := hd.txns.isEmpty(tx) - if err != nil { - return false, err - } - - outputsEmpty, err := hd.outputs.isEmpty(tx) - if err != nil { - return false, err - } - - if addrTxnsEmpty || addrUxEmpty || txnsEmpty || outputsEmpty { - return true, nil - } - - return false, nil -} - -// Erase erases the entire HistoryDB -func (hd *HistoryDB) Erase(tx *dbutil.Tx) error { - logger.Debug("HistoryDB.reset") - if err := hd.addrTxns.reset(tx); err != nil { - return err - } - - if err := hd.addrUx.reset(tx); err != nil { - return err - } - - if err := hd.outputs.reset(tx); err != nil { - return err - } - - if err := hd.meta.reset(tx); err != nil { - return err - } - - return hd.txns.reset(tx) -} - -// ParsedBlockSeq returns the block seq up to which the HistoryDB is parsed -func (hd *HistoryDB) ParsedBlockSeq(tx *dbutil.Tx) (uint64, bool, error) { - return hd.meta.parsedBlockSeq(tx) -} - -// SetParsedBlockSeq sets the block seq up to which the HistoryDB is parsed -func (hd *HistoryDB) SetParsedBlockSeq(tx *dbutil.Tx, seq uint64) error { - return hd.meta.setParsedBlockSeq(tx, seq) -} - -// GetUxOuts get UxOut of specific uxIDs. 
-func (hd *HistoryDB) GetUxOuts(tx *dbutil.Tx, uxIDs []cipher.SHA256) ([]UxOut, error) { - return hd.outputs.getArray(tx, uxIDs) -} - -// ParseBlock builds indexes out of the block data -func (hd *HistoryDB) ParseBlock(tx *dbutil.Tx, b coin.Block) error { - for _, t := range b.Body.Transactions { - txn := Transaction{ - Txn: t, - BlockSeq: b.Seq(), - } - - spentTxnID := t.Hash() - - if err := hd.txns.put(tx, &txn); err != nil { - return err - } - - for _, in := range t.In { - o, err := hd.outputs.get(tx, in) - if err != nil { - return err - } - - if o == nil { - return errors.New("HistoryDB.ParseBlock: transaction input not found in outputs bucket") - } - - // update the output's spent block seq and txid - o.SpentBlockSeq = b.Seq() - o.SpentTxnID = spentTxnID - if err := hd.outputs.put(tx, *o); err != nil { - return err - } - - // store the IN address with txid - if err := hd.addrTxns.add(tx, o.Out.Body.Address, spentTxnID); err != nil { - return err - } - } - - // handle the tx out - uxArray := coin.CreateUnspents(b.Head, t) - for _, ux := range uxArray { - if err := hd.outputs.put(tx, UxOut{ - Out: ux, - }); err != nil { - return err - } - - if err := hd.addrUx.add(tx, ux.Body.Address, ux.Hash()); err != nil { - return err - } - - if err := hd.addrTxns.add(tx, ux.Body.Address, spentTxnID); err != nil { - return err - } - } - } - - return hd.SetParsedBlockSeq(tx, b.Seq()) -} - -// GetTransaction get transaction by hash. -func (hd HistoryDB) GetTransaction(tx *dbutil.Tx, hash cipher.SHA256) (*Transaction, error) { - return hd.txns.get(tx, hash) -} - -// GetOutputsForAddress get all uxout that the address affected. 
-func (hd HistoryDB) GetOutputsForAddress(tx *dbutil.Tx, addr cipher.Address) ([]UxOut, error) { - hashes, err := hd.addrUx.get(tx, addr) - if err != nil { - return nil, err - } - - return hd.outputs.getArray(tx, hashes) -} - -// GetTransactionsForAddress returns all the address related transactions -func (hd HistoryDB) GetTransactionsForAddress(tx *dbutil.Tx, addr cipher.Address) ([]Transaction, error) { - hashes, err := hd.addrTxns.get(tx, addr) - if err != nil { - return nil, err - } - - return hd.txns.getArray(tx, hashes) -} - -// AddressSeen returns true if the address appears in the blockchain -func (hd HistoryDB) AddressSeen(tx *dbutil.Tx, addr cipher.Address) (bool, error) { - return hd.addrTxns.contains(tx, addr) -} - -// ForEachTxn traverses the transactions bucket -func (hd HistoryDB) ForEachTxn(tx *dbutil.Tx, f func(cipher.SHA256, *Transaction) error) error { - return hd.txns.forEach(tx, f) -} - -// IndexesMap is a goroutine safe address indexes map -type IndexesMap struct { - value map[cipher.Address]AddressIndexes - lock sync.RWMutex -} - -// NewIndexesMap creates a IndexesMap instance -func NewIndexesMap() *IndexesMap { - return &IndexesMap{ - value: make(map[cipher.Address]AddressIndexes), - } -} - -// Load returns value of given key -func (im *IndexesMap) Load(addr cipher.Address) (AddressIndexes, bool) { - im.lock.RLock() - defer im.lock.RUnlock() - v, ok := im.value[addr] - return v, ok -} - -// Store saves address with indexes -func (im *IndexesMap) Store(addr cipher.Address, indexes AddressIndexes) { - im.lock.Lock() - defer im.lock.Unlock() - im.value[addr] = indexes -} - -// AddressIndexes represents the address indexes struct -type AddressIndexes struct { - TxnHashes map[cipher.SHA256]struct{} - UxHashes map[cipher.SHA256]struct{} -} - -// Verify checks if the historydb is corrupted -func (hd HistoryDB) Verify(tx *dbutil.Tx, b *coin.SignedBlock, indexesMap *IndexesMap) error { - for _, t := range b.Body.Transactions { - txnHash := t.Hash() - 
txn, err := hd.txns.get(tx, txnHash) - if err != nil { - return err - } - - if txn == nil { - err := fmt.Errorf("HistoryDB.Verify: transaction %v does not exist in historydb", txnHash.Hex()) - return ErrHistoryDBCorrupted{err} - } - - for _, in := range t.In { - // Checks the existence of transaction input - o, err := hd.outputs.get(tx, in) - if err != nil { - return err - } - - if o == nil { - err := fmt.Errorf("HistoryDB.Verify: transaction input %v does not exist in historydb", in.Hex()) - return ErrHistoryDBCorrupted{err} - } - - // Checks the output's spend block seq - if o.SpentBlockSeq != b.Seq() { - err := fmt.Errorf("HistoryDB.Verify: spend block seq of transaction input %v is wrong, should be: %v, but is %v", - in.Hex(), b.Seq(), o.SpentBlockSeq) - return ErrHistoryDBCorrupted{err} - } - - addr := o.Out.Body.Address - txnHashesMap := map[cipher.SHA256]struct{}{} - uxHashesMap := map[cipher.SHA256]struct{}{} - - // Checks if the address indexes already loaded into memory - indexes, ok := indexesMap.Load(addr) - if ok { - txnHashesMap = indexes.TxnHashes - uxHashesMap = indexes.UxHashes - } else { - txnHashes, err := hd.addrTxns.get(tx, addr) - if err != nil { - return err - } - for _, hash := range txnHashes { - txnHashesMap[hash] = struct{}{} - } - - uxHashes, err := hd.addrUx.get(tx, addr) - if err != nil { - return err - } - for _, hash := range uxHashes { - uxHashesMap[hash] = struct{}{} - } - - indexesMap.Store(addr, AddressIndexes{ - TxnHashes: txnHashesMap, - UxHashes: uxHashesMap, - }) - } - - if _, ok := txnHashesMap[txnHash]; !ok { - err := fmt.Errorf("HistoryDB.Verify: index of address transaction [%s:%s] does not exist in historydb", - addr, txnHash.Hex()) - return ErrHistoryDBCorrupted{err} - } - - if _, ok := uxHashesMap[in]; !ok { - err := fmt.Errorf("HistoryDB.Verify: index of address uxout [%s:%s] does not exist in historydb", - addr, in.Hex()) - return ErrHistoryDBCorrupted{err} - } - } - - // Checks the transaction outs - uxArray := 
coin.CreateUnspents(b.Head, t) - for _, ux := range uxArray { - uxHash := ux.Hash() - out, err := hd.outputs.get(tx, uxHash) - if err != nil { - return err - } - - if out == nil { - err := fmt.Errorf("HistoryDB.Verify: transaction output %s does not exist in historydb", uxHash.Hex()) - return ErrHistoryDBCorrupted{err} - } - - addr := ux.Body.Address - txnHashesMap := map[cipher.SHA256]struct{}{} - indexes, ok := indexesMap.Load(addr) - if ok { - txnHashesMap = indexes.TxnHashes - } else { - txnHashes, err := hd.addrTxns.get(tx, addr) - if err != nil { - return err - } - for _, hash := range txnHashes { - txnHashesMap[hash] = struct{}{} - } - - uxHashes, err := hd.addrUx.get(tx, addr) - if err != nil { - return err - } - - uxHashesMap := make(map[cipher.SHA256]struct{}, len(uxHashes)) - for _, hash := range uxHashes { - uxHashesMap[hash] = struct{}{} - } - - indexesMap.Store(addr, AddressIndexes{ - TxnHashes: txnHashesMap, - UxHashes: uxHashesMap, - }) - } - - if _, ok := txnHashesMap[txnHash]; !ok { - err := fmt.Errorf("HistoryDB.Verify: index of address transaction [%s:%s] does not exist in historydb", - addr, txnHash.Hex()) - return ErrHistoryDBCorrupted{err} - } - } - } - return nil -} - -// ErrHistoryDBCorrupted is returned when found the historydb is corrupted -type ErrHistoryDBCorrupted struct { - error -} - -// NewErrHistoryDBCorrupted is for user to be able to create ErrHistoryDBCorrupted instance -// outside of the package -func NewErrHistoryDBCorrupted(err error) ErrHistoryDBCorrupted { - return ErrHistoryDBCorrupted{err} -} diff --git a/vendor/github.com/SkycoinProject/skycoin/src/visor/historydb/output.go b/vendor/github.com/SkycoinProject/skycoin/src/visor/historydb/output.go deleted file mode 100644 index efa3238..0000000 --- a/vendor/github.com/SkycoinProject/skycoin/src/visor/historydb/output.go +++ /dev/null @@ -1,102 +0,0 @@ -package historydb - -import ( - "fmt" - - "github.com/SkycoinProject/skycoin/src/cipher" - 
"github.com/SkycoinProject/skycoin/src/coin" - "github.com/SkycoinProject/skycoin/src/visor/dbutil" -) - -//go:generate skyencoder -unexported -struct UxOut - -// UxOutsBkt holds unspent outputs -var UxOutsBkt = []byte("uxouts") - -// UxOut expend coin.UxOut struct -type UxOut struct { - Out coin.UxOut - SpentTxnID cipher.SHA256 // id of tx which spent this output. - SpentBlockSeq uint64 // block seq that spent the output. -} - -// Hash returns outhash -func (o UxOut) Hash() cipher.SHA256 { - return o.Out.Hash() -} - -// ErrUxOutNotExist is returned if an uxout is not found in historydb -type ErrUxOutNotExist struct { - UxID string -} - -// NewErrUxOutNotExist creates ErrUxOutNotExist from a UxID -func NewErrUxOutNotExist(uxID string) error { - return ErrUxOutNotExist{ - UxID: uxID, - } -} - -func (e ErrUxOutNotExist) Error() string { - return fmt.Sprintf("uxout of %s does not exist", e.UxID) -} - -// uxOuts bucket stores outputs, UxOut hash as key and Output as value. -type uxOuts struct{} - -// put sets out value -func (ux *uxOuts) put(tx *dbutil.Tx, out UxOut) error { - hash := out.Hash() - - buf, err := encodeUxOut(&out) - if err != nil { - return err - } - - return dbutil.PutBucketValue(tx, UxOutsBkt, hash[:], buf) -} - -// get gets UxOut of given id -func (ux *uxOuts) get(tx *dbutil.Tx, uxID cipher.SHA256) (*UxOut, error) { - var out UxOut - - v, err := dbutil.GetBucketValueNoCopy(tx, UxOutsBkt, uxID[:]) - if err != nil { - return nil, err - } else if v == nil { - return nil, nil - } - - if err := decodeUxOutExact(v, &out); err != nil { - return nil, err - } - - return &out, nil -} - -// getArray returns uxOuts for a set of uxids, will return error if any of the uxids do not exist -func (ux *uxOuts) getArray(tx *dbutil.Tx, uxIDs []cipher.SHA256) ([]UxOut, error) { - var outs []UxOut - for _, uxID := range uxIDs { - out, err := ux.get(tx, uxID) - if err != nil { - return nil, err - } else if out == nil { - return nil, NewErrUxOutNotExist(uxID.Hex()) - } - - 
outs = append(outs, *out) - } - - return outs, nil -} - -// isEmpty checks if the uxout bucekt is empty -func (ux *uxOuts) isEmpty(tx *dbutil.Tx) (bool, error) { - return dbutil.IsEmpty(tx, UxOutsBkt) -} - -// reset resets the bucket -func (ux *uxOuts) reset(tx *dbutil.Tx) error { - return dbutil.Reset(tx, UxOutsBkt) -} diff --git a/vendor/github.com/SkycoinProject/skycoin/src/visor/historydb/transaction.go b/vendor/github.com/SkycoinProject/skycoin/src/visor/historydb/transaction.go deleted file mode 100644 index 2addd63..0000000 --- a/vendor/github.com/SkycoinProject/skycoin/src/visor/historydb/transaction.go +++ /dev/null @@ -1,107 +0,0 @@ -package historydb - -// transaction.go mainly provides transaction corresponding buckets and apis, -// The transactions bucket, tx hash as key, and tx as value, it's the main bucket that stores the -// transaction value. All other buckets that index different field of transaction will only records the -// transaction hash, and get the tx value from transactions bucket. - -import ( - "errors" - - "github.com/SkycoinProject/skycoin/src/cipher" - "github.com/SkycoinProject/skycoin/src/coin" - "github.com/SkycoinProject/skycoin/src/visor/dbutil" -) - -//go:generate skyencoder -unexported -struct Transaction - -// Transaction contains transaction info and the seq of block which executed this block. -type Transaction struct { - Txn coin.Transaction - BlockSeq uint64 -} - -// Hash return the Txn hash. -func (txn *Transaction) Hash() cipher.SHA256 { - return txn.Txn.Hash() -} - -// TransactionsBkt holds Transactions -var TransactionsBkt = []byte("transactions") - -// Transactions transaction bucket instance. 
-type transactions struct{} - -// put transaction in the db -func (txs *transactions) put(tx *dbutil.Tx, txn *Transaction) error { - hash := txn.Hash() - buf, err := encodeTransaction(txn) - if err != nil { - return err - } - - return dbutil.PutBucketValue(tx, TransactionsBkt, hash[:], buf) -} - -// get gets transaction by transaction hash, return nil on not found -func (txs *transactions) get(tx *dbutil.Tx, hash cipher.SHA256) (*Transaction, error) { - var txn Transaction - - v, err := dbutil.GetBucketValueNoCopy(tx, TransactionsBkt, hash[:]) - if err != nil { - return nil, err - } else if v == nil { - return nil, nil - } - - if err := decodeTransactionExact(v, &txn); err != nil { - return nil, err - } - - return &txn, nil -} - -// getArray returns transactions slice of given hashes -func (txs *transactions) getArray(tx *dbutil.Tx, hashes []cipher.SHA256) ([]Transaction, error) { - txns := make([]Transaction, 0, len(hashes)) - for _, h := range hashes { - txn, err := txs.get(tx, h) - if err != nil { - return nil, err - } - if txn == nil { - return nil, errors.New("Transaction not found") - } - - txns = append(txns, *txn) - } - - return txns, nil -} - -// isEmpty checks if transaction bucket is empty -func (txs *transactions) isEmpty(tx *dbutil.Tx) (bool, error) { - return dbutil.IsEmpty(tx, TransactionsBkt) -} - -// reset resets the bucket -func (txs *transactions) reset(tx *dbutil.Tx) error { - return dbutil.Reset(tx, TransactionsBkt) -} - -// forEach traverses the transactions in db -func (txs *transactions) forEach(tx *dbutil.Tx, f func(cipher.SHA256, *Transaction) error) error { - return dbutil.ForEach(tx, TransactionsBkt, func(k, v []byte) error { - hash, err := cipher.SHA256FromBytes(k) - if err != nil { - return err - } - - var txn Transaction - if err := decodeTransactionExact(v, &txn); err != nil { - return err - } - - return f(hash, &txn) - }) -} diff --git a/vendor/github.com/SkycoinProject/skycoin/src/visor/historydb/transaction_skyencoder.go 
b/vendor/github.com/SkycoinProject/skycoin/src/visor/historydb/transaction_skyencoder.go deleted file mode 100644 index 36f2d59..0000000 --- a/vendor/github.com/SkycoinProject/skycoin/src/visor/historydb/transaction_skyencoder.go +++ /dev/null @@ -1,373 +0,0 @@ -// Code generated by github.com/SkycoinProject/skyencoder. DO NOT EDIT. - -package historydb - -import ( - "errors" - "math" - - "github.com/SkycoinProject/skycoin/src/cipher" - "github.com/SkycoinProject/skycoin/src/cipher/encoder" - "github.com/SkycoinProject/skycoin/src/coin" -) - -// encodeSizeTransaction computes the size of an encoded object of type Transaction -func encodeSizeTransaction(obj *Transaction) uint64 { - i0 := uint64(0) - - // obj.Txn.Length - i0 += 4 - - // obj.Txn.Type - i0++ - - // obj.Txn.InnerHash - i0 += 32 - - // obj.Txn.Sigs - i0 += 4 - { - i1 := uint64(0) - - // x1 - i1 += 65 - - i0 += uint64(len(obj.Txn.Sigs)) * i1 - } - - // obj.Txn.In - i0 += 4 - { - i1 := uint64(0) - - // x1 - i1 += 32 - - i0 += uint64(len(obj.Txn.In)) * i1 - } - - // obj.Txn.Out - i0 += 4 - { - i1 := uint64(0) - - // x1.Address.Version - i1++ - - // x1.Address.Key - i1 += 20 - - // x1.Coins - i1 += 8 - - // x1.Hours - i1 += 8 - - i0 += uint64(len(obj.Txn.Out)) * i1 - } - - // obj.BlockSeq - i0 += 8 - - return i0 -} - -// encodeTransaction encodes an object of type Transaction to a buffer allocated to the exact size -// required to encode the object. -func encodeTransaction(obj *Transaction) ([]byte, error) { - n := encodeSizeTransaction(obj) - buf := make([]byte, n) - - if err := encodeTransactionToBuffer(buf, obj); err != nil { - return nil, err - } - - return buf, nil -} - -// encodeTransactionToBuffer encodes an object of type Transaction to a []byte buffer. -// The buffer must be large enough to encode the object, otherwise an error is returned. 
-func encodeTransactionToBuffer(buf []byte, obj *Transaction) error { - if uint64(len(buf)) < encodeSizeTransaction(obj) { - return encoder.ErrBufferUnderflow - } - - e := &encoder.Encoder{ - Buffer: buf[:], - } - - // obj.Txn.Length - e.Uint32(obj.Txn.Length) - - // obj.Txn.Type - e.Uint8(obj.Txn.Type) - - // obj.Txn.InnerHash - e.CopyBytes(obj.Txn.InnerHash[:]) - - // obj.Txn.Sigs maxlen check - if len(obj.Txn.Sigs) > 65535 { - return encoder.ErrMaxLenExceeded - } - - // obj.Txn.Sigs length check - if uint64(len(obj.Txn.Sigs)) > math.MaxUint32 { - return errors.New("obj.Txn.Sigs length exceeds math.MaxUint32") - } - - // obj.Txn.Sigs length - e.Uint32(uint32(len(obj.Txn.Sigs))) - - // obj.Txn.Sigs - for _, x := range obj.Txn.Sigs { - - // x - e.CopyBytes(x[:]) - - } - - // obj.Txn.In maxlen check - if len(obj.Txn.In) > 65535 { - return encoder.ErrMaxLenExceeded - } - - // obj.Txn.In length check - if uint64(len(obj.Txn.In)) > math.MaxUint32 { - return errors.New("obj.Txn.In length exceeds math.MaxUint32") - } - - // obj.Txn.In length - e.Uint32(uint32(len(obj.Txn.In))) - - // obj.Txn.In - for _, x := range obj.Txn.In { - - // x - e.CopyBytes(x[:]) - - } - - // obj.Txn.Out maxlen check - if len(obj.Txn.Out) > 65535 { - return encoder.ErrMaxLenExceeded - } - - // obj.Txn.Out length check - if uint64(len(obj.Txn.Out)) > math.MaxUint32 { - return errors.New("obj.Txn.Out length exceeds math.MaxUint32") - } - - // obj.Txn.Out length - e.Uint32(uint32(len(obj.Txn.Out))) - - // obj.Txn.Out - for _, x := range obj.Txn.Out { - - // x.Address.Version - e.Uint8(x.Address.Version) - - // x.Address.Key - e.CopyBytes(x.Address.Key[:]) - - // x.Coins - e.Uint64(x.Coins) - - // x.Hours - e.Uint64(x.Hours) - - } - - // obj.BlockSeq - e.Uint64(obj.BlockSeq) - - return nil -} - -// decodeTransaction decodes an object of type Transaction from a buffer. -// Returns the number of bytes used from the buffer to decode the object. 
-// If the buffer not long enough to decode the object, returns encoder.ErrBufferUnderflow. -func decodeTransaction(buf []byte, obj *Transaction) (uint64, error) { - d := &encoder.Decoder{ - Buffer: buf[:], - } - - { - // obj.Txn.Length - i, err := d.Uint32() - if err != nil { - return 0, err - } - obj.Txn.Length = i - } - - { - // obj.Txn.Type - i, err := d.Uint8() - if err != nil { - return 0, err - } - obj.Txn.Type = i - } - - { - // obj.Txn.InnerHash - if len(d.Buffer) < len(obj.Txn.InnerHash) { - return 0, encoder.ErrBufferUnderflow - } - copy(obj.Txn.InnerHash[:], d.Buffer[:len(obj.Txn.InnerHash)]) - d.Buffer = d.Buffer[len(obj.Txn.InnerHash):] - } - - { - // obj.Txn.Sigs - - ul, err := d.Uint32() - if err != nil { - return 0, err - } - - length := int(ul) - if length < 0 || length > len(d.Buffer) { - return 0, encoder.ErrBufferUnderflow - } - - if length > 65535 { - return 0, encoder.ErrMaxLenExceeded - } - - if length != 0 { - obj.Txn.Sigs = make([]cipher.Sig, length) - - for z2 := range obj.Txn.Sigs { - { - // obj.Txn.Sigs[z2] - if len(d.Buffer) < len(obj.Txn.Sigs[z2]) { - return 0, encoder.ErrBufferUnderflow - } - copy(obj.Txn.Sigs[z2][:], d.Buffer[:len(obj.Txn.Sigs[z2])]) - d.Buffer = d.Buffer[len(obj.Txn.Sigs[z2]):] - } - - } - } - } - - { - // obj.Txn.In - - ul, err := d.Uint32() - if err != nil { - return 0, err - } - - length := int(ul) - if length < 0 || length > len(d.Buffer) { - return 0, encoder.ErrBufferUnderflow - } - - if length > 65535 { - return 0, encoder.ErrMaxLenExceeded - } - - if length != 0 { - obj.Txn.In = make([]cipher.SHA256, length) - - for z2 := range obj.Txn.In { - { - // obj.Txn.In[z2] - if len(d.Buffer) < len(obj.Txn.In[z2]) { - return 0, encoder.ErrBufferUnderflow - } - copy(obj.Txn.In[z2][:], d.Buffer[:len(obj.Txn.In[z2])]) - d.Buffer = d.Buffer[len(obj.Txn.In[z2]):] - } - - } - } - } - - { - // obj.Txn.Out - - ul, err := d.Uint32() - if err != nil { - return 0, err - } - - length := int(ul) - if length < 0 || length > 
len(d.Buffer) { - return 0, encoder.ErrBufferUnderflow - } - - if length > 65535 { - return 0, encoder.ErrMaxLenExceeded - } - - if length != 0 { - obj.Txn.Out = make([]coin.TransactionOutput, length) - - for z2 := range obj.Txn.Out { - { - // obj.Txn.Out[z2].Address.Version - i, err := d.Uint8() - if err != nil { - return 0, err - } - obj.Txn.Out[z2].Address.Version = i - } - - { - // obj.Txn.Out[z2].Address.Key - if len(d.Buffer) < len(obj.Txn.Out[z2].Address.Key) { - return 0, encoder.ErrBufferUnderflow - } - copy(obj.Txn.Out[z2].Address.Key[:], d.Buffer[:len(obj.Txn.Out[z2].Address.Key)]) - d.Buffer = d.Buffer[len(obj.Txn.Out[z2].Address.Key):] - } - - { - // obj.Txn.Out[z2].Coins - i, err := d.Uint64() - if err != nil { - return 0, err - } - obj.Txn.Out[z2].Coins = i - } - - { - // obj.Txn.Out[z2].Hours - i, err := d.Uint64() - if err != nil { - return 0, err - } - obj.Txn.Out[z2].Hours = i - } - - } - } - } - - { - // obj.BlockSeq - i, err := d.Uint64() - if err != nil { - return 0, err - } - obj.BlockSeq = i - } - - return uint64(len(buf) - len(d.Buffer)), nil -} - -// decodeTransactionExact decodes an object of type Transaction from a buffer. -// If the buffer not long enough to decode the object, returns encoder.ErrBufferUnderflow. -// If the buffer is longer than required to decode the object, returns encoder.ErrRemainingBytes. -func decodeTransactionExact(buf []byte, obj *Transaction) error { - if n, err := decodeTransaction(buf, obj); err != nil { - return err - } else if n != uint64(len(buf)) { - return encoder.ErrRemainingBytes - } - - return nil -} diff --git a/vendor/github.com/SkycoinProject/skycoin/src/visor/historydb/ux_out_skyencoder.go b/vendor/github.com/SkycoinProject/skycoin/src/visor/historydb/ux_out_skyencoder.go deleted file mode 100644 index eae7006..0000000 --- a/vendor/github.com/SkycoinProject/skycoin/src/visor/historydb/ux_out_skyencoder.go +++ /dev/null @@ -1,198 +0,0 @@ -// Code generated by github.com/SkycoinProject/skyencoder. 
DO NOT EDIT. - -package historydb - -import "github.com/SkycoinProject/skycoin/src/cipher/encoder" - -// encodeSizeUxOut computes the size of an encoded object of type UxOut -func encodeSizeUxOut(obj *UxOut) uint64 { - i0 := uint64(0) - - // obj.Out.Head.Time - i0 += 8 - - // obj.Out.Head.BkSeq - i0 += 8 - - // obj.Out.Body.SrcTransaction - i0 += 32 - - // obj.Out.Body.Address.Version - i0++ - - // obj.Out.Body.Address.Key - i0 += 20 - - // obj.Out.Body.Coins - i0 += 8 - - // obj.Out.Body.Hours - i0 += 8 - - // obj.SpentTxnID - i0 += 32 - - // obj.SpentBlockSeq - i0 += 8 - - return i0 -} - -// encodeUxOut encodes an object of type UxOut to a buffer allocated to the exact size -// required to encode the object. -func encodeUxOut(obj *UxOut) ([]byte, error) { - n := encodeSizeUxOut(obj) - buf := make([]byte, n) - - if err := encodeUxOutToBuffer(buf, obj); err != nil { - return nil, err - } - - return buf, nil -} - -// encodeUxOutToBuffer encodes an object of type UxOut to a []byte buffer. -// The buffer must be large enough to encode the object, otherwise an error is returned. -func encodeUxOutToBuffer(buf []byte, obj *UxOut) error { - if uint64(len(buf)) < encodeSizeUxOut(obj) { - return encoder.ErrBufferUnderflow - } - - e := &encoder.Encoder{ - Buffer: buf[:], - } - - // obj.Out.Head.Time - e.Uint64(obj.Out.Head.Time) - - // obj.Out.Head.BkSeq - e.Uint64(obj.Out.Head.BkSeq) - - // obj.Out.Body.SrcTransaction - e.CopyBytes(obj.Out.Body.SrcTransaction[:]) - - // obj.Out.Body.Address.Version - e.Uint8(obj.Out.Body.Address.Version) - - // obj.Out.Body.Address.Key - e.CopyBytes(obj.Out.Body.Address.Key[:]) - - // obj.Out.Body.Coins - e.Uint64(obj.Out.Body.Coins) - - // obj.Out.Body.Hours - e.Uint64(obj.Out.Body.Hours) - - // obj.SpentTxnID - e.CopyBytes(obj.SpentTxnID[:]) - - // obj.SpentBlockSeq - e.Uint64(obj.SpentBlockSeq) - - return nil -} - -// decodeUxOut decodes an object of type UxOut from a buffer. 
-// Returns the number of bytes used from the buffer to decode the object. -// If the buffer not long enough to decode the object, returns encoder.ErrBufferUnderflow. -func decodeUxOut(buf []byte, obj *UxOut) (uint64, error) { - d := &encoder.Decoder{ - Buffer: buf[:], - } - - { - // obj.Out.Head.Time - i, err := d.Uint64() - if err != nil { - return 0, err - } - obj.Out.Head.Time = i - } - - { - // obj.Out.Head.BkSeq - i, err := d.Uint64() - if err != nil { - return 0, err - } - obj.Out.Head.BkSeq = i - } - - { - // obj.Out.Body.SrcTransaction - if len(d.Buffer) < len(obj.Out.Body.SrcTransaction) { - return 0, encoder.ErrBufferUnderflow - } - copy(obj.Out.Body.SrcTransaction[:], d.Buffer[:len(obj.Out.Body.SrcTransaction)]) - d.Buffer = d.Buffer[len(obj.Out.Body.SrcTransaction):] - } - - { - // obj.Out.Body.Address.Version - i, err := d.Uint8() - if err != nil { - return 0, err - } - obj.Out.Body.Address.Version = i - } - - { - // obj.Out.Body.Address.Key - if len(d.Buffer) < len(obj.Out.Body.Address.Key) { - return 0, encoder.ErrBufferUnderflow - } - copy(obj.Out.Body.Address.Key[:], d.Buffer[:len(obj.Out.Body.Address.Key)]) - d.Buffer = d.Buffer[len(obj.Out.Body.Address.Key):] - } - - { - // obj.Out.Body.Coins - i, err := d.Uint64() - if err != nil { - return 0, err - } - obj.Out.Body.Coins = i - } - - { - // obj.Out.Body.Hours - i, err := d.Uint64() - if err != nil { - return 0, err - } - obj.Out.Body.Hours = i - } - - { - // obj.SpentTxnID - if len(d.Buffer) < len(obj.SpentTxnID) { - return 0, encoder.ErrBufferUnderflow - } - copy(obj.SpentTxnID[:], d.Buffer[:len(obj.SpentTxnID)]) - d.Buffer = d.Buffer[len(obj.SpentTxnID):] - } - - { - // obj.SpentBlockSeq - i, err := d.Uint64() - if err != nil { - return 0, err - } - obj.SpentBlockSeq = i - } - - return uint64(len(buf) - len(d.Buffer)), nil -} - -// decodeUxOutExact decodes an object of type UxOut from a buffer. -// If the buffer not long enough to decode the object, returns encoder.ErrBufferUnderflow. 
-// If the buffer is longer than required to decode the object, returns encoder.ErrRemainingBytes. -func decodeUxOutExact(buf []byte, obj *UxOut) error { - if n, err := decodeUxOut(buf, obj); err != nil { - return err - } else if n != uint64(len(buf)) { - return encoder.ErrRemainingBytes - } - - return nil -} diff --git a/vendor/github.com/SkycoinProject/skycoin/src/visor/historydb/verify.go b/vendor/github.com/SkycoinProject/skycoin/src/visor/historydb/verify.go deleted file mode 100644 index 04f81a5..0000000 --- a/vendor/github.com/SkycoinProject/skycoin/src/visor/historydb/verify.go +++ /dev/null @@ -1,129 +0,0 @@ -package historydb - -import ( - "errors" - "reflect" - - "github.com/SkycoinProject/skycoin/src/cipher" - "github.com/SkycoinProject/skycoin/src/cipher/encoder" - "github.com/SkycoinProject/skycoin/src/visor/dbutil" -) - -var ( - // ErrVerifyStopped is returned when database verification is interrupted - ErrVerifyStopped = errors.New("database verification stopped") -) - -// VerifyDBSkyencoderSafe verifies that the skyencoder generated code has the same result as the encoder -// for all data in the blockchain -func VerifyDBSkyencoderSafe(tx *dbutil.Tx, quit <-chan struct{}) error { - if quit == nil { - quit = make(chan struct{}) - } - - if err := dbutil.ForEach(tx, AddressTxnsBkt, func(_, v []byte) error { - select { - case <-quit: - return ErrVerifyStopped - default: - } - - var b1 hashesWrapper - if err := decodeHashesWrapperExact(v, &b1); err != nil { - return err - } - - var b2 []cipher.SHA256 - if err := encoder.DeserializeRawExact(v, &b2); err != nil { - return err - } - - if !reflect.DeepEqual(b1.Hashes, b2) { - return errors.New("AddressTxnsBkt sha256 hashes mismatch") - } - - return nil - }); err != nil { - return err - } - - if err := dbutil.ForEach(tx, AddressUxBkt, func(_, v []byte) error { - select { - case <-quit: - return ErrVerifyStopped - default: - } - - var b1 hashesWrapper - if err := decodeHashesWrapperExact(v, &b1); err != nil { 
- return err - } - - var b2 []cipher.SHA256 - if err := encoder.DeserializeRawExact(v, &b2); err != nil { - return err - } - - if !reflect.DeepEqual(b1.Hashes, b2) { - return errors.New("AddressUxBkt sha256 hashes mismatch") - } - - return nil - }); err != nil { - return err - } - - if err := dbutil.ForEach(tx, UxOutsBkt, func(_, v []byte) error { - select { - case <-quit: - return ErrVerifyStopped - default: - } - - var b1 UxOut - if err := decodeUxOutExact(v, &b1); err != nil { - return err - } - - var b2 UxOut - if err := encoder.DeserializeRawExact(v, &b2); err != nil { - return err - } - - if !reflect.DeepEqual(b1, b2) { - return errors.New("UxOutsBkt ux out mismatch") - } - - return nil - }); err != nil { - return err - } - - if err := dbutil.ForEach(tx, TransactionsBkt, func(_, v []byte) error { - select { - case <-quit: - return ErrVerifyStopped - default: - } - - var b1 Transaction - if err := decodeTransactionExact(v, &b1); err != nil { - return err - } - - var b2 Transaction - if err := encoder.DeserializeRawExact(v, &b2); err != nil { - return err - } - - if !reflect.DeepEqual(b1, b2) { - return errors.New("TransactionsBkt ux out mismatch") - } - - return nil - }); err != nil { - return err - } - - return nil -} diff --git a/vendor/github.com/SkycoinProject/skycoin/src/visor/interfaces.go b/vendor/github.com/SkycoinProject/skycoin/src/visor/interfaces.go deleted file mode 100644 index 8db4dbd..0000000 --- a/vendor/github.com/SkycoinProject/skycoin/src/visor/interfaces.go +++ /dev/null @@ -1,71 +0,0 @@ -package visor - -import ( - "github.com/SkycoinProject/skycoin/src/cipher" - "github.com/SkycoinProject/skycoin/src/coin" - "github.com/SkycoinProject/skycoin/src/params" - "github.com/SkycoinProject/skycoin/src/visor/blockdb" - "github.com/SkycoinProject/skycoin/src/visor/dbutil" - "github.com/SkycoinProject/skycoin/src/visor/historydb" -) - -//go:generate mockery -name Historyer -case underscore -inpkg -testonly -//go:generate mockery -name Blockchainer 
-case underscore -inpkg -testonly -//go:generate mockery -name UnconfirmedTransactionPooler -case underscore -inpkg -testonly - -// Historyer is the interface that provides methods for accessing history data that are parsed from blockchain. -type Historyer interface { - GetUxOuts(tx *dbutil.Tx, uxids []cipher.SHA256) ([]historydb.UxOut, error) - ParseBlock(tx *dbutil.Tx, b coin.Block) error - GetTransaction(tx *dbutil.Tx, hash cipher.SHA256) (*historydb.Transaction, error) - GetOutputsForAddress(tx *dbutil.Tx, address cipher.Address) ([]historydb.UxOut, error) - GetTransactionsForAddress(tx *dbutil.Tx, address cipher.Address) ([]historydb.Transaction, error) - AddressSeen(tx *dbutil.Tx, address cipher.Address) (bool, error) - NeedsReset(tx *dbutil.Tx) (bool, error) - Erase(tx *dbutil.Tx) error - ParsedBlockSeq(tx *dbutil.Tx) (uint64, bool, error) - ForEachTxn(tx *dbutil.Tx, f func(cipher.SHA256, *historydb.Transaction) error) error -} - -// Blockchainer is the interface that provides methods for accessing the blockchain data -type Blockchainer interface { - GetGenesisBlock(tx *dbutil.Tx) (*coin.SignedBlock, error) - GetBlocks(tx *dbutil.Tx, seqs []uint64) ([]coin.SignedBlock, error) - GetBlocksInRange(tx *dbutil.Tx, start, end uint64) ([]coin.SignedBlock, error) - GetLastBlocks(tx *dbutil.Tx, n uint64) ([]coin.SignedBlock, error) - GetSignedBlockByHash(tx *dbutil.Tx, hash cipher.SHA256) (*coin.SignedBlock, error) - GetSignedBlockBySeq(tx *dbutil.Tx, seq uint64) (*coin.SignedBlock, error) - Unspent() blockdb.UnspentPooler - Len(tx *dbutil.Tx) (uint64, error) - Head(tx *dbutil.Tx) (*coin.SignedBlock, error) - HeadSeq(tx *dbutil.Tx) (uint64, bool, error) - Time(tx *dbutil.Tx) (uint64, error) - NewBlock(tx *dbutil.Tx, txns coin.Transactions, currentTime uint64) (*coin.Block, error) - ExecuteBlock(tx *dbutil.Tx, sb *coin.SignedBlock) error - VerifyBlock(tx *dbutil.Tx, sb *coin.SignedBlock) error - VerifyBlockTxnConstraints(tx *dbutil.Tx, txn coin.Transaction) error - 
VerifySingleTxnHardConstraints(tx *dbutil.Tx, txn coin.Transaction, signed TxnSignedFlag) error - VerifySingleTxnSoftHardConstraints(tx *dbutil.Tx, txn coin.Transaction, distParams params.Distribution, verifyParams params.VerifyTxn, signed TxnSignedFlag) (*coin.SignedBlock, coin.UxArray, error) - TransactionFee(tx *dbutil.Tx, hours uint64) coin.FeeCalculator -} - -// UnconfirmedTransactionPooler is the interface that provides methods for -// accessing the unconfirmed transaction pool -type UnconfirmedTransactionPooler interface { - SetTransactionsAnnounced(tx *dbutil.Tx, hashes map[cipher.SHA256]int64) error - InjectTransaction(tx *dbutil.Tx, bc Blockchainer, t coin.Transaction, distParams params.Distribution, verifyParams params.VerifyTxn) (bool, *ErrTxnViolatesSoftConstraint, error) - AllRawTransactions(tx *dbutil.Tx) (coin.Transactions, error) - RemoveTransactions(tx *dbutil.Tx, txns []cipher.SHA256) error - Refresh(tx *dbutil.Tx, bc Blockchainer, distParams params.Distribution, verifyParams params.VerifyTxn) ([]cipher.SHA256, error) - RemoveInvalid(tx *dbutil.Tx, bc Blockchainer) ([]cipher.SHA256, error) - FilterKnown(tx *dbutil.Tx, txns []cipher.SHA256) ([]cipher.SHA256, error) - GetKnown(tx *dbutil.Tx, txns []cipher.SHA256) (coin.Transactions, error) - RecvOfAddresses(tx *dbutil.Tx, bh coin.BlockHeader, addrs []cipher.Address) (coin.AddressUxOuts, error) - GetIncomingOutputs(tx *dbutil.Tx, bh coin.BlockHeader) (coin.UxArray, error) - Get(tx *dbutil.Tx, hash cipher.SHA256) (*UnconfirmedTransaction, error) - GetFiltered(tx *dbutil.Tx, filter func(tx UnconfirmedTransaction) bool) ([]UnconfirmedTransaction, error) - GetHashes(tx *dbutil.Tx, filter func(tx UnconfirmedTransaction) bool) ([]cipher.SHA256, error) - ForEach(tx *dbutil.Tx, f func(cipher.SHA256, UnconfirmedTransaction) error) error - GetUnspentsOfAddr(tx *dbutil.Tx, addr cipher.Address) (coin.UxArray, error) - Len(tx *dbutil.Tx) (uint64, error) -} diff --git 
a/vendor/github.com/SkycoinProject/skycoin/src/visor/meta.go b/vendor/github.com/SkycoinProject/skycoin/src/visor/meta.go deleted file mode 100644 index 7beea7d..0000000 --- a/vendor/github.com/SkycoinProject/skycoin/src/visor/meta.go +++ /dev/null @@ -1,71 +0,0 @@ -package visor - -import ( - "fmt" - - "github.com/blang/semver" - - "github.com/SkycoinProject/skycoin/src/visor/dbutil" -) - -var ( - // MetaBkt stores data about the application DB - MetaBkt = []byte("db_meta") - - versionKey = []byte("version") -) - -// GetDBVersion returns the saved DB version -func GetDBVersion(db *dbutil.DB) (*semver.Version, error) { - var v *semver.Version - if err := db.View("GetDBVersion", func(tx *dbutil.Tx) error { - var err error - v, err = getDBVersion(tx) - return err - }); err != nil { - return nil, err - } - - return v, nil -} - -func getDBVersion(tx *dbutil.Tx) (*semver.Version, error) { - v, err := dbutil.GetBucketValue(tx, MetaBkt, versionKey) - if err != nil { - switch err.(type) { - case dbutil.ErrBucketNotExist: - return nil, nil - default: - return nil, err - } - } else if v == nil { - return nil, nil - } - - sv, err := semver.Make(string(v)) - if err != nil { - return nil, err - } - - return &sv, nil -} - -// SetDBVersion sets the DB version -func SetDBVersion(db *dbutil.DB, version semver.Version) error { - return db.Update("SetDBVersion", func(tx *dbutil.Tx) error { - if _, err := tx.CreateBucketIfNotExists(MetaBkt); err != nil { - return err - } - - oldVersion, err := getDBVersion(tx) - if err != nil { - return err - } - - if oldVersion != nil && oldVersion.GT(version) { - return fmt.Errorf("SetDBVersion cannot regress version from %v to %v", oldVersion, version) - } - - return dbutil.PutBucketValue(tx, MetaBkt, versionKey, []byte(version.String())) - }) -} diff --git a/vendor/github.com/SkycoinProject/skycoin/src/visor/objects.go b/vendor/github.com/SkycoinProject/skycoin/src/visor/objects.go deleted file mode 100644 index 2bf1341..0000000 --- 
a/vendor/github.com/SkycoinProject/skycoin/src/visor/objects.go +++ /dev/null @@ -1,213 +0,0 @@ -package visor - -import ( - "time" - - "github.com/SkycoinProject/skycoin/src/coin" - "github.com/SkycoinProject/skycoin/src/transaction" -) - -// Transaction wraps around coin.Transaction, tagged with its status. This allows us -// to include unconfirmed txns -type Transaction struct { - Transaction coin.Transaction - Status TransactionStatus - Time uint64 -} - -// TransactionStatus represents the transaction status -type TransactionStatus struct { - Confirmed bool - // If confirmed, how many blocks deep in the chain it is. Will be at least 1 if confirmed. - Height uint64 - // If confirmed, the sequence of the block in which the transaction was executed - BlockSeq uint64 -} - -// NewUnconfirmedTransactionStatus creates unconfirmed transaction status -func NewUnconfirmedTransactionStatus() TransactionStatus { - return TransactionStatus{ - Confirmed: false, - Height: 0, - BlockSeq: 0, - } -} - -// NewConfirmedTransactionStatus creates confirmed transaction status -func NewConfirmedTransactionStatus(height, blockSeq uint64) TransactionStatus { - // Height starts at 1 - // TODO -- height should start at 0? - if height == 0 { - logger.Panic("Invalid confirmed transaction height") - } - return TransactionStatus{ - Confirmed: true, - Height: height, - BlockSeq: blockSeq, - } -} - -// TransactionInput includes the UxOut spent in a transaction and the calculated hours of the output at spending time -type TransactionInput struct { - UxOut coin.UxOut - CalculatedHours uint64 -} - -// NewTransactionInput creates a TransactionInput. 
-// calculateHoursTime is the time against which the CalculatedHours should be computed -func NewTransactionInput(ux coin.UxOut, calculateHoursTime uint64) (TransactionInput, error) { - // The overflow bug causes this to fail for some transactions, allow it to pass - calculatedHours, err := ux.CoinHours(calculateHoursTime) - if err != nil { - logger.Critical().Warningf("Ignoring NewTransactionInput ux.CoinHours failed: %v", err) - calculatedHours = 0 - } - - return TransactionInput{ - UxOut: ux, - CalculatedHours: calculatedHours, - }, nil -} - -// NewTransactionInputs creates []TransactionInput from []coin.UxOut. -// Assumes all coin.UxOuts have their coin hours calculated from the same reference time. -func NewTransactionInputs(uxa []coin.UxOut, calculateHoursTime uint64) ([]TransactionInput, error) { - if len(uxa) == 0 { - return nil, nil - } - - inputs := make([]TransactionInput, len(uxa)) - for i, x := range uxa { - var err error - inputs[i], err = NewTransactionInput(x, calculateHoursTime) - if err != nil { - return nil, err - } - } - - return inputs, nil -} - -// TransactionInputFromUxBalance converts transaction.UxBalance to TransactionInput -func TransactionInputFromUxBalance(x transaction.UxBalance) TransactionInput { - var t TransactionInput - t.CalculatedHours = x.Hours - t.UxOut.Head.BkSeq = x.BkSeq - t.UxOut.Head.Time = x.Time - t.UxOut.Body.Address = x.Address - t.UxOut.Body.Coins = x.Coins - t.UxOut.Body.Hours = x.InitialHours - t.UxOut.Body.SrcTransaction = x.SrcTransaction - - if t.UxOut.Hash() != x.Hash { - logger.Panic("Reconstructed coin.UxOut from transaction.UxBalance hash does not match") - } - - return t -} - -// NewTransactionInputsFromUxBalance converts []transaction.UxBalance to []TransactionInput -func NewTransactionInputsFromUxBalance(uxb []transaction.UxBalance) []TransactionInput { - if len(uxb) == 0 { - return nil - } - - inputs := make([]TransactionInput, len(uxb)) - for i, x := range uxb { - inputs[i] = 
TransactionInputFromUxBalance(x) - } - - return inputs -} - -// BlockchainMetadata encapsulates useful information from the coin.Blockchain -type BlockchainMetadata struct { - // Most recent block - HeadBlock coin.SignedBlock - // Number of unspent outputs in the coin.Blockchain - Unspents uint64 - // Number of known unconfirmed txns - Unconfirmed uint64 -} - -// NewBlockchainMetadata creates blockchain meta data -func NewBlockchainMetadata(head coin.SignedBlock, unconfirmedLen, unspentsLen uint64) (*BlockchainMetadata, error) { - return &BlockchainMetadata{ - HeadBlock: head, - Unspents: unspentsLen, - Unconfirmed: unconfirmedLen, - }, nil -} - -// UnconfirmedTransaction unconfirmed transaction -type UnconfirmedTransaction struct { - Transaction coin.Transaction - // Time the txn was last received - Received int64 - // Time the txn was last checked against the blockchain - Checked int64 - // Last time we announced this txn - Announced int64 - // If this txn is valid - IsValid int8 -} - -// NewUnconfirmedTransaction creates an UnconfirmedTransaction -func NewUnconfirmedTransaction(txn coin.Transaction) UnconfirmedTransaction { - now := time.Now().UTC() - return UnconfirmedTransaction{ - Transaction: txn, - Received: now.UnixNano(), - Checked: now.UnixNano(), - Announced: time.Time{}.UnixNano(), - IsValid: 0, - } -} - -// UnspentOutput includes coin.UxOut and adds CalculatedHours -type UnspentOutput struct { - coin.UxOut - CalculatedHours uint64 -} - -// NewUnspentOutput creates an UnspentOutput -func NewUnspentOutput(uxOut coin.UxOut, calculateHoursTime uint64) (UnspentOutput, error) { - calculatedHours, err := uxOut.CoinHours(calculateHoursTime) - - // Treat overflowing coin hours calculations as a non-error and force hours to 0 - // This affects one bad spent output which had overflowed hours, spent in block 13277. 
- switch err { - case nil: - case coin.ErrAddEarnedCoinHoursAdditionOverflow: - calculatedHours = 0 - default: - return UnspentOutput{}, err - } - - return UnspentOutput{ - UxOut: uxOut, - CalculatedHours: calculatedHours, - }, nil -} - -// NewUnspentOutputs creates []UnspentOutput -func NewUnspentOutputs(uxOuts []coin.UxOut, calculateHoursTime uint64) ([]UnspentOutput, error) { - outs := make([]UnspentOutput, len(uxOuts)) - for i, ux := range uxOuts { - u, err := NewUnspentOutput(ux, calculateHoursTime) - if err != nil { - return nil, err - } - outs[i] = u - } - - return outs, nil -} - -// UnspentOutputsSummary includes current unspent outputs and incoming and outgoing unspent outputs -type UnspentOutputsSummary struct { - HeadBlock *coin.SignedBlock - Confirmed []UnspentOutput - Outgoing []UnspentOutput - Incoming []UnspentOutput -} diff --git a/vendor/github.com/SkycoinProject/skycoin/src/visor/richlist.go b/vendor/github.com/SkycoinProject/skycoin/src/visor/richlist.go deleted file mode 100644 index 28a4164..0000000 --- a/vendor/github.com/SkycoinProject/skycoin/src/visor/richlist.go +++ /dev/null @@ -1,64 +0,0 @@ -package visor - -import ( - "bytes" - "sort" - - "github.com/SkycoinProject/skycoin/src/cipher" -) - -// RichlistBalance holds info an address balance holder -type RichlistBalance struct { - Address cipher.Address - Coins uint64 - Locked bool -} - -// Richlist contains RichlistBalances -type Richlist []RichlistBalance - -// NewRichlist create Richlist via unspent outputs map -func NewRichlist(allAccounts map[cipher.Address]uint64, lockedAddrs map[cipher.Address]struct{}) (Richlist, error) { - richlist := make(Richlist, 0, len(allAccounts)) - - for addr, coins := range allAccounts { - var islocked bool - if _, ok := lockedAddrs[addr]; ok { - islocked = true - } - - richlist = append(richlist, RichlistBalance{ - Address: addr, - Coins: coins, - Locked: islocked, - }) - } - - // Sort order: - // Higher coins - // Locked > unlocked - // Address bytes - 
sort.Slice(richlist, func(i, j int) bool { - if richlist[i].Coins == richlist[j].Coins { - if richlist[i].Locked == richlist[j].Locked { - return bytes.Compare(richlist[i].Address.Bytes(), richlist[j].Address.Bytes()) < 0 - } - return richlist[i].Locked - } - - return richlist[i].Coins > richlist[j].Coins - }) - - return richlist, nil -} - -// FilterAddresses returns the richlist without addresses from the map -func (r Richlist) FilterAddresses(addrs map[cipher.Address]struct{}) Richlist { - var s Richlist - for _, b := range r { - if _, ok := addrs[b.Address]; !ok { - s = append(s, b) - } - } - return s -} diff --git a/vendor/github.com/SkycoinProject/skycoin/src/visor/unconfirmed.go b/vendor/github.com/SkycoinProject/skycoin/src/visor/unconfirmed.go deleted file mode 100644 index 510a1b0..0000000 --- a/vendor/github.com/SkycoinProject/skycoin/src/visor/unconfirmed.go +++ /dev/null @@ -1,561 +0,0 @@ -package visor - -import ( - "errors" - "fmt" - "time" - - "github.com/SkycoinProject/skycoin/src/cipher" - "github.com/SkycoinProject/skycoin/src/coin" - "github.com/SkycoinProject/skycoin/src/params" - "github.com/SkycoinProject/skycoin/src/visor/dbutil" -) - -var ( - // UnconfirmedTxnsBkt holds unconfirmed transactions - UnconfirmedTxnsBkt = []byte("unconfirmed_txns") - // UnconfirmedUnspentsBkt holds unconfirmed unspent outputs - UnconfirmedUnspentsBkt = []byte("unconfirmed_unspents") - - errUpdateObjectDoesNotExist = errors.New("object does not exist in bucket") -) - -//go:generate skyencoder -unexported -struct UnconfirmedTransaction -//go:generate skyencoder -unexported -struct UxArray - -// UxArray wraps coin.UxArray -type UxArray struct { - UxArray coin.UxArray -} - -// unconfirmed transactions bucket -type unconfirmedTxns struct{} - -func (utb *unconfirmedTxns) get(tx *dbutil.Tx, hash cipher.SHA256) (*UnconfirmedTransaction, error) { - var txn UnconfirmedTransaction - - v, err := dbutil.GetBucketValueNoCopy(tx, UnconfirmedTxnsBkt, []byte(hash.Hex())) - if err 
!= nil { - return nil, err - } else if v == nil { - return nil, nil - } - - if err := decodeUnconfirmedTransactionExact(v, &txn); err != nil { - return nil, err - } - - txnHash := txn.Transaction.Hash() - if hash != txnHash { - return nil, fmt.Errorf("DB key %s does not match block hash header %s", hash, txnHash) - } - - return &txn, nil -} - -func (utb *unconfirmedTxns) put(tx *dbutil.Tx, v *UnconfirmedTransaction) error { - h := v.Transaction.Hash() - buf, err := encodeUnconfirmedTransaction(v) - if err != nil { - return err - } - - return dbutil.PutBucketValue(tx, UnconfirmedTxnsBkt, []byte(h.Hex()), buf) -} - -func (utb *unconfirmedTxns) update(tx *dbutil.Tx, hash cipher.SHA256, f func(v *UnconfirmedTransaction) error) error { - txn, err := utb.get(tx, hash) - if err != nil { - return err - } - - if txn == nil { - return errUpdateObjectDoesNotExist - } - - if err := f(txn); err != nil { - return err - } - - return utb.put(tx, txn) -} - -func (utb *unconfirmedTxns) delete(tx *dbutil.Tx, hash cipher.SHA256) error { - return dbutil.Delete(tx, UnconfirmedTxnsBkt, []byte(hash.Hex())) -} - -func (utb *unconfirmedTxns) getAll(tx *dbutil.Tx) ([]UnconfirmedTransaction, error) { - var txns []UnconfirmedTransaction - - if err := dbutil.ForEach(tx, UnconfirmedTxnsBkt, func(_, v []byte) error { - var txn UnconfirmedTransaction - if err := decodeUnconfirmedTransactionExact(v, &txn); err != nil { - return err - } - - txns = append(txns, txn) - return nil - }); err != nil { - return nil, err - } - - return txns, nil -} - -func (utb *unconfirmedTxns) hasKey(tx *dbutil.Tx, hash cipher.SHA256) (bool, error) { - return dbutil.BucketHasKey(tx, UnconfirmedTxnsBkt, []byte(hash.Hex())) -} - -func (utb *unconfirmedTxns) forEach(tx *dbutil.Tx, f func(hash cipher.SHA256, tx UnconfirmedTransaction) error) error { - return dbutil.ForEach(tx, UnconfirmedTxnsBkt, func(k, v []byte) error { - hash, err := cipher.SHA256FromHex(string(k)) - if err != nil { - return err - } - - var txn 
UnconfirmedTransaction - if err := decodeUnconfirmedTransactionExact(v, &txn); err != nil { - return err - } - - return f(hash, txn) - }) -} - -func (utb *unconfirmedTxns) len(tx *dbutil.Tx) (uint64, error) { - return dbutil.Len(tx, UnconfirmedTxnsBkt) -} - -type txnUnspents struct{} - -func (txus *txnUnspents) put(tx *dbutil.Tx, hash cipher.SHA256, uxs coin.UxArray) error { - buf, err := encodeUxArray(&UxArray{ - UxArray: uxs, - }) - if err != nil { - return err - } - - return dbutil.PutBucketValue(tx, UnconfirmedUnspentsBkt, []byte(hash.Hex()), buf) -} - -func (txus *txnUnspents) delete(tx *dbutil.Tx, hash cipher.SHA256) error { - return dbutil.Delete(tx, UnconfirmedUnspentsBkt, []byte(hash.Hex())) -} - -func (txus *txnUnspents) getByAddr(tx *dbutil.Tx, a cipher.Address) (coin.UxArray, error) { - var uxo coin.UxArray - - if err := dbutil.ForEach(tx, UnconfirmedUnspentsBkt, func(_, v []byte) error { - var uxa UxArray - if err := decodeUxArrayExact(v, &uxa); err != nil { - return err - } - - for i := range uxa.UxArray { - if uxa.UxArray[i].Body.Address == a { - uxo = append(uxo, uxa.UxArray[i]) - } - } - - return nil - }); err != nil { - return nil, err - } - - return uxo, nil -} - -// UnconfirmedTransactionPool manages unconfirmed transactions -type UnconfirmedTransactionPool struct { - db *dbutil.DB - txns *unconfirmedTxns - // Predicted unspents, assuming txns are valid. Needed to predict - // our future balance and avoid double spending our own coins - // Maps from Transaction.Hash() to UxArray. 
- unspent *txnUnspents -} - -// NewUnconfirmedTransactionPool creates an UnconfirmedTransactionPool instance -func NewUnconfirmedTransactionPool(db *dbutil.DB) (*UnconfirmedTransactionPool, error) { - if err := db.View("Check unconfirmed txn pool size", func(tx *dbutil.Tx) error { - n, err := dbutil.Len(tx, UnconfirmedTxnsBkt) - if err != nil { - return err - } - - logger.Infof("Unconfirmed transaction pool size: %d", n) - return nil - }); err != nil { - return nil, err - } - - return &UnconfirmedTransactionPool{ - db: db, - txns: &unconfirmedTxns{}, - unspent: &txnUnspents{}, - }, nil -} - -// SetTransactionsAnnounced updates announced time of specific tx -func (utp *UnconfirmedTransactionPool) SetTransactionsAnnounced(tx *dbutil.Tx, hashes map[cipher.SHA256]int64) error { - var txns []*UnconfirmedTransaction - for h, t := range hashes { - txn, err := utp.txns.get(tx, h) - if err != nil { - return err - } - - if txn == nil { - logger.Warningf("UnconfirmedTransactionPool.SetTransactionsAnnounced: UnconfirmedTransaction %s not found in DB", h.Hex()) - continue - } - - if t > txn.Announced { - txn.Announced = t - txns = append(txns, txn) - } - } - - for _, txn := range txns { - if err := utp.txns.put(tx, txn); err != nil { - return err - } - } - - return nil -} - -// InjectTransaction adds a coin.Transaction to the pool, or updates an existing one's timestamps -// Returns an error if txn is invalid, and whether the transaction already -// existed in the pool. -// If the transaction violates hard constraints, it is rejected. -// Soft constraints violations mark a txn as invalid, but the txn is inserted. The soft violation is returned. 
-func (utp *UnconfirmedTransactionPool) InjectTransaction(tx *dbutil.Tx, bc Blockchainer, txn coin.Transaction, distParams params.Distribution, verifyParams params.VerifyTxn) (bool, *ErrTxnViolatesSoftConstraint, error) { - var isValid int8 = 1 - var softErr *ErrTxnViolatesSoftConstraint - if _, _, err := bc.VerifySingleTxnSoftHardConstraints(tx, txn, distParams, verifyParams, TxnSigned); err != nil { - logger.Warningf("bc.VerifySingleTxnSoftHardConstraints failed for txn %s: %v", txn.Hash().Hex(), err) - switch e := err.(type) { - case ErrTxnViolatesSoftConstraint: - softErr = &e - isValid = 0 - case ErrTxnViolatesHardConstraint: - return false, nil, err - default: - return false, nil, err - } - } - - hash := txn.Hash() - known, err := utp.txns.hasKey(tx, hash) - if err != nil { - logger.Errorf("InjectTransaction check txn exists failed: %v", err) - return false, nil, err - } - - // Update if we already have this txn - if known { - if err := utp.txns.update(tx, hash, func(utxn *UnconfirmedTransaction) error { - now := time.Now().UTC().UnixNano() - utxn.Received = now - utxn.Checked = now - utxn.IsValid = isValid - return nil - }); err != nil { - logger.Errorf("InjectTransaction update known txn failed: %v", err) - return false, nil, err - } - - return true, softErr, nil - } - - utx := NewUnconfirmedTransaction(txn) - utx.IsValid = isValid - - // add txn to index - if err := utp.txns.put(tx, &utx); err != nil { - logger.Errorf("InjectTransaction put new unconfirmed txn failed: %v", err) - return false, nil, err - } - - head, err := bc.Head(tx) - if err != nil { - logger.Errorf("InjectTransaction bc.Head() failed: %v", err) - return false, nil, err - } - - // update unconfirmed unspent - createdUnspents := coin.CreateUnspents(head.Head, txn) - if err := utp.unspent.put(tx, hash, createdUnspents); err != nil { - logger.Errorf("InjectTransaction put new unspent outputs: %v", err) - return false, nil, err - } - - return false, softErr, nil -} - -// AllRawTransactions 
returns underlying coin.Transactions -func (utp *UnconfirmedTransactionPool) AllRawTransactions(tx *dbutil.Tx) (coin.Transactions, error) { - utxns, err := utp.txns.getAll(tx) - if err != nil { - return nil, err - } - - txns := make(coin.Transactions, len(utxns)) - for i := range utxns { - txns[i] = utxns[i].Transaction - } - return txns, nil -} - -// Remove a single txn by hash -func (utp *UnconfirmedTransactionPool) removeTransaction(tx *dbutil.Tx, txHash cipher.SHA256) error { - if err := utp.txns.delete(tx, txHash); err != nil { - return err - } - - return utp.unspent.delete(tx, txHash) -} - -// RemoveTransactions remove transactions with dbutil.Tx -func (utp *UnconfirmedTransactionPool) RemoveTransactions(tx *dbutil.Tx, txHashes []cipher.SHA256) error { - for i := range txHashes { - if err := utp.removeTransaction(tx, txHashes[i]); err != nil { - return err - } - } - - return nil -} - -// Refresh checks all unconfirmed txns against the blockchain. -// If the transaction becomes invalid it is marked invalid. -// If the transaction becomes valid it is marked valid and is returned to the caller. 
-func (utp *UnconfirmedTransactionPool) Refresh(tx *dbutil.Tx, bc Blockchainer, distParams params.Distribution, verifyParams params.VerifyTxn) ([]cipher.SHA256, error) { - utxns, err := utp.txns.getAll(tx) - if err != nil { - return nil, err - } - - now := time.Now().UTC() - var nowValid []cipher.SHA256 - - for _, utxn := range utxns { - utxn.Checked = now.UnixNano() - - _, _, err := bc.VerifySingleTxnSoftHardConstraints(tx, utxn.Transaction, distParams, verifyParams, TxnSigned) - - switch err.(type) { - case ErrTxnViolatesSoftConstraint, ErrTxnViolatesHardConstraint: - utxn.IsValid = 0 - case nil: - if utxn.IsValid == 0 { - nowValid = append(nowValid, utxn.Transaction.Hash()) - } - utxn.IsValid = 1 - default: - return nil, err - } - - if err := utp.txns.put(tx, &utxn); err != nil { - return nil, err - } - } - - return nowValid, nil -} - -// RemoveInvalid checks all unconfirmed txns against the blockchain. -// If a transaction violates hard constraints it is removed from the pool. -// The transactions that were removed are returned. 
-func (utp *UnconfirmedTransactionPool) RemoveInvalid(tx *dbutil.Tx, bc Blockchainer) ([]cipher.SHA256, error) { - var removeUtxns []cipher.SHA256 - - utxns, err := utp.txns.getAll(tx) - if err != nil { - return nil, err - } - - for _, utxn := range utxns { - err := bc.VerifySingleTxnHardConstraints(tx, utxn.Transaction, TxnSigned) - if err != nil { - switch err.(type) { - case ErrTxnViolatesHardConstraint: - removeUtxns = append(removeUtxns, utxn.Transaction.Hash()) - default: - return nil, err - } - } - } - - if err := utp.RemoveTransactions(tx, removeUtxns); err != nil { - return nil, err - } - - return removeUtxns, nil -} - -// FilterKnown returns txn hashes with known ones removed -func (utp *UnconfirmedTransactionPool) FilterKnown(tx *dbutil.Tx, txns []cipher.SHA256) ([]cipher.SHA256, error) { - var unknown []cipher.SHA256 - - for _, h := range txns { - if hasKey, err := utp.txns.hasKey(tx, h); err != nil { - return nil, err - } else if !hasKey { - unknown = append(unknown, h) - } - } - - return unknown, nil -} - -// GetKnown returns all known transactions from the pool, given hashes to select -func (utp *UnconfirmedTransactionPool) GetKnown(tx *dbutil.Tx, txns []cipher.SHA256) (coin.Transactions, error) { - var known coin.Transactions - - for _, h := range txns { - if tx, err := utp.txns.get(tx, h); err != nil { - return nil, err - } else if tx != nil { - known = append(known, tx.Transaction) - } - } - - return known, nil -} - -// RecvOfAddresses returns unconfirmed receiving uxouts of addresses -func (utp *UnconfirmedTransactionPool) RecvOfAddresses(tx *dbutil.Tx, bh coin.BlockHeader, addrs []cipher.Address) (coin.AddressUxOuts, error) { - addrm := make(map[cipher.Address]struct{}, len(addrs)) - for _, addr := range addrs { - addrm[addr] = struct{}{} - } - - auxs := make(coin.AddressUxOuts, len(addrs)) - if err := utp.txns.forEach(tx, func(_ cipher.SHA256, txn UnconfirmedTransaction) error { - for i, o := range txn.Transaction.Out { - if _, ok := 
addrm[o.Address]; ok { - uxout, err := coin.CreateUnspent(bh, txn.Transaction, i) - if err != nil { - return err - } - - auxs[o.Address] = append(auxs[o.Address], uxout) - } - } - return nil - }); err != nil { - return nil, err - } - - return auxs, nil -} - -// txnOutputsForAddrs returns unspent outputs assigned to addresses in addrs, created by a set of transactions -func txnOutputsForAddrs(bh coin.BlockHeader, addrs []cipher.Address, txns []coin.Transaction) (coin.AddressUxOuts, error) { - if len(txns) == 0 || len(addrs) == 0 { - return nil, nil - } - - addrm := make(map[cipher.Address]struct{}, len(addrs)) - for _, addr := range addrs { - addrm[addr] = struct{}{} - } - - auxs := make(coin.AddressUxOuts, len(addrs)) - - for _, txn := range txns { - for i, o := range txn.Out { - if _, ok := addrm[o.Address]; ok { - uxout, err := coin.CreateUnspent(bh, txn, i) - if err != nil { - return nil, err - } - - auxs[o.Address] = append(auxs[o.Address], uxout) - } - } - } - - return auxs, nil -} - -// GetIncomingOutputs returns all predicted incoming outputs. -func (utp *UnconfirmedTransactionPool) GetIncomingOutputs(tx *dbutil.Tx, bh coin.BlockHeader) (coin.UxArray, error) { - var outs coin.UxArray - - if err := utp.txns.forEach(tx, func(_ cipher.SHA256, txn UnconfirmedTransaction) error { - outs = append(outs, coin.CreateUnspents(bh, txn.Transaction)...) - return nil - }); err != nil { - return nil, err - } - - return outs, nil -} - -// Get returns the unconfirmed transaction of given tx hash. 
-func (utp *UnconfirmedTransactionPool) Get(tx *dbutil.Tx, hash cipher.SHA256) (*UnconfirmedTransaction, error) { - return utp.txns.get(tx, hash) -} - -// GetFiltered returns all transactions that can pass the filter -func (utp *UnconfirmedTransactionPool) GetFiltered(tx *dbutil.Tx, filter func(UnconfirmedTransaction) bool) ([]UnconfirmedTransaction, error) { - var txns []UnconfirmedTransaction - - if err := utp.txns.forEach(tx, func(_ cipher.SHA256, txn UnconfirmedTransaction) error { - if filter(txn) { - txns = append(txns, txn) - } - return nil - }); err != nil { - logger.Errorf("GetFiltered error: %v", err) - return nil, err - } - - return txns, nil -} - -// GetHashes returns transaction hashes that can pass the filter -func (utp *UnconfirmedTransactionPool) GetHashes(tx *dbutil.Tx, filter func(UnconfirmedTransaction) bool) ([]cipher.SHA256, error) { - var hashes []cipher.SHA256 - - if err := utp.txns.forEach(tx, func(hash cipher.SHA256, txn UnconfirmedTransaction) error { - if filter(txn) { - hashes = append(hashes, hash) - } - return nil - }); err != nil { - logger.Errorf("GetHashes error: %v", err) - return nil, err - } - - return hashes, nil -} - -// ForEach iterate the pool with given callback function -func (utp *UnconfirmedTransactionPool) ForEach(tx *dbutil.Tx, f func(cipher.SHA256, UnconfirmedTransaction) error) error { - return utp.txns.forEach(tx, f) -} - -// GetUnspentsOfAddr returns unspent outputs of given address in unspent tx pool -func (utp *UnconfirmedTransactionPool) GetUnspentsOfAddr(tx *dbutil.Tx, addr cipher.Address) (coin.UxArray, error) { - return utp.unspent.getByAddr(tx, addr) -} - -// IsValid can be used as filter function -func IsValid(tx UnconfirmedTransaction) bool { - return tx.IsValid == 1 -} - -// All use as return all filter -func All(tx UnconfirmedTransaction) bool { - return true -} - -// Len returns the number of unconfirmed transactions -func (utp *UnconfirmedTransactionPool) Len(tx *dbutil.Tx) (uint64, error) { - return 
utp.txns.len(tx) -} diff --git a/vendor/github.com/SkycoinProject/skycoin/src/visor/unconfirmed_transaction_skyencoder.go b/vendor/github.com/SkycoinProject/skycoin/src/visor/unconfirmed_transaction_skyencoder.go deleted file mode 100644 index 37365a0..0000000 --- a/vendor/github.com/SkycoinProject/skycoin/src/visor/unconfirmed_transaction_skyencoder.go +++ /dev/null @@ -1,418 +0,0 @@ -// Code generated by github.com/SkycoinProject/skyencoder. DO NOT EDIT. - -package visor - -import ( - "errors" - "math" - - "github.com/SkycoinProject/skycoin/src/cipher" - "github.com/SkycoinProject/skycoin/src/cipher/encoder" - "github.com/SkycoinProject/skycoin/src/coin" -) - -// encodeSizeUnconfirmedTransaction computes the size of an encoded object of type UnconfirmedTransaction -func encodeSizeUnconfirmedTransaction(obj *UnconfirmedTransaction) uint64 { - i0 := uint64(0) - - // obj.Transaction.Length - i0 += 4 - - // obj.Transaction.Type - i0++ - - // obj.Transaction.InnerHash - i0 += 32 - - // obj.Transaction.Sigs - i0 += 4 - { - i1 := uint64(0) - - // x1 - i1 += 65 - - i0 += uint64(len(obj.Transaction.Sigs)) * i1 - } - - // obj.Transaction.In - i0 += 4 - { - i1 := uint64(0) - - // x1 - i1 += 32 - - i0 += uint64(len(obj.Transaction.In)) * i1 - } - - // obj.Transaction.Out - i0 += 4 - { - i1 := uint64(0) - - // x1.Address.Version - i1++ - - // x1.Address.Key - i1 += 20 - - // x1.Coins - i1 += 8 - - // x1.Hours - i1 += 8 - - i0 += uint64(len(obj.Transaction.Out)) * i1 - } - - // obj.Received - i0 += 8 - - // obj.Checked - i0 += 8 - - // obj.Announced - i0 += 8 - - // obj.IsValid - i0++ - - return i0 -} - -// encodeUnconfirmedTransaction encodes an object of type UnconfirmedTransaction to a buffer allocated to the exact size -// required to encode the object. 
-func encodeUnconfirmedTransaction(obj *UnconfirmedTransaction) ([]byte, error) { - n := encodeSizeUnconfirmedTransaction(obj) - buf := make([]byte, n) - - if err := encodeUnconfirmedTransactionToBuffer(buf, obj); err != nil { - return nil, err - } - - return buf, nil -} - -// encodeUnconfirmedTransactionToBuffer encodes an object of type UnconfirmedTransaction to a []byte buffer. -// The buffer must be large enough to encode the object, otherwise an error is returned. -func encodeUnconfirmedTransactionToBuffer(buf []byte, obj *UnconfirmedTransaction) error { - if uint64(len(buf)) < encodeSizeUnconfirmedTransaction(obj) { - return encoder.ErrBufferUnderflow - } - - e := &encoder.Encoder{ - Buffer: buf[:], - } - - // obj.Transaction.Length - e.Uint32(obj.Transaction.Length) - - // obj.Transaction.Type - e.Uint8(obj.Transaction.Type) - - // obj.Transaction.InnerHash - e.CopyBytes(obj.Transaction.InnerHash[:]) - - // obj.Transaction.Sigs maxlen check - if len(obj.Transaction.Sigs) > 65535 { - return encoder.ErrMaxLenExceeded - } - - // obj.Transaction.Sigs length check - if uint64(len(obj.Transaction.Sigs)) > math.MaxUint32 { - return errors.New("obj.Transaction.Sigs length exceeds math.MaxUint32") - } - - // obj.Transaction.Sigs length - e.Uint32(uint32(len(obj.Transaction.Sigs))) - - // obj.Transaction.Sigs - for _, x := range obj.Transaction.Sigs { - - // x - e.CopyBytes(x[:]) - - } - - // obj.Transaction.In maxlen check - if len(obj.Transaction.In) > 65535 { - return encoder.ErrMaxLenExceeded - } - - // obj.Transaction.In length check - if uint64(len(obj.Transaction.In)) > math.MaxUint32 { - return errors.New("obj.Transaction.In length exceeds math.MaxUint32") - } - - // obj.Transaction.In length - e.Uint32(uint32(len(obj.Transaction.In))) - - // obj.Transaction.In - for _, x := range obj.Transaction.In { - - // x - e.CopyBytes(x[:]) - - } - - // obj.Transaction.Out maxlen check - if len(obj.Transaction.Out) > 65535 { - return encoder.ErrMaxLenExceeded - } - - // 
obj.Transaction.Out length check - if uint64(len(obj.Transaction.Out)) > math.MaxUint32 { - return errors.New("obj.Transaction.Out length exceeds math.MaxUint32") - } - - // obj.Transaction.Out length - e.Uint32(uint32(len(obj.Transaction.Out))) - - // obj.Transaction.Out - for _, x := range obj.Transaction.Out { - - // x.Address.Version - e.Uint8(x.Address.Version) - - // x.Address.Key - e.CopyBytes(x.Address.Key[:]) - - // x.Coins - e.Uint64(x.Coins) - - // x.Hours - e.Uint64(x.Hours) - - } - - // obj.Received - e.Int64(obj.Received) - - // obj.Checked - e.Int64(obj.Checked) - - // obj.Announced - e.Int64(obj.Announced) - - // obj.IsValid - e.Int8(obj.IsValid) - - return nil -} - -// decodeUnconfirmedTransaction decodes an object of type UnconfirmedTransaction from a buffer. -// Returns the number of bytes used from the buffer to decode the object. -// If the buffer not long enough to decode the object, returns encoder.ErrBufferUnderflow. -func decodeUnconfirmedTransaction(buf []byte, obj *UnconfirmedTransaction) (uint64, error) { - d := &encoder.Decoder{ - Buffer: buf[:], - } - - { - // obj.Transaction.Length - i, err := d.Uint32() - if err != nil { - return 0, err - } - obj.Transaction.Length = i - } - - { - // obj.Transaction.Type - i, err := d.Uint8() - if err != nil { - return 0, err - } - obj.Transaction.Type = i - } - - { - // obj.Transaction.InnerHash - if len(d.Buffer) < len(obj.Transaction.InnerHash) { - return 0, encoder.ErrBufferUnderflow - } - copy(obj.Transaction.InnerHash[:], d.Buffer[:len(obj.Transaction.InnerHash)]) - d.Buffer = d.Buffer[len(obj.Transaction.InnerHash):] - } - - { - // obj.Transaction.Sigs - - ul, err := d.Uint32() - if err != nil { - return 0, err - } - - length := int(ul) - if length < 0 || length > len(d.Buffer) { - return 0, encoder.ErrBufferUnderflow - } - - if length > 65535 { - return 0, encoder.ErrMaxLenExceeded - } - - if length != 0 { - obj.Transaction.Sigs = make([]cipher.Sig, length) - - for z2 := range 
obj.Transaction.Sigs { - { - // obj.Transaction.Sigs[z2] - if len(d.Buffer) < len(obj.Transaction.Sigs[z2]) { - return 0, encoder.ErrBufferUnderflow - } - copy(obj.Transaction.Sigs[z2][:], d.Buffer[:len(obj.Transaction.Sigs[z2])]) - d.Buffer = d.Buffer[len(obj.Transaction.Sigs[z2]):] - } - - } - } - } - - { - // obj.Transaction.In - - ul, err := d.Uint32() - if err != nil { - return 0, err - } - - length := int(ul) - if length < 0 || length > len(d.Buffer) { - return 0, encoder.ErrBufferUnderflow - } - - if length > 65535 { - return 0, encoder.ErrMaxLenExceeded - } - - if length != 0 { - obj.Transaction.In = make([]cipher.SHA256, length) - - for z2 := range obj.Transaction.In { - { - // obj.Transaction.In[z2] - if len(d.Buffer) < len(obj.Transaction.In[z2]) { - return 0, encoder.ErrBufferUnderflow - } - copy(obj.Transaction.In[z2][:], d.Buffer[:len(obj.Transaction.In[z2])]) - d.Buffer = d.Buffer[len(obj.Transaction.In[z2]):] - } - - } - } - } - - { - // obj.Transaction.Out - - ul, err := d.Uint32() - if err != nil { - return 0, err - } - - length := int(ul) - if length < 0 || length > len(d.Buffer) { - return 0, encoder.ErrBufferUnderflow - } - - if length > 65535 { - return 0, encoder.ErrMaxLenExceeded - } - - if length != 0 { - obj.Transaction.Out = make([]coin.TransactionOutput, length) - - for z2 := range obj.Transaction.Out { - { - // obj.Transaction.Out[z2].Address.Version - i, err := d.Uint8() - if err != nil { - return 0, err - } - obj.Transaction.Out[z2].Address.Version = i - } - - { - // obj.Transaction.Out[z2].Address.Key - if len(d.Buffer) < len(obj.Transaction.Out[z2].Address.Key) { - return 0, encoder.ErrBufferUnderflow - } - copy(obj.Transaction.Out[z2].Address.Key[:], d.Buffer[:len(obj.Transaction.Out[z2].Address.Key)]) - d.Buffer = d.Buffer[len(obj.Transaction.Out[z2].Address.Key):] - } - - { - // obj.Transaction.Out[z2].Coins - i, err := d.Uint64() - if err != nil { - return 0, err - } - obj.Transaction.Out[z2].Coins = i - } - - { - // 
obj.Transaction.Out[z2].Hours - i, err := d.Uint64() - if err != nil { - return 0, err - } - obj.Transaction.Out[z2].Hours = i - } - - } - } - } - - { - // obj.Received - i, err := d.Int64() - if err != nil { - return 0, err - } - obj.Received = i - } - - { - // obj.Checked - i, err := d.Int64() - if err != nil { - return 0, err - } - obj.Checked = i - } - - { - // obj.Announced - i, err := d.Int64() - if err != nil { - return 0, err - } - obj.Announced = i - } - - { - // obj.IsValid - i, err := d.Int8() - if err != nil { - return 0, err - } - obj.IsValid = i - } - - return uint64(len(buf) - len(d.Buffer)), nil -} - -// decodeUnconfirmedTransactionExact decodes an object of type UnconfirmedTransaction from a buffer. -// If the buffer not long enough to decode the object, returns encoder.ErrBufferUnderflow. -// If the buffer is longer than required to decode the object, returns encoder.ErrRemainingBytes. -func decodeUnconfirmedTransactionExact(buf []byte, obj *UnconfirmedTransaction) error { - if n, err := decodeUnconfirmedTransaction(buf, obj); err != nil { - return err - } else if n != uint64(len(buf)) { - return encoder.ErrRemainingBytes - } - - return nil -} diff --git a/vendor/github.com/SkycoinProject/skycoin/src/visor/ux_array_skyencoder.go b/vendor/github.com/SkycoinProject/skycoin/src/visor/ux_array_skyencoder.go deleted file mode 100644 index 8461105..0000000 --- a/vendor/github.com/SkycoinProject/skycoin/src/visor/ux_array_skyencoder.go +++ /dev/null @@ -1,216 +0,0 @@ -// Code generated by github.com/SkycoinProject/skyencoder. DO NOT EDIT. 
- -package visor - -import ( - "errors" - "math" - - "github.com/SkycoinProject/skycoin/src/cipher/encoder" - "github.com/SkycoinProject/skycoin/src/coin" -) - -// encodeSizeUxArray computes the size of an encoded object of type UxArray -func encodeSizeUxArray(obj *UxArray) uint64 { - i0 := uint64(0) - - // obj.UxArray - i0 += 4 - { - i1 := uint64(0) - - // x1.Head.Time - i1 += 8 - - // x1.Head.BkSeq - i1 += 8 - - // x1.Body.SrcTransaction - i1 += 32 - - // x1.Body.Address.Version - i1++ - - // x1.Body.Address.Key - i1 += 20 - - // x1.Body.Coins - i1 += 8 - - // x1.Body.Hours - i1 += 8 - - i0 += uint64(len(obj.UxArray)) * i1 - } - - return i0 -} - -// encodeUxArray encodes an object of type UxArray to a buffer allocated to the exact size -// required to encode the object. -func encodeUxArray(obj *UxArray) ([]byte, error) { - n := encodeSizeUxArray(obj) - buf := make([]byte, n) - - if err := encodeUxArrayToBuffer(buf, obj); err != nil { - return nil, err - } - - return buf, nil -} - -// encodeUxArrayToBuffer encodes an object of type UxArray to a []byte buffer. -// The buffer must be large enough to encode the object, otherwise an error is returned. 
-func encodeUxArrayToBuffer(buf []byte, obj *UxArray) error { - if uint64(len(buf)) < encodeSizeUxArray(obj) { - return encoder.ErrBufferUnderflow - } - - e := &encoder.Encoder{ - Buffer: buf[:], - } - - // obj.UxArray length check - if uint64(len(obj.UxArray)) > math.MaxUint32 { - return errors.New("obj.UxArray length exceeds math.MaxUint32") - } - - // obj.UxArray length - e.Uint32(uint32(len(obj.UxArray))) - - // obj.UxArray - for _, x := range obj.UxArray { - - // x.Head.Time - e.Uint64(x.Head.Time) - - // x.Head.BkSeq - e.Uint64(x.Head.BkSeq) - - // x.Body.SrcTransaction - e.CopyBytes(x.Body.SrcTransaction[:]) - - // x.Body.Address.Version - e.Uint8(x.Body.Address.Version) - - // x.Body.Address.Key - e.CopyBytes(x.Body.Address.Key[:]) - - // x.Body.Coins - e.Uint64(x.Body.Coins) - - // x.Body.Hours - e.Uint64(x.Body.Hours) - - } - - return nil -} - -// decodeUxArray decodes an object of type UxArray from a buffer. -// Returns the number of bytes used from the buffer to decode the object. -// If the buffer not long enough to decode the object, returns encoder.ErrBufferUnderflow. 
-func decodeUxArray(buf []byte, obj *UxArray) (uint64, error) { - d := &encoder.Decoder{ - Buffer: buf[:], - } - - { - // obj.UxArray - - ul, err := d.Uint32() - if err != nil { - return 0, err - } - - length := int(ul) - if length < 0 || length > len(d.Buffer) { - return 0, encoder.ErrBufferUnderflow - } - - if length != 0 { - obj.UxArray = make([]coin.UxOut, length) - - for z1 := range obj.UxArray { - { - // obj.UxArray[z1].Head.Time - i, err := d.Uint64() - if err != nil { - return 0, err - } - obj.UxArray[z1].Head.Time = i - } - - { - // obj.UxArray[z1].Head.BkSeq - i, err := d.Uint64() - if err != nil { - return 0, err - } - obj.UxArray[z1].Head.BkSeq = i - } - - { - // obj.UxArray[z1].Body.SrcTransaction - if len(d.Buffer) < len(obj.UxArray[z1].Body.SrcTransaction) { - return 0, encoder.ErrBufferUnderflow - } - copy(obj.UxArray[z1].Body.SrcTransaction[:], d.Buffer[:len(obj.UxArray[z1].Body.SrcTransaction)]) - d.Buffer = d.Buffer[len(obj.UxArray[z1].Body.SrcTransaction):] - } - - { - // obj.UxArray[z1].Body.Address.Version - i, err := d.Uint8() - if err != nil { - return 0, err - } - obj.UxArray[z1].Body.Address.Version = i - } - - { - // obj.UxArray[z1].Body.Address.Key - if len(d.Buffer) < len(obj.UxArray[z1].Body.Address.Key) { - return 0, encoder.ErrBufferUnderflow - } - copy(obj.UxArray[z1].Body.Address.Key[:], d.Buffer[:len(obj.UxArray[z1].Body.Address.Key)]) - d.Buffer = d.Buffer[len(obj.UxArray[z1].Body.Address.Key):] - } - - { - // obj.UxArray[z1].Body.Coins - i, err := d.Uint64() - if err != nil { - return 0, err - } - obj.UxArray[z1].Body.Coins = i - } - - { - // obj.UxArray[z1].Body.Hours - i, err := d.Uint64() - if err != nil { - return 0, err - } - obj.UxArray[z1].Body.Hours = i - } - - } - } - } - - return uint64(len(buf) - len(d.Buffer)), nil -} - -// decodeUxArrayExact decodes an object of type UxArray from a buffer. -// If the buffer not long enough to decode the object, returns encoder.ErrBufferUnderflow. 
-// If the buffer is longer than required to decode the object, returns encoder.ErrRemainingBytes. -func decodeUxArrayExact(buf []byte, obj *UxArray) error { - if n, err := decodeUxArray(buf, obj); err != nil { - return err - } else if n != uint64(len(buf)) { - return encoder.ErrRemainingBytes - } - - return nil -} diff --git a/vendor/github.com/SkycoinProject/skycoin/src/visor/verify.go b/vendor/github.com/SkycoinProject/skycoin/src/visor/verify.go deleted file mode 100644 index f4c333f..0000000 --- a/vendor/github.com/SkycoinProject/skycoin/src/visor/verify.go +++ /dev/null @@ -1,336 +0,0 @@ -package visor - -import ( - "errors" - "fmt" - - "github.com/SkycoinProject/skycoin/src/coin" - "github.com/SkycoinProject/skycoin/src/params" - "github.com/SkycoinProject/skycoin/src/util/fee" -) - -/* - -verify.go: Methods for handling transaction verification - -There are two levels of transaction constraint: HARD and SOFT -There are two situations in which transactions are verified: - * When included in a block - * When not in a block - -For transactions in a block, use VerifyBlockTxnConstraints. -For transactions outside of a block, use VerifySingleTxnHardConstraints and VerifySingleTxnSoftConstraints. - -VerifyBlockTxnConstraints only checks hard constraints. Soft constraints do not apply for transactions inside of a block. - -Soft and hard constraints have special handling for single transactions. - -When the transaction is received over the network, a transaction is not injected to the pool if it violates the HARD constraints. -If it violates soft constraints, it is still injected to the pool (TODO: with expiration) but is not rebroadcast to peers. -If it does not violate any constraints it is injected and rebroadcast to peers. - -When the transaction is created by the user (with create_rawtx or /spend), SOFT and HARD constraints apply, to prevent -the user from injecting a transaction to their local pool that cannot be confirmed. 
- -When creating a new block from transactions, SOFT and HARD constraints apply. - -Transactions in the unconfirmed pool are periodically checked for validity. (TODO: audit/implement this feature) -The transaction pool state transfer phases are as follows: - valid -> hard_invalid: remove - valid -> soft_invalid: mark as invalid - soft_invalid -> valid: mark as valid, broadcast - soft_invalid -> hard_invalid: remove - soft_invalid -> expired: remove - -HARD constraints can NEVER be violated. These include: - - Malformed transaction - - Double spends - - NOTE: Double spend verification must be done against the unspent output set, - the methods here do not operate on the unspent output set. - They accept a `uxIn coin.UxArray` argument, which are the unspents associated - with the transaction's inputs. The unspents must be queried from the unspent - output set first, thus if any unspent is not found for the input, it cannot be spent. - -SOFT constraints are based upon mutable parameters. These include: - - Max block size (transaction must not be larger than this value) - - Insufficient coin hour burn fee - - Timelocked distribution addresses - - Decimal place restrictions - -NOTE: Due to a bug which allowed overflowing output coin hours to be included in a block, - overflowing output coin hours are not checked when adding a signed block, so that the existing blocks can be processed. - When creating or receiving a single transaction from the network, it is treated as a HARD constraint. - -These methods should be called via the Blockchain object when possible, -using Blockchain.VerifyBlockTxnConstraints, Blockchain.VerifySingleTxnHardConstraints and Blockchain.VerifySingleTxnSoftHardConstraints -since data from the blockchain and unspent output set are required to fully validate a transaction. 
- -*/ - -var ( - // ErrTxnExceedsMaxBlockSize transaction size exceeds the max block size - ErrTxnExceedsMaxBlockSize = errors.New("Transaction size bigger than max block size") - // ErrTxnIsLocked transaction has locked address inputs - ErrTxnIsLocked = errors.New("Transaction has locked address inputs") -) - -// TxnSignedFlag indicates if the transaction is unsigned or not -type TxnSignedFlag int - -const ( - // TxnSigned is used for signed transactions - TxnSigned TxnSignedFlag = 1 - // TxnUnsigned is used for unsigned transactions - TxnUnsigned TxnSignedFlag = 2 -) - -// ErrTxnViolatesHardConstraint is returned when a transaction violates hard constraints -type ErrTxnViolatesHardConstraint struct { - Err error -} - -// NewErrTxnViolatesHardConstraint creates ErrTxnViolatesHardConstraint -func NewErrTxnViolatesHardConstraint(err error) error { - if err == nil { - return nil - } - return ErrTxnViolatesHardConstraint{ - Err: err, - } -} - -func (e ErrTxnViolatesHardConstraint) Error() string { - return fmt.Sprintf("Transaction violates hard constraint: %v", e.Err) -} - -// ErrTxnViolatesSoftConstraint is returned when a transaction violates soft constraints -type ErrTxnViolatesSoftConstraint struct { - Err error -} - -// NewErrTxnViolatesSoftConstraint creates ErrTxnViolatesSoftConstraint -func NewErrTxnViolatesSoftConstraint(err error) error { - if err == nil { - return nil - } - return ErrTxnViolatesSoftConstraint{ - Err: err, - } -} - -func (e ErrTxnViolatesSoftConstraint) Error() string { - return fmt.Sprintf("Transaction violates soft constraint: %v", e.Err) -} - -// ErrTxnViolatesUserConstraint is returned when a transaction violates user constraints -type ErrTxnViolatesUserConstraint struct { - Err error -} - -// NewErrTxnViolatesUserConstraint creates ErrTxnViolatesUserConstraint -func NewErrTxnViolatesUserConstraint(err error) error { - if err == nil { - return nil - } - return ErrTxnViolatesUserConstraint{ - Err: err, - } -} - -func (e 
ErrTxnViolatesUserConstraint) Error() string { - return fmt.Sprintf("Transaction violates user constraint: %v", e.Err) -} - -// VerifySingleTxnSoftConstraints returns an error if any "soft" constraint are violated. -// "soft" constraints are enforced at the network and block publication level, -// but are not enforced at the blockchain level. -// Clients will not accept blocks that violate hard constraints, but will -// accept blocks that violate soft constraints. -// Checks: -// * That the transaction size is not greater than the max block total transaction size -// * That the transaction burn enough coin hours (the fee) -// * That if that transaction does not spend from a locked distribution address -// * That the transaction does not create outputs with a higher decimal precision than is allowed -func VerifySingleTxnSoftConstraints(txn coin.Transaction, headTime uint64, uxIn coin.UxArray, distParams params.Distribution, verifyParams params.VerifyTxn) error { - if err := verifyTxnSoftConstraints(txn, headTime, uxIn, distParams, verifyParams); err != nil { - return NewErrTxnViolatesSoftConstraint(err) - } - - return nil -} - -func verifyTxnSoftConstraints(txn coin.Transaction, headTime uint64, uxIn coin.UxArray, distParams params.Distribution, verifyParams params.VerifyTxn) error { - txnSize, err := txn.Size() - if err != nil { - return ErrTxnExceedsMaxBlockSize - } - - if txnSize > verifyParams.MaxTransactionSize { - return ErrTxnExceedsMaxBlockSize - } - - f, err := fee.TransactionFee(&txn, headTime, uxIn) - if err != nil { - return err - } - - if err := fee.VerifyTransactionFee(&txn, f, verifyParams.BurnFactor); err != nil { - return err - } - - if TransactionIsLocked(distParams, uxIn) { - return ErrTxnIsLocked - } - - // Reject transactions that do not conform to decimal restrictions - for _, o := range txn.Out { - if err := params.DropletPrecisionCheck(verifyParams.MaxDropletPrecision, o.Coins); err != nil { - return err - } - } - - return nil -} - -// 
VerifySingleTxnHardConstraints returns an error if any "hard" constraints are violated. -// "hard" constraints are always enforced and if violated the transaction -// should not be included in any block and any block that includes such a transaction -// should be rejected. -// Checks: -// * That the inputs to the transaction exist -// * That the transaction does not create or destroy coins -// * That the signatures on the transaction are valid -// * That there are no duplicate ux inputs -// * That there are no duplicate outputs -// * That the transaction input and output coins do not overflow uint64 -// * That the transaction input and output hours do not overflow uint64 -// NOTE: Double spends are checked against the unspent output pool when querying for uxIn -func VerifySingleTxnHardConstraints(txn coin.Transaction, head coin.BlockHeader, uxIn coin.UxArray, signed TxnSignedFlag) error { - // Check for output hours overflow - // When verifying a single transaction, this is considered a hard constraint. - // For transactions inside of a block, it is a soft constraint. - // This is due to a bug which allowed some blocks to be published with overflowing hours, - // otherwise this would always be a hard constraint. - if _, err := txn.OutputHours(); err != nil { - return NewErrTxnViolatesHardConstraint(err) - } - - // Check for input CoinHours calculation overflow, since it is ignored by - // VerifyTransactionHoursSpending - for _, ux := range uxIn { - if _, err := ux.CoinHours(head.Time); err != nil { - return NewErrTxnViolatesHardConstraint(err) - } - } - - if err := verifyTxnHardConstraints(txn, head, uxIn, signed); err != nil { - return NewErrTxnViolatesHardConstraint(err) - } - - return nil -} - -// VerifyBlockTxnConstraints returns an error if any "hard" constraints are violated. -// "hard" constraints are always enforced and if violated the transaction -// should not be included in any block and any block that includes such a transaction -// should be rejected. 
-// Checks: -// * That the inputs to the transaction exist -// * That the transaction does not create or destroy coins -// * That the signatures on the transaction are valid -// * That there are no duplicate ux inputs -// * That there are no duplicate outputs -// * That the transaction input and output coins do not overflow uint64 -// * That the transaction input hours do not overflow uint64 -// NOTE: Double spends are checked against the unspent output pool when querying for uxIn -// NOTE: output hours overflow is treated as a soft constraint for transactions inside of a block, due to a bug -// which allowed some blocks to be published with overflowing output hours. -func VerifyBlockTxnConstraints(txn coin.Transaction, head coin.BlockHeader, uxIn coin.UxArray) error { - if err := verifyTxnHardConstraints(txn, head, uxIn, TxnSigned); err != nil { - return NewErrTxnViolatesHardConstraint(err) - } - - return nil -} - -func verifyTxnHardConstraints(txn coin.Transaction, head coin.BlockHeader, uxIn coin.UxArray, signed TxnSignedFlag) error { - //CHECKLIST: DONE: check for duplicate ux inputs/double spending - // NOTE: Double spends are checked against the unspent output pool when querying for uxIn - - //CHECKLIST: DONE: check that inputs of transaction have not been spent - //CHECKLIST: DONE: check there are no duplicate outputs - - // Q: why are coin hours based on last block time and not - // current time? - // A: no two computers will agree on system time. Need system clock - // indepedent timing that everyone agrees on. 
fee values would depend on - // local clock - - // Check transaction type and length - // Check for duplicate outputs - // Check for duplicate inputs - // Check for invalid hash - // Check for no inputs - // Check for no outputs - // Check for zero coin outputs - // Check valid looking signatures - - switch signed { - case TxnSigned: - if err := txn.Verify(); err != nil { - return err - } - - // Check that signatures are allowed to spend inputs - if err := txn.VerifyInputSignatures(uxIn); err != nil { - return err - } - case TxnUnsigned: - if err := txn.VerifyUnsigned(); err != nil { - return err - } - - // Check that signatures are allowed to spend inputs for signatures that are not null - if err := txn.VerifyPartialInputSignatures(uxIn); err != nil { - return err - } - default: - logger.Panic("Invalid TxnSignedFlag") - } - - uxOut := coin.CreateUnspents(head, txn) - - // Check that there are any duplicates within this set - // NOTE: This should already be checked by txn.Verify() - if uxOut.HasDupes() { - return errors.New("Duplicate output in transaction") - } - - // Check that no coins are created or destroyed - if err := coin.VerifyTransactionCoinsSpending(uxIn, uxOut); err != nil { - return err - } - - // Check that no hours are created - // NOTE: this check doesn't catch overflow errors in the addition of hours - // Some blocks had their hours overflow, and if this rule was checked here, - // existing blocks would invalidate. - // The hours overflow check is handled as an extra step in the SingleTxnHard constraints, - // to allow existing blocks which violate the overflow rules to pass. - return coin.VerifyTransactionHoursSpending(head.Time, uxIn, uxOut) -} - -// VerifySingleTxnUserConstraints applies additional verification for a -// transaction created by the user. -// This is distinct from transactions created by other users (i.e. received over the network), -// and from transactions included in blocks. 
-func VerifySingleTxnUserConstraints(txn coin.Transaction) error { - for _, o := range txn.Out { - if o.Address.Null() { - err := errors.New("Transaction output is sent to the null address") - return NewErrTxnViolatesUserConstraint(err) - } - } - - return nil -} diff --git a/vendor/github.com/SkycoinProject/skycoin/src/visor/visor.go b/vendor/github.com/SkycoinProject/skycoin/src/visor/visor.go deleted file mode 100644 index 9ab353a..0000000 --- a/vendor/github.com/SkycoinProject/skycoin/src/visor/visor.go +++ /dev/null @@ -1,2469 +0,0 @@ -/* -Package visor manages the blockchain database and wallets - -All conceptual database operations must use a database transaction. -Callers of visor methods must ensure they do not make multiple calls without a transaction, -unless it is determined safe to do so. - -Wallet access is also gatewayed by visor, since the wallet data relates to the blockchain database. -Wallets are conceptually a second database. -*/ -package visor - -import ( - "errors" - "fmt" - "sort" - - "time" - - "github.com/sirupsen/logrus" - - "github.com/SkycoinProject/skycoin/src/cipher" - "github.com/SkycoinProject/skycoin/src/coin" - "github.com/SkycoinProject/skycoin/src/params" - "github.com/SkycoinProject/skycoin/src/util/logging" - "github.com/SkycoinProject/skycoin/src/util/mathutil" - "github.com/SkycoinProject/skycoin/src/util/timeutil" - "github.com/SkycoinProject/skycoin/src/visor/blockdb" - "github.com/SkycoinProject/skycoin/src/visor/dbutil" - "github.com/SkycoinProject/skycoin/src/visor/historydb" - "github.com/SkycoinProject/skycoin/src/wallet" -) - -var logger = logging.MustGetLogger("visor") - -// Visor manages the blockchain -type Visor struct { - Config Config - - startedAt time.Time - db *dbutil.DB - unconfirmed UnconfirmedTransactionPooler - blockchain Blockchainer - history Historyer - wallets *wallet.Service -} - -// New creates a Visor for managing the blockchain database -func New(c Config, db *dbutil.DB, wltServ *wallet.Service) 
(*Visor, error) { - logger.Info("Creating new visor") - if c.IsBlockPublisher { - logger.Info("Visor running in block publisher mode") - } - - if err := c.Verify(); err != nil { - return nil, err - } - - logger.Infof("Coinhour burn factor for unconfirmed transactions is %d", c.UnconfirmedVerifyTxn.BurnFactor) - logger.Infof("Max transaction size for unconfirmed transactions is %d", c.UnconfirmedVerifyTxn.MaxTransactionSize) - logger.Infof("Max decimals for unconfirmed transactions is %d", c.UnconfirmedVerifyTxn.MaxDropletPrecision) - logger.Infof("Coinhour burn factor for transactions when creating blocks is %d", c.CreateBlockVerifyTxn.BurnFactor) - logger.Infof("Max transaction size for transactions when creating blocks is %d", c.CreateBlockVerifyTxn.MaxTransactionSize) - logger.Infof("Max decimals for transactions when creating blocks is %d", c.CreateBlockVerifyTxn.MaxDropletPrecision) - logger.Infof("Max block size is %d", c.MaxBlockTransactionsSize) - - if !db.IsReadOnly() { - if err := CreateBuckets(db); err != nil { - logger.WithError(err).Error("CreateBuckets failed") - return nil, err - } - } - - bc, err := NewBlockchain(db, BlockchainConfig{ - Pubkey: c.BlockchainPubkey, - Arbitrating: c.Arbitrating, - }) - if err != nil { - return nil, err - } - - history := historydb.New() - - if !db.IsReadOnly() { - if err := db.Update("build unspent indexes and init history", func(tx *dbutil.Tx) error { - headSeq, _, err := bc.HeadSeq(tx) - if err != nil { - return err - } - - if err := bc.Unspent().MaybeBuildIndexes(tx, headSeq); err != nil { - return err - } - - return initHistory(tx, bc, history) - }); err != nil { - return nil, err - } - } - - utp, err := NewUnconfirmedTransactionPool(db) - if err != nil { - return nil, err - } - - v := &Visor{ - Config: c, - startedAt: time.Now(), - db: db, - blockchain: bc, - unconfirmed: utp, - history: history, - wallets: wltServ, - } - - return v, nil -} - -// VisorConfig returns Config -func (vs *Visor) VisorConfig() Config { 
- return vs.Config -} - -// Init initializes starts the visor -func (vs *Visor) Init() error { - logger.Info("Visor init") - - if vs.db.IsReadOnly() { - return nil - } - - return vs.db.Update("visor init", func(tx *dbutil.Tx) error { - if err := vs.maybeCreateGenesisBlock(tx); err != nil { - return err - } - - removed, err := vs.unconfirmed.RemoveInvalid(tx, vs.blockchain) - if err != nil { - return err - } - logger.Infof("Removed %d invalid txns from pool", len(removed)) - - return nil - }) -} - -func initHistory(tx *dbutil.Tx, bc *Blockchain, history *historydb.HistoryDB) error { - logger.Info("Visor initHistory") - - shouldReset, err := history.NeedsReset(tx) - if err != nil { - return err - } - - if !shouldReset { - return nil - } - - logger.Info("Resetting historyDB") - - if err := history.Erase(tx); err != nil { - return err - } - - // Reparse the history up to the blockchain head - headSeq, _, err := bc.HeadSeq(tx) - if err != nil { - return err - } - - if err := parseHistoryTo(tx, history, bc, headSeq); err != nil { - logger.WithError(err).Error("parseHistoryTo failed") - return err - } - - return nil -} - -func parseHistoryTo(tx *dbutil.Tx, history *historydb.HistoryDB, bc *Blockchain, height uint64) error { - logger.Info("Visor parseHistoryTo") - - parsedBlockSeq, _, err := history.ParsedBlockSeq(tx) - if err != nil { - return err - } - - for i := uint64(0); i < height-parsedBlockSeq; i++ { - b, err := bc.GetSignedBlockBySeq(tx, parsedBlockSeq+i+1) - if err != nil { - return err - } - - if b == nil { - return fmt.Errorf("no block exists in depth: %d", parsedBlockSeq+i+1) - } - - if err := history.ParseBlock(tx, b.Block); err != nil { - return err - } - } - - return nil -} - -// maybeCreateGenesisBlock creates a genesis block if necessary -func (vs *Visor) maybeCreateGenesisBlock(tx *dbutil.Tx) error { - logger.Info("Visor maybeCreateGenesisBlock") - gb, err := vs.blockchain.GetGenesisBlock(tx) - if err != nil { - return err - } - if gb != nil { - return 
nil - } - - logger.Info("Create genesis block") - vs.GenesisPreconditions() - b, err := coin.NewGenesisBlock(vs.Config.GenesisAddress, vs.Config.GenesisCoinVolume, vs.Config.GenesisTimestamp) - if err != nil { - return err - } - - var sb coin.SignedBlock - // record the signature of genesis block - if vs.Config.IsBlockPublisher { - sb = vs.signBlock(*b) - logger.Infof("Genesis block signature=%s", sb.Sig.Hex()) - } else { - sb = coin.SignedBlock{ - Block: *b, - Sig: vs.Config.GenesisSignature, - } - } - - return vs.executeSignedBlock(tx, sb) -} - -// GenesisPreconditions panics if conditions for genesis block are not met -func (vs *Visor) GenesisPreconditions() { - if vs.Config.BlockchainSeckey != (cipher.SecKey{}) { - if vs.Config.BlockchainPubkey != cipher.MustPubKeyFromSecKey(vs.Config.BlockchainSeckey) { - logger.Panic("Cannot create genesis block. Invalid secret key for pubkey") - } - } -} - -// StartedAt returns the time that the visor was created -func (vs *Visor) StartedAt() time.Time { - return vs.startedAt -} - -// RefreshUnconfirmed checks unconfirmed txns against the blockchain and returns -// all transaction that turn to valid. -func (vs *Visor) RefreshUnconfirmed() ([]cipher.SHA256, error) { - var hashes []cipher.SHA256 - if err := vs.db.Update("RefreshUnconfirmed", func(tx *dbutil.Tx) error { - var err error - hashes, err = vs.unconfirmed.Refresh(tx, vs.blockchain, vs.Config.Distribution, vs.Config.UnconfirmedVerifyTxn) - return err - }); err != nil { - return nil, err - } - - return hashes, nil -} - -// RemoveInvalidUnconfirmed removes transactions that become permanently invalid -// (by violating hard constraints) from the pool. -// Returns the transaction hashes that were removed. 
-func (vs *Visor) RemoveInvalidUnconfirmed() ([]cipher.SHA256, error) { - var hashes []cipher.SHA256 - if err := vs.db.Update("RemoveInvalidUnconfirmed", func(tx *dbutil.Tx) error { - var err error - hashes, err = vs.unconfirmed.RemoveInvalid(tx, vs.blockchain) - return err - }); err != nil { - return nil, err - } - - return hashes, nil -} - -// createBlock creates a SignedBlock from pending transactions -func (vs *Visor) createBlock(tx *dbutil.Tx, when uint64) (coin.SignedBlock, error) { - if !vs.Config.IsBlockPublisher { - logger.Panic("Only a block publisher node can create blocks") - } - - // Gather all unconfirmed transactions - txns, err := vs.unconfirmed.AllRawTransactions(tx) - if err != nil { - return coin.SignedBlock{}, err - } - - b, err := vs.createBlockFromTxns(tx, txns, when) - if err != nil { - return coin.SignedBlock{}, err - } - - return vs.signBlock(b), nil -} - -// createBlockFromTxns creates a Block from specified set of transactions according to set of determinstic rules. 
-func (vs *Visor) createBlockFromTxns(tx *dbutil.Tx, txns coin.Transactions, when uint64) (coin.Block, error) { - if len(txns) == 0 { - return coin.Block{}, errors.New("No transactions") - } - - logger.Infof("unconfirmed pool has %d transactions pending", len(txns)) - - // Filter transactions that violate all constraints - var filteredTxns coin.Transactions - for _, txn := range txns { - if _, _, err := vs.blockchain.VerifySingleTxnSoftHardConstraints(tx, txn, vs.Config.Distribution, vs.Config.CreateBlockVerifyTxn, TxnSigned); err != nil { - switch err.(type) { - case ErrTxnViolatesHardConstraint, ErrTxnViolatesSoftConstraint: - logger.Warningf("Transaction %s violates constraints: %v", txn.Hash().Hex(), err) - default: - return coin.Block{}, err - } - } else { - filteredTxns = append(filteredTxns, txn) - } - } - - nRemoved := len(txns) - len(filteredTxns) - if nRemoved > 0 { - logger.Infof("CreateBlock ignored %d transactions violating constraints", nRemoved) - } - - txns = filteredTxns - - if len(txns) == 0 { - logger.Info("No transactions after filtering for constraint violations") - return coin.Block{}, errors.New("No transactions after filtering for constraint violations") - } - - head, err := vs.blockchain.Head(tx) - if err != nil { - return coin.Block{}, err - } - - // Sort them by highest fee per kilobyte - txns, err = coin.SortTransactions(txns, vs.blockchain.TransactionFee(tx, head.Time())) - if err != nil { - logger.Critical().WithError(err).Error("SortTransactions failed, no block can be made until the offending transaction is removed") - return coin.Block{}, err - } - - // Apply block size transaction limit - txns, err = txns.TruncateBytesTo(vs.Config.MaxBlockTransactionsSize) - if err != nil { - logger.Critical().WithError(err).Error("TruncateBytesTo failed, no block can be made until the offending transaction is removed") - return coin.Block{}, err - } - - if len(txns) > coin.MaxBlockTransactions { - txns = txns[:coin.MaxBlockTransactions] - } - - if 
len(txns) == 0 { - logger.Panic("TruncateBytesTo removed all transactions") - } - - logger.Infof("Creating new block with %d transactions, head time %d", len(txns), when) - - b, err := vs.blockchain.NewBlock(tx, txns, when) - if err != nil { - logger.Warningf("blockchain.NewBlock failed: %v", err) - return coin.Block{}, err - } - - return *b, nil -} - -// CreateAndExecuteBlock creates a SignedBlock from pending transactions and executes it -func (vs *Visor) CreateAndExecuteBlock() (coin.SignedBlock, error) { - var sb coin.SignedBlock - - err := vs.db.Update("CreateAndExecuteBlock", func(tx *dbutil.Tx) error { - var err error - sb, err = vs.createBlock(tx, uint64(time.Now().UTC().Unix())) - if err != nil { - return err - } - - return vs.executeSignedBlock(tx, sb) - }) - - return sb, err -} - -// CreateBlockFromTxns creates a Block from specified set of transactions according to set of determinstic rules. -func (vs *Visor) CreateBlockFromTxns(txns coin.Transactions, when uint64) (coin.Block, error) { - var sb coin.Block - - err := vs.db.Update("CreateBlockFromTxns", func(tx *dbutil.Tx) error { - var err error - if sb, err = vs.createBlockFromTxns(tx, txns, when); err != nil { - return err - } - - return nil - }) - - return sb, err -} - -// VerifyBlock verifies specified block against local copy of blockchain. -// Signature is not verified. -func (vs *Visor) VerifyBlock(b coin.SignedBlock) error { - return vs.db.View("VerifyBlock", func(tx *dbutil.Tx) error { - return vs.blockchain.VerifyBlock(tx, &b) - }) -} - -// ExecuteSignedBlock adds a block to the blockchain, or returns error. -// Blocks must be executed in sequence, and be signed by a block publisher node. -func (vs *Visor) ExecuteSignedBlock(b coin.SignedBlock) error { - return vs.db.Update("ExecuteSignedBlock", func(tx *dbutil.Tx) error { - return vs.executeSignedBlock(tx, b) - }) -} - -// ExecuteSignedBlockUnsafe adds block to the blockchain, or returns error. -// Blocks must be executed in sequence. 
Block signature is not verified. -func (vs *Visor) ExecuteSignedBlockUnsafe(b coin.SignedBlock) error { - return vs.db.Update("ExecuteSignedBlockUnsafe", func(tx *dbutil.Tx) error { - return vs.executeSignedBlockUnsafe(tx, b) - }) -} - -// executeSignedBlock adds a block to the blockchain, or returns error. -// Blocks must be executed in sequence, and be signed by a block publisher node. -func (vs *Visor) executeSignedBlock(tx *dbutil.Tx, b coin.SignedBlock) error { - if err := b.VerifySignature(vs.Config.BlockchainPubkey); err != nil { - return err - } - - return vs.executeSignedBlockUnsafe(tx, b) -} - -// executeSignedBlockUnsafe add a block to the blockchain, or returns error. -// Blocks must be executed in sequence. Block signature is not verified. -func (vs *Visor) executeSignedBlockUnsafe(tx *dbutil.Tx, b coin.SignedBlock) error { - if err := vs.blockchain.ExecuteBlock(tx, &b); err != nil { - return err - } - - // Remove the transactions in the Block from the unconfirmed pool - txnHashes := make([]cipher.SHA256, 0, len(b.Block.Body.Transactions)) - for _, txn := range b.Block.Body.Transactions { - txnHashes = append(txnHashes, txn.Hash()) - } - - if err := vs.unconfirmed.RemoveTransactions(tx, txnHashes); err != nil { - return err - } - - // Update the HistoryDB - return vs.history.ParseBlock(tx, b.Block) -} - -// signBlock signs a block for a block publisher node. 
Will panic if anything is invalid -func (vs *Visor) signBlock(b coin.Block) coin.SignedBlock { - if !vs.Config.IsBlockPublisher { - logger.Panic("Only a block publisher node can sign blocks") - } - - sig := cipher.MustSignHash(b.HashHeader(), vs.Config.BlockchainSeckey) - - return coin.SignedBlock{ - Block: b, - Sig: sig, - } -} - -/* - Return Data -*/ - -// GetAllUnspentOutputs returns all unspent outputs -func (vs *Visor) GetAllUnspentOutputs() (coin.UxArray, error) { - var ux []coin.UxOut - if err := vs.db.View("GetAllUnspentOutputs", func(tx *dbutil.Tx) error { - var err error - ux, err = vs.blockchain.Unspent().GetAll(tx) - return err - }); err != nil { - return nil, err - } - - return ux, nil -} - -// GetUnspentOutputs returns unspent outputs from the pool, queried by hashes. -// If any do not exist, ErrUnspentNotExist is returned -func (vs *Visor) GetUnspentOutputs(hashes []cipher.SHA256) (coin.UxArray, error) { - var outputs coin.UxArray - if err := vs.db.View("GetUnspentOutputs", func(tx *dbutil.Tx) error { - var err error - outputs, err = vs.blockchain.Unspent().GetArray(tx, hashes) - return err - }); err != nil { - return nil, err - } - - return outputs, nil -} - -// UnconfirmedOutgoingOutputs returns all outputs that would be spent by unconfirmed transactions -func (vs *Visor) UnconfirmedOutgoingOutputs() (coin.UxArray, error) { - var uxa coin.UxArray - - if err := vs.db.View("UnconfirmedOutgoingOutputs", func(tx *dbutil.Tx) error { - var err error - uxa, err = vs.unconfirmedOutgoingOutputs(tx) - return err - }); err != nil { - return nil, err - } - - return uxa, nil -} - -func (vs *Visor) unconfirmedOutgoingOutputs(tx *dbutil.Tx) (coin.UxArray, error) { - txns, err := vs.unconfirmed.AllRawTransactions(tx) - if err != nil { - return nil, err - } - - var inputs []cipher.SHA256 - for _, txn := range txns { - inputs = append(inputs, txn.In...) 
- } - - return vs.blockchain.Unspent().GetArray(tx, inputs) -} - -// UnconfirmedIncomingOutputs returns all outputs that would be created by unconfirmed transactions -func (vs *Visor) UnconfirmedIncomingOutputs() (coin.UxArray, error) { - var uxa coin.UxArray - - if err := vs.db.View("UnconfirmedIncomingOutputs", func(tx *dbutil.Tx) error { - var err error - uxa, err = vs.unconfirmedIncomingOutputs(tx) - return err - }); err != nil { - return nil, err - } - - return uxa, nil -} - -func (vs *Visor) unconfirmedIncomingOutputs(tx *dbutil.Tx) (coin.UxArray, error) { - head, err := vs.blockchain.Head(tx) - if err != nil { - return nil, err - } - - return vs.unconfirmed.GetIncomingOutputs(tx, head.Head) -} - -// GetSignedBlocksSince returns N signed blocks more recent than Seq. Does not return nil. -func (vs *Visor) GetSignedBlocksSince(seq, ct uint64) ([]coin.SignedBlock, error) { - var blocks []coin.SignedBlock - - if err := vs.db.View("GetSignedBlocksSince", func(tx *dbutil.Tx) error { - avail := uint64(0) - head, err := vs.blockchain.Head(tx) - if err != nil { - return err - } - - headSeq := head.Seq() - if headSeq > seq { - avail = headSeq - seq - } - if avail < ct { - ct = avail - } - if ct == 0 { - return nil - } - - blocks = make([]coin.SignedBlock, 0, ct) - for j := uint64(0); j < ct; j++ { - i := seq + 1 + j - b, err := vs.blockchain.GetSignedBlockBySeq(tx, i) - if err != nil { - return err - } - - blocks = append(blocks, *b) - } - - return nil - }); err != nil { - return nil, err - } - - return blocks, nil -} - -// HeadBkSeq returns the highest BkSeq we know, returns false in the 2nd return value -// if the blockchain is empty -func (vs *Visor) HeadBkSeq() (uint64, bool, error) { - var headSeq uint64 - var ok bool - - if err := vs.db.View("HeadBkSeq", func(tx *dbutil.Tx) error { - var err error - headSeq, ok, err = vs.blockchain.HeadSeq(tx) - return err - }); err != nil { - return 0, false, err - } - - return headSeq, ok, nil -} - -// GetBlockchainMetadata 
returns descriptive blockchain information -func (vs *Visor) GetBlockchainMetadata() (*BlockchainMetadata, error) { - var head *coin.SignedBlock - var unconfirmedLen, unspentsLen uint64 - - if err := vs.db.View("GetBlockchainMetadata", func(tx *dbutil.Tx) error { - var err error - head, err = vs.blockchain.Head(tx) - if err != nil { - return err - } - - unconfirmedLen, err = vs.unconfirmed.Len(tx) - if err != nil { - return err - } - - unspentsLen, err = vs.blockchain.Unspent().Len(tx) - return err - }); err != nil { - return nil, err - } - - return NewBlockchainMetadata(*head, unconfirmedLen, unspentsLen) -} - -// GetBlock returns a copy of the block at seq. Returns error if seq out of range -func (vs *Visor) GetBlock(seq uint64) (*coin.SignedBlock, error) { - var b *coin.SignedBlock - - if err := vs.db.View("GetBlock", func(tx *dbutil.Tx) error { - headSeq, ok, err := vs.blockchain.HeadSeq(tx) - if err != nil { - return err - } - - if !ok || seq > headSeq { - return errors.New("Block seq out of range") - } - - b, err = vs.blockchain.GetSignedBlockBySeq(tx, seq) - return err - }); err != nil { - return nil, err - } - - return b, nil -} - -// GetBlocks returns blocks matches seqs -func (vs *Visor) GetBlocks(seqs []uint64) ([]coin.SignedBlock, error) { - var blocks []coin.SignedBlock - - if err := vs.db.View("GetBlocks", func(tx *dbutil.Tx) error { - var err error - blocks, err = vs.blockchain.GetBlocks(tx, seqs) - return err - }); err != nil { - return nil, err - } - - return blocks, nil -} - -// GetBlocksVerbose returns blocks matches seqs along with verbose transaction input data -func (vs *Visor) GetBlocksVerbose(seqs []uint64) ([]coin.SignedBlock, [][][]TransactionInput, error) { - var blocks []coin.SignedBlock - var inputs [][][]TransactionInput - - if err := vs.db.View("GetBlocksVerbose", func(tx *dbutil.Tx) error { - var err error - blocks, inputs, err = vs.getBlocksVerbose(tx, func(tx *dbutil.Tx) ([]coin.SignedBlock, error) { - return 
vs.blockchain.GetBlocks(tx, seqs) - }) - return err - }); err != nil { - return nil, nil, err - } - - return blocks, inputs, nil -} - -// GetBlocksInRange returns multiple blocks between start and end, including both start and end. -// Returns the empty slice if unable to fulfill request. -func (vs *Visor) GetBlocksInRange(start, end uint64) ([]coin.SignedBlock, error) { - var blocks []coin.SignedBlock - - if err := vs.db.View("GetBlocksInRange", func(tx *dbutil.Tx) error { - var err error - blocks, err = vs.blockchain.GetBlocksInRange(tx, start, end) - return err - }); err != nil { - return nil, err - } - - return blocks, nil -} - -// GetBlocksInRangeVerbose returns multiple blocks between start and end, including both start and end. -// Also returns the verbose transaction input data for transactions in these blocks. -// Returns the empty slice if unable to fulfill request. -func (vs *Visor) GetBlocksInRangeVerbose(start, end uint64) ([]coin.SignedBlock, [][][]TransactionInput, error) { - var blocks []coin.SignedBlock - var inputs [][][]TransactionInput - - if err := vs.db.View("GetBlocksInRangeVerbose", func(tx *dbutil.Tx) error { - var err error - blocks, inputs, err = vs.getBlocksVerbose(tx, func(tx *dbutil.Tx) ([]coin.SignedBlock, error) { - return vs.blockchain.GetBlocksInRange(tx, start, end) - }) - return err - }); err != nil { - return nil, nil, err - } - - return blocks, inputs, nil -} - -// GetLastBlocks returns last N blocks -func (vs *Visor) GetLastBlocks(num uint64) ([]coin.SignedBlock, error) { - var blocks []coin.SignedBlock - - if err := vs.db.View("GetLastBlocks", func(tx *dbutil.Tx) error { - var err error - blocks, err = vs.blockchain.GetLastBlocks(tx, num) - return err - }); err != nil { - return nil, err - } - - return blocks, nil -} - -// GetLastBlocksVerbose returns last N blocks with verbose transaction input data -func (vs *Visor) GetLastBlocksVerbose(num uint64) ([]coin.SignedBlock, [][][]TransactionInput, error) { - var blocks 
[]coin.SignedBlock - var inputs [][][]TransactionInput - - if err := vs.db.View("GetLastBlocksVerbose", func(tx *dbutil.Tx) error { - var err error - blocks, inputs, err = vs.getBlocksVerbose(tx, func(tx *dbutil.Tx) ([]coin.SignedBlock, error) { - return vs.blockchain.GetLastBlocks(tx, num) - }) - return err - }); err != nil { - return nil, nil, err - } - - return blocks, inputs, nil -} - -func (vs *Visor) getBlocksVerbose(tx *dbutil.Tx, getBlocks func(*dbutil.Tx) ([]coin.SignedBlock, error)) ([]coin.SignedBlock, [][][]TransactionInput, error) { - blocks, err := getBlocks(tx) - if err != nil { - return nil, nil, err - } - - if len(blocks) == 0 { - return nil, nil, nil - } - - inputs := make([][][]TransactionInput, len(blocks)) - for i, b := range blocks { - blockInputs, err := vs.getBlockInputs(tx, &b) - if err != nil { - return nil, nil, err - } - inputs[i] = blockInputs - } - - return blocks, inputs, nil -} - -// InjectForeignTransaction records a coin.Transaction to the UnconfirmedTransactionPool if the txn is not -// already in the blockchain. -// The bool return value is whether or not the transaction was already in the pool. -// If the transaction violates hard constraints, it is rejected, and error will not be nil. -// If the transaction only violates soft constraints, it is still injected, and the soft constraint violation is returned. -// This method is intended for transactions received over the network. 
-func (vs *Visor) InjectForeignTransaction(txn coin.Transaction) (bool, *ErrTxnViolatesSoftConstraint, error) { - var known bool - var softErr *ErrTxnViolatesSoftConstraint - - if err := vs.db.Update("InjectForeignTransaction", func(tx *dbutil.Tx) error { - var err error - known, softErr, err = vs.unconfirmed.InjectTransaction(tx, vs.blockchain, txn, vs.Config.Distribution, vs.Config.UnconfirmedVerifyTxn) - return err - }); err != nil { - return false, nil, err - } - - return known, softErr, nil -} - -// InjectUserTransaction records a coin.Transaction to the UnconfirmedTransactionPool if the txn is not -// already in the blockchain. -// The bool return value is whether or not the transaction was already in the pool. -// If the transaction violates hard or soft constraints, it is rejected, and error will not be nil. -func (vs *Visor) InjectUserTransaction(txn coin.Transaction) (bool, *coin.SignedBlock, coin.UxArray, error) { - var known bool - var head *coin.SignedBlock - var inputs coin.UxArray - - if err := vs.db.Update("InjectUserTransaction", func(tx *dbutil.Tx) error { - var err error - known, head, inputs, err = vs.InjectUserTransactionTx(tx, txn) - return err - }); err != nil { - return false, nil, nil, err - } - - return known, head, inputs, nil -} - -// InjectUserTransactionTx records a coin.Transaction to the UnconfirmedTransactionPool if the txn is not -// already in the blockchain. -// The bool return value is whether or not the transaction was already in the pool. -// If the transaction violates hard or soft constraints, it is rejected, and error will not be nil. -// This method is only exported for use by the daemon gateway's InjectBroadcastTransaction method. 
-func (vs *Visor) InjectUserTransactionTx(tx *dbutil.Tx, txn coin.Transaction) (bool, *coin.SignedBlock, coin.UxArray, error) { - if err := VerifySingleTxnUserConstraints(txn); err != nil { - return false, nil, nil, err - } - - head, inputs, err := vs.blockchain.VerifySingleTxnSoftHardConstraints(tx, txn, vs.Config.Distribution, params.UserVerifyTxn, TxnSigned) - if err != nil { - return false, nil, nil, err - } - - known, softErr, err := vs.unconfirmed.InjectTransaction(tx, vs.blockchain, txn, vs.Config.Distribution, params.UserVerifyTxn) - if softErr != nil { - logger.WithError(softErr).Warning("InjectUserTransaction vs.unconfirmed.InjectTransaction returned a softErr unexpectedly") - } - - return known, head, inputs, err -} - -// GetTransactionsForAddress returns the Transactions whose unspents give coins to a cipher.Address. -// This includes both confirmed and unconfirmed transactions. -func (vs *Visor) GetTransactionsForAddress(a cipher.Address) ([]Transaction, error) { - var txns map[cipher.Address][]Transaction - - if err := vs.db.View("GetTransactionsForAddress", func(tx *dbutil.Tx) error { - var err error - txns, err = vs.getTransactionsForAddresses(tx, []cipher.Address{a}) - return err - }); err != nil { - return nil, err - } - - return txns[a], nil -} - -// GetTransaction returns a Transaction by hash. 
-func (vs *Visor) GetTransaction(txnHash cipher.SHA256) (*Transaction, error) { - var txn *Transaction - - if err := vs.db.View("GetTransaction", func(tx *dbutil.Tx) error { - var err error - txn, err = vs.getTransaction(tx, txnHash) - return err - }); err != nil { - return nil, err - } - - return txn, nil -} - -// GetTransactionWithInputs returns a Transaction by hash, along with the unspent outputs of its inputs -func (vs *Visor) GetTransactionWithInputs(txnHash cipher.SHA256) (*Transaction, []TransactionInput, error) { - var txn *Transaction - var inputs []TransactionInput - - if err := vs.db.View("GetTransactionWithInputs", func(tx *dbutil.Tx) error { - var err error - txn, err = vs.getTransaction(tx, txnHash) - if err != nil { - return err - } - - if txn == nil { - return nil - } - - feeCalcTime, err := vs.getFeeCalcTimeForTransaction(tx, *txn) - if err != nil { - return err - } - if feeCalcTime == nil { - return nil - } - - inputs, err = vs.getTransactionInputs(tx, *feeCalcTime, txn.Transaction.In) - return err - }); err != nil { - return nil, nil, err - } - - return txn, inputs, nil -} - -func (vs *Visor) getTransaction(tx *dbutil.Tx, txnHash cipher.SHA256) (*Transaction, error) { - // Look in the unconfirmed pool - utxn, err := vs.unconfirmed.Get(tx, txnHash) - if err != nil { - return nil, err - } - - if utxn != nil { - return &Transaction{ - Transaction: utxn.Transaction, - Status: NewUnconfirmedTransactionStatus(), - Time: uint64(timeutil.NanoToTime(utxn.Received).Unix()), - }, nil - } - - htxn, err := vs.history.GetTransaction(tx, txnHash) - if err != nil { - return nil, err - } - - if htxn == nil { - return nil, nil - } - - headSeq, ok, err := vs.blockchain.HeadSeq(tx) - if err != nil { - return nil, err - } else if !ok { - return nil, errors.New("blockchain is empty but history has transactions") - } - - b, err := vs.blockchain.GetSignedBlockBySeq(tx, htxn.BlockSeq) - if err != nil { - return nil, err - } - - if b == nil { - return nil, 
fmt.Errorf("found no block in seq %v", htxn.BlockSeq) - } - - if headSeq < htxn.BlockSeq { - return nil, fmt.Errorf("blockchain head seq %d is earlier than history txn seq %d", headSeq, htxn.BlockSeq) - } - - confirms := headSeq - htxn.BlockSeq + 1 - return &Transaction{ - Transaction: htxn.Txn, - Status: NewConfirmedTransactionStatus(confirms, htxn.BlockSeq), - Time: b.Time(), - }, nil -} - -// TxFilter transaction filter type -type TxFilter interface { - // Returns whether the transaction is matched - Match(*Transaction) bool -} - -// BaseFilter is a helper struct for generating TxFilter. -type BaseFilter struct { - F func(tx *Transaction) bool -} - -// Match matches the filter based upon F -func (f BaseFilter) Match(tx *Transaction) bool { - return f.F(tx) -} - -// NewAddrsFilter collects all addresses related transactions. -func NewAddrsFilter(addrs []cipher.Address) TxFilter { - return AddrsFilter{Addrs: addrs} -} - -// AddrsFilter filters by addresses -type AddrsFilter struct { - Addrs []cipher.Address -} - -// Match implements the TxFilter interface, this actually won't be used, only the 'Addrs' member is used. -func (af AddrsFilter) Match(tx *Transaction) bool { return true } - -// NewConfirmedTxFilter collects the transaction whose 'Confirmed' status matchs the parameter passed in. -func NewConfirmedTxFilter(isConfirmed bool) TxFilter { - return BaseFilter{F: func(tx *Transaction) bool { - return tx.Status.Confirmed == isConfirmed - }} -} - -// GetTransactions returns transactions that can pass the filters. -// If no filters is provided, returns all transactions. 
-func (vs *Visor) GetTransactions(flts []TxFilter) ([]Transaction, error) { - var txns []Transaction - - if err := vs.db.View("GetTransactions", func(tx *dbutil.Tx) error { - var err error - txns, err = vs.getTransactions(tx, flts) - return err - }); err != nil { - return nil, err - } - - return txns, nil -} - -// GetTransactionsWithInputs is the same as GetTransactions but also returns verbose transaction input data -func (vs *Visor) GetTransactionsWithInputs(flts []TxFilter) ([]Transaction, [][]TransactionInput, error) { - var txns []Transaction - var inputs [][]TransactionInput - - if err := vs.db.View("GetTransactionsWithInputs", func(tx *dbutil.Tx) error { - var err error - txns, err = vs.getTransactions(tx, flts) - if err != nil { - return err - } - - inputs = make([][]TransactionInput, len(txns)) - for i, txn := range txns { - feeCalcTime, err := vs.getFeeCalcTimeForTransaction(tx, txn) - if err != nil { - return err - } - if feeCalcTime == nil { - continue - } - - txnInputs, err := vs.getTransactionInputs(tx, *feeCalcTime, txn.Transaction.In) - if err != nil { - return err - } - - inputs[i] = txnInputs - } - - return nil - }); err != nil { - return nil, nil, err - } - - return txns, inputs, nil -} - -func (vs *Visor) getTransactions(tx *dbutil.Tx, flts []TxFilter) ([]Transaction, error) { - var addrFlts []AddrsFilter - var otherFlts []TxFilter - // Splits the filters into AddrsFilter and other filters - for _, f := range flts { - switch v := f.(type) { - case AddrsFilter: - addrFlts = append(addrFlts, v) - default: - otherFlts = append(otherFlts, f) - } - } - - // Accumulates all addresses in address filters - addrs := accumulateAddressInFilter(addrFlts) - - // Traverses all transactions to do collection if there's no address filter. 
- if len(addrs) == 0 { - return vs.traverseTxns(tx, otherFlts) - } - - // Gets addresses related transactions - addrTxns, err := vs.getTransactionsForAddresses(tx, addrs) - if err != nil { - return nil, err - } - - // Converts address transactions map into []Transaction, - // and remove duplicate txns - txnMap := make(map[cipher.SHA256]struct{}) - var txns []Transaction - for _, aTxns := range addrTxns { - for _, txn := range aTxns { - txnHash := txn.Transaction.Hash() - if _, exist := txnMap[txnHash]; exist { - continue - } - txnMap[txnHash] = struct{}{} - txns = append(txns, txn) - } - } - - // Checks other filters - f := func(txn *Transaction, flts []TxFilter) bool { - for _, flt := range flts { - if !flt.Match(txn) { - return false - } - } - - return true - } - - var retTxns []Transaction - for _, txn := range txns { - if f(&txn, otherFlts) { - retTxns = append(retTxns, txn) - } - } - - return retTxns, nil -} - -func accumulateAddressInFilter(afs []AddrsFilter) []cipher.Address { - // Accumulate all addresses in address filters - addrMap := make(map[cipher.Address]struct{}) - var addrs []cipher.Address - for _, af := range afs { - for _, a := range af.Addrs { - if _, exist := addrMap[a]; exist { - continue - } - addrMap[a] = struct{}{} - addrs = append(addrs, a) - } - } - return addrs -} - -// getTransactionsForAddresses returns all addresses related transactions. -// Including both confirmed and unconfirmed transactions. 
-func (vs *Visor) getTransactionsForAddresses(tx *dbutil.Tx, addrs []cipher.Address) (map[cipher.Address][]Transaction, error) { - // Get the head block seq, for calculating the txn status - headBkSeq, ok, err := vs.blockchain.HeadSeq(tx) - - if err != nil { - return nil, err - } - if !ok { - return nil, errors.New("No head block seq") - } - - ret := make(map[cipher.Address][]Transaction, len(addrs)) - for _, a := range addrs { - addrTxns, err := vs.history.GetTransactionsForAddress(tx, a) - if err != nil { - return nil, err - } - - txns := make([]Transaction, len(addrTxns), len(addrTxns)+4) - for i, txn := range addrTxns { - if headBkSeq < txn.BlockSeq { - err := errors.New("Transaction block sequence is greater than the head block sequence") - logger.Critical().WithError(err).WithFields(logrus.Fields{ - "headBkSeq": headBkSeq, - "txnBlockSeq": txn.BlockSeq, - }).Error() - return nil, err - } - h := headBkSeq - txn.BlockSeq + 1 - - bk, err := vs.blockchain.GetSignedBlockBySeq(tx, txn.BlockSeq) - if err != nil { - return nil, err - } - - if bk == nil { - return nil, fmt.Errorf("block seq=%d doesn't exist", txn.BlockSeq) - } - - txns[i] = Transaction{ - Transaction: txn.Txn, - Status: NewConfirmedTransactionStatus(h, txn.BlockSeq), - Time: bk.Time(), - } - } - - // Look in the unconfirmed pool - uxs, err := vs.unconfirmed.GetUnspentsOfAddr(tx, a) - if err != nil { - return nil, err - } - - for _, ux := range uxs { - txn, err := vs.unconfirmed.Get(tx, ux.Body.SrcTransaction) - if err != nil { - return nil, err - } - - if txn == nil { - logger.Critical().Error("unconfirmed unspent missing unconfirmed txn") - continue - } - - txns = append(txns, Transaction{ - Transaction: txn.Transaction, - Status: NewUnconfirmedTransactionStatus(), - Time: uint64(timeutil.NanoToTime(txn.Received).Unix()), - }) - } - - ret[a] = txns - } - - return ret, nil -} - -// traverseTxns traverses transactions in historydb and unconfirmed tx pool in db, -// returns transactions that can pass 
the filters. -func (vs *Visor) traverseTxns(tx *dbutil.Tx, flts []TxFilter) ([]Transaction, error) { - // Get the head block seq, for calculating the tx status - headBkSeq, ok, err := vs.blockchain.HeadSeq(tx) - if err != nil { - return nil, err - } - if !ok { - return nil, errors.New("No head block seq") - } - - var txns []Transaction - - if err := vs.history.ForEachTxn(tx, func(_ cipher.SHA256, hTxn *historydb.Transaction) error { - if headBkSeq < hTxn.BlockSeq { - err := errors.New("Transaction block sequence is less than the head block sequence") - logger.Critical().WithError(err).WithFields(logrus.Fields{ - "headBkSeq": headBkSeq, - "txBlockSeq": hTxn.BlockSeq, - }).Error() - return err - } - - h := headBkSeq - hTxn.BlockSeq + 1 - - bk, err := vs.blockchain.GetSignedBlockBySeq(tx, hTxn.BlockSeq) - if err != nil { - return fmt.Errorf("get block of seq: %v failed: %v", hTxn.BlockSeq, err) - } - - if bk == nil { - return fmt.Errorf("block of seq: %d doesn't exist", hTxn.BlockSeq) - } - - txn := Transaction{ - Transaction: hTxn.Txn, - Status: NewConfirmedTransactionStatus(h, hTxn.BlockSeq), - Time: bk.Time(), - } - - // Checks filters - for _, f := range flts { - if !f.Match(&txn) { - return nil - } - } - - txns = append(txns, txn) - return nil - }); err != nil { - return nil, err - } - - txns = sortTxns(txns) - - // Gets all unconfirmed transactions - unconfirmedTxns, err := vs.unconfirmed.GetFiltered(tx, func(txn UnconfirmedTransaction) bool { - return true - }) - if err != nil { - return nil, err - } - - for _, ux := range unconfirmedTxns { - txn := Transaction{ - Transaction: ux.Transaction, - Status: NewUnconfirmedTransactionStatus(), - Time: uint64(timeutil.NanoToTime(ux.Received).Unix()), - } - - // Checks filters - for _, f := range flts { - if !f.Match(&txn) { - continue - } - txns = append(txns, txn) - } - } - return txns, nil -} - -// Sort transactions by block seq, if equal then compare hash -func sortTxns(txns []Transaction) []Transaction { - 
sort.Slice(txns, func(i, j int) bool { - if txns[i].Status.BlockSeq < txns[j].Status.BlockSeq { - return true - } - - if txns[i].Status.BlockSeq > txns[j].Status.BlockSeq { - return false - } - - // If transactions in the same block, compare the hash string - return txns[i].Transaction.Hash().Hex() < txns[j].Transaction.Hash().Hex() - }) - return txns -} - -// AddressBalances computes the total balance for cipher.Addresses and their coin.UxOuts -func (vs *Visor) AddressBalances(head *coin.SignedBlock, auxs coin.AddressUxOuts) (uint64, uint64, error) { - prevTime := head.Time() - var coins uint64 - var hours uint64 - for _, uxs := range auxs { - for _, ux := range uxs { - uxHours, err := ux.CoinHours(prevTime) - if err != nil { - return 0, 0, err - } - - coins, err = mathutil.AddUint64(coins, ux.Body.Coins) - if err != nil { - return 0, 0, err - } - - hours, err = mathutil.AddUint64(hours, uxHours) - if err != nil { - return 0, 0, err - } - } - } - return coins, hours, nil -} - -// GetUnconfirmedTransactions gets all confirmed transactions of specific addresses -func (vs *Visor) GetUnconfirmedTransactions(filter func(UnconfirmedTransaction) bool) ([]UnconfirmedTransaction, error) { - var txns []UnconfirmedTransaction - - if err := vs.db.View("GetUnconfirmedTransactions", func(tx *dbutil.Tx) error { - var err error - txns, err = vs.unconfirmed.GetFiltered(tx, filter) - return err - }); err != nil { - return nil, err - } - - return txns, nil -} - -// GetUnconfirmedTransactionsVerbose gets all confirmed transactions of specific addresses -func (vs *Visor) GetUnconfirmedTransactionsVerbose(filter func(UnconfirmedTransaction) bool) ([]UnconfirmedTransaction, [][]TransactionInput, error) { - var txns []UnconfirmedTransaction - var inputs [][]TransactionInput - - if err := vs.db.View("GetUnconfirmedTransactionsVerbose", func(tx *dbutil.Tx) error { - var err error - txns, err = vs.unconfirmed.GetFiltered(tx, filter) - if err != nil { - return err - } - - inputs, err = 
vs.getTransactionInputsForUnconfirmedTxns(tx, txns) - - return err - }); err != nil { - return nil, nil, err - } - - if len(txns) == 0 { - return nil, nil, nil - } - - return txns, inputs, nil -} - -// SendsToAddresses represents a filter that check if tx has output to the given addresses -func SendsToAddresses(addresses []cipher.Address) func(UnconfirmedTransaction) bool { - return func(tx UnconfirmedTransaction) (isRelated bool) { - for _, out := range tx.Transaction.Out { - for _, address := range addresses { - if out.Address == address { - isRelated = true - return - } - } - } - return - } -} - -// GetAllUnconfirmedTransactions returns all unconfirmed transactions -func (vs *Visor) GetAllUnconfirmedTransactions() ([]UnconfirmedTransaction, error) { - var txns []UnconfirmedTransaction - - if err := vs.db.View("GetAllUnconfirmedTransactions", func(tx *dbutil.Tx) error { - var err error - txns, err = vs.unconfirmed.GetFiltered(tx, All) - return err - }); err != nil { - return nil, err - } - - return txns, nil -} - -// GetAllUnconfirmedTransactionsVerbose returns all unconfirmed transactions with verbose transaction input data -func (vs *Visor) GetAllUnconfirmedTransactionsVerbose() ([]UnconfirmedTransaction, [][]TransactionInput, error) { - var txns []UnconfirmedTransaction - var inputs [][]TransactionInput - - if err := vs.db.View("GetAllUnconfirmedTransactionsVerbose", func(tx *dbutil.Tx) error { - var err error - txns, err = vs.unconfirmed.GetFiltered(tx, All) - if err != nil { - return err - } - - inputs, err = vs.getTransactionInputsForUnconfirmedTxns(tx, txns) - - return err - }); err != nil { - return nil, nil, err - } - - if len(txns) == 0 { - return nil, nil, nil - } - - return txns, inputs, nil -} - -// getTransactionInputsForUnconfirmedTxns returns ReadableTransactionInputs for a set of UnconfirmedTransactions -func (vs *Visor) getTransactionInputsForUnconfirmedTxns(tx *dbutil.Tx, txns []UnconfirmedTransaction) ([][]TransactionInput, error) { - if 
len(txns) == 0 { - return nil, nil - } - - // Use the current head time to calculate estimated coin hours of unconfirmed transactions - headTime, err := vs.blockchain.Time(tx) - if err != nil { - return nil, err - } - - inputs := make([][]TransactionInput, len(txns)) - for i, txn := range txns { - if len(txn.Transaction.In) == 0 { - logger.Critical().WithField("txid", txn.Transaction.Hash().Hex()).Warning("unconfirmed transaction has no inputs") - continue - } - - txnInputs, err := vs.getTransactionInputs(tx, headTime, txn.Transaction.In) - if err != nil { - return nil, err - } - - inputs[i] = txnInputs - } - - return inputs, nil -} - -// getFeeCalcTimeForTransaction returns the time against which a transaction's fee should be calculated. -// The genesis block has no inputs and thus no fee to calculate, so it returns nil. -// A confirmed transaction's fee was calculated from the previous block's head time, when it was executed. -// An unconfirmed transaction's fee will be calculated from the current block head time, once executed. 
-func (vs *Visor) getFeeCalcTimeForTransaction(tx *dbutil.Tx, txn Transaction) (*uint64, error) { - // The genesis block has no inputs to calculate, otherwise calculate the inputs - if txn.Status.BlockSeq == 0 && txn.Status.Confirmed { - return nil, nil - } - - feeCalcTime := uint64(0) - if txn.Status.Confirmed { - // Use the previous block head to calculate the coin hours - prevBlock, err := vs.blockchain.GetSignedBlockBySeq(tx, txn.Status.BlockSeq-1) - if err != nil { - return nil, err - } - - if prevBlock == nil { - err := fmt.Errorf("getFeeCalcTimeForTransaction: prevBlock seq=%d not found", txn.Status.BlockSeq-1) - logger.Critical().WithError(err).Error("getFeeCalcTimeForTransaction") - return nil, err - } - - feeCalcTime = prevBlock.Block.Head.Time - } else { - // Use the current block head to calculate the coin hours - var err error - feeCalcTime, err = vs.blockchain.Time(tx) - if err != nil { - return nil, err - } - } - - return &feeCalcTime, nil -} - -// GetAllValidUnconfirmedTxHashes returns all valid unconfirmed transaction hashes -func (vs *Visor) GetAllValidUnconfirmedTxHashes() ([]cipher.SHA256, error) { - var hashes []cipher.SHA256 - - if err := vs.db.View("GetAllValidUnconfirmedTxHashes", func(tx *dbutil.Tx) error { - var err error - hashes, err = vs.unconfirmed.GetHashes(tx, IsValid) - return err - }); err != nil { - return nil, err - } - - return hashes, nil -} - -// GetConfirmedTransaction returns transaction, which has been already included in some block. -func (vs *Visor) GetConfirmedTransaction(txnHash cipher.SHA256) (*coin.Transaction, error) { - var histTxn *historydb.Transaction - - if err := vs.db.View("GetConfirmedTransaction", func(tx *dbutil.Tx) error { - var err error - histTxn, err = vs.history.GetTransaction(tx, txnHash) - return err - }); err != nil { - return nil, err - } - - // Transaction not found. 
- if histTxn == nil { - return nil, nil - } - - return &histTxn.Txn, nil -} - -// GetSignedBlockByHash get block of specific hash header, return nil on not found. -func (vs *Visor) GetSignedBlockByHash(hash cipher.SHA256) (*coin.SignedBlock, error) { - var sb *coin.SignedBlock - - if err := vs.db.View("GetSignedBlockByHash", func(tx *dbutil.Tx) error { - var err error - sb, err = vs.blockchain.GetSignedBlockByHash(tx, hash) - return err - }); err != nil { - return nil, err - } - - return sb, nil -} - -// GetSignedBlockBySeq get block of specific seq, return nil on not found. -func (vs *Visor) GetSignedBlockBySeq(seq uint64) (*coin.SignedBlock, error) { - var b *coin.SignedBlock - - if err := vs.db.View("GetSignedBlockBySeq", func(tx *dbutil.Tx) error { - var err error - b, err = vs.blockchain.GetSignedBlockBySeq(tx, seq) - return err - }); err != nil { - return nil, err - } - - return b, nil -} - -// GetSignedBlockByHashVerbose returns a coin.SignedBlock and its transactions' input data for a given block hash -func (vs *Visor) GetSignedBlockByHashVerbose(hash cipher.SHA256) (*coin.SignedBlock, [][]TransactionInput, error) { - var b *coin.SignedBlock - var inputs [][]TransactionInput - - if err := vs.db.View("GetSignedBlockByHashVerbose", func(tx *dbutil.Tx) error { - var err error - b, inputs, err = vs.getBlockVerbose(tx, func(tx *dbutil.Tx) (*coin.SignedBlock, error) { - return vs.blockchain.GetSignedBlockByHash(tx, hash) - }) - return err - }); err != nil { - return nil, nil, err - } - - return b, inputs, nil -} - -// GetSignedBlockBySeqVerbose returns a coin.SignedBlock and its transactions' input data for a given block hash -func (vs *Visor) GetSignedBlockBySeqVerbose(seq uint64) (*coin.SignedBlock, [][]TransactionInput, error) { - var b *coin.SignedBlock - var inputs [][]TransactionInput - - if err := vs.db.View("GetSignedBlockBySeqVerbose", func(tx *dbutil.Tx) error { - var err error - b, inputs, err = vs.getBlockVerbose(tx, func(tx *dbutil.Tx) 
(*coin.SignedBlock, error) { - return vs.blockchain.GetSignedBlockBySeq(tx, seq) - }) - return err - }); err != nil { - return nil, nil, err - } - - return b, inputs, nil -} - -func (vs *Visor) getBlockVerbose(tx *dbutil.Tx, getBlock func(*dbutil.Tx) (*coin.SignedBlock, error)) (*coin.SignedBlock, [][]TransactionInput, error) { - b, err := getBlock(tx) - if err != nil { - return nil, nil, err - } - - if b == nil { - return nil, nil, nil - } - - inputs, err := vs.getBlockInputs(tx, b) - if err != nil { - return nil, nil, err - } - - return b, inputs, nil -} - -func (vs *Visor) getBlockInputs(tx *dbutil.Tx, b *coin.SignedBlock) ([][]TransactionInput, error) { - if b == nil { - return nil, nil - } - - // The genesis block has no inputs to query or to calculate fees from - if b.Block.Head.BkSeq == 0 { - if len(b.Block.Body.Transactions) != 1 { - logger.Panicf("Genesis block should have only 1 transaction (has %d)", len(b.Block.Body.Transactions)) - } - - if len(b.Block.Body.Transactions[0].In) != 0 { - logger.Panic("Genesis block transaction should not have inputs") - } - - inputs := make([][]TransactionInput, 1) - - return inputs, nil - } - - // When a transaction was added to a block, its coinhour fee was - // calculated based upon the time of the head block. - // So we need to look at the previous block - prevBlock, err := vs.blockchain.GetSignedBlockBySeq(tx, b.Head.BkSeq-1) - if err != nil { - return nil, err - } - - if prevBlock == nil { - err := fmt.Errorf("getBlockInputs: prevBlock seq %d not found", b.Head.BkSeq-1) - logger.Critical().WithError(err).Error() - return nil, err - } - - var inputs [][]TransactionInput - for _, txn := range b.Block.Body.Transactions { - i, err := vs.getTransactionInputs(tx, prevBlock.Block.Head.Time, txn.In) - if err != nil { - return nil, err - } - - inputs = append(inputs, i) - } - - return inputs, nil -} - -// getTransactionInputs returns []TransactionInput for a given set of spent output hashes. 
-// feeCalcTime is the time against which to calculate the coinhours of the output -func (vs *Visor) getTransactionInputs(tx *dbutil.Tx, feeCalcTime uint64, inputs []cipher.SHA256) ([]TransactionInput, error) { - if len(inputs) == 0 { - err := errors.New("getTransactionInputs: inputs is empty only the genesis block transaction has no inputs, which shouldn't call this method") - logger.WithError(err).Error() - return nil, err - } - - uxOuts, err := vs.history.GetUxOuts(tx, inputs) - if err != nil { - logger.WithError(err).Error("getTransactionInputs GetUxOuts failed") - return nil, err - } - - ret := make([]TransactionInput, len(inputs)) - for i, o := range uxOuts { - r, err := NewTransactionInput(o.Out, feeCalcTime) - if err != nil { - logger.WithError(err).Error("getTransactionInputs NewTransactionInput failed") - return nil, err - } - ret[i] = r - } - - return ret, nil -} - -// GetHeadBlock gets head block. -func (vs Visor) GetHeadBlock() (*coin.SignedBlock, error) { - var b *coin.SignedBlock - - if err := vs.db.View("GetHeadBlock", func(tx *dbutil.Tx) error { - var err error - b, err = vs.blockchain.Head(tx) - return err - }); err != nil { - return nil, err - } - - return b, nil -} - -// GetHeadBlockTime returns the time of the head block. -func (vs Visor) GetHeadBlockTime() (uint64, error) { - var t uint64 - - if err := vs.db.View("GetHeadBlockTime", func(tx *dbutil.Tx) error { - var err error - t, err = vs.blockchain.Time(tx) - return err - }); err != nil { - return 0, err - } - - return t, nil -} - -// GetUxOutByID gets UxOut by hash id. 
-func (vs Visor) GetUxOutByID(id cipher.SHA256) (*historydb.UxOut, error) { - var outs []historydb.UxOut - - if err := vs.db.View("GetUxOutByID", func(tx *dbutil.Tx) error { - var err error - outs, err = vs.history.GetUxOuts(tx, []cipher.SHA256{id}) - return err - }); err != nil { - return nil, err - } - - if len(outs) == 0 { - return nil, nil - } - - return &outs[0], nil -} - -// GetSpentOutputsForAddresses gets all the spent outputs of a set of addresses -func (vs Visor) GetSpentOutputsForAddresses(addresses []cipher.Address) ([][]historydb.UxOut, error) { - out := make([][]historydb.UxOut, len(addresses)) - - if err := vs.db.View("GetSpentOutputsForAddresses", func(tx *dbutil.Tx) error { - for i, addr := range addresses { - addrUxOuts, err := vs.history.GetOutputsForAddress(tx, addr) - if err != nil { - return err - } - - out[i] = addrUxOuts - } - - return nil - }); err != nil { - return nil, err - } - - return out, nil -} - -// RecvOfAddresses returns unconfirmed receiving uxouts of addresses -func (vs *Visor) RecvOfAddresses(addrs []cipher.Address) (coin.AddressUxOuts, error) { - var uxouts coin.AddressUxOuts - - if err := vs.db.View("RecvOfAddresses", func(tx *dbutil.Tx) error { - head, err := vs.blockchain.Head(tx) - if err != nil { - return err - } - - uxouts, err = vs.unconfirmed.RecvOfAddresses(tx, head.Head, addrs) - return err - }); err != nil { - return nil, err - } - - return uxouts, nil -} - -// GetIncomingOutputs returns all predicted outputs that are in pending tx pool -func (vs *Visor) GetIncomingOutputs() (coin.UxArray, error) { - var uxa coin.UxArray - - if err := vs.db.View("GetIncomingOutputs", func(tx *dbutil.Tx) error { - head, err := vs.blockchain.Head(tx) - if err != nil { - return err - } - - uxa, err = vs.unconfirmed.GetIncomingOutputs(tx, head.Head) - return err - }); err != nil { - return nil, err - } - - return uxa, nil -} - -// GetUnconfirmedTxn gets an unconfirmed transaction from the DB -func (vs *Visor) GetUnconfirmedTxn(hash 
cipher.SHA256) (*UnconfirmedTransaction, error) { - var txn *UnconfirmedTransaction - - if err := vs.db.View("GetUnconfirmedTxn", func(tx *dbutil.Tx) error { - var err error - txn, err = vs.unconfirmed.Get(tx, hash) - return err - }); err != nil { - return nil, err - } - - return txn, nil -} - -// FilterKnownUnconfirmed returns unconfirmed txn hashes with known ones removed -func (vs *Visor) FilterKnownUnconfirmed(txns []cipher.SHA256) ([]cipher.SHA256, error) { - var hashes []cipher.SHA256 - - if err := vs.db.View("FilterKnownUnconfirmed", func(tx *dbutil.Tx) error { - var err error - hashes, err = vs.unconfirmed.FilterKnown(tx, txns) - return err - }); err != nil { - return nil, err - } - - return hashes, nil -} - -// GetKnownUnconfirmed returns unconfirmed txn hashes with known ones removed -func (vs *Visor) GetKnownUnconfirmed(txns []cipher.SHA256) (coin.Transactions, error) { - var hashes coin.Transactions - - if err := vs.db.View("GetKnownUnconfirmed", func(tx *dbutil.Tx) error { - var err error - hashes, err = vs.unconfirmed.GetKnown(tx, txns) - return err - }); err != nil { - return nil, err - } - - return hashes, nil -} - -// UnconfirmedSpendsOfAddresses returns all unconfirmed coin.UxOut spends of addresses -func (vs *Visor) UnconfirmedSpendsOfAddresses(addrs []cipher.Address) (coin.AddressUxOuts, error) { - var outs coin.AddressUxOuts - - if err := vs.db.View("UnconfirmedSpendsOfAddresses", func(tx *dbutil.Tx) error { - var err error - outs, err = vs.unconfirmedSpendsOfAddresses(tx, addrs) - return err - }); err != nil { - return nil, err - } - - return outs, nil -} - -// unconfirmedSpendsOfAddresses returns all unconfirmed coin.UxOut spends of addresses -func (vs *Visor) unconfirmedSpendsOfAddresses(tx *dbutil.Tx, addrs []cipher.Address) (coin.AddressUxOuts, error) { - txns, err := vs.unconfirmed.AllRawTransactions(tx) - if err != nil { - return nil, err - } - - var inputs []cipher.SHA256 - for _, txn := range txns { - inputs = append(inputs, txn.In...) 
- } - - uxa, err := vs.blockchain.Unspent().GetArray(tx, inputs) - if err != nil { - return nil, err - } - - outs := make(coin.AddressUxOuts, len(addrs)) - - addrm := make(map[cipher.Address]struct{}, len(addrs)) - for _, addr := range addrs { - addrm[addr] = struct{}{} - } - - for _, ux := range uxa { - if _, ok := addrm[ux.Body.Address]; ok { - outs[ux.Body.Address] = append(outs[ux.Body.Address], ux) - } - } - - return outs, nil -} - -// SetTransactionsAnnounced updates announced time of specific tx -func (vs *Visor) SetTransactionsAnnounced(hashes map[cipher.SHA256]int64) error { - if len(hashes) == 0 { - return nil - } - - return vs.db.Update("SetTransactionsAnnounced", func(tx *dbutil.Tx) error { - return vs.unconfirmed.SetTransactionsAnnounced(tx, hashes) - }) -} - -// GetBalanceOfAddresses returns balance pairs of given addreses -func (vs Visor) GetBalanceOfAddresses(addrs []cipher.Address) ([]wallet.BalancePair, error) { - if len(addrs) == 0 { - return nil, nil - } - - auxs := make(coin.AddressUxOuts, len(addrs)) - recvUxs := make(coin.AddressUxOuts, len(addrs)) - var uxa coin.UxArray - var head *coin.SignedBlock - - if err := vs.db.View("GetBalanceOfAddresses", func(tx *dbutil.Tx) error { - var err error - head, err = vs.blockchain.Head(tx) - if err != nil { - return err - } - - // Get all transactions from the unconfirmed pool - txns, err := vs.unconfirmed.AllRawTransactions(tx) - if err != nil { - return err - } - - // Create predicted unspent outputs from the unconfirmed transactions - recvUxs, err = txnOutputsForAddrs(head.Head, addrs, txns) - if err != nil { - return err - } - - var inputs []cipher.SHA256 - for _, txn := range txns { - inputs = append(inputs, txn.In...) 
- } - - // Get unspents for the inputs being spent - uxa, err = vs.blockchain.Unspent().GetArray(tx, inputs) - if err != nil { - return fmt.Errorf("GetArray failed when checking addresses balance: %v", err) - } - - // Get unspents owned by the addresses - auxs, err = vs.blockchain.Unspent().GetUnspentsOfAddrs(tx, addrs) - if err != nil { - return fmt.Errorf("GetUnspentsOfAddrs failed when checking addresses balance: %v", err) - } - - return nil - }); err != nil { - return nil, err - } - - // Build all unconfirmed transaction inputs that are associated with the addresses - spendUxs := make(coin.AddressUxOuts, len(addrs)) - - addrm := make(map[cipher.Address]struct{}, len(addrs)) - for _, addr := range addrs { - addrm[addr] = struct{}{} - } - - for _, ux := range uxa { - if _, ok := addrm[ux.Body.Address]; ok { - spendUxs[ux.Body.Address] = append(spendUxs[ux.Body.Address], ux) - } - } - - var bps []wallet.BalancePair - - headTime := head.Time() - for _, addr := range addrs { - uxs, ok := auxs[addr] - if !ok { - bps = append(bps, wallet.BalancePair{}) - continue - } - - outUxs := spendUxs[addr] - inUxs := recvUxs[addr] - predictedUxs := uxs.Sub(outUxs).Add(inUxs) - - coins, err := uxs.Coins() - if err != nil { - return nil, fmt.Errorf("uxs.Coins failed: %v", err) - } - - coinHours, err := uxs.CoinHours(headTime) - if err != nil { - switch err { - case coin.ErrAddEarnedCoinHoursAdditionOverflow: - coinHours = 0 - default: - return nil, fmt.Errorf("uxs.CoinHours failed: %v", err) - } - } - - pcoins, err := predictedUxs.Coins() - if err != nil { - return nil, fmt.Errorf("predictedUxs.Coins failed: %v", err) - } - - pcoinHours, err := predictedUxs.CoinHours(headTime) - if err != nil { - switch err { - case coin.ErrAddEarnedCoinHoursAdditionOverflow: - coinHours = 0 - default: - return nil, fmt.Errorf("predictedUxs.CoinHours failed: %v", err) - } - } - - bp := wallet.BalancePair{ - Confirmed: wallet.Balance{ - Coins: coins, - Hours: coinHours, - }, - Predicted: 
wallet.Balance{ - Coins: pcoins, - Hours: pcoinHours, - }, - } - - bps = append(bps, bp) - } - - return bps, nil -} - -// GetUnspentsOfAddrs returns unspent outputs of multiple addresses -func (vs *Visor) GetUnspentsOfAddrs(addrs []cipher.Address) (coin.AddressUxOuts, error) { - var uxa coin.AddressUxOuts - - if err := vs.db.View("GetUnspentsOfAddrs", func(tx *dbutil.Tx) error { - var err error - uxa, err = vs.blockchain.Unspent().GetUnspentsOfAddrs(tx, addrs) - return err - }); err != nil { - return nil, err - } - - return uxa, nil -} - -// VerifyTxnVerbose verifies a transaction, it returns transaction's input uxouts, whether the -// transaction is confirmed, and error if any -func (vs *Visor) VerifyTxnVerbose(txn *coin.Transaction, signed TxnSignedFlag) ([]TransactionInput, bool, error) { - var uxa coin.UxArray - var isTxnConfirmed bool - var feeCalcTime uint64 - - verifyErr := vs.db.View("VerifyTxnVerbose", func(tx *dbutil.Tx) error { - head, err := vs.blockchain.Head(tx) - if err != nil { - return err - } - - uxa, err = vs.blockchain.Unspent().GetArray(tx, txn.In) - switch e := err.(type) { - case nil: - // For unconfirmed transactions, use the blockchain head time to calculate hours - feeCalcTime = head.Time() - - case blockdb.ErrUnspentNotExist: - // Gets uxouts of txn.In from historydb - outs, err := vs.history.GetUxOuts(tx, txn.In) - if err != nil { - return err - } - - if len(outs) == 0 { - err = fmt.Errorf("transaction input of %s does not exist in either unspent pool or historydb", e.UxID) - return NewErrTxnViolatesHardConstraint(err) - } - - uxa = coin.UxArray{} - for _, out := range outs { - uxa = append(uxa, out.Out) - } - - // Checks if the transaction is confirmed - txnHash := txn.Hash() - historyTxn, err := vs.history.GetTransaction(tx, txnHash) - if err != nil { - return fmt.Errorf("get transaction of %v from historydb failed: %v", txnHash, err) - } - - if historyTxn != nil { - // Transaction is confirmed - isTxnConfirmed = true - } - - // For 
confirmed transactions, use the previous block time to calculate hours and fees, - // except for the genesis block which has no previous block and has no inputs nor fees. - feeCalcTime = 0 - if historyTxn.BlockSeq > 0 { - if isTxnConfirmed { - prevBlock, err := vs.blockchain.GetSignedBlockBySeq(tx, historyTxn.BlockSeq-1) - if err != nil { - return err - } - if prevBlock == nil { - return fmt.Errorf("VerifyTxnVerbose: previous block seq=%d not found", historyTxn.BlockSeq-1) - } - - feeCalcTime = prevBlock.Block.Head.Time - } - } - - return nil - default: - return err - } - - if err := VerifySingleTxnUserConstraints(*txn); err != nil { - return err - } - - if err := VerifySingleTxnSoftConstraints(*txn, feeCalcTime, uxa, vs.Config.Distribution, params.UserVerifyTxn); err != nil { - return err - } - - return VerifySingleTxnHardConstraints(*txn, head.Head, uxa, signed) - }) - - // If we were able to query the inputs, return the verbose inputs to the caller - // even if the transaction failed validation - var inputs []TransactionInput - if len(uxa) != 0 && feeCalcTime != 0 { - var err error - inputs, err = NewTransactionInputs(uxa, feeCalcTime) - if err != nil { - return nil, isTxnConfirmed, err - } - } - - return inputs, isTxnConfirmed, verifyErr -} - -// AddressCount returns the total number of addresses with unspents -func (vs *Visor) AddressCount() (uint64, error) { - var count uint64 - if err := vs.db.View("AddressCount", func(tx *dbutil.Tx) error { - var err error - count, err = vs.blockchain.Unspent().AddressCount(tx) - return err - }); err != nil { - return 0, err - } - - return count, nil -} - -// GetVerboseTransactionsForAddress returns verbose transaction data for a given address -func (vs *Visor) GetVerboseTransactionsForAddress(a cipher.Address) ([]Transaction, [][]TransactionInput, error) { - var txns []Transaction - var inputs [][]TransactionInput - - if err := vs.db.View("GetVerboseTransactionsForAddress", func(tx *dbutil.Tx) error { - addrTxns, err := 
vs.getTransactionsForAddresses(tx, []cipher.Address{a}) - if err != nil { - logger.Errorf("GetVerboseTransactionsForAddress: vs.GetTransactionsForAddress failed: %v", err) - return err - } - - txns = addrTxns[a] - if len(txns) == 0 { - return nil - } - - head, err := vs.blockchain.Head(tx) - if err != nil { - logger.Errorf("GetVerboseTransactionsForAddress: vs.blockchain.Head failed: %v", err) - return err - } - - inputs = make([][]TransactionInput, len(txns)) - - for i, txn := range txns { - // If the txn is confirmed, use the time of the block previous - // to the block in which the transaction was executed, - // else use the head time for unconfirmed blocks. - t := head.Time() - if txn.Status.Confirmed && txn.Status.BlockSeq > 0 { - prevBlock, err := vs.blockchain.GetSignedBlockBySeq(tx, txn.Status.BlockSeq-1) - if err != nil { - return err - } - - if prevBlock == nil { - return fmt.Errorf("GetVerboseTransactionsForAddress prevBlock seq=%d missing", txn.Status.BlockSeq-1) - } - - t = prevBlock.Block.Head.Time - } - - txnInputs := make([]TransactionInput, len(txn.Transaction.In)) - for j, inputID := range txn.Transaction.In { - uxOuts, err := vs.history.GetUxOuts(tx, []cipher.SHA256{inputID}) - if err != nil { - logger.Errorf("GetVerboseTransactionsForAddress: vs.history.GetUxOuts failed: %v", err) - return err - } - if len(uxOuts) == 0 { - err := fmt.Errorf("uxout of %v does not exist in history db", inputID.Hex()) - logger.Critical().Error(err) - return err - } - - input, err := NewTransactionInput(uxOuts[0].Out, t) - if err != nil { - logger.Errorf("GetVerboseTransactionsForAddress: NewTransactionInput failed: %v", err) - return err - } - - txnInputs[j] = input - } - - inputs[i] = txnInputs - } - - return nil - }); err != nil { - return nil, nil, err - } - - return txns, inputs, nil -} - -// OutputsFilter used as optional arguments in GetUnspentOutputs method -type OutputsFilter func(outputs coin.UxArray) coin.UxArray - -// FbyAddressesNotIncluded filters the 
unspent outputs that are not owned by the addresses -func FbyAddressesNotIncluded(addrs []cipher.Address) OutputsFilter { - return func(outputs coin.UxArray) coin.UxArray { - addrMatch := coin.UxArray{} - addrMap := newAddrSet(addrs) - - for _, u := range outputs { - if _, ok := addrMap[u.Body.Address]; !ok { - addrMatch = append(addrMatch, u) - } - } - return addrMatch - } -} - -// FbyAddresses filters the unspent outputs that owned by the addresses -func FbyAddresses(addrs []cipher.Address) OutputsFilter { - return func(outputs coin.UxArray) coin.UxArray { - addrMatch := coin.UxArray{} - addrMap := newAddrSet(addrs) - - for _, u := range outputs { - if _, ok := addrMap[u.Body.Address]; ok { - addrMatch = append(addrMatch, u) - } - } - return addrMatch - } -} - -// FbyHashes filters the unspent outputs that have hashes matched. -func FbyHashes(hashes []cipher.SHA256) OutputsFilter { - return func(outputs coin.UxArray) coin.UxArray { - hsMatch := coin.UxArray{} - hsMap := newSHA256Set(hashes) - - for _, u := range outputs { - if _, ok := hsMap[u.Hash()]; ok { - hsMatch = append(hsMatch, u) - } - } - return hsMatch - } -} - -func newAddrSet(keys []cipher.Address) map[cipher.Address]struct{} { - s := make(map[cipher.Address]struct{}, len(keys)) - for _, k := range keys { - s[k] = struct{}{} - } - return s -} - -// newSHA256Set returns a map-based set for string lookup -func newSHA256Set(keys []cipher.SHA256) map[cipher.SHA256]struct{} { - s := make(map[cipher.SHA256]struct{}, len(keys)) - for _, k := range keys { - s[k] = struct{}{} - } - return s -} - -// GetUnspentOutputsSummary gets unspent outputs and returns the filtered results, -// Note: all filters will be executed as the pending sequence in 'AND' mode. 
-func (vs *Visor) GetUnspentOutputsSummary(filters []OutputsFilter) (*UnspentOutputsSummary, error) { - var confirmedOutputs []coin.UxOut - var outgoingOutputs coin.UxArray - var incomingOutputs coin.UxArray - var head *coin.SignedBlock - - if err := vs.db.View("GetUnspentOutputsSummary", func(tx *dbutil.Tx) error { - var err error - head, err = vs.blockchain.Head(tx) - if err != nil { - return fmt.Errorf("vs.blockchain.Head failed: %v", err) - } - - confirmedOutputs, err = vs.blockchain.Unspent().GetAll(tx) - if err != nil { - return fmt.Errorf("vs.blockchain.Unspent().GetAll failed: %v", err) - } - - outgoingOutputs, err = vs.unconfirmedOutgoingOutputs(tx) - if err != nil { - return fmt.Errorf("vs.unconfirmedOutgoingOutputs failed: %v", err) - } - - incomingOutputs, err = vs.unconfirmedIncomingOutputs(tx) - if err != nil { - return fmt.Errorf("vs.unconfirmedIncomingOutputs failed: %v", err) - } - - return nil - }); err != nil { - return nil, err - } - - for _, flt := range filters { - confirmedOutputs = flt(confirmedOutputs) - outgoingOutputs = flt(outgoingOutputs) - incomingOutputs = flt(incomingOutputs) - } - - confirmed, err := NewUnspentOutputs(confirmedOutputs, head.Time()) - if err != nil { - return nil, err - } - - outgoing, err := NewUnspentOutputs(outgoingOutputs, head.Time()) - if err != nil { - return nil, err - } - - incoming, err := NewUnspentOutputs(incomingOutputs, head.Time()) - if err != nil { - return nil, err - } - - return &UnspentOutputsSummary{ - HeadBlock: head, - Confirmed: confirmed, - Outgoing: outgoing, - Incoming: incoming, - }, nil -} - -// GetRichlist returns a Richlist -func (vs *Visor) GetRichlist(includeDistribution bool) (Richlist, error) { - rbOuts, err := vs.GetUnspentOutputsSummary(nil) - if err != nil { - return nil, err - } - - // Build a map from addresses to total coins held - allAccounts := map[cipher.Address]uint64{} - for _, out := range rbOuts.Confirmed { - if _, ok := allAccounts[out.Body.Address]; ok { - var err 
error - allAccounts[out.Body.Address], err = mathutil.AddUint64(allAccounts[out.Body.Address], out.Body.Coins) - if err != nil { - return nil, err - } - } else { - allAccounts[out.Body.Address] = out.Body.Coins - } - } - - lockedAddrs := vs.Config.Distribution.LockedAddressesDecoded() - addrsMap := make(map[cipher.Address]struct{}, len(lockedAddrs)) - for _, a := range lockedAddrs { - addrsMap[a] = struct{}{} - } - - richlist, err := NewRichlist(allAccounts, addrsMap) - if err != nil { - return nil, err - } - - if !includeDistribution { - unlockedAddrs := vs.Config.Distribution.UnlockedAddressesDecoded() - for _, a := range unlockedAddrs { - addrsMap[a] = struct{}{} - } - richlist = richlist.FilterAddresses(addrsMap) - } - - return richlist, nil -} - -// WithUpdateTx executes a function inside of a db.Update transaction. -// This is exported for use by the daemon gateway's InjectBroadcastTransaction method. -// Do not use it for other purposes. -func (vs *Visor) WithUpdateTx(name string, f func(tx *dbutil.Tx) error) error { - return vs.db.Update(name, func(tx *dbutil.Tx) error { - return f(tx) - }) -} - -// AddressesActivity returns whether or not each address has any activity on blockchain -// or in the unconfirmed pool -func (vs *Visor) AddressesActivity(addrs []cipher.Address) ([]bool, error) { - active := make([]bool, len(addrs)) - addrsMap := make(map[cipher.Address]int, len(addrs)) - for i, a := range addrs { - addrsMap[a] = i - } - - if len(addrsMap) != len(addrs) { - return nil, errors.New("duplicates addresses not allowed") - } - - if err := vs.db.View("AddressActivity", func(tx *dbutil.Tx) error { - // Check if the addresses appear in the blockchain - for i, a := range addrs { - ok, err := vs.history.AddressSeen(tx, a) - if err != nil { - return err - } - - if ok { - active[i] = true - } - } - - // Check if the addresses appears in the unconfirmed pool - // NOTE: if this needs to be optimized, add an index to the unconfirmed pool - return 
vs.unconfirmed.ForEach(tx, func(h cipher.SHA256, ut UnconfirmedTransaction) error { - // Only transaction outputs need to be checked; if the address is associated - // with an input, it must have appeared in a transaction in the blockchain history - for _, o := range ut.Transaction.Out { - if i, ok := addrsMap[o.Address]; ok { - active[i] = true - } - } - return nil - }) - }); err != nil { - return nil, err - } - - return active, nil -} diff --git a/vendor/github.com/SkycoinProject/skycoin/src/visor/visor_wallet.go b/vendor/github.com/SkycoinProject/skycoin/src/visor/visor_wallet.go deleted file mode 100644 index 90394d3..0000000 --- a/vendor/github.com/SkycoinProject/skycoin/src/visor/visor_wallet.go +++ /dev/null @@ -1,559 +0,0 @@ -package visor - -// This file contains Visor method that require wallet access - -import ( - "errors" - - "github.com/SkycoinProject/skycoin/src/cipher" - "github.com/SkycoinProject/skycoin/src/coin" - "github.com/SkycoinProject/skycoin/src/params" - "github.com/SkycoinProject/skycoin/src/transaction" - "github.com/SkycoinProject/skycoin/src/util/mathutil" - "github.com/SkycoinProject/skycoin/src/visor/dbutil" - "github.com/SkycoinProject/skycoin/src/wallet" -) - -// UserError wraps user input-related errors. -// Errors caused by programmer input or internal issues should not use this wrapper. -// Some knowledge of the HTTP API layer may be necessary to decide when to use UserError or not. 
-type UserError struct { - error -} - -// NewUserError creates an Error -func NewUserError(err error) error { - if err == nil { - return nil - } - return UserError{err} -} - -var ( - // ErrSpendingUnconfirmed is returned if caller attempts to spend unconfirmed outputs - ErrSpendingUnconfirmed = NewUserError(errors.New("Please spend after your pending transaction is confirmed")) - // ErrDuplicateUxOuts UxOuts contains duplicate values - ErrDuplicateUxOuts = NewUserError(errors.New("UxOuts contains duplicate values")) - // ErrIncludesNullAddress Addresses must not contain the null address - ErrIncludesNullAddress = NewUserError(errors.New("Addresses must not contain the null address")) - // ErrDuplicateAddresses Addresses contains duplicate values - ErrDuplicateAddresses = NewUserError(errors.New("Addresses contains duplicate values")) - // ErrCreateTransactionParamsConflict UxOuts and Addresses cannot be combined - ErrCreateTransactionParamsConflict = NewUserError(errors.New("UxOuts and Addresses cannot be combined")) - // ErrTransactionAlreadySigned attempted to sign a transaction that is already fully signed - ErrTransactionAlreadySigned = NewUserError(errors.New("Transaction is already fully signed")) - // ErrUxOutsOrAddressesRequired Both Addresses and UxOuts are empty - ErrUxOutsOrAddressesRequired = NewUserError(errors.New("UxOuts or Addresses must not be empty")) - // ErrNoSpendableOutputs after filtering unconfirmed spend outputs, there are no remaining outputs available for transaction creation - ErrNoSpendableOutputs = NewUserError(errors.New("All selected outputs are unavailable for spending")) -) - -// GetWalletBalance returns balance pairs of specific wallet -func (vs *Visor) GetWalletBalance(wltID string) (wallet.BalancePair, wallet.AddressBalances, error) { - var addressBalances wallet.AddressBalances - var walletBalance wallet.BalancePair - var addrsBalanceList []wallet.BalancePair - var addrs []cipher.Address - - if err := vs.wallets.View(wltID, 
func(w wallet.Wallet) error { - var err error - addrs, err = w.GetSkycoinAddresses() - if err != nil { - return err - } - - addrsBalanceList, err = vs.GetBalanceOfAddresses(addrs) - return err - }); err != nil { - return walletBalance, addressBalances, err - } - - // create map of address to balance - addressBalances = make(wallet.AddressBalances, len(addrs)) - for i, addr := range addrs { - addressBalances[addr.String()] = addrsBalanceList[i] - } - - // compute the sum of all addresses - for _, addrBalance := range addressBalances { - var err error - // compute confirmed balance - walletBalance.Confirmed.Coins, err = mathutil.AddUint64(walletBalance.Confirmed.Coins, addrBalance.Confirmed.Coins) - if err != nil { - return walletBalance, addressBalances, err - } - walletBalance.Confirmed.Hours, err = mathutil.AddUint64(walletBalance.Confirmed.Hours, addrBalance.Confirmed.Hours) - if err != nil { - return walletBalance, addressBalances, err - } - - // compute predicted balance - walletBalance.Predicted.Coins, err = mathutil.AddUint64(walletBalance.Predicted.Coins, addrBalance.Predicted.Coins) - if err != nil { - return walletBalance, addressBalances, err - } - walletBalance.Predicted.Hours, err = mathutil.AddUint64(walletBalance.Predicted.Hours, addrBalance.Predicted.Hours) - if err != nil { - return walletBalance, addressBalances, err - } - } - - return walletBalance, addressBalances, nil -} - -// GetWalletUnconfirmedTransactions returns all unconfirmed transactions in given wallet -func (vs *Visor) GetWalletUnconfirmedTransactions(wltID string) ([]UnconfirmedTransaction, error) { - var txns []UnconfirmedTransaction - - if err := vs.wallets.View(wltID, func(w wallet.Wallet) error { - addrs, err := w.GetSkycoinAddresses() - if err != nil { - return err - } - - txns, err = vs.GetUnconfirmedTransactions(SendsToAddresses(addrs)) - return err - }); err != nil { - return nil, err - } - - return txns, nil -} - -// GetWalletUnconfirmedTransactionsVerbose returns all 
unconfirmed transactions in given wallet -func (vs *Visor) GetWalletUnconfirmedTransactionsVerbose(wltID string) ([]UnconfirmedTransaction, [][]TransactionInput, error) { - var txns []UnconfirmedTransaction - var inputs [][]TransactionInput - - if err := vs.wallets.View(wltID, func(w wallet.Wallet) error { - addrs, err := w.GetSkycoinAddresses() - if err != nil { - return err - } - - txns, inputs, err = vs.GetUnconfirmedTransactionsVerbose(SendsToAddresses(addrs)) - return err - }); err != nil { - return nil, nil, err - } - - return txns, inputs, nil -} - -// WalletSignTransaction signs a transaction. Specific inputs may be signed by specifying signIndexes. -// If signIndexes is empty, all inputs will be signed. The transaction must be fully valid and spendable. -func (vs *Visor) WalletSignTransaction(wltID string, password []byte, txn *coin.Transaction, signIndexes []int) (*coin.Transaction, []TransactionInput, error) { - var inputs []TransactionInput - var signedTxn *coin.Transaction - - if txn.IsFullySigned() { - return nil, nil, ErrTransactionAlreadySigned - } - - if err := vs.wallets.ViewSecrets(wltID, password, func(w wallet.Wallet) error { - return vs.db.View("WalletSignTransaction", func(tx *dbutil.Tx) error { - // Verify the transaction before signing - if err := VerifySingleTxnUserConstraints(*txn); err != nil { - return err - } - if _, _, err := vs.blockchain.VerifySingleTxnSoftHardConstraints(tx, *txn, vs.Config.Distribution, params.UserVerifyTxn, TxnUnsigned); err != nil { - return err - } - - headTime, err := vs.blockchain.Time(tx) - if err != nil { - logger.WithError(err).Error("blockchain.Time failed") - return err - } - - inputs, err = vs.getTransactionInputs(tx, headTime, txn.In) - if err != nil { - return err - } - - uxOuts := make([]coin.UxOut, len(inputs)) - for i, in := range inputs { - uxOuts[i] = in.UxOut - } - - signedTxn, err = wallet.SignTransaction(w, txn, signIndexes, uxOuts) - if err != nil { - 
logger.WithError(err).Error("wallet.SignTransaction failed") - return err - } - - signed := TxnSigned - if !signedTxn.IsFullySigned() { - signed = TxnUnsigned - } - - if err := VerifySingleTxnUserConstraints(*signedTxn); err != nil { - // This shouldn't happen since we verified in the beginning; if it does, then wallet.SignTransaction has a bug - logger.Critical().WithError(err).Error("Signed transaction violates transaction user constraints") - return err - } - - if _, _, err := vs.blockchain.VerifySingleTxnSoftHardConstraints(tx, *signedTxn, vs.Config.Distribution, params.UserVerifyTxn, signed); err != nil { - // This shouldn't happen since we verified in the beginning; if it does, then wallet.SignTransaction has a bug - logger.Critical().WithError(err).Error("Signed transaction violates transaction constraints") - return err - } - - return nil - }) - }); err != nil { - return nil, nil, err - } - - return signedTxn, inputs, nil -} - -// CreateTransactionParams parameters for transaction creation -type CreateTransactionParams struct { - UxOuts []cipher.SHA256 - Addresses []cipher.Address - // IgnoreUnconfirmed if true, outputs matching Addresses or UxOuts spent by - // an unconfirmed transactions will be ignored, otherwise an error will be returned - IgnoreUnconfirmed bool -} - -// Validate validates params -func (p CreateTransactionParams) Validate() error { - if len(p.UxOuts) != 0 && len(p.Addresses) != 0 { - return ErrCreateTransactionParamsConflict - } - - // Check for duplicate addresses - addressMap := make(map[cipher.Address]struct{}, len(p.Addresses)) - for _, a := range p.Addresses { - if a.Null() { - return ErrIncludesNullAddress - } - - if _, ok := addressMap[a]; ok { - return ErrDuplicateAddresses - } - - addressMap[a] = struct{}{} - } - - // Check for duplicate spending uxouts - uxOuts := make(map[cipher.SHA256]struct{}, len(p.UxOuts)) - for _, o := range p.UxOuts { - if _, ok := uxOuts[o]; ok { - return ErrDuplicateUxOuts - } - uxOuts[o] = struct{}{} 
- } - - return nil -} - -// WalletCreateTransactionSigned creates a signed transaction based upon the parameters in CreateTransactionParams -func (vs *Visor) WalletCreateTransactionSigned(wltID string, password []byte, p transaction.Params, wp CreateTransactionParams) (*coin.Transaction, []TransactionInput, error) { - // Validate params before unlocking wallet - if err := p.Validate(); err != nil { - return nil, nil, err - } - if err := wp.Validate(); err != nil { - return nil, nil, err - } - - var txn *coin.Transaction - var inputs []TransactionInput - - if err := vs.wallets.UpdateSecrets(wltID, password, func(w wallet.Wallet) error { - var err error - txn, inputs, err = vs.walletCreateTransaction("WalletCreateTransactionSigned", w, p, wp, TxnSigned) - return err - }); err != nil { - return nil, nil, err - } - - return txn, inputs, nil -} - -// WalletCreateTransaction creates a transaction based upon the parameters in CreateTransactionParams -func (vs *Visor) WalletCreateTransaction(wltID string, p transaction.Params, wp CreateTransactionParams) (*coin.Transaction, []TransactionInput, error) { - // Validate params before opening wallet - if err := p.Validate(); err != nil { - return nil, nil, err - } - if err := wp.Validate(); err != nil { - return nil, nil, err - } - - var txn *coin.Transaction - var inputs []TransactionInput - - if err := vs.wallets.Update(wltID, func(w wallet.Wallet) error { - var err error - txn, inputs, err = vs.walletCreateTransaction("WalletCreateTransaction", w, p, wp, TxnUnsigned) - return err - }); err != nil { - return nil, nil, err - } - - return txn, inputs, nil -} - -func (vs *Visor) walletCreateTransaction(methodName string, w wallet.Wallet, p transaction.Params, wp CreateTransactionParams, signed TxnSignedFlag) (*coin.Transaction, []TransactionInput, error) { - if err := p.Validate(); err != nil { - return nil, nil, err - } - if err := wp.Validate(); err != nil { - return nil, nil, err - } - - // Get all addresses from the wallet 
for checking params against - walletAddresses, err := w.GetSkycoinAddresses() - if err != nil { - return nil, nil, err - } - - walletAddressesMap := make(map[cipher.Address]struct{}, len(walletAddresses)) - for _, a := range walletAddresses { - walletAddressesMap[a] = struct{}{} - } - - addrs := wp.Addresses - if len(addrs) == 0 { - // Use all wallet addresses if no addresses or uxouts specified - addrs = walletAddresses - } else { - // Check that requested addresses are in the wallet - for _, a := range addrs { - if _, ok := walletAddressesMap[a]; !ok { - return nil, nil, wallet.ErrUnknownAddress - } - } - } - - var txn *coin.Transaction - var uxb []transaction.UxBalance - - if err := vs.db.View(methodName, func(tx *dbutil.Tx) error { - var err error - txn, uxb, err = vs.walletCreateTransactionTx(tx, methodName, w, p, wp, signed, addrs, walletAddressesMap) - return err - }); err != nil { - return nil, nil, err - } - - inputs := NewTransactionInputsFromUxBalance(uxb) - - return txn, inputs, nil -} - -func (vs *Visor) walletCreateTransactionTx(tx *dbutil.Tx, methodName string, - w wallet.Wallet, p transaction.Params, wp CreateTransactionParams, signed TxnSignedFlag, - addrs []cipher.Address, walletAddressesMap map[cipher.Address]struct{}) (*coin.Transaction, []transaction.UxBalance, error) { - // Note: assumes inputs have already been validated by walletCreateTransaction - - head, err := vs.blockchain.Head(tx) - if err != nil { - logger.WithError(err).Error("blockchain.Head failed") - return nil, nil, err - } - - // Get mapping of addresses to uxOuts based upon CreateTransactionParams - var auxs coin.AddressUxOuts - if len(wp.UxOuts) != 0 { - var err error - auxs, err = vs.getCreateTransactionAuxsUxOut(tx, wp.UxOuts, wp.IgnoreUnconfirmed) - if err != nil { - return nil, nil, err - } - - // Check that UxOut addresses are in the wallet, - for a := range auxs { - if _, ok := walletAddressesMap[a]; !ok { - return nil, nil, wallet.ErrUnknownUxOut - } - } - } else { - var 
err error - auxs, err = vs.getCreateTransactionAuxsAddress(tx, addrs, wp.IgnoreUnconfirmed) - if err != nil { - return nil, nil, err - } - } - - // Create and sign transaction - var txn *coin.Transaction - var uxb []transaction.UxBalance - - switch signed { - case TxnSigned: - txn, uxb, err = wallet.CreateTransactionSigned(w, p, auxs, head.Time()) - case TxnUnsigned: - txn, uxb, err = wallet.CreateTransaction(w, p, auxs, head.Time()) - default: - logger.Panic("Invalid TxnSignedFlag") - } - if err != nil { - logger.Critical().WithError(err).Errorf("%s failed", methodName) - return nil, nil, err - } - - if err := VerifySingleTxnUserConstraints(*txn); err != nil { - logger.WithError(err).Error("Created transaction violates transaction user constraints") - return nil, nil, err - } - - // The wallet can create transactions that would not pass all validation, such as the decimal restriction, - // because the wallet is not aware of visor-level constraints. - // Check that the transaction is valid before returning it to the caller. - // TODO -- decimal restriction was moved to params/ package so the wallet can verify now. Move visor/verify to new package? 
- if _, _, err := vs.blockchain.VerifySingleTxnSoftHardConstraints(tx, *txn, vs.Config.Distribution, params.UserVerifyTxn, signed); err != nil { - logger.WithError(err).Error("Created transaction violates transaction soft/hard constraints") - return nil, nil, err - } - - return txn, uxb, nil -} - -// CreateTransaction creates an unsigned transaction from requested coin.UxOut hashes -func (vs *Visor) CreateTransaction(p transaction.Params, wp CreateTransactionParams) (*coin.Transaction, []TransactionInput, error) { - // Validate parameters before starting database transaction - if err := p.Validate(); err != nil { - return nil, nil, err - } - if err := wp.Validate(); err != nil { - return nil, nil, err - } - if len(wp.Addresses) == 0 && len(wp.UxOuts) == 0 { - return nil, nil, ErrUxOutsOrAddressesRequired - } - - var txn *coin.Transaction - var uxb []transaction.UxBalance - - if err := vs.db.View("CreateTransaction", func(tx *dbutil.Tx) error { - var err error - txn, uxb, err = vs.createTransactionTx(tx, p, wp) - return err - }); err != nil { - return nil, nil, err - } - - inputs := NewTransactionInputsFromUxBalance(uxb) - - return txn, inputs, nil -} - -func (vs *Visor) createTransactionTx(tx *dbutil.Tx, p transaction.Params, wp CreateTransactionParams) (*coin.Transaction, []transaction.UxBalance, error) { - // Note: assumes inputs have already been validated by walletCreateTransaction - head, err := vs.blockchain.Head(tx) - if err != nil { - logger.WithError(err).Error("blockchain.Head failed") - return nil, nil, err - } - - // Get mapping of addresses to uxOuts based upon CreateTransactionParams - var auxs coin.AddressUxOuts - if len(wp.UxOuts) != 0 { - auxs, err = vs.getCreateTransactionAuxsUxOut(tx, wp.UxOuts, wp.IgnoreUnconfirmed) - } else { - auxs, err = vs.getCreateTransactionAuxsAddress(tx, wp.Addresses, wp.IgnoreUnconfirmed) - } - if err != nil { - return nil, nil, err - } - - txn, uxb, err := transaction.Create(p, auxs, head.Time()) - if err != nil { - 
return nil, nil, err - } - - if err := VerifySingleTxnUserConstraints(*txn); err != nil { - logger.WithError(err).Error("Created transaction violates transaction user constraints") - return nil, nil, err - } - - // The wallet can create transactions that would not pass all validation, such as the decimal restriction, - // because the wallet is not aware of visor-level constraints. - // Check that the transaction is valid before returning it to the caller. - // TODO -- decimal restriction was moved to params/ package so the wallet can verify now. Move visor/verify to new package? - if _, _, err := vs.blockchain.VerifySingleTxnSoftHardConstraints(tx, *txn, vs.Config.Distribution, params.UserVerifyTxn, TxnUnsigned); err != nil { - logger.WithError(err).Error("Created transaction violates transaction soft/hard constraints") - return nil, nil, err - } - - return txn, uxb, nil -} - -// getCreateTransactionAuxsUxOut returns a map of addresses to their unspent outputs, -// given a list of unspent output hashes. -// If ignoreUnconfirmed is true, outputs being spent by unconfirmed transactions are ignored and excluded from the return value. -// If ignoreUnconfirmed is false, an error is return if any of the specified unspent outputs are spent by an unconfirmed transaction. 
-func (vs *Visor) getCreateTransactionAuxsUxOut(tx *dbutil.Tx, uxOutHashes []cipher.SHA256, ignoreUnconfirmed bool) (coin.AddressUxOuts, error) { - hashesMap := make(map[cipher.SHA256]struct{}, len(uxOutHashes)) - for _, h := range uxOutHashes { - hashesMap[h] = struct{}{} - } - - // Check if any of the outputs are spent by an unconfirmed transaction - unconfirmedHashesMap := make(map[cipher.SHA256]struct{}) - if err := vs.unconfirmed.ForEach(tx, func(_ cipher.SHA256, txn UnconfirmedTransaction) error { - for _, h := range txn.Transaction.In { - if _, ok := hashesMap[h]; ok { - if !ignoreUnconfirmed { - return ErrSpendingUnconfirmed - } - unconfirmedHashesMap[h] = struct{}{} - } - } - return nil - }); err != nil { - return nil, err - } - - if !ignoreUnconfirmed && len(unconfirmedHashesMap) != 0 { - logger.Panic("ignoreUnconfirmed is false but unconfirmedHashesMap is not empty") - } - - // Filter unconfirmed spends - if len(unconfirmedHashesMap) != 0 { - filteredUxOutHashes := uxOutHashes[:0] - for _, h := range uxOutHashes { - if _, ok := unconfirmedHashesMap[h]; ok { - delete(hashesMap, h) - } else { - filteredUxOutHashes = append(filteredUxOutHashes, h) - } - } - uxOutHashes = filteredUxOutHashes - } - - if len(uxOutHashes) == 0 { - return nil, ErrNoSpendableOutputs - } - - // Retrieve the uxouts from the pool. 
- // An error is returned if any do not exist - uxOuts, err := vs.blockchain.Unspent().GetArray(tx, uxOutHashes) - if err != nil { - return nil, err - } - - // Build coin.AddressUxOuts map - return coin.NewAddressUxOuts(coin.UxArray(uxOuts)), nil -} - -// getCreateTransactionAuxsAddress returns a map of the addresses to their unspent outputs, -// filtering or erroring on unconfirmed outputs depending on the value of ignoreUnconfirmed -func (vs *Visor) getCreateTransactionAuxsAddress(tx *dbutil.Tx, addrs []cipher.Address, ignoreUnconfirmed bool) (coin.AddressUxOuts, error) { - // Get all address unspent hashes - addrHashes, err := vs.blockchain.Unspent().GetUnspentHashesOfAddrs(tx, addrs) - if err != nil { - return nil, err - } - - hashes := addrHashes.Flatten() - if len(hashes) == 0 { - return nil, transaction.ErrNoUnspents - } - - return vs.getCreateTransactionAuxsUxOut(tx, hashes, ignoreUnconfirmed) -} diff --git a/vendor/github.com/SkycoinProject/skycoin/src/wallet/balance.go b/vendor/github.com/SkycoinProject/skycoin/src/wallet/balance.go deleted file mode 100644 index e9edbe1..0000000 --- a/vendor/github.com/SkycoinProject/skycoin/src/wallet/balance.go +++ /dev/null @@ -1,87 +0,0 @@ -package wallet - -import ( - "github.com/SkycoinProject/skycoin/src/coin" - "github.com/SkycoinProject/skycoin/src/util/mathutil" -) - -/* -Do not show balances or outputs that have not cleared yet -- should only allow spends against outputs that are on head -*/ - -// BalancePair records the confirmed and predicted balance of an address -type BalancePair struct { - Confirmed Balance - Predicted Balance -} - -// AddressBalances represents a map of address balances -type AddressBalances map[string]BalancePair - -// Balance has coins and hours -type Balance struct { - Coins uint64 - Hours uint64 -} - -// NewBalance creates balance -func NewBalance(coins, hours uint64) Balance { - return Balance{ - Coins: coins, - Hours: hours, - } -} - -// NewBalanceFromUxOut creates Balance from 
UxOut -func NewBalanceFromUxOut(headTime uint64, ux *coin.UxOut) (Balance, error) { - hours, err := ux.CoinHours(headTime) - if err != nil { - return Balance{}, err - } - - return Balance{ - Coins: ux.Body.Coins, - Hours: hours, - }, nil -} - -// Add adds two Balances -func (bal Balance) Add(other Balance) (Balance, error) { - coins, err := mathutil.AddUint64(bal.Coins, other.Coins) - if err != nil { - return Balance{}, err - } - - hours, err := mathutil.AddUint64(bal.Hours, other.Hours) - if err != nil { - return Balance{}, err - } - - return Balance{ - Coins: coins, - Hours: hours, - }, nil -} - -// Sub subtracts other from self and returns the new Balance. Will panic if -// other is greater than balance, because Coins and Hours are unsigned. -func (bal Balance) Sub(other Balance) Balance { - if other.Coins > bal.Coins || other.Hours > bal.Hours { - logger.Panic("Cannot subtract balances, second balance is too large") - } - return Balance{ - Coins: bal.Coins - other.Coins, - Hours: bal.Hours - other.Hours, - } -} - -// Equals compares two Balances -func (bal Balance) Equals(other Balance) bool { - return bal.Coins == other.Coins && bal.Hours == other.Hours -} - -// IsZero returns true if the Balance is empty (both coins and hours) -func (bal Balance) IsZero() bool { - return bal.Coins == 0 && bal.Hours == 0 -} diff --git a/vendor/github.com/SkycoinProject/skycoin/src/wallet/bip44_wallet.go b/vendor/github.com/SkycoinProject/skycoin/src/wallet/bip44_wallet.go deleted file mode 100644 index e30b052..0000000 --- a/vendor/github.com/SkycoinProject/skycoin/src/wallet/bip44_wallet.go +++ /dev/null @@ -1,535 +0,0 @@ -package wallet - -import ( - "errors" - "fmt" - "math" - "sort" - - "github.com/sirupsen/logrus" - - "github.com/SkycoinProject/skycoin/src/cipher" - "github.com/SkycoinProject/skycoin/src/cipher/bip32" - "github.com/SkycoinProject/skycoin/src/cipher/bip39" - "github.com/SkycoinProject/skycoin/src/cipher/bip44" - 
"github.com/SkycoinProject/skycoin/src/util/file" - "github.com/SkycoinProject/skycoin/src/util/mathutil" -) - -// Bip44Wallet manages keys using the original Skycoin deterministic -// keypair generator method. -// With this generator, a single chain of addresses is created, each one dependent -// on the previous. -type Bip44Wallet struct { - Meta - ExternalEntries Entries - ChangeEntries Entries -} - -// newBip44Wallet creates a Bip44Wallet -func newBip44Wallet(meta Meta) (*Bip44Wallet, error) { //nolint:unparam - return &Bip44Wallet{ - Meta: meta, - }, nil -} - -// PackSecrets copies data from decrypted wallets into the secrets container -func (w *Bip44Wallet) PackSecrets(ss Secrets) { - ss.set(secretSeed, w.Meta.Seed()) - ss.set(secretSeedPassphrase, w.Meta.SeedPassphrase()) - - // Saves entry secret keys in secrets - for _, e := range w.ExternalEntries { - ss.set(e.Address.String(), e.Secret.Hex()) - } - for _, e := range w.ChangeEntries { - ss.set(e.Address.String(), e.Secret.Hex()) - } -} - -// UnpackSecrets copies data from decrypted secrets into the wallet -func (w *Bip44Wallet) UnpackSecrets(ss Secrets) error { - seed, ok := ss.get(secretSeed) - if !ok { - return errors.New("seed doesn't exist in secrets") - } - w.Meta.setSeed(seed) - - passphrase, _ := ss.get(secretSeedPassphrase) - w.Meta.setSeedPassphrase(passphrase) - - if err := w.ExternalEntries.unpackSecretKeys(ss); err != nil { - return err - } - return w.ChangeEntries.unpackSecretKeys(ss) -} - -// Clone clones the wallet a new wallet object -func (w *Bip44Wallet) Clone() Wallet { - return &Bip44Wallet{ - Meta: w.Meta.clone(), - ExternalEntries: w.ExternalEntries.clone(), - ChangeEntries: w.ChangeEntries.clone(), - } -} - -// CopyFrom copies the src wallet to w -func (w *Bip44Wallet) CopyFrom(src Wallet) { - w.Meta = src.(*Bip44Wallet).Meta.clone() - w.ExternalEntries = src.(*Bip44Wallet).ExternalEntries.clone() - w.ChangeEntries = src.(*Bip44Wallet).ChangeEntries.clone() -} - -// CopyFromRef 
copies the src wallet with a pointer dereference -func (w *Bip44Wallet) CopyFromRef(src Wallet) { - *w = *(src.(*Bip44Wallet)) -} - -// Erase wipes secret fields in wallet -func (w *Bip44Wallet) Erase() { - w.Meta.eraseSeeds() - w.ExternalEntries.erase() - w.ChangeEntries.erase() -} - -// ToReadable converts the wallet to its readable (serializable) format -func (w *Bip44Wallet) ToReadable() Readable { - return NewReadableBip44Wallet(w) -} - -// Validate validates the wallet -func (w *Bip44Wallet) Validate() error { - return w.Meta.validate() -} - -// GetAddresses returns all addresses in wallet -func (w *Bip44Wallet) GetAddresses() []cipher.Addresser { - return append(w.ExternalEntries.getAddresses(), w.ChangeEntries.getAddresses()...) -} - -// GetSkycoinAddresses returns all Skycoin addresses in wallet. The wallet's coin type must be Skycoin. -func (w *Bip44Wallet) GetSkycoinAddresses() ([]cipher.Address, error) { - if w.Meta.Coin() != CoinTypeSkycoin { - return nil, errors.New("Bip44Wallet coin type is not skycoin") - } - - return append(w.ExternalEntries.getSkycoinAddresses(), w.ChangeEntries.getSkycoinAddresses()...), nil -} - -// GetEntries returns a copy of all entries held by the wallet -func (w *Bip44Wallet) GetEntries() Entries { - if w.EntriesLen() == 0 { - return nil - } - return append(w.ExternalEntries.clone(), w.ChangeEntries.clone()...) 
-} - -// EntriesLen returns the number of entries in the wallet -func (w *Bip44Wallet) EntriesLen() int { - return len(w.ExternalEntries) + len(w.ChangeEntries) -} - -// GetEntryAt returns entry at a given index in the entries array -func (w *Bip44Wallet) GetEntryAt(i int) Entry { - if i >= len(w.ExternalEntries) { - return w.ChangeEntries[i-len(w.ExternalEntries)] - } - return w.ExternalEntries[i] -} - -// GetEntry returns entry of given address -func (w *Bip44Wallet) GetEntry(a cipher.Address) (Entry, bool) { - if e, ok := w.ExternalEntries.get(a); ok { - return e, true - } - - return w.ChangeEntries.get(a) -} - -// HasEntry returns true if the wallet has an Entry with a given cipher.Address. -func (w *Bip44Wallet) HasEntry(a cipher.Address) bool { - return w.ExternalEntries.has(a) || w.ChangeEntries.has(a) -} - -// CoinHDNode return the "coin" level bip44 HDNode -func (w *Bip44Wallet) CoinHDNode() (*bip44.Coin, error) { - // w.Meta.Seed() must return a valid bip39 mnemonic - seed, err := bip39.NewSeed(w.Meta.Seed(), w.Meta.SeedPassphrase()) - if err != nil { - return nil, err - } - - c, err := bip44.NewCoin(seed, w.Meta.Bip44Coin()) - if err != nil { - logger.Critical().WithError(err).Error("Failed to derive the bip44 purpose node") - if bip32.IsImpossibleChildError(err) { - logger.Critical().Error("ImpossibleChild: this seed cannot be used for bip44") - } - return nil, err - } - - return c, nil -} - -// nextChildIdx returns the next child index from a sequence of entries. -// This assumes that entries are sorted by child number ascending. -func nextChildIdx(e Entries) uint32 { - if len(e) == 0 { - return 0 - } - return e[len(e)-1].ChildNumber + 1 -} - -// generateEntries generates addresses for a change chain (should be 0 or 1) starting from an initial child number. 
-func (w *Bip44Wallet) generateEntries(num uint64, changeIdx, initialChildIdx uint32) (Entries, error) { - if w.Meta.IsEncrypted() { - return nil, ErrWalletEncrypted - } - - if num > math.MaxUint32 { - return nil, NewError(errors.New("Bip44Wallet.generateEntries num too large")) - } - - // Cap `num` in case it would exceed the maximum child index number - if math.MaxUint32-initialChildIdx < uint32(num) { - num = uint64(math.MaxUint32 - initialChildIdx) - } - - if num == 0 { - return nil, nil - } - - c, err := w.CoinHDNode() - if err != nil { - return nil, err - } - - // Generate the "account" HDNode. Multiple accounts are not supported; use 0. - account, err := c.Account(0) - if err != nil { - logger.Critical().WithError(err).Error("Failed to derive the bip44 account node") - if bip32.IsImpossibleChildError(err) { - logger.Critical().Error("ImpossibleChild: this seed cannot be used for bip44") - } - return nil, err - } - - // Generate the external chain parent node - chain, err := account.NewPrivateChildKey(changeIdx) - if err != nil { - logger.Critical().WithError(err).Error("Failed to derive the final bip44 chain node") - if bip32.IsImpossibleChildError(err) { - logger.Critical().Error("ImpossibleChild: this seed cannot be used for bip44") - } - return nil, err - } - - // Generate `num` secret keys from the external chain HDNode, skipping any children that - // are invalid (note that this has probability ~2^-128) - var seckeys []*bip32.PrivateKey - var addressIndices []uint32 - j := initialChildIdx - for i := uint32(0); i < uint32(num); i++ { - k, err := chain.NewPrivateChildKey(j) - - var addErr error - j, addErr = mathutil.AddUint32(j, 1) - if addErr != nil { - logger.Critical().WithError(addErr).WithFields(logrus.Fields{ - "num": num, - "initialChildIdx": initialChildIdx, - "accountIdx": 0, - "changeIdx": changeIdx, - "childIdx": j, - "i": i, - }).Error("childIdx can't be incremented any further") - return nil, errors.New("childIdx can't be incremented any 
further") - } - - if err != nil { - if bip32.IsImpossibleChildError(err) { - logger.Critical().WithError(err).WithFields(logrus.Fields{ - "accountIdx": 0, - "changeIdx": changeIdx, - "childIdx": j, - }).Error("ImpossibleChild for chain node child element") - continue - } else { - logger.Critical().WithError(err).WithFields(logrus.Fields{ - "accountIdx": 0, - "changeIdx": changeIdx, - "childIdx": j, - }).Error("NewPrivateChildKey failed unexpectedly") - return nil, err - } - } - - seckeys = append(seckeys, k) - addressIndices = append(addressIndices, j-1) - } - - entries := make(Entries, len(seckeys)) - makeAddress := w.Meta.AddressConstructor() - for i, xprv := range seckeys { - sk := cipher.MustNewSecKey(xprv.Key) - pk := cipher.MustPubKeyFromSecKey(sk) - entries[i] = Entry{ - Address: makeAddress(pk), - Secret: sk, - Public: pk, - ChildNumber: addressIndices[i], - Change: changeIdx, - } - } - - return entries, nil -} - -// PeekChangeEntry creates and returns an entry for the change chain. -// If used, the caller the append it with GenerateChangeEntry -func (w *Bip44Wallet) PeekChangeEntry() (Entry, error) { - entries, err := w.generateEntries(1, bip44.ChangeChainIndex, nextChildIdx(w.ChangeEntries)) - if err != nil { - return Entry{}, err - } - - if len(entries) == 0 { - return Entry{}, NewError(errors.New("PeekChangeEntry: no more change addresses")) - } - - return entries[0], nil -} - -// GenerateChangeEntry creates, appends and returns an entry for the change chain -func (w *Bip44Wallet) GenerateChangeEntry() (Entry, error) { - e, err := w.PeekChangeEntry() - if err != nil { - return Entry{}, err - } - - w.ChangeEntries = append(w.ChangeEntries, Entries{e}...) 
- - return w.ChangeEntries[len(w.ChangeEntries)-1], nil -} - -// GenerateAddresses generates addresses for the external chain, and appends them to the wallet's entries array -func (w *Bip44Wallet) GenerateAddresses(num uint64) ([]cipher.Addresser, error) { - entries, err := w.generateEntries(num, bip44.ExternalChainIndex, nextChildIdx(w.ExternalEntries)) - if err != nil { - return nil, err - } - - w.ExternalEntries = append(w.ExternalEntries, entries...) - - return entries.getAddresses(), nil -} - -// GenerateSkycoinAddresses generates Skycoin addresses for the external chain, and appends them to the wallet's entries array. -// If the wallet's coin type is not Skycoin, returns an error -func (w *Bip44Wallet) GenerateSkycoinAddresses(num uint64) ([]cipher.Address, error) { - if w.Meta.Coin() != CoinTypeSkycoin { - return nil, errors.New("GenerateSkycoinAddresses called for non-skycoin wallet") - } - - entries, err := w.generateEntries(num, bip44.ExternalChainIndex, nextChildIdx(w.ExternalEntries)) - if err != nil { - return nil, err - } - - w.ExternalEntries = append(w.ExternalEntries, entries...) - - return entries.getSkycoinAddresses(), nil -} - -// ScanAddresses scans ahead N addresses, truncating up to the highest address with any transaction history. 
-func (w *Bip44Wallet) ScanAddresses(scanN uint64, tf TransactionsFinder) error { - if w.Meta.IsEncrypted() { - return ErrWalletEncrypted - } - - if scanN == 0 { - return nil - } - - w2 := w.Clone().(*Bip44Wallet) - - externalEntries, err := scanAddressesBip32(func(num uint64, childIdx uint32) (Entries, error) { - return w.generateEntries(num, bip44.ExternalChainIndex, childIdx) - }, scanN, tf, nextChildIdx(w2.ExternalEntries)) - if err != nil { - return err - } - - changeEntries, err := scanAddressesBip32(func(num uint64, childIdx uint32) (Entries, error) { - return w.generateEntries(num, bip44.ChangeChainIndex, childIdx) - }, scanN, tf, nextChildIdx(w2.ChangeEntries)) - if err != nil { - return err - } - - // Add scanned entries - w2.ExternalEntries = append(w2.ExternalEntries, externalEntries...) - w2.ChangeEntries = append(w2.ChangeEntries, changeEntries...) - - *w = *w2 - - return nil -} - -// scanAddressesBip32 implements the address scanning algorithm for bip32 -// based (e.g. bip44, xpub) wallets -func scanAddressesBip32(generateEntries func(num uint64, childIdx uint32) (Entries, error), scanN uint64, tf TransactionsFinder, initialChildIdx uint32) (Entries, error) { - if scanN == 0 { - return nil, nil - } - - nAddAddrs := uint64(0) - n := scanN - extraScan := uint64(0) - childIdx := initialChildIdx - var newEntries Entries - - for { - // Generate the addresses to scan - entries, err := generateEntries(n, childIdx) - if err != nil { - return nil, err - } - - if len(entries) == 0 { - break - } - - // The bip32 child key sequence is finite and may be truncated at its limit - n = uint64(len(entries)) - if n == 0 { - break - } - - childIdx = nextChildIdx(entries) - - newEntries = append(newEntries, entries...) 
- - addrs := entries.getSkycoinAddresses() - - // Find if these addresses had any activity - active, err := tf.AddressesActivity(addrs) - if err != nil { - return nil, err - } - - // Check activity from the last one until we find the address that has activity - var keepNum uint64 - for i := len(active) - 1; i >= 0; i-- { - if active[i] { - keepNum = uint64(i + 1) - break - } - } - - if keepNum == 0 { - break - } - - nAddAddrs += keepNum + extraScan - - if n < keepNum { - logger.Panic("n should never be less than keepNum") - } - - // extraScan is the number of addresses with no activity beyond the - // last address with activity - extraScan = n - keepNum - - // n is the number of addresses to scan the next iteration - n = scanN - extraScan - } - - return newEntries[:nAddAddrs], nil -} - -// Fingerprint returns a unique ID fingerprint for this wallet, composed of its initial address -// and wallet type -func (w *Bip44Wallet) Fingerprint() string { - addr := "" - if len(w.ExternalEntries) == 0 { - if !w.IsEncrypted() { - entries, err := w.generateEntries(1, bip44.ExternalChainIndex, 0) - if err != nil { - logger.WithError(err).Panic("Fingerprint failed to generate initial entry for empty wallet") - } - addr = entries[0].Address.String() - } - } else { - addr = w.ExternalEntries[0].Address.String() - } - return fmt.Sprintf("%s-%s", w.Type(), addr) -} - -// ReadableBip44Wallet used for [de]serialization of a deterministic wallet -type ReadableBip44Wallet struct { - Meta `json:"meta"` - ReadableEntries `json:"entries"` -} - -// LoadReadableBip44Wallet loads a deterministic wallet from disk -func LoadReadableBip44Wallet(wltFile string) (*ReadableBip44Wallet, error) { - var rw ReadableBip44Wallet - if err := file.LoadJSON(wltFile, &rw); err != nil { - return nil, err - } - if rw.Type() != WalletTypeBip44 { - return nil, ErrInvalidWalletType - } - return &rw, nil -} - -// NewReadableBip44Wallet creates readable wallet -func NewReadableBip44Wallet(w *Bip44Wallet) 
*ReadableBip44Wallet { - return &ReadableBip44Wallet{ - Meta: w.Meta.clone(), - ReadableEntries: newReadableEntries(w.GetEntries(), w.Meta.Coin(), w.Meta.Type()), - } -} - -// ToWallet convert readable wallet to Wallet -func (rw *ReadableBip44Wallet) ToWallet() (Wallet, error) { - w := &Bip44Wallet{ - Meta: rw.Meta.clone(), - } - - if err := w.Validate(); err != nil { - err := fmt.Errorf("invalid wallet %q: %v", w.Filename(), err) - logger.WithError(err).Error("ReadableBip44Wallet.ToWallet Validate failed") - return nil, err - } - - ets, err := rw.ReadableEntries.toWalletEntries(w.Meta.Coin(), w.Meta.Type(), w.Meta.IsEncrypted()) - if err != nil { - logger.WithError(err).Error("ReadableBip44Wallet.ToWallet ReadableEntries.toWalletEntries failed") - return nil, err - } - - // Split the single array of entries into separate external and change chains, - // for easier internal management - for _, e := range ets { - switch e.Change { - case bip44.ExternalChainIndex: - w.ExternalEntries = append(w.ExternalEntries, e) - case bip44.ChangeChainIndex: - w.ChangeEntries = append(w.ChangeEntries, e) - default: - logger.Panicf("invalid change value %d", e.Change) - } - } - - // Sort childNumber low to high - sort.Slice(w.ExternalEntries, func(i, j int) bool { - return w.ExternalEntries[i].ChildNumber < w.ExternalEntries[j].ChildNumber - }) - sort.Slice(w.ChangeEntries, func(i, j int) bool { - return w.ChangeEntries[i].ChildNumber < w.ChangeEntries[j].ChildNumber - }) - - return w, err -} diff --git a/vendor/github.com/SkycoinProject/skycoin/src/wallet/collection_wallet.go b/vendor/github.com/SkycoinProject/skycoin/src/wallet/collection_wallet.go deleted file mode 100644 index ad0de07..0000000 --- a/vendor/github.com/SkycoinProject/skycoin/src/wallet/collection_wallet.go +++ /dev/null @@ -1,207 +0,0 @@ -package wallet - -import ( - "errors" - "fmt" - - "github.com/SkycoinProject/skycoin/src/cipher" - "github.com/SkycoinProject/skycoin/src/util/file" -) - -// CollectionWallet 
manages keys as an arbitrary collection. -// It has no defined keypair generator. The only way to add keys to the -// wallet is to explicitly add them. -// This wallet does not support address scanning or generation. -// This wallet does not use seeds. -type CollectionWallet struct { - Meta - Entries Entries -} - -// newCollectionWallet creates a CollectionWallet -func newCollectionWallet(meta Meta) (*CollectionWallet, error) { //nolint:unparam - return &CollectionWallet{ - Meta: meta, - }, nil -} - -// PackSecrets copies data from decrypted wallets into the secrets container -func (w *CollectionWallet) PackSecrets(ss Secrets) { - ss.set(secretSeed, w.Meta.Seed()) - ss.set(secretLastSeed, w.Meta.LastSeed()) - - // Saves entry secret keys in secrets - for _, e := range w.Entries { - ss.set(e.Address.String(), e.Secret.Hex()) - } -} - -// UnpackSecrets copies data from decrypted secrets into the wallet -func (w *CollectionWallet) UnpackSecrets(ss Secrets) error { - return w.Entries.unpackSecretKeys(ss) -} - -// Clone clones the wallet a new wallet object -func (w *CollectionWallet) Clone() Wallet { - return &CollectionWallet{ - Meta: w.Meta.clone(), - Entries: w.Entries.clone(), - } -} - -// CopyFrom copies the src wallet by reallocating -func (w *CollectionWallet) CopyFrom(src Wallet) { - w.Meta = src.(*CollectionWallet).Meta.clone() - w.Entries = src.(*CollectionWallet).Entries.clone() -} - -// CopyFromRef copies the src wallet with a pointer dereference -func (w *CollectionWallet) CopyFromRef(src Wallet) { - *w = *(src.(*CollectionWallet)) -} - -// Erase wipes secret fields in wallet -func (w *CollectionWallet) Erase() { - w.Meta.eraseSeeds() - w.Entries.erase() -} - -// ToReadable converts the wallet to its readable (serializable) format -func (w *CollectionWallet) ToReadable() Readable { - return NewReadableCollectionWallet(w) -} - -// Validate validates the wallet -func (w *CollectionWallet) Validate() error { - return w.Meta.validate() -} - -// GetEntries 
returns a copy of all entries held by the wallet -func (w *CollectionWallet) GetEntries() Entries { - return w.Entries.clone() -} - -// EntriesLen returns the number of entries in the wallet -func (w *CollectionWallet) EntriesLen() int { - return len(w.Entries) -} - -// GetEntryAt returns entry at a given index in the entries array -func (w *CollectionWallet) GetEntryAt(i int) Entry { - return w.Entries[i] -} - -// GetEntry returns entry of given address -func (w *CollectionWallet) GetEntry(a cipher.Address) (Entry, bool) { - return w.Entries.get(a) -} - -// HasEntry returns true if the wallet has an Entry with a given cipher.Address. -func (w *CollectionWallet) HasEntry(a cipher.Address) bool { - return w.Entries.has(a) -} - -// GenerateAddresses is a no-op for "collection" wallets -func (w *CollectionWallet) GenerateAddresses(num uint64) ([]cipher.Addresser, error) { - return nil, NewError(errors.New("A collection wallet does not implement GenerateAddresses")) -} - -// GenerateSkycoinAddresses is a no-op for "collection" wallets -func (w *CollectionWallet) GenerateSkycoinAddresses(num uint64) ([]cipher.Address, error) { - return nil, NewError(errors.New("A collection wallet does not implement GenerateSkycoinAddresses")) -} - -// ScanAddresses is a no-op for "collection" wallets -func (w *CollectionWallet) ScanAddresses(scanN uint64, tf TransactionsFinder) error { - return NewError(errors.New("A collection wallet does not implement ScanAddresses")) -} - -// GetAddresses returns all addresses in wallet -func (w *CollectionWallet) GetAddresses() []cipher.Addresser { - return w.Entries.getAddresses() -} - -// GetSkycoinAddresses returns all Skycoin addresses in wallet. The wallet's coin type must be Skycoin. 
-func (w *CollectionWallet) GetSkycoinAddresses() ([]cipher.Address, error) { - if w.Meta.Coin() != CoinTypeSkycoin { - return nil, errors.New("CollectionWallet coin type is not skycoin") - } - - return w.Entries.getSkycoinAddresses(), nil -} - -// Fingerprint returns an empty string; fingerprints are only defined for -// wallets with a seed -func (w *CollectionWallet) Fingerprint() string { - return "" -} - -// AddEntry adds a new entry to the wallet. -func (w *CollectionWallet) AddEntry(e Entry) error { - if w.IsEncrypted() { - return ErrWalletEncrypted - } - - if err := e.Verify(); err != nil { - return err - } - - for _, entry := range w.Entries { - if e.SkycoinAddress() == entry.SkycoinAddress() { - return errors.New("wallet already contains entry with this address") - } - } - - w.Entries = append(w.Entries, e) - return nil -} - -// ReadableCollectionWallet used for [de]serialization of a collection wallet -type ReadableCollectionWallet struct { - Meta `json:"meta"` - ReadableEntries `json:"entries"` -} - -// NewReadableCollectionWallet creates readable wallet -func NewReadableCollectionWallet(w *CollectionWallet) *ReadableCollectionWallet { - return &ReadableCollectionWallet{ - Meta: w.Meta.clone(), - ReadableEntries: newReadableEntries(w.Entries, w.Meta.Coin(), w.Meta.Type()), - } -} - -// LoadReadableCollectionWallet loads a collection wallet from disk -func LoadReadableCollectionWallet(wltFile string) (*ReadableCollectionWallet, error) { - logger.WithField("filename", wltFile).Info("LoadReadableCollectionWallet") - var rw ReadableCollectionWallet - if err := file.LoadJSON(wltFile, &rw); err != nil { - return nil, err - } - if rw.Type() != WalletTypeCollection { - return nil, ErrInvalidWalletType - } - return &rw, nil -} - -// ToWallet convert readable wallet to Wallet -func (rw *ReadableCollectionWallet) ToWallet() (Wallet, error) { - w := &CollectionWallet{ - Meta: rw.Meta.clone(), - } - - if err := w.Validate(); err != nil { - err := fmt.Errorf("invalid 
wallet %q: %v", w.Filename(), err) - logger.WithError(err).Error("ReadableCollectionWallet.ToWallet Validate failed") - return nil, err - } - - ets, err := rw.ReadableEntries.toWalletEntries(w.Meta.Coin(), w.Meta.Type(), w.Meta.IsEncrypted()) - if err != nil { - logger.WithError(err).Error("ReadableCollectionWallet.ToWallet toWalletEntries failed") - return nil, err - } - - w.Entries = ets - - return w, nil -} diff --git a/vendor/github.com/SkycoinProject/skycoin/src/wallet/crypto.go b/vendor/github.com/SkycoinProject/skycoin/src/wallet/crypto.go deleted file mode 100644 index 49ec0b1..0000000 --- a/vendor/github.com/SkycoinProject/skycoin/src/wallet/crypto.go +++ /dev/null @@ -1,66 +0,0 @@ -package wallet - -import ( - "errors" - "fmt" - - "github.com/SkycoinProject/skycoin/src/cipher/encrypt" -) - -type cryptor interface { - Encrypt(data, password []byte) ([]byte, error) - Decrypt(data, password []byte) ([]byte, error) -} - -// CryptoType represents the type of crypto name -type CryptoType string - -// CryptoTypeFromString converts string to CryptoType -func CryptoTypeFromString(s string) (CryptoType, error) { - switch CryptoType(s) { - case CryptoTypeSha256Xor: - return CryptoTypeSha256Xor, nil - case CryptoTypeScryptChacha20poly1305: - return CryptoTypeScryptChacha20poly1305, nil - case CryptoTypeScryptChacha20poly1305Insecure: - return CryptoTypeScryptChacha20poly1305Insecure, nil - default: - return "", errors.New("unknown crypto type") - } -} - -// Crypto types -const ( - // CryptoTypeSha256Xor uses the SHA256-XOR encryption method (unsafe - no key derivation) - CryptoTypeSha256Xor = CryptoType("sha256-xor") - // CryptoTypeScryptChacha20poly1305 uses chacha20poly1305 + scrypt key derivation (use this) - CryptoTypeScryptChacha20poly1305 = CryptoType("scrypt-chacha20poly1305") - // CryptoTypeScryptChacha20poly1305Insecure uses chacha20poly1305 + scrypt key derivation with a weak work factor (unsafe) - CryptoTypeScryptChacha20poly1305Insecure = 
CryptoType("scrypt-chacha20poly1305-insecure") - - // DefaultCryptoType is the default CryptoType used - DefaultCryptoType = CryptoTypeScryptChacha20poly1305 -) - -// cryptoTable records all supported wallet crypto methods -// If want to support new crypto methods, register here. -var cryptoTable = map[CryptoType]cryptor{ - CryptoTypeSha256Xor: encrypt.DefaultSha256Xor, - CryptoTypeScryptChacha20poly1305: encrypt.DefaultScryptChacha20poly1305, - CryptoTypeScryptChacha20poly1305Insecure: encrypt.ScryptChacha20poly1305{ - N: 1 << 15, - R: encrypt.ScryptR, - P: encrypt.ScryptP, - KeyLen: encrypt.ScryptKeyLen, - }, -} - -// getCrypto gets crypto of given type -func getCrypto(cryptoType CryptoType) (cryptor, error) { - c, ok := cryptoTable[cryptoType] - if !ok { - return nil, fmt.Errorf("can not find crypto %v in crypto table", cryptoType) - } - - return c, nil -} diff --git a/vendor/github.com/SkycoinProject/skycoin/src/wallet/deterministic_wallet.go b/vendor/github.com/SkycoinProject/skycoin/src/wallet/deterministic_wallet.go deleted file mode 100644 index 3464cac..0000000 --- a/vendor/github.com/SkycoinProject/skycoin/src/wallet/deterministic_wallet.go +++ /dev/null @@ -1,321 +0,0 @@ -package wallet - -import ( - "encoding/hex" - "errors" - "fmt" - - "github.com/SkycoinProject/skycoin/src/cipher" - "github.com/SkycoinProject/skycoin/src/util/file" -) - -// DeterministicWallet manages keys using the original Skycoin deterministic -// keypair generator method. -// With this generator, a single chain of addresses is created, each one dependent -// on the previous. 
-type DeterministicWallet struct { - Meta - Entries Entries -} - -// newDeterministicWallet creates a DeterministicWallet -func newDeterministicWallet(meta Meta) (*DeterministicWallet, error) { //nolint:unparam - return &DeterministicWallet{ - Meta: meta, - }, nil -} - -// PackSecrets copies data from decrypted wallets into the secrets container -func (w *DeterministicWallet) PackSecrets(ss Secrets) { - ss.set(secretSeed, w.Meta.Seed()) - ss.set(secretLastSeed, w.Meta.LastSeed()) - - // Saves entry secret keys in secrets - for _, e := range w.Entries { - ss.set(e.Address.String(), e.Secret.Hex()) - } -} - -// UnpackSecrets copies data from decrypted secrets into the wallet -func (w *DeterministicWallet) UnpackSecrets(ss Secrets) error { - seed, ok := ss.get(secretSeed) - if !ok { - return errors.New("seed doesn't exist in secrets") - } - w.Meta.setSeed(seed) - - lastSeed, ok := ss.get(secretLastSeed) - if !ok { - return errors.New("lastSeed doesn't exist in secrets") - } - w.Meta.setLastSeed(lastSeed) - - return w.Entries.unpackSecretKeys(ss) -} - -// Clone clones the wallet a new wallet object -func (w *DeterministicWallet) Clone() Wallet { - return &DeterministicWallet{ - Meta: w.Meta.clone(), - Entries: w.Entries.clone(), - } -} - -// CopyFrom copies the src wallet to w -func (w *DeterministicWallet) CopyFrom(src Wallet) { - w.Meta = src.(*DeterministicWallet).Meta.clone() - w.Entries = src.(*DeterministicWallet).Entries.clone() -} - -// CopyFromRef copies the src wallet with a pointer dereference -func (w *DeterministicWallet) CopyFromRef(src Wallet) { - *w = *(src.(*DeterministicWallet)) -} - -// Erase wipes secret fields in wallet -func (w *DeterministicWallet) Erase() { - w.Meta.eraseSeeds() - w.Entries.erase() -} - -// ToReadable converts the wallet to its readable (serializable) format -func (w *DeterministicWallet) ToReadable() Readable { - return NewReadableDeterministicWallet(w) -} - -// Validate validates the wallet -func (w *DeterministicWallet) 
Validate() error { - return w.Meta.validate() -} - -// GetAddresses returns all addresses in wallet -func (w *DeterministicWallet) GetAddresses() []cipher.Addresser { - return w.Entries.getAddresses() -} - -// GetSkycoinAddresses returns all Skycoin addresses in wallet. The wallet's coin type must be Skycoin. -func (w *DeterministicWallet) GetSkycoinAddresses() ([]cipher.Address, error) { - if w.Meta.Coin() != CoinTypeSkycoin { - return nil, errors.New("DeterministicWallet coin type is not skycoin") - } - - return w.Entries.getSkycoinAddresses(), nil -} - -// GetEntries returns a copy of all entries held by the wallet -func (w *DeterministicWallet) GetEntries() Entries { - return w.Entries.clone() -} - -// EntriesLen returns the number of entries in the wallet -func (w *DeterministicWallet) EntriesLen() int { - return len(w.Entries) -} - -// GetEntryAt returns entry at a given index in the entries array -func (w *DeterministicWallet) GetEntryAt(i int) Entry { - return w.Entries[i] -} - -// GetEntry returns entry of given address -func (w *DeterministicWallet) GetEntry(a cipher.Address) (Entry, bool) { - return w.Entries.get(a) -} - -// HasEntry returns true if the wallet has an Entry with a given cipher.Address. 
-func (w *DeterministicWallet) HasEntry(a cipher.Address) bool { - return w.Entries.has(a) -} - -// GenerateAddresses generates addresses -func (w *DeterministicWallet) GenerateAddresses(num uint64) ([]cipher.Addresser, error) { - if w.Meta.IsEncrypted() { - return nil, ErrWalletEncrypted - } - - if num == 0 { - return nil, nil - } - - var seckeys []cipher.SecKey - var seed []byte - if len(w.Entries) == 0 { - seed, seckeys = cipher.MustGenerateDeterministicKeyPairsSeed([]byte(w.Meta.Seed()), int(num)) - } else { - sd, err := hex.DecodeString(w.Meta.LastSeed()) - if err != nil { - return nil, fmt.Errorf("decode hex seed failed: %v", err) - } - seed, seckeys = cipher.MustGenerateDeterministicKeyPairsSeed(sd, int(num)) - } - - w.Meta.setLastSeed(hex.EncodeToString(seed)) - - addrs := make([]cipher.Addresser, len(seckeys)) - makeAddress := w.Meta.AddressConstructor() - for i, s := range seckeys { - p := cipher.MustPubKeyFromSecKey(s) - a := makeAddress(p) - addrs[i] = a - w.Entries = append(w.Entries, Entry{ - Address: a, - Secret: s, - Public: p, - }) - } - return addrs, nil -} - -// GenerateSkycoinAddresses generates Skycoin addresses. If the wallet's coin type is not Skycoin, returns an error -func (w *DeterministicWallet) GenerateSkycoinAddresses(num uint64) ([]cipher.Address, error) { - if w.Meta.Coin() != CoinTypeSkycoin { - return nil, errors.New("GenerateSkycoinAddresses called for non-skycoin wallet") - } - - addrs, err := w.GenerateAddresses(num) - if err != nil { - return nil, err - } - - skyAddrs := make([]cipher.Address, len(addrs)) - for i, a := range addrs { - skyAddrs[i] = a.(cipher.Address) - } - - return skyAddrs, nil -} - -// reset resets the wallet entries and move the lastSeed to origin -func (w *DeterministicWallet) reset() { - w.Entries = Entries{} - w.Meta.setLastSeed(w.Meta.Seed()) -} - -// ScanAddresses scans ahead N addresses, truncating up to the highest address with any transaction history. 
-func (w *DeterministicWallet) ScanAddresses(scanN uint64, tf TransactionsFinder) error { - if w.Meta.IsEncrypted() { - return ErrWalletEncrypted - } - - if scanN == 0 { - return nil - } - - w2 := w.Clone().(*DeterministicWallet) - - nExistingAddrs := uint64(len(w2.Entries)) - nAddAddrs := uint64(0) - n := scanN - extraScan := uint64(0) - - for { - // Generate the addresses to scan - addrs, err := w2.GenerateSkycoinAddresses(n) - if err != nil { - return err - } - - // Find if these addresses had any activity - active, err := tf.AddressesActivity(addrs) - if err != nil { - return err - } - - // Check activity from the last one until we find the address that has activity - var keepNum uint64 - for i := len(active) - 1; i >= 0; i-- { - if active[i] { - keepNum = uint64(i + 1) - break - } - } - - if keepNum == 0 { - break - } - - nAddAddrs += keepNum + extraScan - - // extraScan is the number of addresses with no activity beyond the - // last address with activity - extraScan = n - keepNum - - // n is the number of addresses to scan the next iteration - n = scanN - extraScan - } - - // Regenerate addresses up to nExistingAddrs + nAddAddrs. - // This is necessary to keep the lastSeed updated. 
- w2.reset() - if _, err := w2.GenerateSkycoinAddresses(nExistingAddrs + nAddAddrs); err != nil { - return err - } - - *w = *w2 - - return nil -} - -// Fingerprint returns a unique ID fingerprint for this wallet, composed of its initial address -// and wallet type -func (w *DeterministicWallet) Fingerprint() string { - addr := "" - if len(w.Entries) == 0 { - if !w.IsEncrypted() { - _, pk, _ := cipher.MustDeterministicKeyPairIterator([]byte(w.Meta.Seed())) - addr = w.Meta.AddressConstructor()(pk).String() - } - } else { - addr = w.Entries[0].Address.String() - } - return fmt.Sprintf("%s-%s", w.Type(), addr) -} - -// ReadableDeterministicWallet used for [de]serialization of a deterministic wallet -type ReadableDeterministicWallet struct { - Meta `json:"meta"` - ReadableEntries `json:"entries"` -} - -// LoadReadableDeterministicWallet loads a deterministic wallet from disk -func LoadReadableDeterministicWallet(wltFile string) (*ReadableDeterministicWallet, error) { - var rw ReadableDeterministicWallet - if err := file.LoadJSON(wltFile, &rw); err != nil { - return nil, err - } - if rw.Type() != WalletTypeDeterministic { - return nil, ErrInvalidWalletType - } - return &rw, nil -} - -// NewReadableDeterministicWallet creates readable wallet -func NewReadableDeterministicWallet(w *DeterministicWallet) *ReadableDeterministicWallet { - return &ReadableDeterministicWallet{ - Meta: w.Meta.clone(), - ReadableEntries: newReadableEntries(w.Entries, w.Meta.Coin(), w.Meta.Type()), - } -} - -// ToWallet convert readable wallet to Wallet -func (rw *ReadableDeterministicWallet) ToWallet() (Wallet, error) { - w := &DeterministicWallet{ - Meta: rw.Meta.clone(), - } - - if err := w.Validate(); err != nil { - err := fmt.Errorf("invalid wallet %q: %v", w.Filename(), err) - logger.WithError(err).Error("ReadableDeterministicWallet.ToWallet Validate failed") - return nil, err - } - - ets, err := rw.ReadableEntries.toWalletEntries(w.Meta.Coin(), w.Meta.Type(), w.Meta.IsEncrypted()) - if err 
!= nil { - logger.WithError(err).Error("ReadableDeterministicWallet.ToWallet toWalletEntries failed") - return nil, err - } - - w.Entries = ets - - return w, nil -} diff --git a/vendor/github.com/SkycoinProject/skycoin/src/wallet/entry.go b/vendor/github.com/SkycoinProject/skycoin/src/wallet/entry.go deleted file mode 100644 index a207f4f..0000000 --- a/vendor/github.com/SkycoinProject/skycoin/src/wallet/entry.go +++ /dev/null @@ -1,126 +0,0 @@ -package wallet - -import ( - "encoding/hex" - "errors" - "fmt" - - "github.com/SkycoinProject/skycoin/src/cipher" -) - -// Entry represents the wallet entry -type Entry struct { - Address cipher.Addresser - Public cipher.PubKey - Secret cipher.SecKey - ChildNumber uint32 // For bip32/bip44 - Change uint32 // For bip44 -} - -// SkycoinAddress returns the Skycoin address of an entry. Panics if Address is not a Skycoin address -func (we Entry) SkycoinAddress() cipher.Address { - return we.Address.(cipher.Address) -} - -// BitcoinAddress returns the Skycoin address of an entry. Panics if Address is not a Bitcoin address -func (we Entry) BitcoinAddress() cipher.BitcoinAddress { - return we.Address.(cipher.BitcoinAddress) -} - -// Verify checks that the public key is derivable from the secret key, -// and that the public key is associated with the address -func (we *Entry) Verify() error { - pk, err := cipher.PubKeyFromSecKey(we.Secret) - if err != nil { - return err - } - - if pk != we.Public { - return errors.New("invalid public key for secret key") - } - - return we.VerifyPublic() -} - -// VerifyPublic checks that the public key is associated with the address -func (we *Entry) VerifyPublic() error { - if err := we.Public.Verify(); err != nil { - return err - } - return we.Address.Verify(we.Public) -} - -// Entries are an array of wallet entries -type Entries []Entry - -func (entries Entries) clone() Entries { - if len(entries) == 0 { - return nil - } - return append(Entries{}, entries...) 
-} - -func (entries Entries) has(a cipher.Address) bool { - // This doesn't use getEntry() to avoid copying an Entry in the return value, - // which may contain a secret key - for _, e := range entries { - if e.SkycoinAddress() == a { - return true - } - } - return false -} - -func (entries Entries) get(a cipher.Address) (Entry, bool) { - for _, e := range entries { - if e.SkycoinAddress() == a { - return e, true - } - } - return Entry{}, false -} - -func (entries Entries) getSkycoinAddresses() []cipher.Address { - addrs := make([]cipher.Address, len(entries)) - for i, e := range entries { - addrs[i] = e.SkycoinAddress() - } - return addrs -} - -func (entries Entries) getAddresses() []cipher.Addresser { - addrs := make([]cipher.Addresser, len(entries)) - for i, e := range entries { - addrs[i] = e.Address - } - return addrs -} - -// eraseEntries wipes private keys in entries -func (entries Entries) erase() { - for i := range entries { - for j := range entries[i].Secret { - entries[i].Secret[j] = 0 - } - entries[i].Secret = cipher.SecKey{} - } -} - -// unpackSecretKeys for each entry, look for the secret key in the Secrets dict, keyed by address -func (entries Entries) unpackSecretKeys(ss Secrets) error { - for i, e := range entries { - sstr, ok := ss.get(e.Address.String()) - if !ok { - return fmt.Errorf("secret of address %s doesn't exist in secrets", e.Address) - } - - s, err := hex.DecodeString(sstr) - if err != nil { - return fmt.Errorf("decode secret hex string failed: %v", err) - } - - copy(entries[i].Secret[:], s[:]) - } - - return nil -} diff --git a/vendor/github.com/SkycoinProject/skycoin/src/wallet/meta.go b/vendor/github.com/SkycoinProject/skycoin/src/wallet/meta.go deleted file mode 100644 index 10f878a..0000000 --- a/vendor/github.com/SkycoinProject/skycoin/src/wallet/meta.go +++ /dev/null @@ -1,357 +0,0 @@ -package wallet - -import ( - "errors" - "fmt" - "strconv" - - "github.com/SkycoinProject/skycoin/src/cipher" - 
"github.com/SkycoinProject/skycoin/src/cipher/bip39" - "github.com/SkycoinProject/skycoin/src/cipher/bip44" -) - -// wallet meta fields -const ( - metaVersion = "version" // wallet version - metaFilename = "filename" // wallet file name - metaLabel = "label" // wallet label - metaTimestamp = "tm" // the timestamp when creating the wallet - metaType = "type" // wallet type - metaCoin = "coin" // coin type - metaEncrypted = "encrypted" // whether the wallet is encrypted - metaCryptoType = "cryptoType" // encrytion/decryption type - metaSeed = "seed" // wallet seed - metaLastSeed = "lastSeed" // seed for generating next address [deterministic wallets] - metaSecrets = "secrets" // secrets which records the encrypted seeds and secrets of address entries - metaBip44Coin = "bip44Coin" // bip44 coin type - metaSeedPassphrase = "seedPassphrase" // seed passphrase [bip44 wallets] - metaXPub = "xpub" // xpub key [xpub wallets] -) - -// Meta holds wallet metadata -type Meta map[string]string - -func (m Meta) clone() Meta { - mm := make(Meta, len(m)) - for k, v := range m { - mm[k] = v - } - return mm -} - -// erase wipes the seed and last seed -func (m Meta) eraseSeeds() { - m.setSeed("") - m.setLastSeed("") - m.setSeedPassphrase("") -} - -// validate validates the wallet -func (m Meta) validate() error { - if fn := m[metaFilename]; fn == "" { - return errors.New("filename not set") - } - - if tm := m[metaTimestamp]; tm != "" { - _, err := strconv.ParseInt(tm, 10, 64) - if err != nil { - return errors.New("invalid timestamp") - } - } - - walletType, ok := m[metaType] - if !ok { - return errors.New("type field not set") - } - if !IsValidWalletType(walletType) { - return ErrInvalidWalletType - } - - if coinType := m[metaCoin]; coinType == "" { - return errors.New("coin field not set") - } - - var isEncrypted bool - if encStr, ok := m[metaEncrypted]; ok { - // validate the encrypted value - var err error - isEncrypted, err = strconv.ParseBool(encStr) - if err != nil { - return 
errors.New("encrypted field is not a valid bool") - } - } - - if isEncrypted { - cryptoType, ok := m[metaCryptoType] - if !ok { - return errors.New("crypto type field not set") - } - - if _, err := getCrypto(CryptoType(cryptoType)); err != nil { - return errors.New("unknown crypto type") - } - - if s := m[metaSecrets]; s == "" { - return errors.New("wallet is encrypted, but secrets field not set") - } - - if s := m[metaSeed]; s != "" { - return errors.New("seed should not be visible in encrypted wallets") - } - - if s := m[metaLastSeed]; s != "" { - return errors.New("lastSeed should not be visible in encrypted wallets") - } - } else { - if s := m[metaSecrets]; s != "" { - return errors.New("secrets should not be in unencrypted wallets") - } - } - - switch walletType { - case WalletTypeCollection: - if s := m[metaSeed]; s != "" { - return errors.New("seed should not be in collection wallets") - } - - if s := m[metaLastSeed]; s != "" { - return errors.New("lastSeed should not be in collection wallets") - } - case WalletTypeDeterministic: - if !isEncrypted { - if s := m[metaSeed]; s == "" { - return errors.New("seed missing in unencrypted deterministic wallet") - } - - if s := m[metaLastSeed]; s == "" { - return errors.New("lastSeed missing in unencrypted deterministic wallet") - } - } - case WalletTypeBip44: - if !isEncrypted { - // bip44 wallet seeds must be a valid bip39 mnemonic - if s := m[metaSeed]; s == "" { - return errors.New("seed missing in unencrypted bip44 wallet") - } else if err := bip39.ValidateMnemonic(s); err != nil { - return err - } - } - - if s := m[metaBip44Coin]; s == "" { - return errors.New("bip44Coin missing") - } else if _, err := strconv.ParseUint(s, 10, 32); err != nil { - return fmt.Errorf("bip44Coin invalid: %v", err) - } - - if s := m[metaLastSeed]; s != "" { - return errors.New("lastSeed should not be in bip44 wallets") - } - case WalletTypeXPub: - if s := m[metaSeed]; s != "" { - return errors.New("seed should not be in xpub 
wallets") - } - - if s := m[metaLastSeed]; s != "" { - return errors.New("lastSeed should not be in xpub wallets") - } - default: - return errors.New("unhandled wallet type") - } - - if m[metaXPub] != "" && walletType != WalletTypeXPub { - return errors.New("xpub is only used for xpub wallets") - } - - return nil -} - -// Find returns a key value from the metadata map -func (m Meta) Find(k string) string { - return m[k] -} - -// Type gets the wallet type -func (m Meta) Type() string { - return m[metaType] -} - -// Version gets the wallet version -func (m Meta) Version() string { - return m[metaVersion] -} - -// SetVersion sets the wallet version -func (m Meta) SetVersion(v string) { - m[metaVersion] = v -} - -// Filename gets the wallet filename -func (m Meta) Filename() string { - return m[metaFilename] -} - -// SetFilename sets the wallet filename -func (m Meta) SetFilename(fn string) { - m[metaFilename] = fn -} - -// Label gets the wallet label -func (m Meta) Label() string { - return m[metaLabel] -} - -// SetLabel sets the wallet label -func (m Meta) SetLabel(label string) { - m[metaLabel] = label -} - -// LastSeed returns the last seed -func (m Meta) LastSeed() string { - return m[metaLastSeed] -} - -func (m Meta) setLastSeed(lseed string) { - m[metaLastSeed] = lseed -} - -// Seed returns the seed -func (m Meta) Seed() string { - return m[metaSeed] -} - -func (m Meta) setSeed(seed string) { - m[metaSeed] = seed -} - -// SeedPassphrase returns the seed passphrase -func (m Meta) SeedPassphrase() string { - return m[metaSeedPassphrase] -} - -func (m Meta) setSeedPassphrase(p string) { - m[metaSeedPassphrase] = p -} - -// Coin returns the wallet's coin type -func (m Meta) Coin() CoinType { - return CoinType(m[metaCoin]) -} - -// SetCoin sets the wallet's coin type -func (m Meta) SetCoin(ct CoinType) { - m[metaCoin] = string(ct) -} - -// Bip44Coin returns the bip44 coin type -func (m Meta) Bip44Coin() bip44.CoinType { - c := m[metaBip44Coin] - if c == "" { - 
logger.Critical().Error("wallet.Meta.Bip44Coin() is empty") - return bip44.CoinType(0) - } - - x, err := strconv.ParseUint(c, 10, 32) - if err != nil { - logger.WithError(err).Panic() - } - - return bip44.CoinType(x) -} - -func (m Meta) setBip44Coin(ct bip44.CoinType) { - m[metaBip44Coin] = strconv.FormatUint(uint64(ct), 10) -} - -func (m Meta) setIsEncrypted(encrypt bool) { - m[metaEncrypted] = strconv.FormatBool(encrypt) -} - -// SetEncrypted sets encryption fields -func (m Meta) SetEncrypted(cryptoType CryptoType, encryptedSecrets string) { - m.setCryptoType(cryptoType) - m.setSecrets(encryptedSecrets) - m.setIsEncrypted(true) -} - -// SetDecrypted unsets encryption fields -func (m Meta) SetDecrypted() { - m.setIsEncrypted(false) - m.setSecrets("") - m.setCryptoType("") -} - -// IsEncrypted checks whether the wallet is encrypted. -func (m Meta) IsEncrypted() bool { - encStr, ok := m[metaEncrypted] - if !ok { - return false - } - - b, err := strconv.ParseBool(encStr) - if err != nil { - // This can't happen, the meta.encrypted value is either set by - // setEncrypted() method or converted in ReadableWallet.toWallet(). - // toWallet() method will throw error if the meta.encrypted string is invalid. - logger.Critical().WithError(err).Error("parse wallet.meta.encrypted string failed") - return false - } - return b -} - -func (m Meta) setCryptoType(tp CryptoType) { - m[metaCryptoType] = string(tp) -} - -// CryptoType returns the encryption type -func (m Meta) CryptoType() CryptoType { - return CryptoType(m[metaCryptoType]) -} - -// Secrets returns the encrypted wallet secrets -func (m Meta) Secrets() string { - return m[metaSecrets] -} - -func (m Meta) setSecrets(s string) { - m[metaSecrets] = s -} - -// Timestamp returns the timestamp -func (m Meta) Timestamp() int64 { - // Intentionally ignore the error when parsing the timestamp, - // if it isn't valid or is missing it will be set to 0. 
- // Also, this value is validated by wallet.validate() - x, _ := strconv.ParseInt(m[metaTimestamp], 10, 64) //nolint:errcheck - return x -} - -// SetTimestamp sets the timestamp -func (m Meta) SetTimestamp(t int64) { - m[metaTimestamp] = strconv.FormatInt(t, 10) -} - -// AddressConstructor returns a function to create a cipher.Addresser from a cipher.PubKey -func (m Meta) AddressConstructor() func(cipher.PubKey) cipher.Addresser { - switch m.Coin() { - case CoinTypeSkycoin: - return func(pk cipher.PubKey) cipher.Addresser { - return cipher.AddressFromPubKey(pk) - } - case CoinTypeBitcoin: - return func(pk cipher.PubKey) cipher.Addresser { - return cipher.BitcoinAddressFromPubKey(pk) - } - default: - logger.Panicf("Invalid wallet coin type %q", m.Coin()) - return nil - } -} - -func (m Meta) setXPub(xpub string) { - m[metaXPub] = xpub -} - -// XPub returns the wallet's configured XPub key -func (m Meta) XPub() string { - return m[metaXPub] -} diff --git a/vendor/github.com/SkycoinProject/skycoin/src/wallet/readable.go b/vendor/github.com/SkycoinProject/skycoin/src/wallet/readable.go deleted file mode 100644 index 4ea78cd..0000000 --- a/vendor/github.com/SkycoinProject/skycoin/src/wallet/readable.go +++ /dev/null @@ -1,200 +0,0 @@ -package wallet - -import ( - "errors" - "fmt" - - "github.com/SkycoinProject/skycoin/src/cipher" - "github.com/SkycoinProject/skycoin/src/cipher/bip44" -) - -// ReadableEntry wallet entry with json tags -type ReadableEntry struct { - Address string `json:"address"` - Public string `json:"public_key"` - Secret string `json:"secret_key"` - ChildNumber *uint32 `json:"child_number,omitempty"` // For bip32/bip44 - Change *uint32 `json:"change,omitempty"` // For bip44 -} - -// NewReadableEntry creates readable wallet entry -func NewReadableEntry(coinType CoinType, walletType string, e Entry) ReadableEntry { - re := ReadableEntry{} - if !e.Address.Null() { - re.Address = e.Address.String() - } - - if !e.Public.Null() { - re.Public = 
e.Public.Hex() - } - - if !e.Secret.Null() { - switch coinType { - case CoinTypeSkycoin: - re.Secret = e.Secret.Hex() - case CoinTypeBitcoin: - re.Secret = cipher.BitcoinWalletImportFormatFromSeckey(e.Secret) - default: - logger.Panicf("Invalid coin type %q", coinType) - } - } - - switch walletType { - case WalletTypeBip44: - cn := e.ChildNumber - re.ChildNumber = &cn - change := e.Change - re.Change = &change - case WalletTypeXPub: - cn := e.ChildNumber - re.ChildNumber = &cn - if e.Change != 0 { - logger.Panicf("wallet.Entry.Change is not 0 but wallet type is %q", walletType) - } - default: - if e.ChildNumber != 0 { - logger.Panicf("wallet.Entry.ChildNumber is not 0 but wallet type is %q", walletType) - } - if e.Change != 0 { - logger.Panicf("wallet.Entry.Change is not 0 but wallet type is %q", walletType) - } - } - - return re -} - -// ReadableEntries array of ReadableEntry -type ReadableEntries []ReadableEntry - -func newReadableEntries(entries Entries, coinType CoinType, walletType string) ReadableEntries { - re := make(ReadableEntries, len(entries)) - for i, e := range entries { - re[i] = NewReadableEntry(coinType, walletType, e) - } - return re -} - -// GetEntries returns this array -func (res ReadableEntries) GetEntries() ReadableEntries { - return res -} - -// toWalletEntries convert readable entries to entries -// converts base on the wallet version. 
-func (res ReadableEntries) toWalletEntries(coinType CoinType, walletType string, isEncrypted bool) ([]Entry, error) { - entries := make([]Entry, len(res)) - for i, re := range res { - e, err := newEntryFromReadable(coinType, walletType, &re) - if err != nil { - return []Entry{}, err - } - - // Verify the wallet if it's not encrypted - if !isEncrypted && re.Secret != "" { - if err := e.Verify(); err != nil { - return nil, err - } - } - - entries[i] = *e - } - return entries, nil -} - -// newEntryFromReadable creates WalletEntry base one ReadableWalletEntry -func newEntryFromReadable(coinType CoinType, walletType string, re *ReadableEntry) (*Entry, error) { - var a cipher.Addresser - var err error - - switch coinType { - case CoinTypeSkycoin: - a, err = cipher.DecodeBase58Address(re.Address) - case CoinTypeBitcoin: - a, err = cipher.DecodeBase58BitcoinAddress(re.Address) - default: - logger.Panicf("Invalid coin type %q", coinType) - } - - if err != nil { - return nil, err - } - - p, err := cipher.PubKeyFromHex(re.Public) - if err != nil { - return nil, err - } - - // Decodes the secret hex string if any - var secret cipher.SecKey - if re.Secret != "" { - switch coinType { - case CoinTypeSkycoin: - secret, err = cipher.SecKeyFromHex(re.Secret) - case CoinTypeBitcoin: - secret, err = cipher.SecKeyFromBitcoinWalletImportFormat(re.Secret) - default: - logger.Panicf("Invalid coin type %q", coinType) - } - if err != nil { - return nil, err - } - } - - var childNumber uint32 - var change uint32 - switch walletType { - case WalletTypeBip44: - if re.ChildNumber == nil { - return nil, fmt.Errorf("child_number required for %q wallet type", walletType) - } - if re.Change == nil { - return nil, fmt.Errorf("change required for %q wallet type", walletType) - } - - childNumber = *re.ChildNumber - change = *re.Change - - switch change { - case bip44.ExternalChainIndex, bip44.ChangeChainIndex: - default: - return nil, errors.New("change must be either 0 or 1") - } - - case 
WalletTypeXPub: - if re.ChildNumber == nil { - return nil, fmt.Errorf("child_number required for %q wallet type", walletType) - } - - childNumber = *re.ChildNumber - - if re.Change != nil { - return nil, fmt.Errorf("change should not be set for %q wallet type", walletType) - } - - default: - if re.ChildNumber != nil { - return nil, fmt.Errorf("child_number should not be set for %q wallet type", walletType) - } - if re.Change != nil { - return nil, fmt.Errorf("change should not be set for %q wallet type", walletType) - } - } - - return &Entry{ - Address: a, - Public: p, - Secret: secret, - ChildNumber: childNumber, - Change: change, - }, nil -} - -// Readable defines the readable wallet API. -// A readable wallet is the on-disk representation of a wallet. -type Readable interface { - ToWallet() (Wallet, error) - Timestamp() int64 - SetFilename(string) - Filename() string - GetEntries() ReadableEntries -} diff --git a/vendor/github.com/SkycoinProject/skycoin/src/wallet/secrets.go b/vendor/github.com/SkycoinProject/skycoin/src/wallet/secrets.go deleted file mode 100644 index a1769eb..0000000 --- a/vendor/github.com/SkycoinProject/skycoin/src/wallet/secrets.go +++ /dev/null @@ -1,36 +0,0 @@ -package wallet - -import "encoding/json" - -const ( - secretSeed = "seed" - secretLastSeed = "lastSeed" - secretSeedPassphrase = "seedPassphrase" -) - -// Secrets hold secret data, to be encrypted -type Secrets map[string]string - -func (s Secrets) get(key string) (string, bool) { - v, ok := s[key] - return v, ok -} - -func (s Secrets) set(key, v string) { - s[key] = v -} - -func (s Secrets) serialize() ([]byte, error) { - return json.Marshal(s) -} - -func (s Secrets) deserialize(data []byte) error { - return json.Unmarshal(data, &s) -} - -func (s Secrets) erase() { - for k := range s { - s[k] = "" - delete(s, k) - } -} diff --git a/vendor/github.com/SkycoinProject/skycoin/src/wallet/service.go b/vendor/github.com/SkycoinProject/skycoin/src/wallet/service.go deleted file mode 
100644 index 3ad38bd..0000000 --- a/vendor/github.com/SkycoinProject/skycoin/src/wallet/service.go +++ /dev/null @@ -1,616 +0,0 @@ -package wallet - -import ( - "fmt" - "os" - "path/filepath" - "sync" - - "github.com/sirupsen/logrus" - - "github.com/SkycoinProject/skycoin/src/cipher" - "github.com/SkycoinProject/skycoin/src/cipher/bip44" - "github.com/SkycoinProject/skycoin/src/util/file" -) - -// TransactionsFinder interface for finding address related transaction hashes -type TransactionsFinder interface { - AddressesActivity(addrs []cipher.Address) ([]bool, error) -} - -// Service wallet service struct -type Service struct { - sync.RWMutex - wallets Wallets - config Config - // fingerprints is used to check for duplicate deterministic wallets - fingerprints map[string]string -} - -// Config wallet service config -type Config struct { - WalletDir string - CryptoType CryptoType - EnableWalletAPI bool - EnableSeedAPI bool - Bip44Coin *bip44.CoinType -} - -// NewConfig creates a default Config -func NewConfig() Config { - bc := bip44.CoinTypeSkycoin - return Config{ - WalletDir: "./", - CryptoType: DefaultCryptoType, - EnableWalletAPI: false, - EnableSeedAPI: false, - Bip44Coin: &bc, - } -} - -// NewService new wallet service -func NewService(c Config) (*Service, error) { - serv := &Service{ - config: c, - fingerprints: make(map[string]string), - } - - if !serv.config.EnableWalletAPI { - return serv, nil - } - - if err := os.MkdirAll(c.WalletDir, os.FileMode(0700)); err != nil { - return nil, fmt.Errorf("failed to create wallet directory %s: %v", c.WalletDir, err) - } - - // Removes .wlt.bak files before loading wallets - if err := removeBackupFiles(serv.config.WalletDir); err != nil { - return nil, fmt.Errorf("remove .wlt.bak files in %v failed: %v", serv.config.WalletDir, err) - } - - // Load all wallets from disk - w, err := loadWallets(serv.config.WalletDir) - if err != nil { - return nil, fmt.Errorf("failed to load all wallets: %v", err) - } - - // Abort if 
there are duplicate wallets (identified by fingerprint) on disk - if wltID, fp, hasDup := w.containsDuplicate(); hasDup { - return nil, fmt.Errorf("duplicate wallet found with fingerprint %s in file %q", fp, wltID) - } - - // Abort if there are empty deterministic wallets on disk - if wltID, hasEmpty := w.containsEmpty(); hasEmpty { - return nil, fmt.Errorf("empty wallet file found: %q", wltID) - } - - serv.setWallets(w) - - fields := logrus.Fields{ - "walletDir": serv.config.WalletDir, - } - if serv.config.Bip44Coin != nil { - fields["bip44Coin"] = *serv.config.Bip44Coin - } - logger.WithFields(fields).Debug("wallet.NewService complete") - - return serv, nil -} - -// WalletDir returns the configured wallet directory -func (serv *Service) WalletDir() (string, error) { - serv.Lock() - defer serv.Unlock() - if !serv.config.EnableWalletAPI { - return "", ErrWalletAPIDisabled - } - return serv.config.WalletDir, nil -} - -func (serv *Service) updateOptions(opts Options) Options { - // Apply service-configured default settings for wallet options - if opts.Encrypt && opts.CryptoType == "" { - opts.CryptoType = serv.config.CryptoType - } - if opts.Type == WalletTypeBip44 && opts.Bip44Coin == nil && serv.config.Bip44Coin != nil { - c := *serv.config.Bip44Coin - opts.Bip44Coin = &c - } - return opts -} - -// CreateWallet creates a wallet with the given wallet file name and options. -// A address will be automatically generated by default. 
-func (serv *Service) CreateWallet(wltName string, options Options, tf TransactionsFinder) (Wallet, error) { - serv.Lock() - defer serv.Unlock() - if !serv.config.EnableWalletAPI { - return nil, ErrWalletAPIDisabled - } - if wltName == "" { - wltName = serv.generateUniqueWalletFilename() - } - - options = serv.updateOptions(options) - return serv.loadWallet(wltName, options, tf) -} - -// loadWallet loads wallet from seed and scan the first N addresses -func (serv *Service) loadWallet(wltName string, options Options, tf TransactionsFinder) (Wallet, error) { - options = serv.updateOptions(options) - w, err := NewWalletScanAhead(wltName, options, tf) - if err != nil { - return nil, err - } - - fingerprint := w.Fingerprint() - if fingerprint != "" { - if _, ok := serv.fingerprints[fingerprint]; ok { - // Note: collection wallets do not have fingerprints - switch w.Type() { - case WalletTypeDeterministic, WalletTypeBip44: - return nil, ErrSeedUsed - case WalletTypeXPub: - return nil, ErrXPubKeyUsed - default: - logger.WithFields(logrus.Fields{ - "walletType": w.Type(), - "fingerprint": fingerprint, - }).Panic("Unhandled wallet type after fingerprint conflict") - } - } - } - - if err := serv.wallets.add(w); err != nil { - return nil, err - } - - if err := Save(w, serv.config.WalletDir); err != nil { - // If save fails, remove the added wallet - serv.wallets.remove(w.Filename()) - return nil, err - } - - if fingerprint != "" { - serv.fingerprints[fingerprint] = w.Filename() - } - - return w.Clone(), nil -} - -func (serv *Service) generateUniqueWalletFilename() string { - wltName := NewWalletFilename() - for { - if w := serv.wallets.get(wltName); w == nil { - break - } - wltName = NewWalletFilename() - } - - return wltName -} - -// EncryptWallet encrypts wallet with password -func (serv *Service) EncryptWallet(wltID string, password []byte) (Wallet, error) { - serv.Lock() - defer serv.Unlock() - if !serv.config.EnableWalletAPI { - return nil, ErrWalletAPIDisabled - } - - 
w, err := serv.getWallet(wltID) - if err != nil { - return nil, err - } - - if w.IsEncrypted() { - return nil, ErrWalletEncrypted - } - - if err := Lock(w, password, serv.config.CryptoType); err != nil { - return nil, err - } - - // Save to disk first - if err := Save(w, serv.config.WalletDir); err != nil { - return nil, err - } - - // Sets the encrypted wallet - serv.wallets.set(w) - return w, nil -} - -// DecryptWallet decrypts wallet with password -func (serv *Service) DecryptWallet(wltID string, password []byte) (Wallet, error) { - serv.Lock() - defer serv.Unlock() - if !serv.config.EnableWalletAPI { - return nil, ErrWalletAPIDisabled - } - - w, err := serv.getWallet(wltID) - if err != nil { - return nil, err - } - - // Returns error if wallet is not encrypted - if !w.IsEncrypted() { - return nil, ErrWalletNotEncrypted - } - - // Unlocks the wallet - unlockWlt, err := Unlock(w, password) - if err != nil { - return nil, err - } - - // Updates the wallet file - if err := Save(unlockWlt, serv.config.WalletDir); err != nil { - return nil, err - } - - // Sets the decrypted wallet in memory - serv.wallets.set(unlockWlt) - return unlockWlt, nil -} - -// NewAddresses generate address entries in given wallet, -// return nil if wallet does not exist. -// Set password as nil if the wallet is not encrypted, otherwise the password must be provided. 
-func (serv *Service) NewAddresses(wltID string, password []byte, num uint64) ([]cipher.Address, error) { - serv.Lock() - defer serv.Unlock() - - if !serv.config.EnableWalletAPI { - return nil, ErrWalletAPIDisabled - } - - w, err := serv.getWallet(wltID) - if err != nil { - return nil, err - } - - var addrs []cipher.Address - f := func(wlt Wallet) error { - var err error - addrs, err = wlt.GenerateSkycoinAddresses(num) - return err - } - - if w.IsEncrypted() { - if err := GuardUpdate(w, password, f); err != nil { - return nil, err - } - } else { - if len(password) != 0 { - return nil, ErrWalletNotEncrypted - } - - if err := f(w); err != nil { - return nil, err - } - } - - // Checks if the wallet file is writable - wf := filepath.Join(serv.config.WalletDir, w.Filename()) - if !file.IsWritable(wf) { - return nil, ErrWalletPermission - } - - // Save the wallet first - if err := Save(w, serv.config.WalletDir); err != nil { - return nil, err - } - - serv.wallets.set(w) - - return addrs, nil -} - -// GetSkycoinAddresses returns all addresses in given wallet -func (serv *Service) GetSkycoinAddresses(wltID string) ([]cipher.Address, error) { - serv.RLock() - defer serv.RUnlock() - if !serv.config.EnableWalletAPI { - return nil, ErrWalletAPIDisabled - } - - w, err := serv.getWallet(wltID) - if err != nil { - return nil, err - } - - return w.GetSkycoinAddresses() -} - -// GetWallet returns wallet by id -func (serv *Service) GetWallet(wltID string) (Wallet, error) { - serv.RLock() - defer serv.RUnlock() - if !serv.config.EnableWalletAPI { - return nil, ErrWalletAPIDisabled - } - - return serv.getWallet(wltID) -} - -// returns the clone of the wallet of given id -func (serv *Service) getWallet(wltID string) (Wallet, error) { - w := serv.wallets.get(wltID) - if w == nil { - return nil, ErrWalletNotExist - } - return w.Clone(), nil -} - -// GetWallets returns all wallet clones -func (serv *Service) GetWallets() (Wallets, error) { - serv.RLock() - defer serv.RUnlock() - if 
!serv.config.EnableWalletAPI { - return nil, ErrWalletAPIDisabled - } - - wlts := make(Wallets, len(serv.wallets)) - for k, w := range serv.wallets { - wlts[k] = w.Clone() - } - return wlts, nil -} - -// UpdateWalletLabel updates the wallet label -func (serv *Service) UpdateWalletLabel(wltID, label string) error { - serv.Lock() - defer serv.Unlock() - if !serv.config.EnableWalletAPI { - return ErrWalletAPIDisabled - } - - w, err := serv.getWallet(wltID) - if err != nil { - return err - } - - w.SetLabel(label) - - if err := Save(w, serv.config.WalletDir); err != nil { - return err - } - - serv.wallets.set(w) - return nil -} - -// UnloadWallet removes wallet of given wallet id from the service -func (serv *Service) UnloadWallet(wltID string) error { - serv.Lock() - defer serv.Unlock() - if !serv.config.EnableWalletAPI { - return ErrWalletAPIDisabled - } - - wlt := serv.wallets.get(wltID) - if wlt != nil { - if fp := wlt.Fingerprint(); fp != "" { - delete(serv.fingerprints, fp) - } - } - - serv.wallets.remove(wltID) - return nil -} - -func (serv *Service) setWallets(wlts Wallets) { - serv.wallets = wlts - - for wltID, wlt := range wlts { - if fp := wlt.Fingerprint(); fp != "" { - serv.fingerprints[fp] = wltID - } - } -} - -// GetWalletSeed returns seed and seed passphrase of encrypted wallet of given wallet id -// Returns ErrWalletNotEncrypted if it's not encrypted -func (serv *Service) GetWalletSeed(wltID string, password []byte) (string, string, error) { - serv.RLock() - defer serv.RUnlock() - if !serv.config.EnableWalletAPI { - return "", "", ErrWalletAPIDisabled - } - - if !serv.config.EnableSeedAPI { - return "", "", ErrSeedAPIDisabled - } - - w, err := serv.getWallet(wltID) - if err != nil { - return "", "", err - } - - if !w.IsEncrypted() { - return "", "", ErrWalletNotEncrypted - } - - var seed, seedPassphrase string - if err := GuardView(w, password, func(wlt Wallet) error { - seed = wlt.Seed() - seedPassphrase = wlt.SeedPassphrase() - return nil - }); err != 
nil { - return "", "", err - } - - return seed, seedPassphrase, nil -} - -// UpdateSecrets opens a wallet for modification of secret data and saves it safely -func (serv *Service) UpdateSecrets(wltID string, password []byte, f func(Wallet) error) error { - serv.Lock() - defer serv.Unlock() - if !serv.config.EnableWalletAPI { - return ErrWalletAPIDisabled - } - - w, err := serv.getWallet(wltID) - if err != nil { - return err - } - - if w.IsEncrypted() { - if err := GuardUpdate(w, password, f); err != nil { - return err - } - } else if len(password) != 0 { - return ErrWalletNotEncrypted - } else { - if err := f(w); err != nil { - return err - } - } - - // Save the wallet first - if err := Save(w, serv.config.WalletDir); err != nil { - return err - } - - serv.wallets.set(w) - - return nil -} - -// Update opens a wallet for modification of non-secret data and saves it safely -func (serv *Service) Update(wltID string, f func(Wallet) error) error { - serv.Lock() - defer serv.Unlock() - if !serv.config.EnableWalletAPI { - return ErrWalletAPIDisabled - } - - w, err := serv.getWallet(wltID) - if err != nil { - return err - } - - if err := f(w); err != nil { - return err - } - - // Save the wallet first - if err := Save(w, serv.config.WalletDir); err != nil { - return err - } - - serv.wallets.set(w) - - return nil -} - -// ViewSecrets opens a wallet for reading secret data -func (serv *Service) ViewSecrets(wltID string, password []byte, f func(Wallet) error) error { - serv.RLock() - defer serv.RUnlock() - if !serv.config.EnableWalletAPI { - return ErrWalletAPIDisabled - } - - w, err := serv.getWallet(wltID) - if err != nil { - return err - } - - if w.IsEncrypted() { - return GuardView(w, password, f) - } else if len(password) != 0 { - return ErrWalletNotEncrypted - } else { - return f(w) - } -} - -// View opens a wallet for reading non-secret data -func (serv *Service) View(wltID string, f func(Wallet) error) error { - serv.RLock() - defer serv.RUnlock() - if 
!serv.config.EnableWalletAPI { - return ErrWalletAPIDisabled - } - - w, err := serv.getWallet(wltID) - if err != nil { - return err - } - - return f(w) -} - -// RecoverWallet recovers an encrypted wallet from seed. -// The recovered wallet will be encrypted with the new password, if provided. -func (serv *Service) RecoverWallet(wltName, seed, seedPassphrase string, password []byte) (Wallet, error) { - serv.Lock() - defer serv.Unlock() - if !serv.config.EnableWalletAPI { - return nil, ErrWalletAPIDisabled - } - - w, err := serv.getWallet(wltName) - if err != nil { - return nil, err - } - - if !w.IsEncrypted() { - return nil, ErrWalletNotEncrypted - } - - switch w.Type() { - case WalletTypeDeterministic, WalletTypeBip44: - default: - return nil, ErrWalletTypeNotRecoverable - } - - // Create a wallet from this seed and compare the fingerprint - w2, err := NewWallet(wltName, Options{ - Type: w.Type(), - Coin: w.Coin(), - Seed: seed, - SeedPassphrase: seedPassphrase, - GenerateN: 1, - }) - if err != nil { - err = NewError(fmt.Errorf("RecoverWallet failed to create temporary wallet for fingerprint comparison: %v", err)) - logger.Critical().WithError(err).Error() - return nil, err - } - if w.Fingerprint() != w2.Fingerprint() { - return nil, ErrWalletRecoverSeedWrong - } - - // Create a new wallet with the same number of addresses, encrypting if needed - w3, err := NewWallet(wltName, Options{ - Type: w.Type(), - Coin: w.Coin(), - Label: w.Label(), - Seed: seed, - SeedPassphrase: seedPassphrase, - Encrypt: len(password) != 0, - Password: password, - CryptoType: w.CryptoType(), - GenerateN: uint64(w.EntriesLen()), - }) - if err != nil { - return nil, err - } - - // Preserve the timestamp of the old wallet - w3.SetTimestamp(w.Timestamp()) - - // Save to disk - if err := Save(w3, serv.config.WalletDir); err != nil { - return nil, err - } - - serv.wallets.set(w3) - - return w3.Clone(), nil -} diff --git a/vendor/github.com/SkycoinProject/skycoin/src/wallet/transaction.go 
b/vendor/github.com/SkycoinProject/skycoin/src/wallet/transaction.go deleted file mode 100644 index 5b3b0dd..0000000 --- a/vendor/github.com/SkycoinProject/skycoin/src/wallet/transaction.go +++ /dev/null @@ -1,286 +0,0 @@ -package wallet - -import ( - "errors" - "fmt" - - "github.com/SkycoinProject/skycoin/src/cipher" - "github.com/SkycoinProject/skycoin/src/coin" - "github.com/SkycoinProject/skycoin/src/transaction" -) - -var ( - // ErrUnknownAddress is returned if an address is not found in a wallet - ErrUnknownAddress = NewError(errors.New("address not found in wallet")) - // ErrUnknownUxOut is returned if a uxout is not owned by any address in a wallet - ErrUnknownUxOut = NewError(errors.New("uxout is not owned by any address in the wallet")) - // ErrWalletCantSign is returned is attempting to sign a transaction with a wallet - // that does not have the capability to sign transactions (e.g. an xpub or watch wallet) - ErrWalletCantSign = NewError(errors.New("wallet does not have the signing capability")) -) - -func validateSignIndexes(x []int, uxOuts []coin.UxOut) error { - if len(x) > len(uxOuts) { - return errors.New("Number of signature indexes exceeds number of inputs") - } - - for _, i := range x { - if i >= len(uxOuts) || i < 0 { - return errors.New("Signature index out of range") - } - } - - m := make(map[int]struct{}, len(x)) - for _, i := range x { - if _, ok := m[i]; ok { - return errors.New("Duplicate value in signature indexes") - } - m[i] = struct{}{} - } - - return nil -} - -func copyTransaction(txn *coin.Transaction) *coin.Transaction { - txnHash := txn.Hash() - txnInnerHash := txn.HashInner() - - txn2 := *txn - txn2.Sigs = make([]cipher.Sig, len(txn.Sigs)) - copy(txn2.Sigs, txn.Sigs) - txn2.In = make([]cipher.SHA256, len(txn.In)) - copy(txn2.In, txn.In) - txn2.Out = make([]coin.TransactionOutput, len(txn.Out)) - copy(txn2.Out, txn.Out) - - if txnInnerHash != txn2.HashInner() { - logger.Panic("copyTransaction copy broke InnerHash") - } - if 
txnHash != txn2.Hash() { - logger.Panic("copyTransaction copy broke Hash") - } - - return &txn2 -} - -// SignTransaction signs a transaction. Specific inputs may be signed by specifying signIndexes. -// If signIndexes is empty, all inputs will be signed. -// The transaction should already have a valid header. The transaction may be partially signed, -// but a valid existing signature cannot be overwritten. -// Clients should avoid signing the same transaction multiple times. -func SignTransaction(w Wallet, txn *coin.Transaction, signIndexes []int, uxOuts []coin.UxOut) (*coin.Transaction, error) { - switch w.Type() { - case WalletTypeXPub: - return nil, ErrWalletCantSign - } - - signedTxn := copyTransaction(txn) - txnInnerHash := signedTxn.HashInner() - - if w.IsEncrypted() { - return nil, ErrWalletEncrypted - } - - if txnInnerHash != signedTxn.InnerHash { - return nil, NewError(errors.New("Transaction inner hash does not match computed inner hash")) - } - - if len(signedTxn.Sigs) == 0 { - return nil, NewError(errors.New("Transaction signatures array is empty")) - } - if signedTxn.IsFullySigned() { - return nil, NewError(errors.New("Transaction is fully signed")) - } - - if len(signedTxn.In) == 0 { - return nil, NewError(errors.New("No transaction inputs to sign")) - } - if len(uxOuts) != len(signedTxn.In) { - return nil, errors.New("len(uxOuts) != len(txn.In)") - } - if err := validateSignIndexes(signIndexes, uxOuts); err != nil { - return nil, NewError(err) - } - - nMissingSigs := 0 - for _, s := range signedTxn.Sigs { - if s.Null() { - nMissingSigs++ - } - } - - // Build a mapping of addresses to the inputs that need to be signed - addrs := make(map[cipher.Address][]int) - if len(signIndexes) > 0 { - for _, in := range signIndexes { - if !signedTxn.Sigs[in].Null() { - return nil, NewError(fmt.Errorf("Transaction is already signed at index %d", in)) - } - addrs[uxOuts[in].Body.Address] = append(addrs[uxOuts[in].Body.Address], in) - } - } else { - for i, o := range 
uxOuts { - if !signedTxn.Sigs[i].Null() { - continue - } - addrs[o.Body.Address] = append(addrs[o.Body.Address], i) - } - } - - // Check that the wallet has all addresses needed for signing - toSign := make(map[int][]int) - for i, e := range w.GetEntries() { - if len(toSign) == len(addrs) { - break - } - addr := e.SkycoinAddress() - if x, ok := addrs[addr]; ok { - toSign[i] = x - } - } - - if len(toSign) != len(addrs) { - return nil, NewError(errors.New("Wallet cannot sign all requested inputs")) - } - - // Sign the selected inputs - for k, v := range toSign { - for _, x := range v { - if !signedTxn.Sigs[x].Null() { - return nil, NewError(fmt.Errorf("Transaction is already signed at index %d", x)) - } - - if err := signedTxn.SignInput(w.GetEntryAt(k).Secret, x); err != nil { - return nil, err - } - } - } - - if err := signedTxn.UpdateHeader(); err != nil { - return nil, err - } - - // Sanity check - if txnInnerHash != signedTxn.HashInner() { - err := errors.New("Transaction inner hash modified in the process of signing") - logger.Critical().WithError(err).Error() - return nil, err - } - - if len(signIndexes) == 0 || len(signIndexes) == nMissingSigs { - if !signedTxn.IsFullySigned() { - return nil, errors.New("Transaction is not fully signed, but should be") - } - } else { - if signedTxn.IsFullySigned() { - return nil, errors.New("Transaction is fully signed, but shouldn't be") - } - } - - return signedTxn, nil -} - -// CreateTransaction creates an unsigned transaction based upon transaction.Params. -// Set the password as nil if the wallet is not encrypted, otherwise the password must be provided. -// NOTE: Caller must ensure that auxs correspond to params.Wallet.Addresses and params.Wallet.UxOuts options -// Outputs to spend are chosen from the pool of outputs provided. 
-// The outputs are chosen by the following procedure: -// - All outputs are merged into one list and are sorted coins highest, hours lowest, with the hash as a tiebreaker -// - Outputs are chosen from the beginning of this list, until the requested amount of coins is met. -// If hours are also specified, selection continues until the requested amount of hours are met. -// - If the total amount of coins in the chosen outputs is exactly equal to the requested amount of coins, -// such that there would be no change output but hours remain as change, another output will be chosen to create change, -// if the coinhour cost of adding that output is less than the coinhours that would be lost as change -// If receiving hours are not explicitly specified, hours are allocated amongst the receiving outputs proportional to the number of coins being sent to them. -// If the change address is not specified, the address whose bytes are lexically sorted first is chosen from the owners of the outputs being spent. -// WARNING: This method is not concurrent-safe if operating on the same wallet. Use Service.View or Service.ViewSecrets to lock the wallet, or use your own lock. 
-func CreateTransaction(w Wallet, p transaction.Params, auxs coin.AddressUxOuts, headTime uint64) (*coin.Transaction, []transaction.UxBalance, error) { - if err := p.Validate(); err != nil { - return nil, nil, err - } - - // Check that auxs does not contain addresses that are not known to this wallet - for a := range auxs { - if !w.HasEntry(a) { - return nil, nil, fmt.Errorf("Address %s from auxs not found in wallet", a) - } - } - - // Generate a new change address for bip44 wallets - var changeEntry *Entry - if p.ChangeAddress == nil && w.Type() == WalletTypeBip44 { - e, err := w.(*Bip44Wallet).PeekChangeEntry() - if err != nil { - logger.Critical().WithError(err).Error("PeekChangeEntry failed") - return nil, nil, fmt.Errorf("PeekChangeEntry failed: %v", err) - } - changeAddr := e.Address.(cipher.Address) - p.ChangeAddress = &changeAddr - changeEntry = &e - } - - txn, uxb, err := transaction.Create(p, auxs, headTime) - - if err == nil && changeEntry != nil && w.Type() == WalletTypeBip44 { - // Commit the change address to the bip44 wallet, assuming it will be used - if e, err := w.(*Bip44Wallet).GenerateChangeEntry(); err != nil { - logger.WithError(err).Panic("GenerateChangeEntry failed after a PeekChangeEntry") - } else if e != *changeEntry { - logger.Panicf("GenerateChangeEntry produced a different change entry than PeekChangeEntry: %s != %s", e.Address, changeEntry.Address) - } - } - - return txn, uxb, err -} - -// CreateTransactionSigned creates and signs a transaction based upon transaction.Params. -// Set the password as nil if the wallet is not encrypted, otherwise the password must be provided. -// Refer to CreateTransaction for information about transaction creation. 
-func CreateTransactionSigned(w Wallet, p transaction.Params, auxs coin.AddressUxOuts, headTime uint64) (*coin.Transaction, []transaction.UxBalance, error) { - txn, uxb, err := CreateTransaction(w, p, auxs, headTime) - if err != nil { - return nil, nil, err - } - - logger.Infof("CreateTransactionSigned: signing %d inputs", len(uxb)) - - // Sign the transaction - entriesMap := make(map[cipher.Address]Entry) - for i, s := range uxb { - entry, ok := entriesMap[s.Address] - if !ok { - entry, ok = w.GetEntry(s.Address) - if !ok { - // This should not occur because CreateTransaction should have checked it already - err := fmt.Errorf("Chosen spend address %s not found in wallet", s.Address) - logger.Critical().WithError(err).Error() - return nil, nil, err - } - entriesMap[s.Address] = entry - } - - if err := txn.SignInput(entry.Secret, i); err != nil { - logger.Critical().WithError(err).Errorf("CreateTransaction SignInput(%d) failed", i) - return nil, nil, err - } - } - - // Sanity check the signed transaction - if err := verifyCreatedSignedInvariants(p, txn, uxb); err != nil { - return nil, nil, err - } - - return txn, uxb, nil -} - -func verifyCreatedSignedInvariants(p transaction.Params, txn *coin.Transaction, inputs []transaction.UxBalance) error { - if !txn.IsFullySigned() { - return errors.New("Transaction is not fully signed") - } - - if err := transaction.VerifyCreatedInvariants(p, txn, inputs); err != nil { - return err - } - - return nil -} diff --git a/vendor/github.com/SkycoinProject/skycoin/src/wallet/wallet.go b/vendor/github.com/SkycoinProject/skycoin/src/wallet/wallet.go deleted file mode 100644 index 9546b9b..0000000 --- a/vendor/github.com/SkycoinProject/skycoin/src/wallet/wallet.go +++ /dev/null @@ -1,727 +0,0 @@ -/* -Package wallet implements wallets and the wallet database service -*/ -package wallet - -import ( - "encoding/hex" - "errors" - "fmt" - "io/ioutil" - "os" - "path/filepath" - "strconv" - "strings" - "time" - - "github.com/sirupsen/logrus" 
- - "github.com/SkycoinProject/skycoin/src/cipher" - "github.com/SkycoinProject/skycoin/src/cipher/bip44" - "github.com/SkycoinProject/skycoin/src/util/file" - "github.com/SkycoinProject/skycoin/src/util/logging" -) - -// Error wraps wallet-related errors. -// It wraps errors caused by user input, but not errors caused by programmer input or internal issues. -type Error struct { - error -} - -// NewError creates an Error -func NewError(err error) error { - if err == nil { - return nil - } - return Error{err} -} - -var ( - // Version represents the current wallet version - Version = "0.4" - - logger = logging.MustGetLogger("wallet") - - // ErrInvalidEncryptedField is returned if a wallet's Meta.encrypted value is invalid. - ErrInvalidEncryptedField = NewError(errors.New(`encrypted field value is not valid, must be "true", "false" or ""`)) - // ErrWalletEncrypted is returned when trying to generate addresses or sign tx in encrypted wallet - ErrWalletEncrypted = NewError(errors.New("wallet is encrypted")) - // ErrWalletNotEncrypted is returned when trying to decrypt unencrypted wallet - ErrWalletNotEncrypted = NewError(errors.New("wallet is not encrypted")) - // ErrMissingPassword is returned when trying to create wallet with encryption, but password is not provided. - ErrMissingPassword = NewError(errors.New("missing password")) - // ErrMissingEncrypt is returned when trying to create wallet with password, but options.Encrypt is not set. - ErrMissingEncrypt = NewError(errors.New("missing encrypt")) - // ErrInvalidPassword is returned if decrypts secrets failed - ErrInvalidPassword = NewError(errors.New("invalid password")) - // ErrMissingSeed is returned when trying to create wallet without a seed - ErrMissingSeed = NewError(errors.New("missing seed")) - // ErrMissingAuthenticated is returned if try to decrypt a scrypt chacha20poly1305 encrypted wallet, and find no authenticated metadata. 
- ErrMissingAuthenticated = NewError(errors.New("missing authenticated metadata")) - // ErrWrongCryptoType is returned when decrypting wallet with wrong crypto method - ErrWrongCryptoType = NewError(errors.New("wrong crypto type")) - // ErrWalletNotExist is returned if a wallet does not exist - ErrWalletNotExist = NewError(errors.New("wallet doesn't exist")) - // ErrSeedUsed is returned if a wallet already exists with the same seed - ErrSeedUsed = NewError(errors.New("a wallet already exists with this seed")) - // ErrXPubKeyUsed is returned if a wallet already exists with the same xpub key - ErrXPubKeyUsed = NewError(errors.New("a wallet already exists with this xpub key")) - // ErrWalletAPIDisabled is returned when trying to do wallet actions while the EnableWalletAPI option is false - ErrWalletAPIDisabled = NewError(errors.New("wallet api is disabled")) - // ErrSeedAPIDisabled is returned when trying to get seed of wallet while the EnableWalletAPI or EnableSeedAPI is false - ErrSeedAPIDisabled = NewError(errors.New("wallet seed api is disabled")) - // ErrWalletNameConflict represents the wallet name conflict error - ErrWalletNameConflict = NewError(errors.New("wallet name would conflict with existing wallet, renaming")) - // ErrWalletRecoverSeedWrong is returned if the seed or seed passphrase does not match the specified wallet when recovering - ErrWalletRecoverSeedWrong = NewError(errors.New("wallet recovery seed or seed passphrase is wrong")) - // ErrNilTransactionsFinder is returned if Options.ScanN > 0 but a nil TransactionsFinder was provided - ErrNilTransactionsFinder = NewError(errors.New("scan ahead requested but balance getter is nil")) - // ErrInvalidCoinType is returned for invalid coin types - ErrInvalidCoinType = NewError(errors.New("invalid coin type")) - // ErrInvalidWalletType is returned for invalid wallet types - ErrInvalidWalletType = NewError(errors.New("invalid wallet type")) - // ErrWalletTypeNotRecoverable is returned by RecoverWallet is 
the wallet type does not support recovery - ErrWalletTypeNotRecoverable = NewError(errors.New("wallet type is not recoverable")) - // ErrWalletPermission is returned when updating a wallet without writing permission - ErrWalletPermission = NewError(errors.New("saving wallet permission denied")) -) - -const ( - // WalletExt wallet file extension - WalletExt = "wlt" - - // WalletTimestampFormat wallet timestamp layout - WalletTimestampFormat = "2006_01_02" - - // CoinTypeSkycoin skycoin type - CoinTypeSkycoin CoinType = "skycoin" - // CoinTypeBitcoin bitcoin type - CoinTypeBitcoin CoinType = "bitcoin" - - // WalletTypeDeterministic deterministic wallet type. - // Uses the original Skycoin deterministic key generator. - WalletTypeDeterministic = "deterministic" - // WalletTypeCollection collection wallet type. - // Does not use any key generator; keys must be added explicitly - WalletTypeCollection = "collection" - // WalletTypeBip44 bip44 HD wallet type. - // Follow the bip44 spec. - WalletTypeBip44 = "bip44" - // WalletTypeXPub xpub HD wallet type. 
- // Allows generating addresses without a secret key - WalletTypeXPub = "xpub" -) - -// ResolveCoinType normalizes a coin type string to a CoinType constant -func ResolveCoinType(s string) (CoinType, error) { - switch strings.ToLower(s) { - case "sky", "skycoin": - return CoinTypeSkycoin, nil - case "btc", "bitcoin": - return CoinTypeBitcoin, nil - default: - return CoinType(""), ErrInvalidCoinType - } -} - -// IsValidWalletType returns true if a wallet type is recognized -func IsValidWalletType(t string) bool { - switch t { - case WalletTypeDeterministic, - WalletTypeCollection, - WalletTypeBip44, - WalletTypeXPub: - return true - default: - return false - } -} - -// CoinType represents the wallet coin type, which refers to the pubkey2addr method used -type CoinType string - -// NewWalletFilename generates a filename from the current time and random bytes -func NewWalletFilename() string { - timestamp := time.Now().Format(WalletTimestampFormat) - // should read in wallet files and make sure does not exist - padding := hex.EncodeToString((cipher.RandByte(2))) - return fmt.Sprintf("%s_%s.%s", timestamp, padding, WalletExt) -} - -// Options options that could be used when creating a wallet -type Options struct { - Type string // wallet type: deterministic, collection. Refers to which key generation mechanism is used. - Coin CoinType // coin type: skycoin, bitcoin, etc. Refers to which pubkey2addr method is used. - Bip44Coin *bip44.CoinType // bip44 path coin type - Label string // wallet label - Seed string // wallet seed - SeedPassphrase string // wallet seed passphrase (bip44 wallets only) - Encrypt bool // whether the wallet need to be encrypted. - Password []byte // password that would be used for encryption, and would only be used when 'Encrypt' is true. - CryptoType CryptoType // wallet encryption type, scrypt-chacha20poly1305 or sha256-xor. - ScanN uint64 // number of addresses that're going to be scanned for a balance. 
The highest address with a balance will be used. - GenerateN uint64 // number of addresses to generate, regardless of balance - XPub string // xpub key (xpub wallets only) -} - -// newWallet creates a wallet instance with given name and options. -func newWallet(wltName string, opts Options, tf TransactionsFinder) (Wallet, error) { - wltType := opts.Type - if wltType == "" { - return nil, NewError(errors.New("wallet type is required")) - } - if !IsValidWalletType(wltType) { - return nil, ErrInvalidWalletType - } - - lastSeed := "" - if wltType == WalletTypeDeterministic { - lastSeed = opts.Seed - } - - var bip44Coin bip44.CoinType - if wltType == WalletTypeBip44 { - if opts.Bip44Coin == nil { - switch opts.Coin { - case CoinTypeBitcoin: - bip44Coin = bip44.CoinTypeBitcoin - case CoinTypeSkycoin: - bip44Coin = bip44.CoinTypeSkycoin - default: - bip44Coin = bip44.CoinTypeSkycoin - } - } else { - bip44Coin = *opts.Bip44Coin - } - } - - if opts.SeedPassphrase != "" && wltType != WalletTypeBip44 { - return nil, NewError(fmt.Errorf("seedPassphrase is only used for %q wallets", WalletTypeBip44)) - } - - if opts.XPub != "" && wltType != WalletTypeXPub { - return nil, NewError(fmt.Errorf("xpub is only used for %q wallets", WalletTypeXPub)) - } - - switch wltType { - case WalletTypeDeterministic, WalletTypeBip44: - if opts.Seed == "" { - return nil, ErrMissingSeed - } - - if opts.ScanN > 0 && tf == nil { - return nil, ErrNilTransactionsFinder - } - - case WalletTypeXPub: - if opts.Seed != "" { - return nil, NewError(fmt.Errorf("seed should not be provided for %q wallets", wltType)) - } - - if opts.ScanN > 0 && tf == nil { - return nil, ErrNilTransactionsFinder - } - - case WalletTypeCollection: - if opts.Seed != "" { - return nil, NewError(fmt.Errorf("seed should not be provided for %q wallets", wltType)) - } - - default: - return nil, ErrInvalidWalletType - } - - coin := opts.Coin - if coin == "" { - coin = CoinTypeSkycoin - } - coin, err := ResolveCoinType(string(coin)) - 
if err != nil { - return nil, err - } - - meta := Meta{ - metaFilename: wltName, - metaVersion: Version, - metaLabel: opts.Label, - metaSeed: opts.Seed, - metaLastSeed: lastSeed, - metaSeedPassphrase: opts.SeedPassphrase, - metaTimestamp: strconv.FormatInt(time.Now().Unix(), 10), - metaType: wltType, - metaCoin: string(coin), - metaEncrypted: "false", - metaCryptoType: "", - metaSecrets: "", - metaXPub: opts.XPub, - } - - // Create the wallet - var w Wallet - switch wltType { - case WalletTypeDeterministic: - w, err = newDeterministicWallet(meta) - case WalletTypeCollection: - w, err = newCollectionWallet(meta) - case WalletTypeBip44: - meta.setBip44Coin(bip44Coin) - w, err = newBip44Wallet(meta) - case WalletTypeXPub: - meta.setXPub(opts.XPub) - w, err = newXPubWallet(meta) - default: - logger.Panic("unhandled wltType") - } - - if err != nil { - logger.WithError(err).WithField("walletType", wltType).Error("newWallet failed") - return nil, err - } - - // Generate wallet addresses - switch wltType { - case WalletTypeDeterministic, WalletTypeBip44, WalletTypeXPub: - generateN := opts.GenerateN - if generateN == 0 { - generateN = 1 - } - - logger.WithFields(logrus.Fields{ - "generateN": generateN, - "walletType": wltType, - }).Infof("Generating addresses for wallet") - - if _, err := w.GenerateAddresses(generateN); err != nil { - return nil, err - } - - if opts.ScanN != 0 && coin != CoinTypeSkycoin { - return nil, errors.New("Wallet scanning is only supported for Skycoin address wallets") - } - - if opts.ScanN > generateN { - // Scan for addresses with balances - logger.WithFields(logrus.Fields{ - "scanN": opts.ScanN, - "walletType": wltType, - }).Info("Scanning addresses for wallet") - if err := w.ScanAddresses(opts.ScanN, tf); err != nil { - return nil, err - } - } - - case WalletTypeCollection: - if opts.GenerateN != 0 || opts.ScanN != 0 { - return nil, NewError(fmt.Errorf("wallet scanning is not defined for %q wallets", wltType)) - } - - default: - 
logger.Panic("unhandled wltType") - } - - // Validate the wallet, before encrypting - if err := w.Validate(); err != nil { - return nil, err - } - - // Check if the wallet should be encrypted - if !opts.Encrypt { - if len(opts.Password) != 0 { - return nil, ErrMissingEncrypt - } - return w, nil - } - - // Check if the password is provided - if len(opts.Password) == 0 { - return nil, ErrMissingPassword - } - - // Check crypto type - if opts.CryptoType == "" { - opts.CryptoType = DefaultCryptoType - } - - if _, err := getCrypto(opts.CryptoType); err != nil { - return nil, err - } - - // Encrypt the wallet - if err := Lock(w, opts.Password, opts.CryptoType); err != nil { - return nil, err - } - - // Validate the wallet again, after encrypting - if err := w.Validate(); err != nil { - return nil, err - } - - return w, nil -} - -// NewWallet creates wallet without scanning addresses -func NewWallet(wltName string, opts Options) (Wallet, error) { - return newWallet(wltName, opts, nil) -} - -// NewWalletScanAhead creates wallet and scan ahead N addresses -func NewWalletScanAhead(wltName string, opts Options, tf TransactionsFinder) (Wallet, error) { - return newWallet(wltName, opts, tf) -} - -// Lock encrypts the wallet with the given password and specific crypto type -func Lock(w Wallet, password []byte, cryptoType CryptoType) error { - if len(password) == 0 { - return ErrMissingPassword - } - - if w.IsEncrypted() { - return ErrWalletEncrypted - } - - wlt := w.Clone() - - // Records seeds in secrets - ss := make(Secrets) - defer func() { - // Wipes all unencrypted sensitive data - ss.erase() - wlt.Erase() - }() - - wlt.PackSecrets(ss) - - sb, err := ss.serialize() - if err != nil { - return err - } - - crypto, err := getCrypto(cryptoType) - if err != nil { - return err - } - - // Encrypts the secrets - encSecret, err := crypto.Encrypt(sb, password) - if err != nil { - return err - } - - // Sets wallet as encrypted - wlt.SetEncrypted(cryptoType, string(encSecret)) - - // 
Update the wallet to the latest version, which indicates encryption support - wlt.SetVersion(Version) - - // Wipes unencrypted sensitive data - wlt.Erase() - - // Wipes the secret fields in w - w.Erase() - - // Replace the original wallet with new encrypted wallet - w.CopyFrom(wlt) - return nil -} - -// Unlock decrypts the wallet into a temporary decrypted copy of the wallet -// Returns error if the decryption fails -// The temporary decrypted wallet should be erased from memory when done. -func Unlock(w Wallet, password []byte) (Wallet, error) { - if !w.IsEncrypted() { - return nil, ErrWalletNotEncrypted - } - - if len(password) == 0 { - return nil, ErrMissingPassword - } - - wlt := w.Clone() - - // Gets the secrets string - sstr := w.Secrets() - if sstr == "" { - return nil, errors.New("secrets missing from wallet") - } - - ct := w.CryptoType() - if ct == "" { - return nil, errors.New("missing crypto type") - } - - // Gets the crypto module - crypto, err := getCrypto(ct) - if err != nil { - return nil, err - } - - // Decrypts the secrets - sb, err := crypto.Decrypt([]byte(sstr), password) - if err != nil { - return nil, ErrInvalidPassword - } - - defer func() { - // Wipe the data from the secrets bytes buffer - for i := range sb { - sb[i] = 0 - } - }() - - // Deserialize into secrets - ss := make(Secrets) - defer ss.erase() - if err := ss.deserialize(sb); err != nil { - return nil, err - } - - if err := wlt.UnpackSecrets(ss); err != nil { - return nil, err - } - - wlt.SetDecrypted() - - return wlt, nil -} - -// Wallet defines the wallet API -type Wallet interface { - Find(string) string - Seed() string - LastSeed() string - SeedPassphrase() string - Timestamp() int64 - SetTimestamp(int64) - Coin() CoinType - Bip44Coin() bip44.CoinType - Type() string - Label() string - SetLabel(string) - Filename() string - IsEncrypted() bool - SetEncrypted(cryptoType CryptoType, encryptedSecrets string) - SetDecrypted() - CryptoType() CryptoType - Version() string - 
SetVersion(string) - AddressConstructor() func(cipher.PubKey) cipher.Addresser - Secrets() string - XPub() string - - UnpackSecrets(ss Secrets) error - PackSecrets(ss Secrets) - - Erase() - Clone() Wallet - CopyFrom(src Wallet) - CopyFromRef(src Wallet) - - ToReadable() Readable - - Validate() error - - Fingerprint() string - GetAddresses() []cipher.Addresser - GetSkycoinAddresses() ([]cipher.Address, error) - GetEntryAt(i int) Entry - GetEntry(cipher.Address) (Entry, bool) - HasEntry(cipher.Address) bool - EntriesLen() int - GetEntries() Entries - - GenerateAddresses(num uint64) ([]cipher.Addresser, error) - GenerateSkycoinAddresses(num uint64) ([]cipher.Address, error) - ScanAddresses(scanN uint64, tf TransactionsFinder) error -} - -// GuardUpdate executes a function within the context of a read-write managed decrypted wallet. -// Returns ErrWalletNotEncrypted if wallet is not encrypted. -func GuardUpdate(w Wallet, password []byte, fn func(w Wallet) error) error { - if !w.IsEncrypted() { - return ErrWalletNotEncrypted - } - - if len(password) == 0 { - return ErrMissingPassword - } - - cryptoType := w.CryptoType() - wlt, err := Unlock(w, password) - if err != nil { - return err - } - - defer wlt.Erase() - - if err := fn(wlt); err != nil { - return err - } - - if err := Lock(wlt, password, cryptoType); err != nil { - return err - } - - w.CopyFromRef(wlt) - - // Wipes all sensitive data - w.Erase() - return nil -} - -// GuardView executes a function within the context of a read-only managed decrypted wallet. -// Returns ErrWalletNotEncrypted if wallet is not encrypted. 
-func GuardView(w Wallet, password []byte, f func(w Wallet) error) error { - if !w.IsEncrypted() { - return ErrWalletNotEncrypted - } - - if len(password) == 0 { - return ErrMissingPassword - } - - wlt, err := Unlock(w, password) - if err != nil { - return err - } - - defer wlt.Erase() - - return f(wlt) -} - -type walletLoadMeta struct { - Meta struct { - Type string `json:"type"` - } `json:"meta"` -} - -type walletLoader interface { - SetFilename(string) - SetCoin(CoinType) - Coin() CoinType - ToWallet() (Wallet, error) -} - -// Load loads wallet from a given file -func Load(filename string) (Wallet, error) { - if _, err := os.Stat(filename); os.IsNotExist(err) { - return nil, fmt.Errorf("wallet %q doesn't exist", filename) - } - - // Load the wallet meta type field from JSON - var m walletLoadMeta - if err := file.LoadJSON(filename, &m); err != nil { - logger.WithError(err).WithField("filename", filename).Error("Load: file.LoadJSON failed") - return nil, err - } - - if !IsValidWalletType(m.Meta.Type) { - logger.WithError(ErrInvalidWalletType).WithFields(logrus.Fields{ - "filename": filename, - "walletType": m.Meta.Type, - }).Error("wallet meta loaded from disk has invalid wallet type") - return nil, fmt.Errorf("invalid wallet %q: %v", filename, ErrInvalidWalletType) - } - - // Depending on the wallet type in the wallet metadata header, load the full wallet data - var rw walletLoader - var err error - switch m.Meta.Type { - case WalletTypeDeterministic: - logger.WithField("filename", filename).Info("LoadReadableDeterministicWallet") - rw, err = LoadReadableDeterministicWallet(filename) - case WalletTypeCollection: - logger.WithField("filename", filename).Info("LoadReadableCollectionWallet") - rw, err = LoadReadableCollectionWallet(filename) - case WalletTypeBip44: - logger.WithField("filename", filename).Info("LoadReadableBip44Wallet") - rw, err = LoadReadableBip44Wallet(filename) - case WalletTypeXPub: - logger.WithField("filename", 
filename).Info("LoadReadableXPubWallet") - rw, err = LoadReadableXPubWallet(filename) - default: - err := errors.New("unhandled wallet type") - logger.WithField("walletType", m.Meta.Type).WithError(err).Error("Load failed") - return nil, err - } - - if err != nil { - logger.WithError(err).WithFields(logrus.Fields{ - "filename": filename, - "walletType": m.Meta.Type, - }).Error("Load readable wallet failed") - return nil, err - } - - // Make sure "sky", "btc" normalize to "skycoin", "bitcoin" - ct, err := ResolveCoinType(string(rw.Coin())) - if err != nil { - logger.WithError(err).WithField("coinType", rw.Coin()).Error("Load: invalid coin type") - return nil, fmt.Errorf("invalid wallet %q: %v", filename, err) - } - rw.SetCoin(ct) - - rw.SetFilename(filepath.Base(filename)) - - return rw.ToWallet() -} - -// Save saves the wallet to a directory. The wallet's filename is read from its metadata. -func Save(w Wallet, dir string) error { - rw := w.ToReadable() - return file.SaveJSON(filepath.Join(dir, rw.Filename()), rw, 0600) -} - -// removeBackupFiles removes any *.wlt.bak files whom have version 0.1 and *.wlt matched in the given directory -func removeBackupFiles(dir string) error { - fs, err := filterDir(dir, ".wlt") - if err != nil { - return err - } - - // Creates the .wlt file map - fm := make(map[string]struct{}) - for _, f := range fs { - fm[f] = struct{}{} - } - - // Filters all .wlt.bak files in the directory - bakFs, err := filterDir(dir, ".wlt.bak") - if err != nil { - return err - } - - // Removes the .wlt.bak file that has .wlt matched. 
- for _, bf := range bakFs { - f := strings.TrimRight(bf, ".bak") - if _, ok := fm[f]; ok { - // Load and check the wallet version - w, err := Load(f) - if err != nil { - return err - } - - if w.Version() == "0.1" { - if err := os.Remove(bf); err != nil { - return err - } - } - } - } - - return nil -} - -func filterDir(dir string, suffix string) ([]string, error) { - files, err := ioutil.ReadDir(dir) - if err != nil { - return nil, err - } - res := []string{} - for _, f := range files { - if !f.IsDir() && strings.HasSuffix(f.Name(), suffix) { - res = append(res, filepath.Join(dir, f.Name())) - } - } - return res, nil -} diff --git a/vendor/github.com/SkycoinProject/skycoin/src/wallet/wallets.go b/vendor/github.com/SkycoinProject/skycoin/src/wallet/wallets.go deleted file mode 100644 index 83f32cf..0000000 --- a/vendor/github.com/SkycoinProject/skycoin/src/wallet/wallets.go +++ /dev/null @@ -1,126 +0,0 @@ -package wallet - -import ( - "fmt" - "io/ioutil" - "path/filepath" - "strings" -) - -// Wallets wallets map -type Wallets map[string]Wallet - -// loadWallets Loads all wallets contained in wallet dir. If any regular file in wallet -// dir fails to load, loading is aborted and error returned. Only files with -// extension WalletExt are considered. 
-func loadWallets(dir string) (Wallets, error) { - entries, err := ioutil.ReadDir(dir) - if err != nil { - logger.WithError(err).WithField("dir", dir).Error("loadWallets: ioutil.ReadDir failed") - return nil, err - } - - wallets := Wallets{} - for _, e := range entries { - if e.Mode().IsRegular() { - name := e.Name() - if !strings.HasSuffix(name, WalletExt) { - logger.WithField("filename", name).Info("loadWallets: skipping file") - continue - } - - fullpath := filepath.Join(dir, name) - w, err := Load(fullpath) - if err != nil { - logger.WithError(err).WithField("filename", fullpath).Error("loadWallets: loadWallet failed") - return nil, err - } - - logger.WithField("filename", fullpath).Info("loadWallets: loaded wallet") - - wallets[name] = w - } - } - - for name, w := range wallets { - if err := w.Validate(); err != nil { - logger.WithError(err).WithField("name", name).Error("loadWallets: wallet.Validate failed") - return nil, err - } - - if w.Coin() != CoinTypeSkycoin { - err := fmt.Errorf("LoadWallets only support skycoin wallets, %s is a %s wallet", name, w.Coin()) - logger.WithError(err).WithField("name", name).Error() - return nil, err - } - } - - return wallets, nil -} - -// add add walet to current wallet -func (wlts Wallets) add(w Wallet) error { - if _, dup := wlts[w.Filename()]; dup { - return ErrWalletNameConflict - } - - wlts[w.Filename()] = w - return nil -} - -// remove wallet of specific id -func (wlts Wallets) remove(id string) { - delete(wlts, id) -} - -// get returns wallet by wallet id -func (wlts Wallets) get(id string) Wallet { - return wlts[id] -} - -// set sets a wallet into the map -func (wlts Wallets) set(w Wallet) { - wlts[w.Filename()] = w.Clone() -} - -// containsDuplicate returns true if there is a duplicate wallet identified by -// the wallet's fingerprint. 
This is to detect duplicate generative wallets; -// wallets with no defined generation method do not have a concept of being -// a duplicate of another wallet -func (wlts Wallets) containsDuplicate() (string, string, bool) { - m := make(map[string]struct{}, len(wlts)) - for wltID, wlt := range wlts { - fp := wlt.Fingerprint() - if fp == "" { - continue - } - - if _, ok := m[fp]; ok { - return wltID, fp, true - } - - m[fp] = struct{}{} - } - - return "", "", false -} - -// containsEmpty returns true there is an empty wallet and the ID of that wallet if true. -// Does not apply to collection wallets -func (wlts Wallets) containsEmpty() (string, bool) { - for wltID, wlt := range wlts { - switch wlt.Type() { - case WalletTypeCollection: - continue - case WalletTypeDeterministic: - if wlt.EntriesLen() == 0 { - return wltID, true - } - case WalletTypeBip44: - if len(wlt.(*Bip44Wallet).ExternalEntries) == 0 { - return wltID, true - } - } - } - return "", false -} diff --git a/vendor/github.com/SkycoinProject/skycoin/src/wallet/xpub_wallet.go b/vendor/github.com/SkycoinProject/skycoin/src/wallet/xpub_wallet.go deleted file mode 100644 index 32f048c..0000000 --- a/vendor/github.com/SkycoinProject/skycoin/src/wallet/xpub_wallet.go +++ /dev/null @@ -1,348 +0,0 @@ -package wallet - -import ( - "errors" - "fmt" - "math" - "sort" - - "github.com/sirupsen/logrus" - - "github.com/SkycoinProject/skycoin/src/cipher" - "github.com/SkycoinProject/skycoin/src/cipher/bip32" - "github.com/SkycoinProject/skycoin/src/util/file" - "github.com/SkycoinProject/skycoin/src/util/mathutil" -) - -// XPubWallet holds a single xpub (extended public key) and derives child public keys from it. -// Refer to the bip32 spec to understand xpub keys. -// XPub wallets can generate new addresses and receive coins, but can't spend coins -// because the private keys are not available. 
-type XPubWallet struct { - Meta - Entries Entries - xpub *bip32.PublicKey -} - -// newXPubWallet creates a XPubWallet -func newXPubWallet(meta Meta) (*XPubWallet, error) { - xpub, err := parseXPub(meta.XPub()) - if err != nil { - return nil, err - } - - return &XPubWallet{ - Meta: meta, - xpub: xpub, - }, nil -} - -func parseXPub(xp string) (*bip32.PublicKey, error) { - xpub, err := bip32.DeserializeEncodedPublicKey(xp) - if err != nil { - logger.WithError(err).Error("bip32.DeserializeEncodedPublicKey failed") - return nil, NewError(fmt.Errorf("invalid xpub key: %v", err)) - } - - return xpub, nil -} - -// PackSecrets does nothing because XPubWallet has no secrets -func (w *XPubWallet) PackSecrets(ss Secrets) { -} - -// UnpackSecrets does nothing because XPubWallet has no secrets -func (w *XPubWallet) UnpackSecrets(ss Secrets) error { - return nil -} - -// Clone clones the wallet a new wallet object -func (w *XPubWallet) Clone() Wallet { - xpub, err := parseXPub(w.Meta.XPub()) - if err != nil { - logger.WithError(err).Panic("Clone parseXPub failed") - } - - return &XPubWallet{ - Meta: w.Meta.clone(), - Entries: w.Entries.clone(), - xpub: xpub, - } -} - -// CopyFrom copies the src wallet to w -func (w *XPubWallet) CopyFrom(src Wallet) { - xpub, err := parseXPub(src.XPub()) - if err != nil { - logger.WithError(err).Panic("CopyFrom parseXPub failed") - } - w.xpub = xpub - w.Meta = src.(*XPubWallet).Meta.clone() - w.Entries = src.(*XPubWallet).Entries.clone() -} - -// CopyFromRef copies the src wallet with a pointer dereference -func (w *XPubWallet) CopyFromRef(src Wallet) { - xpub, err := parseXPub(src.XPub()) - if err != nil { - logger.WithError(err).Panic("CopyFromRef parseXPub failed") - } - - *w = *(src.(*XPubWallet)) - w.xpub = xpub -} - -// Erase wipes secret fields in wallet -func (w *XPubWallet) Erase() { - w.Meta.eraseSeeds() - w.Entries.erase() -} - -// ToReadable converts the wallet to its readable (serializable) format -func (w *XPubWallet) ToReadable() 
Readable { - return NewReadableXPubWallet(w) -} - -// Validate validates the wallet -func (w *XPubWallet) Validate() error { - return w.Meta.validate() -} - -// GetAddresses returns all addresses in wallet -func (w *XPubWallet) GetAddresses() []cipher.Addresser { - return w.Entries.getAddresses() -} - -// GetSkycoinAddresses returns all Skycoin addresses in wallet. The wallet's coin type must be Skycoin. -func (w *XPubWallet) GetSkycoinAddresses() ([]cipher.Address, error) { - if w.Meta.Coin() != CoinTypeSkycoin { - return nil, errors.New("XPubWallet coin type is not skycoin") - } - - return w.Entries.getSkycoinAddresses(), nil -} - -// GetEntries returns a copy of all entries held by the wallet -func (w *XPubWallet) GetEntries() Entries { - return w.Entries.clone() -} - -// EntriesLen returns the number of entries in the wallet -func (w *XPubWallet) EntriesLen() int { - return len(w.Entries) -} - -// GetEntryAt returns entry at a given index in the entries array -func (w *XPubWallet) GetEntryAt(i int) Entry { - return w.Entries[i] -} - -// GetEntry returns entry of given address -func (w *XPubWallet) GetEntry(a cipher.Address) (Entry, bool) { - return w.Entries.get(a) -} - -// HasEntry returns true if the wallet has an Entry with a given cipher.Address. 
-func (w *XPubWallet) HasEntry(a cipher.Address) bool { - return w.Entries.has(a) -} - -// generateEntries generates up to `num` addresses -func (w *XPubWallet) generateEntries(num uint64, initialChildIdx uint32) (Entries, error) { - if w.Meta.IsEncrypted() { - return nil, ErrWalletEncrypted - } - - if num > math.MaxUint32 { - return nil, NewError(errors.New("XPubWallet.generateEntries num too large")) - } - - // Cap `num` in case it would exceed the maximum child index number - if math.MaxUint32-initialChildIdx < uint32(num) { - num = uint64(math.MaxUint32 - initialChildIdx) - } - - if num == 0 { - return nil, nil - } - - // Generate `num` secret keys from the external chain HDNode, skipping any children that - // are invalid (note that this has probability ~2^-128) - var pubkeys []*bip32.PublicKey - var addressIndices []uint32 - j := initialChildIdx - for i := uint32(0); i < uint32(num); i++ { - k, err := w.xpub.NewPublicChildKey(j) - - var addErr error - j, addErr = mathutil.AddUint32(j, 1) - if addErr != nil { - logger.Critical().WithError(addErr).WithFields(logrus.Fields{ - "num": num, - "initialChildIdx": initialChildIdx, - "childIdx": j, - "i": i, - }).Error("childIdx can't be incremented any further") - return nil, errors.New("childIdx can't be incremented any further") - } - - if err != nil { - if bip32.IsImpossibleChildError(err) { - logger.Critical().WithError(err).WithField("childIdx", j).Error("ImpossibleChild for xpub child element") - continue - } else { - logger.Critical().WithError(err).WithField("childIdx", j).Error("NewPublicChildKey failed unexpectedly") - return nil, err - } - } - - pubkeys = append(pubkeys, k) - addressIndices = append(addressIndices, j-1) - } - - entries := make(Entries, len(pubkeys)) - makeAddress := w.Meta.AddressConstructor() - for i, xp := range pubkeys { - pk := cipher.MustNewPubKey(xp.Key) - entries[i] = Entry{ - Address: makeAddress(pk), - Public: pk, - ChildNumber: addressIndices[i], - } - } - - return entries, nil -} 
- -// GenerateAddresses generates addresses for the external chain, and appends them to the wallet's entries array -func (w *XPubWallet) GenerateAddresses(num uint64) ([]cipher.Addresser, error) { - entries, err := w.generateEntries(num, nextChildIdx(w.Entries)) - if err != nil { - return nil, err - } - - w.Entries = append(w.Entries, entries...) - - return entries.getAddresses(), nil -} - -// GenerateSkycoinAddresses generates Skycoin addresses for the external chain, and appends them to the wallet's entries array. -// If the wallet's coin type is not Skycoin, returns an error -func (w *XPubWallet) GenerateSkycoinAddresses(num uint64) ([]cipher.Address, error) { - if w.Meta.Coin() != CoinTypeSkycoin { - return nil, errors.New("GenerateSkycoinAddresses called for non-skycoin wallet") - } - - entries, err := w.generateEntries(num, nextChildIdx(w.Entries)) - if err != nil { - return nil, err - } - - w.Entries = append(w.Entries, entries...) - - return entries.getSkycoinAddresses(), nil -} - -// ScanAddresses scans ahead N addresses, -// truncating up to the highest address with any transaction history. -func (w *XPubWallet) ScanAddresses(scanN uint64, tf TransactionsFinder) error { - if w.Meta.IsEncrypted() { - return ErrWalletEncrypted - } - - if scanN == 0 { - return nil - } - - w2 := w.Clone().(*XPubWallet) - - entries, err := scanAddressesBip32(func(num uint64, childIdx uint32) (Entries, error) { - return w2.generateEntries(num, childIdx) - }, scanN, tf, nextChildIdx(w2.Entries)) - if err != nil { - return err - } - - w2.Entries = append(w2.Entries, entries...) 
- - *w = *w2 - - return nil -} - -// Fingerprint returns a unique ID fingerprint for this wallet, using the first -// child address of the xpub key -func (w *XPubWallet) Fingerprint() string { - // Note: the xpub key is not used as the fingerprint, because it is - // partially sensitive data - addr := "" - if len(w.Entries) == 0 { - if !w.IsEncrypted() { - entries, err := w.generateEntries(1, 0) - if err != nil { - logger.WithError(err).Panic("Fingerprint failed to generate initial entry for empty wallet") - } - addr = entries[0].Address.String() - } - } else { - addr = w.Entries[0].Address.String() - } - - return fmt.Sprintf("%s-%s", w.Type(), addr) -} - -// ReadableXPubWallet used for [de]serialization of an xpub wallet -type ReadableXPubWallet struct { - Meta `json:"meta"` - ReadableEntries `json:"entries"` -} - -// LoadReadableXPubWallet loads an xpub wallet from disk -func LoadReadableXPubWallet(wltFile string) (*ReadableXPubWallet, error) { - var rw ReadableXPubWallet - if err := file.LoadJSON(wltFile, &rw); err != nil { - return nil, err - } - if rw.Type() != WalletTypeXPub { - return nil, ErrInvalidWalletType - } - return &rw, nil -} - -// NewReadableXPubWallet creates readable wallet -func NewReadableXPubWallet(w *XPubWallet) *ReadableXPubWallet { - return &ReadableXPubWallet{ - Meta: w.Meta.clone(), - ReadableEntries: newReadableEntries(w.Entries, w.Meta.Coin(), w.Meta.Type()), - } -} - -// ToWallet convert readable wallet to Wallet -func (rw *ReadableXPubWallet) ToWallet() (Wallet, error) { - w := &XPubWallet{ - Meta: rw.Meta.clone(), - } - - if err := w.Validate(); err != nil { - err := fmt.Errorf("invalid wallet %q: %v", w.Filename(), err) - logger.WithError(err).Error("ReadableXPubWallet.ToWallet Validate failed") - return nil, err - } - - ets, err := rw.ReadableEntries.toWalletEntries(w.Meta.Coin(), w.Meta.Type(), w.Meta.IsEncrypted()) - if err != nil { - logger.WithError(err).Error("ReadableXPubWallet.ToWallet toWalletEntries failed") - return nil, 
err - } - - w.Entries = ets - - // Sort childNumber low to high - sort.Slice(w.Entries, func(i, j int) bool { - return w.Entries[i].ChildNumber < w.Entries[j].ChildNumber - }) - - w.Entries = ets - - return w, nil -} diff --git a/vendor/github.com/StackExchange/wmi/LICENSE b/vendor/github.com/StackExchange/wmi/LICENSE deleted file mode 100644 index ae80b67..0000000 --- a/vendor/github.com/StackExchange/wmi/LICENSE +++ /dev/null @@ -1,20 +0,0 @@ -The MIT License (MIT) - -Copyright (c) 2013 Stack Exchange - -Permission is hereby granted, free of charge, to any person obtaining a copy of -this software and associated documentation files (the "Software"), to deal in -the Software without restriction, including without limitation the rights to -use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of -the Software, and to permit persons to whom the Software is furnished to do so, -subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS -FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR -COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER -IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/vendor/github.com/StackExchange/wmi/README.md b/vendor/github.com/StackExchange/wmi/README.md deleted file mode 100644 index 426d1a4..0000000 --- a/vendor/github.com/StackExchange/wmi/README.md +++ /dev/null @@ -1,6 +0,0 @@ -wmi -=== - -Package wmi provides a WQL interface to Windows WMI. - -Note: It interfaces with WMI on the local machine, therefore it only runs on Windows. 
diff --git a/vendor/github.com/StackExchange/wmi/swbemservices.go b/vendor/github.com/StackExchange/wmi/swbemservices.go deleted file mode 100644 index 9765a53..0000000 --- a/vendor/github.com/StackExchange/wmi/swbemservices.go +++ /dev/null @@ -1,260 +0,0 @@ -// +build windows - -package wmi - -import ( - "fmt" - "reflect" - "runtime" - "sync" - - "github.com/go-ole/go-ole" - "github.com/go-ole/go-ole/oleutil" -) - -// SWbemServices is used to access wmi. See https://msdn.microsoft.com/en-us/library/aa393719(v=vs.85).aspx -type SWbemServices struct { - //TODO: track namespace. Not sure if we can re connect to a different namespace using the same instance - cWMIClient *Client //This could also be an embedded struct, but then we would need to branch on Client vs SWbemServices in the Query method - sWbemLocatorIUnknown *ole.IUnknown - sWbemLocatorIDispatch *ole.IDispatch - queries chan *queryRequest - closeError chan error - lQueryorClose sync.Mutex -} - -type queryRequest struct { - query string - dst interface{} - args []interface{} - finished chan error -} - -// InitializeSWbemServices will return a new SWbemServices object that can be used to query WMI -func InitializeSWbemServices(c *Client, connectServerArgs ...interface{}) (*SWbemServices, error) { - //fmt.Println("InitializeSWbemServices: Starting") - //TODO: implement connectServerArgs as optional argument for init with connectServer call - s := new(SWbemServices) - s.cWMIClient = c - s.queries = make(chan *queryRequest) - initError := make(chan error) - go s.process(initError) - - err, ok := <-initError - if ok { - return nil, err //Send error to caller - } - //fmt.Println("InitializeSWbemServices: Finished") - return s, nil -} - -// Close will clear and release all of the SWbemServices resources -func (s *SWbemServices) Close() error { - s.lQueryorClose.Lock() - if s == nil || s.sWbemLocatorIDispatch == nil { - s.lQueryorClose.Unlock() - return fmt.Errorf("SWbemServices is not Initialized") - } - if 
s.queries == nil { - s.lQueryorClose.Unlock() - return fmt.Errorf("SWbemServices has been closed") - } - //fmt.Println("Close: sending close request") - var result error - ce := make(chan error) - s.closeError = ce //Race condition if multiple callers to close. May need to lock here - close(s.queries) //Tell background to shut things down - s.lQueryorClose.Unlock() - err, ok := <-ce - if ok { - result = err - } - //fmt.Println("Close: finished") - return result -} - -func (s *SWbemServices) process(initError chan error) { - //fmt.Println("process: starting background thread initialization") - //All OLE/WMI calls must happen on the same initialized thead, so lock this goroutine - runtime.LockOSThread() - defer runtime.LockOSThread() - - err := ole.CoInitializeEx(0, ole.COINIT_MULTITHREADED) - if err != nil { - oleCode := err.(*ole.OleError).Code() - if oleCode != ole.S_OK && oleCode != S_FALSE { - initError <- fmt.Errorf("ole.CoInitializeEx error: %v", err) - return - } - } - defer ole.CoUninitialize() - - unknown, err := oleutil.CreateObject("WbemScripting.SWbemLocator") - if err != nil { - initError <- fmt.Errorf("CreateObject SWbemLocator error: %v", err) - return - } else if unknown == nil { - initError <- ErrNilCreateObject - return - } - defer unknown.Release() - s.sWbemLocatorIUnknown = unknown - - dispatch, err := s.sWbemLocatorIUnknown.QueryInterface(ole.IID_IDispatch) - if err != nil { - initError <- fmt.Errorf("SWbemLocator QueryInterface error: %v", err) - return - } - defer dispatch.Release() - s.sWbemLocatorIDispatch = dispatch - - // we can't do the ConnectServer call outside the loop unless we find a way to track and re-init the connectServerArgs - //fmt.Println("process: initialized. 
closing initError") - close(initError) - //fmt.Println("process: waiting for queries") - for q := range s.queries { - //fmt.Printf("process: new query: len(query)=%d\n", len(q.query)) - errQuery := s.queryBackground(q) - //fmt.Println("process: s.queryBackground finished") - if errQuery != nil { - q.finished <- errQuery - } - close(q.finished) - } - //fmt.Println("process: queries channel closed") - s.queries = nil //set channel to nil so we know it is closed - //TODO: I think the Release/Clear calls can panic if things are in a bad state. - //TODO: May need to recover from panics and send error to method caller instead. - close(s.closeError) -} - -// Query runs the WQL query using a SWbemServices instance and appends the values to dst. -// -// dst must have type *[]S or *[]*S, for some struct type S. Fields selected in -// the query must have the same name in dst. Supported types are all signed and -// unsigned integers, time.Time, string, bool, or a pointer to one of those. -// Array types are not supported. -// -// By default, the local machine and default namespace are used. These can be -// changed using connectServerArgs. See -// http://msdn.microsoft.com/en-us/library/aa393720.aspx for details. 
-func (s *SWbemServices) Query(query string, dst interface{}, connectServerArgs ...interface{}) error { - s.lQueryorClose.Lock() - if s == nil || s.sWbemLocatorIDispatch == nil { - s.lQueryorClose.Unlock() - return fmt.Errorf("SWbemServices is not Initialized") - } - if s.queries == nil { - s.lQueryorClose.Unlock() - return fmt.Errorf("SWbemServices has been closed") - } - - //fmt.Println("Query: Sending query request") - qr := queryRequest{ - query: query, - dst: dst, - args: connectServerArgs, - finished: make(chan error), - } - s.queries <- &qr - s.lQueryorClose.Unlock() - err, ok := <-qr.finished - if ok { - //fmt.Println("Query: Finished with error") - return err //Send error to caller - } - //fmt.Println("Query: Finished") - return nil -} - -func (s *SWbemServices) queryBackground(q *queryRequest) error { - if s == nil || s.sWbemLocatorIDispatch == nil { - return fmt.Errorf("SWbemServices is not Initialized") - } - wmi := s.sWbemLocatorIDispatch //Should just rename in the code, but this will help as we break things apart - //fmt.Println("queryBackground: Starting") - - dv := reflect.ValueOf(q.dst) - if dv.Kind() != reflect.Ptr || dv.IsNil() { - return ErrInvalidEntityType - } - dv = dv.Elem() - mat, elemType := checkMultiArg(dv) - if mat == multiArgTypeInvalid { - return ErrInvalidEntityType - } - - // service is a SWbemServices - serviceRaw, err := oleutil.CallMethod(wmi, "ConnectServer", q.args...) 
- if err != nil { - return err - } - service := serviceRaw.ToIDispatch() - defer serviceRaw.Clear() - - // result is a SWBemObjectSet - resultRaw, err := oleutil.CallMethod(service, "ExecQuery", q.query) - if err != nil { - return err - } - result := resultRaw.ToIDispatch() - defer resultRaw.Clear() - - count, err := oleInt64(result, "Count") - if err != nil { - return err - } - - enumProperty, err := result.GetProperty("_NewEnum") - if err != nil { - return err - } - defer enumProperty.Clear() - - enum, err := enumProperty.ToIUnknown().IEnumVARIANT(ole.IID_IEnumVariant) - if err != nil { - return err - } - if enum == nil { - return fmt.Errorf("can't get IEnumVARIANT, enum is nil") - } - defer enum.Release() - - // Initialize a slice with Count capacity - dv.Set(reflect.MakeSlice(dv.Type(), 0, int(count))) - - var errFieldMismatch error - for itemRaw, length, err := enum.Next(1); length > 0; itemRaw, length, err = enum.Next(1) { - if err != nil { - return err - } - - err := func() error { - // item is a SWbemObject, but really a Win32_Process - item := itemRaw.ToIDispatch() - defer item.Release() - - ev := reflect.New(elemType) - if err = s.cWMIClient.loadEntity(ev.Interface(), item); err != nil { - if _, ok := err.(*ErrFieldMismatch); ok { - // We continue loading entities even in the face of field mismatch errors. - // If we encounter any other error, that other error is returned. Otherwise, - // an ErrFieldMismatch is returned. 
- errFieldMismatch = err - } else { - return err - } - } - if mat != multiArgTypeStructPtr { - ev = ev.Elem() - } - dv.Set(reflect.Append(dv, ev)) - return nil - }() - if err != nil { - return err - } - } - //fmt.Println("queryBackground: Finished") - return errFieldMismatch -} diff --git a/vendor/github.com/StackExchange/wmi/wmi.go b/vendor/github.com/StackExchange/wmi/wmi.go deleted file mode 100644 index a951b12..0000000 --- a/vendor/github.com/StackExchange/wmi/wmi.go +++ /dev/null @@ -1,486 +0,0 @@ -// +build windows - -/* -Package wmi provides a WQL interface for WMI on Windows. - -Example code to print names of running processes: - - type Win32_Process struct { - Name string - } - - func main() { - var dst []Win32_Process - q := wmi.CreateQuery(&dst, "") - err := wmi.Query(q, &dst) - if err != nil { - log.Fatal(err) - } - for i, v := range dst { - println(i, v.Name) - } - } - -*/ -package wmi - -import ( - "bytes" - "errors" - "fmt" - "log" - "os" - "reflect" - "runtime" - "strconv" - "strings" - "sync" - "time" - - "github.com/go-ole/go-ole" - "github.com/go-ole/go-ole/oleutil" -) - -var l = log.New(os.Stdout, "", log.LstdFlags) - -var ( - ErrInvalidEntityType = errors.New("wmi: invalid entity type") - // ErrNilCreateObject is the error returned if CreateObject returns nil even - // if the error was nil. - ErrNilCreateObject = errors.New("wmi: create object returned nil") - lock sync.Mutex -) - -// S_FALSE is returned by CoInitializeEx if it was already called on this thread. -const S_FALSE = 0x00000001 - -// QueryNamespace invokes Query with the given namespace on the local machine. -func QueryNamespace(query string, dst interface{}, namespace string) error { - return Query(query, dst, nil, namespace) -} - -// Query runs the WQL query and appends the values to dst. -// -// dst must have type *[]S or *[]*S, for some struct type S. Fields selected in -// the query must have the same name in dst. 
Supported types are all signed and -// unsigned integers, time.Time, string, bool, or a pointer to one of those. -// Array types are not supported. -// -// By default, the local machine and default namespace are used. These can be -// changed using connectServerArgs. See -// http://msdn.microsoft.com/en-us/library/aa393720.aspx for details. -// -// Query is a wrapper around DefaultClient.Query. -func Query(query string, dst interface{}, connectServerArgs ...interface{}) error { - if DefaultClient.SWbemServicesClient == nil { - return DefaultClient.Query(query, dst, connectServerArgs...) - } - return DefaultClient.SWbemServicesClient.Query(query, dst, connectServerArgs...) -} - -// A Client is an WMI query client. -// -// Its zero value (DefaultClient) is a usable client. -type Client struct { - // NonePtrZero specifies if nil values for fields which aren't pointers - // should be returned as the field types zero value. - // - // Setting this to true allows stucts without pointer fields to be used - // without the risk failure should a nil value returned from WMI. - NonePtrZero bool - - // PtrNil specifies if nil values for pointer fields should be returned - // as nil. - // - // Setting this to true will set pointer fields to nil where WMI - // returned nil, otherwise the types zero value will be returned. - PtrNil bool - - // AllowMissingFields specifies that struct fields not present in the - // query result should not result in an error. - // - // Setting this to true allows custom queries to be used with full - // struct definitions instead of having to define multiple structs. - AllowMissingFields bool - - // SWbemServiceClient is an optional SWbemServices object that can be - // initialized and then reused across multiple queries. If it is null - // then the method will initialize a new temporary client each time. 
- SWbemServicesClient *SWbemServices -} - -// DefaultClient is the default Client and is used by Query, QueryNamespace -var DefaultClient = &Client{} - -// Query runs the WQL query and appends the values to dst. -// -// dst must have type *[]S or *[]*S, for some struct type S. Fields selected in -// the query must have the same name in dst. Supported types are all signed and -// unsigned integers, time.Time, string, bool, or a pointer to one of those. -// Array types are not supported. -// -// By default, the local machine and default namespace are used. These can be -// changed using connectServerArgs. See -// http://msdn.microsoft.com/en-us/library/aa393720.aspx for details. -func (c *Client) Query(query string, dst interface{}, connectServerArgs ...interface{}) error { - dv := reflect.ValueOf(dst) - if dv.Kind() != reflect.Ptr || dv.IsNil() { - return ErrInvalidEntityType - } - dv = dv.Elem() - mat, elemType := checkMultiArg(dv) - if mat == multiArgTypeInvalid { - return ErrInvalidEntityType - } - - lock.Lock() - defer lock.Unlock() - runtime.LockOSThread() - defer runtime.UnlockOSThread() - - err := ole.CoInitializeEx(0, ole.COINIT_MULTITHREADED) - if err != nil { - oleCode := err.(*ole.OleError).Code() - if oleCode != ole.S_OK && oleCode != S_FALSE { - return err - } - } - defer ole.CoUninitialize() - - unknown, err := oleutil.CreateObject("WbemScripting.SWbemLocator") - if err != nil { - return err - } else if unknown == nil { - return ErrNilCreateObject - } - defer unknown.Release() - - wmi, err := unknown.QueryInterface(ole.IID_IDispatch) - if err != nil { - return err - } - defer wmi.Release() - - // service is a SWbemServices - serviceRaw, err := oleutil.CallMethod(wmi, "ConnectServer", connectServerArgs...) 
- if err != nil { - return err - } - service := serviceRaw.ToIDispatch() - defer serviceRaw.Clear() - - // result is a SWBemObjectSet - resultRaw, err := oleutil.CallMethod(service, "ExecQuery", query) - if err != nil { - return err - } - result := resultRaw.ToIDispatch() - defer resultRaw.Clear() - - count, err := oleInt64(result, "Count") - if err != nil { - return err - } - - enumProperty, err := result.GetProperty("_NewEnum") - if err != nil { - return err - } - defer enumProperty.Clear() - - enum, err := enumProperty.ToIUnknown().IEnumVARIANT(ole.IID_IEnumVariant) - if err != nil { - return err - } - if enum == nil { - return fmt.Errorf("can't get IEnumVARIANT, enum is nil") - } - defer enum.Release() - - // Initialize a slice with Count capacity - dv.Set(reflect.MakeSlice(dv.Type(), 0, int(count))) - - var errFieldMismatch error - for itemRaw, length, err := enum.Next(1); length > 0; itemRaw, length, err = enum.Next(1) { - if err != nil { - return err - } - - err := func() error { - // item is a SWbemObject, but really a Win32_Process - item := itemRaw.ToIDispatch() - defer item.Release() - - ev := reflect.New(elemType) - if err = c.loadEntity(ev.Interface(), item); err != nil { - if _, ok := err.(*ErrFieldMismatch); ok { - // We continue loading entities even in the face of field mismatch errors. - // If we encounter any other error, that other error is returned. Otherwise, - // an ErrFieldMismatch is returned. - errFieldMismatch = err - } else { - return err - } - } - if mat != multiArgTypeStructPtr { - ev = ev.Elem() - } - dv.Set(reflect.Append(dv, ev)) - return nil - }() - if err != nil { - return err - } - } - return errFieldMismatch -} - -// ErrFieldMismatch is returned when a field is to be loaded into a different -// type than the one it was stored from, or when a field is missing or -// unexported in the destination struct. -// StructType is the type of the struct pointed to by the destination argument. 
-type ErrFieldMismatch struct { - StructType reflect.Type - FieldName string - Reason string -} - -func (e *ErrFieldMismatch) Error() string { - return fmt.Sprintf("wmi: cannot load field %q into a %q: %s", - e.FieldName, e.StructType, e.Reason) -} - -var timeType = reflect.TypeOf(time.Time{}) - -// loadEntity loads a SWbemObject into a struct pointer. -func (c *Client) loadEntity(dst interface{}, src *ole.IDispatch) (errFieldMismatch error) { - v := reflect.ValueOf(dst).Elem() - for i := 0; i < v.NumField(); i++ { - f := v.Field(i) - of := f - isPtr := f.Kind() == reflect.Ptr - if isPtr { - ptr := reflect.New(f.Type().Elem()) - f.Set(ptr) - f = f.Elem() - } - n := v.Type().Field(i).Name - if !f.CanSet() { - return &ErrFieldMismatch{ - StructType: of.Type(), - FieldName: n, - Reason: "CanSet() is false", - } - } - prop, err := oleutil.GetProperty(src, n) - if err != nil { - if !c.AllowMissingFields { - errFieldMismatch = &ErrFieldMismatch{ - StructType: of.Type(), - FieldName: n, - Reason: "no such struct field", - } - } - continue - } - defer prop.Clear() - - switch val := prop.Value().(type) { - case int8, int16, int32, int64, int: - v := reflect.ValueOf(val).Int() - switch f.Kind() { - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: - f.SetInt(v) - case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: - f.SetUint(uint64(v)) - default: - return &ErrFieldMismatch{ - StructType: of.Type(), - FieldName: n, - Reason: "not an integer class", - } - } - case uint8, uint16, uint32, uint64: - v := reflect.ValueOf(val).Uint() - switch f.Kind() { - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: - f.SetInt(int64(v)) - case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: - f.SetUint(v) - default: - return &ErrFieldMismatch{ - StructType: of.Type(), - FieldName: n, - Reason: "not an integer class", - } - } - case string: - switch f.Kind() { - case reflect.String: 
- f.SetString(val) - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: - iv, err := strconv.ParseInt(val, 10, 64) - if err != nil { - return err - } - f.SetInt(iv) - case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: - uv, err := strconv.ParseUint(val, 10, 64) - if err != nil { - return err - } - f.SetUint(uv) - case reflect.Struct: - switch f.Type() { - case timeType: - if len(val) == 25 { - mins, err := strconv.Atoi(val[22:]) - if err != nil { - return err - } - val = val[:22] + fmt.Sprintf("%02d%02d", mins/60, mins%60) - } - t, err := time.Parse("20060102150405.000000-0700", val) - if err != nil { - return err - } - f.Set(reflect.ValueOf(t)) - } - } - case bool: - switch f.Kind() { - case reflect.Bool: - f.SetBool(val) - default: - return &ErrFieldMismatch{ - StructType: of.Type(), - FieldName: n, - Reason: "not a bool", - } - } - case float32: - switch f.Kind() { - case reflect.Float32: - f.SetFloat(float64(val)) - default: - return &ErrFieldMismatch{ - StructType: of.Type(), - FieldName: n, - Reason: "not a Float32", - } - } - default: - if f.Kind() == reflect.Slice { - switch f.Type().Elem().Kind() { - case reflect.String: - safeArray := prop.ToArray() - if safeArray != nil { - arr := safeArray.ToValueArray() - fArr := reflect.MakeSlice(f.Type(), len(arr), len(arr)) - for i, v := range arr { - s := fArr.Index(i) - s.SetString(v.(string)) - } - f.Set(fArr) - } - case reflect.Uint8: - safeArray := prop.ToArray() - if safeArray != nil { - arr := safeArray.ToValueArray() - fArr := reflect.MakeSlice(f.Type(), len(arr), len(arr)) - for i, v := range arr { - s := fArr.Index(i) - s.SetUint(reflect.ValueOf(v).Uint()) - } - f.Set(fArr) - } - default: - return &ErrFieldMismatch{ - StructType: of.Type(), - FieldName: n, - Reason: fmt.Sprintf("unsupported slice type (%T)", val), - } - } - } else { - typeof := reflect.TypeOf(val) - if typeof == nil && (isPtr || c.NonePtrZero) { - if (isPtr && c.PtrNil) || (!isPtr && 
c.NonePtrZero) { - of.Set(reflect.Zero(of.Type())) - } - break - } - return &ErrFieldMismatch{ - StructType: of.Type(), - FieldName: n, - Reason: fmt.Sprintf("unsupported type (%T)", val), - } - } - } - } - return errFieldMismatch -} - -type multiArgType int - -const ( - multiArgTypeInvalid multiArgType = iota - multiArgTypeStruct - multiArgTypeStructPtr -) - -// checkMultiArg checks that v has type []S, []*S for some struct type S. -// -// It returns what category the slice's elements are, and the reflect.Type -// that represents S. -func checkMultiArg(v reflect.Value) (m multiArgType, elemType reflect.Type) { - if v.Kind() != reflect.Slice { - return multiArgTypeInvalid, nil - } - elemType = v.Type().Elem() - switch elemType.Kind() { - case reflect.Struct: - return multiArgTypeStruct, elemType - case reflect.Ptr: - elemType = elemType.Elem() - if elemType.Kind() == reflect.Struct { - return multiArgTypeStructPtr, elemType - } - } - return multiArgTypeInvalid, nil -} - -func oleInt64(item *ole.IDispatch, prop string) (int64, error) { - v, err := oleutil.GetProperty(item, prop) - if err != nil { - return 0, err - } - defer v.Clear() - - i := int64(v.Val) - return i, nil -} - -// CreateQuery returns a WQL query string that queries all columns of src. where -// is an optional string that is appended to the query, to be used with WHERE -// clauses. In such a case, the "WHERE" string should appear at the beginning. 
-func CreateQuery(src interface{}, where string) string { - var b bytes.Buffer - b.WriteString("SELECT ") - s := reflect.Indirect(reflect.ValueOf(src)) - t := s.Type() - if s.Kind() == reflect.Slice { - t = t.Elem() - } - if t.Kind() != reflect.Struct { - return "" - } - var fields []string - for i := 0; i < t.NumField(); i++ { - fields = append(fields, t.Field(i).Name) - } - b.WriteString(strings.Join(fields, ", ")) - b.WriteString(" FROM ") - b.WriteString(t.Name()) - b.WriteString(" " + where) - return b.String() -} diff --git a/vendor/github.com/VictoriaMetrics/fastcache/LICENSE b/vendor/github.com/VictoriaMetrics/fastcache/LICENSE deleted file mode 100644 index 9a8145e..0000000 --- a/vendor/github.com/VictoriaMetrics/fastcache/LICENSE +++ /dev/null @@ -1,22 +0,0 @@ -The MIT License (MIT) - -Copyright (c) 2018 VictoriaMetrics - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. 
- diff --git a/vendor/github.com/VictoriaMetrics/fastcache/README.md b/vendor/github.com/VictoriaMetrics/fastcache/README.md deleted file mode 100644 index b353214..0000000 --- a/vendor/github.com/VictoriaMetrics/fastcache/README.md +++ /dev/null @@ -1,116 +0,0 @@ -[![Build Status](https://github.com/VictoriaMetrics/fastcache/workflows/main/badge.svg)](https://github.com/VictoriaMetrics/fastcache/actions) -[![GoDoc](https://godoc.org/github.com/VictoriaMetrics/fastcache?status.svg)](http://godoc.org/github.com/VictoriaMetrics/fastcache) -[![Go Report](https://goreportcard.com/badge/github.com/VictoriaMetrics/fastcache)](https://goreportcard.com/report/github.com/VictoriaMetrics/fastcache) -[![codecov](https://codecov.io/gh/VictoriaMetrics/fastcache/branch/master/graph/badge.svg)](https://codecov.io/gh/VictoriaMetrics/fastcache) - -# fastcache - fast thread-safe inmemory cache for big number of entries in Go - -### Features - -* Fast. Performance scales on multi-core CPUs. See benchmark results below. -* Thread-safe. Concurrent goroutines may read and write into a single - cache instance. -* The fastcache is designed for storing big number of entries without - [GC overhead](https://syslog.ravelin.com/further-dangers-of-large-heaps-in-go-7a267b57d487). -* Fastcache automatically evicts old entries when reaching the maximum cache size - set on its creation. -* [Simple API](http://godoc.org/github.com/VictoriaMetrics/fastcache). -* Simple source code. -* Cache may be [saved to file](https://godoc.org/github.com/VictoriaMetrics/fastcache#Cache.SaveToFile) - and [loaded from file](https://godoc.org/github.com/VictoriaMetrics/fastcache#LoadFromFile). -* Works on [Google AppEngine](https://cloud.google.com/appengine/docs/go/). - - -### Benchmarks - -`Fastcache` performance is compared with [BigCache](https://github.com/allegro/bigcache), standard Go map -and [sync.Map](https://golang.org/pkg/sync/#Map). 
- -``` -GOMAXPROCS=4 go test github.com/VictoriaMetrics/fastcache -bench='Set|Get' -benchtime=10s -goos: linux -goarch: amd64 -pkg: github.com/VictoriaMetrics/fastcache -BenchmarkBigCacheSet-4 2000 10566656 ns/op 6.20 MB/s 4660369 B/op 6 allocs/op -BenchmarkBigCacheGet-4 2000 6902694 ns/op 9.49 MB/s 684169 B/op 131076 allocs/op -BenchmarkBigCacheSetGet-4 1000 17579118 ns/op 7.46 MB/s 5046744 B/op 131083 allocs/op -BenchmarkCacheSet-4 5000 3808874 ns/op 17.21 MB/s 1142 B/op 2 allocs/op -BenchmarkCacheGet-4 5000 3293849 ns/op 19.90 MB/s 1140 B/op 2 allocs/op -BenchmarkCacheSetGet-4 2000 8456061 ns/op 15.50 MB/s 2857 B/op 5 allocs/op -BenchmarkStdMapSet-4 2000 10559382 ns/op 6.21 MB/s 268413 B/op 65537 allocs/op -BenchmarkStdMapGet-4 5000 2687404 ns/op 24.39 MB/s 2558 B/op 13 allocs/op -BenchmarkStdMapSetGet-4 100 154641257 ns/op 0.85 MB/s 387405 B/op 65558 allocs/op -BenchmarkSyncMapSet-4 500 24703219 ns/op 2.65 MB/s 3426543 B/op 262411 allocs/op -BenchmarkSyncMapGet-4 5000 2265892 ns/op 28.92 MB/s 2545 B/op 79 allocs/op -BenchmarkSyncMapSetGet-4 1000 14595535 ns/op 8.98 MB/s 3417190 B/op 262277 allocs/op -``` - -`MB/s` column here actually means `millions of operations per second`. -As you can see, `fastcache` is faster than the `BigCache` in all the cases. -`fastcache` is faster than the standard Go map and `sync.Map` on workloads -with inserts. - - -### Limitations - -* Keys and values must be byte slices. Other types must be marshaled before - storing them in the cache. -* Big entries with sizes exceeding 64KB must be stored via [distinct API](http://godoc.org/github.com/VictoriaMetrics/fastcache#Cache.SetBig). -* There is no cache expiration. Entries are evicted from the cache only - on cache size overflow. Entry deadline may be stored inside the value in order - to implement cache expiration. 
- - -### Architecture details - -The cache uses ideas from [BigCache](https://github.com/allegro/bigcache): - -* The cache consists of many buckets, each with its own lock. - This helps scaling the performance on multi-core CPUs, since multiple - CPUs may concurrently access distinct buckets. -* Each bucket consists of a `hash(key) -> (key, value) position` map - and 64KB-sized byte slices (chunks) holding encoded `(key, value)` entries. - Each bucket contains only `O(chunksCount)` pointers. For instance, 64GB cache - would contain ~1M pointers, while similarly-sized `map[string][]byte` - would contain ~1B pointers for short keys and values. This would lead to - [huge GC overhead](https://syslog.ravelin.com/further-dangers-of-large-heaps-in-go-7a267b57d487). - -64KB-sized chunks reduce memory fragmentation and the total memory usage comparing -to a single big chunk per bucket. -Chunks are allocated off-heap if possible. This reduces total memory usage because -GC collects unused memory more frequently without the need in `GOGC` tweaking. - - -### Users - -* `Fastcache` has been extracted from [VictoriaMetrics](https://github.com/VictoriaMetrics/VictoriaMetrics) sources. - See [this article](https://medium.com/devopslinks/victoriametrics-creating-the-best-remote-storage-for-prometheus-5d92d66787ac) - for more info about `VictoriaMetrics`. - - -### FAQ - -#### What is the difference between `fastcache` and other similar caches like [BigCache](https://github.com/allegro/bigcache) or [FreeCache](https://github.com/coocood/freecache)? - -* `Fastcache` is faster. See benchmark results above. -* `Fastcache` uses less memory due to lower heap fragmentation. This allows - saving many GBs of memory on multi-GB caches. -* `Fastcache` API [is simpler](http://godoc.org/github.com/VictoriaMetrics/fastcache). - The API is designed to be used in zero-allocation mode. - - -#### Why `fastcache` doesn't support cache expiration? 
- -Because we don't need cache expiration in [VictoriaMetrics](https://github.com/VictoriaMetrics/VictoriaMetrics). -Cached entries inside `VictoriaMetrics` never expire. They are automatically evicted on cache size overflow. - -It is easy to implement cache expiration on top of `fastcache` by caching values -with marshaled deadlines and verifying deadlines after reading these values -from the cache. - - -#### Why `fastcache` doesn't support advanced features such as [thundering herd protection](https://en.wikipedia.org/wiki/Thundering_herd_problem) or callbacks on entries' eviction? - -Because these features would complicate the code and would make it slower. -`Fastcache` source code is simple - just copy-paste it and implement the feature you want -on top of it. diff --git a/vendor/github.com/VictoriaMetrics/fastcache/bigcache.go b/vendor/github.com/VictoriaMetrics/fastcache/bigcache.go deleted file mode 100644 index 7ca6f48..0000000 --- a/vendor/github.com/VictoriaMetrics/fastcache/bigcache.go +++ /dev/null @@ -1,152 +0,0 @@ -package fastcache - -import ( - "sync" - "sync/atomic" - - xxhash "github.com/cespare/xxhash/v2" -) - -// maxSubvalueLen is the maximum size of subvalue chunk. -// -// - 16 bytes are for subkey encoding -// - 4 bytes are for len(key)+len(value) encoding inside fastcache -// - 1 byte is implementation detail of fastcache -const maxSubvalueLen = chunkSize - 16 - 4 - 1 - -// maxKeyLen is the maximum size of key. -// -// - 16 bytes are for (hash + valueLen) -// - 4 bytes are for len(key)+len(subkey) -// - 1 byte is implementation detail of fastcache -const maxKeyLen = chunkSize - 16 - 4 - 1 - -// SetBig sets (k, v) to c where len(v) may exceed 64KB. -// -// GetBig must be used for reading stored values. -// -// The stored entry may be evicted at any time either due to cache -// overflow or due to unlikely hash collision. -// Pass higher maxBytes value to New if the added items disappear -// frequently. 
-// -// It is safe to store entries smaller than 64KB with SetBig. -// -// k and v contents may be modified after returning from SetBig. -func (c *Cache) SetBig(k, v []byte) { - atomic.AddUint64(&c.bigStats.SetBigCalls, 1) - if len(k) > maxKeyLen { - atomic.AddUint64(&c.bigStats.TooBigKeyErrors, 1) - return - } - valueLen := len(v) - valueHash := xxhash.Sum64(v) - - // Split v into chunks with up to 64Kb each. - subkey := getSubkeyBuf() - var i uint64 - for len(v) > 0 { - subkey.B = marshalUint64(subkey.B[:0], valueHash) - subkey.B = marshalUint64(subkey.B, uint64(i)) - i++ - subvalueLen := maxSubvalueLen - if len(v) < subvalueLen { - subvalueLen = len(v) - } - subvalue := v[:subvalueLen] - v = v[subvalueLen:] - c.Set(subkey.B, subvalue) - } - - // Write metavalue, which consists of valueHash and valueLen. - subkey.B = marshalUint64(subkey.B[:0], valueHash) - subkey.B = marshalUint64(subkey.B, uint64(valueLen)) - c.Set(k, subkey.B) - putSubkeyBuf(subkey) -} - -// GetBig searches for the value for the given k, appends it to dst -// and returns the result. -// -// GetBig returns only values stored via SetBig. It doesn't work -// with values stored via other methods. -// -// k contents may be modified after returning from GetBig. -func (c *Cache) GetBig(dst, k []byte) []byte { - atomic.AddUint64(&c.bigStats.GetBigCalls, 1) - subkey := getSubkeyBuf() - defer putSubkeyBuf(subkey) - - // Read and parse metavalue - subkey.B = c.Get(subkey.B[:0], k) - if len(subkey.B) == 0 { - // Nothing found. - return dst - } - if len(subkey.B) != 16 { - atomic.AddUint64(&c.bigStats.InvalidMetavalueErrors, 1) - return dst - } - valueHash := unmarshalUint64(subkey.B) - valueLen := unmarshalUint64(subkey.B[8:]) - - // Collect result from chunks. - dstLen := len(dst) - if n := dstLen + int(valueLen) - cap(dst); n > 0 { - dst = append(dst[:cap(dst)], make([]byte, n)...) 
- } - dst = dst[:dstLen] - var i uint64 - for uint64(len(dst)-dstLen) < valueLen { - subkey.B = marshalUint64(subkey.B[:0], valueHash) - subkey.B = marshalUint64(subkey.B, uint64(i)) - i++ - dstNew := c.Get(dst, subkey.B) - if len(dstNew) == len(dst) { - // Cannot find subvalue - return dst[:dstLen] - } - dst = dstNew - } - - // Verify the obtained value. - v := dst[dstLen:] - if uint64(len(v)) != valueLen { - atomic.AddUint64(&c.bigStats.InvalidValueLenErrors, 1) - return dst[:dstLen] - } - h := xxhash.Sum64(v) - if h != valueHash { - atomic.AddUint64(&c.bigStats.InvalidValueHashErrors, 1) - return dst[:dstLen] - } - return dst -} - -func getSubkeyBuf() *bytesBuf { - v := subkeyPool.Get() - if v == nil { - return &bytesBuf{} - } - return v.(*bytesBuf) -} - -func putSubkeyBuf(bb *bytesBuf) { - bb.B = bb.B[:0] - subkeyPool.Put(bb) -} - -var subkeyPool sync.Pool - -type bytesBuf struct { - B []byte -} - -func marshalUint64(dst []byte, u uint64) []byte { - return append(dst, byte(u>>56), byte(u>>48), byte(u>>40), byte(u>>32), byte(u>>24), byte(u>>16), byte(u>>8), byte(u)) -} - -func unmarshalUint64(src []byte) uint64 { - _ = src[7] - return uint64(src[0])<<56 | uint64(src[1])<<48 | uint64(src[2])<<40 | uint64(src[3])<<32 | uint64(src[4])<<24 | uint64(src[5])<<16 | uint64(src[6])<<8 | uint64(src[7]) -} diff --git a/vendor/github.com/VictoriaMetrics/fastcache/fastcache.go b/vendor/github.com/VictoriaMetrics/fastcache/fastcache.go deleted file mode 100644 index b83fd2a..0000000 --- a/vendor/github.com/VictoriaMetrics/fastcache/fastcache.go +++ /dev/null @@ -1,415 +0,0 @@ -// Package fastcache implements fast in-memory cache. 
-// -// The package has been extracted from https://victoriametrics.com/ -package fastcache - -import ( - "fmt" - "sync" - "sync/atomic" - - xxhash "github.com/cespare/xxhash/v2" -) - -const bucketsCount = 512 - -const chunkSize = 64 * 1024 - -const bucketSizeBits = 40 - -const genSizeBits = 64 - bucketSizeBits - -const maxGen = 1<= maxBucketSize { - panic(fmt.Errorf("too big maxBytes=%d; should be smaller than %d", maxBytes, maxBucketSize)) - } - maxChunks := (maxBytes + chunkSize - 1) / chunkSize - b.chunks = make([][]byte, maxChunks) - b.m = make(map[uint64]uint64) - b.Reset() -} - -func (b *bucket) Reset() { - b.mu.Lock() - chunks := b.chunks - for i := range chunks { - putChunk(chunks[i]) - chunks[i] = nil - } - bm := b.m - for k := range bm { - delete(bm, k) - } - b.idx = 0 - b.gen = 1 - atomic.StoreUint64(&b.getCalls, 0) - atomic.StoreUint64(&b.setCalls, 0) - atomic.StoreUint64(&b.misses, 0) - atomic.StoreUint64(&b.collisions, 0) - atomic.StoreUint64(&b.corruptions, 0) - b.mu.Unlock() -} - -func (b *bucket) Clean() { - b.mu.Lock() - bGen := b.gen & ((1 << genSizeBits) - 1) - bIdx := b.idx - bm := b.m - for k, v := range bm { - gen := v >> bucketSizeBits - idx := v & ((1 << bucketSizeBits) - 1) - if gen == bGen && idx < bIdx || gen+1 == bGen && idx >= bIdx || gen == maxGen && bGen == 1 && idx >= bIdx { - continue - } - delete(bm, k) - } - b.mu.Unlock() -} - -func (b *bucket) UpdateStats(s *Stats) { - s.GetCalls += atomic.LoadUint64(&b.getCalls) - s.SetCalls += atomic.LoadUint64(&b.setCalls) - s.Misses += atomic.LoadUint64(&b.misses) - s.Collisions += atomic.LoadUint64(&b.collisions) - s.Corruptions += atomic.LoadUint64(&b.corruptions) - - b.mu.RLock() - s.EntriesCount += uint64(len(b.m)) - for _, chunk := range b.chunks { - s.BytesSize += uint64(cap(chunk)) - } - b.mu.RUnlock() -} - -func (b *bucket) Set(k, v []byte, h uint64) { - setCalls := atomic.AddUint64(&b.setCalls, 1) - if setCalls%(1<<14) == 0 { - b.Clean() - } - - if len(k) >= (1<<16) || len(v) >= 
(1<<16) { - // Too big key or value - its length cannot be encoded - // with 2 bytes (see below). Skip the entry. - return - } - var kvLenBuf [4]byte - kvLenBuf[0] = byte(uint16(len(k)) >> 8) - kvLenBuf[1] = byte(len(k)) - kvLenBuf[2] = byte(uint16(len(v)) >> 8) - kvLenBuf[3] = byte(len(v)) - kvLen := uint64(len(kvLenBuf) + len(k) + len(v)) - if kvLen >= chunkSize { - // Do not store too big keys and values, since they do not - // fit a chunk. - return - } - - b.mu.Lock() - idx := b.idx - idxNew := idx + kvLen - chunkIdx := idx / chunkSize - chunkIdxNew := idxNew / chunkSize - if chunkIdxNew > chunkIdx { - if chunkIdxNew >= uint64(len(b.chunks)) { - idx = 0 - idxNew = kvLen - chunkIdx = 0 - b.gen++ - if b.gen&((1< 0 { - gen := v >> bucketSizeBits - idx := v & ((1 << bucketSizeBits) - 1) - if gen == bGen && idx < b.idx || gen+1 == bGen && idx >= b.idx || gen == maxGen && bGen == 1 && idx >= b.idx { - chunkIdx := idx / chunkSize - if chunkIdx >= uint64(len(b.chunks)) { - // Corrupted data during the load from file. Just skip it. - atomic.AddUint64(&b.corruptions, 1) - goto end - } - chunk := b.chunks[chunkIdx] - idx %= chunkSize - if idx+4 >= chunkSize { - // Corrupted data during the load from file. Just skip it. - atomic.AddUint64(&b.corruptions, 1) - goto end - } - kvLenBuf := chunk[idx : idx+4] - keyLen := (uint64(kvLenBuf[0]) << 8) | uint64(kvLenBuf[1]) - valLen := (uint64(kvLenBuf[2]) << 8) | uint64(kvLenBuf[3]) - idx += 4 - if idx+keyLen+valLen >= chunkSize { - // Corrupted data during the load from file. Just skip it. - atomic.AddUint64(&b.corruptions, 1) - goto end - } - if string(k) == string(chunk[idx:idx+keyLen]) { - idx += keyLen - if returnDst { - dst = append(dst, chunk[idx:idx+valLen]...) 
- } - found = true - } else { - atomic.AddUint64(&b.collisions, 1) - } - } - } -end: - b.mu.RUnlock() - if !found { - atomic.AddUint64(&b.misses, 1) - } - return dst, found -} - -func (b *bucket) Del(h uint64) { - b.mu.Lock() - delete(b.m, h) - b.mu.Unlock() -} diff --git a/vendor/github.com/VictoriaMetrics/fastcache/file.go b/vendor/github.com/VictoriaMetrics/fastcache/file.go deleted file mode 100644 index de1bb07..0000000 --- a/vendor/github.com/VictoriaMetrics/fastcache/file.go +++ /dev/null @@ -1,400 +0,0 @@ -package fastcache - -import ( - "encoding/binary" - "fmt" - "io" - "io/ioutil" - "os" - "path/filepath" - "regexp" - "runtime" - - "github.com/golang/snappy" -) - -// SaveToFile atomically saves cache data to the given filePath using a single -// CPU core. -// -// SaveToFile may be called concurrently with other operations on the cache. -// -// The saved data may be loaded with LoadFromFile*. -// -// See also SaveToFileConcurrent for faster saving to file. -func (c *Cache) SaveToFile(filePath string) error { - return c.SaveToFileConcurrent(filePath, 1) -} - -// SaveToFileConcurrent saves cache data to the given filePath using concurrency -// CPU cores. -// -// SaveToFileConcurrent may be called concurrently with other operations -// on the cache. -// -// The saved data may be loaded with LoadFromFile*. -// -// See also SaveToFile. -func (c *Cache) SaveToFileConcurrent(filePath string, concurrency int) error { - // Create dir if it doesn't exist. - dir := filepath.Dir(filePath) - if _, err := os.Stat(dir); err != nil { - if !os.IsNotExist(err) { - return fmt.Errorf("cannot stat %q: %s", dir, err) - } - if err := os.MkdirAll(dir, 0755); err != nil { - return fmt.Errorf("cannot create dir %q: %s", dir, err) - } - } - - // Save cache data into a temporary directory. 
- tmpDir, err := ioutil.TempDir(dir, "fastcache.tmp.") - if err != nil { - return fmt.Errorf("cannot create temporary dir inside %q: %s", dir, err) - } - defer func() { - if tmpDir != "" { - _ = os.RemoveAll(tmpDir) - } - }() - gomaxprocs := runtime.GOMAXPROCS(-1) - if concurrency <= 0 || concurrency > gomaxprocs { - concurrency = gomaxprocs - } - if err := c.save(tmpDir, concurrency); err != nil { - return fmt.Errorf("cannot save cache data to temporary dir %q: %s", tmpDir, err) - } - - // Remove old filePath contents, since os.Rename may return - // error if filePath dir exists. - if err := os.RemoveAll(filePath); err != nil { - return fmt.Errorf("cannot remove old contents at %q: %s", filePath, err) - } - if err := os.Rename(tmpDir, filePath); err != nil { - return fmt.Errorf("cannot move temporary dir %q to %q: %s", tmpDir, filePath, err) - } - tmpDir = "" - return nil -} - -// LoadFromFile loads cache data from the given filePath. -// -// See SaveToFile* for saving cache data to file. -func LoadFromFile(filePath string) (*Cache, error) { - return load(filePath, 0) -} - -// LoadFromFileOrNew tries loading cache data from the given filePath. -// -// The function falls back to creating new cache with the given maxBytes -// capacity if error occurs during loading the cache from file. -func LoadFromFileOrNew(filePath string, maxBytes int) *Cache { - c, err := load(filePath, maxBytes) - if err == nil { - return c - } - return New(maxBytes) -} - -func (c *Cache) save(dir string, workersCount int) error { - if err := saveMetadata(c, dir); err != nil { - return err - } - - // Save buckets by workersCount concurrent workers. - workCh := make(chan int, workersCount) - results := make(chan error) - for i := 0; i < workersCount; i++ { - go func(workerNum int) { - results <- saveBuckets(c.buckets[:], workCh, dir, workerNum) - }(i) - } - // Feed workers with work - for i := range c.buckets[:] { - workCh <- i - } - close(workCh) - - // Read results. 
- var err error - for i := 0; i < workersCount; i++ { - result := <-results - if result != nil && err != nil { - err = result - } - } - return err -} - -func load(filePath string, maxBytes int) (*Cache, error) { - maxBucketChunks, err := loadMetadata(filePath) - if err != nil { - return nil, err - } - if maxBytes > 0 { - maxBucketBytes := uint64((maxBytes + bucketsCount - 1) / bucketsCount) - expectedBucketChunks := (maxBucketBytes + chunkSize - 1) / chunkSize - if maxBucketChunks != expectedBucketChunks { - return nil, fmt.Errorf("cache file %s contains maxBytes=%d; want %d", filePath, maxBytes, expectedBucketChunks*chunkSize*bucketsCount) - } - } - - // Read bucket files from filePath dir. - d, err := os.Open(filePath) - if err != nil { - return nil, fmt.Errorf("cannot open %q: %s", filePath, err) - } - defer func() { - _ = d.Close() - }() - fis, err := d.Readdir(-1) - if err != nil { - return nil, fmt.Errorf("cannot read files from %q: %s", filePath, err) - } - results := make(chan error) - workersCount := 0 - var c Cache - for _, fi := range fis { - fn := fi.Name() - if fi.IsDir() || !dataFileRegexp.MatchString(fn) { - continue - } - workersCount++ - go func(dataPath string) { - results <- loadBuckets(c.buckets[:], dataPath, maxBucketChunks) - }(filePath + "/" + fn) - } - err = nil - for i := 0; i < workersCount; i++ { - result := <-results - if result != nil && err == nil { - err = result - } - } - if err != nil { - return nil, err - } - return &c, nil -} - -func saveMetadata(c *Cache, dir string) error { - metadataPath := dir + "/metadata.bin" - metadataFile, err := os.Create(metadataPath) - if err != nil { - return fmt.Errorf("cannot create %q: %s", metadataPath, err) - } - defer func() { - _ = metadataFile.Close() - }() - maxBucketChunks := uint64(cap(c.buckets[0].chunks)) - if err := writeUint64(metadataFile, maxBucketChunks); err != nil { - return fmt.Errorf("cannot write maxBucketChunks=%d to %q: %s", maxBucketChunks, metadataPath, err) - } - return nil 
-} - -func loadMetadata(dir string) (uint64, error) { - metadataPath := dir + "/metadata.bin" - metadataFile, err := os.Open(metadataPath) - if err != nil { - return 0, fmt.Errorf("cannot open %q: %s", metadataPath, err) - } - defer func() { - _ = metadataFile.Close() - }() - maxBucketChunks, err := readUint64(metadataFile) - if err != nil { - return 0, fmt.Errorf("cannot read maxBucketChunks from %q: %s", metadataPath, err) - } - return maxBucketChunks, nil -} - -var dataFileRegexp = regexp.MustCompile(`^data\.\d+\.bin$`) - -func saveBuckets(buckets []bucket, workCh <-chan int, dir string, workerNum int) error { - dataPath := fmt.Sprintf("%s/data.%d.bin", dir, workerNum) - dataFile, err := os.Create(dataPath) - if err != nil { - return fmt.Errorf("cannot create %q: %s", dataPath, err) - } - defer func() { - _ = dataFile.Close() - }() - zw := snappy.NewBufferedWriter(dataFile) - for bucketNum := range workCh { - if err := writeUint64(zw, uint64(bucketNum)); err != nil { - return fmt.Errorf("cannot write bucketNum=%d to %q: %s", bucketNum, dataPath, err) - } - if err := buckets[bucketNum].Save(zw); err != nil { - return fmt.Errorf("cannot save bucket[%d] to %q: %s", bucketNum, dataPath, err) - } - } - if err := zw.Close(); err != nil { - return fmt.Errorf("cannot close snappy.Writer for %q: %s", dataPath, err) - } - return nil -} - -func loadBuckets(buckets []bucket, dataPath string, maxChunks uint64) error { - dataFile, err := os.Open(dataPath) - if err != nil { - return fmt.Errorf("cannot open %q: %s", dataPath, err) - } - defer func() { - _ = dataFile.Close() - }() - zr := snappy.NewReader(dataFile) - for { - bucketNum, err := readUint64(zr) - if err == io.EOF { - // Reached the end of file. 
- return nil - } - if bucketNum >= uint64(len(buckets)) { - return fmt.Errorf("unexpected bucketNum read from %q: %d; must be smaller than %d", dataPath, bucketNum, len(buckets)) - } - if err := buckets[bucketNum].Load(zr, maxChunks); err != nil { - return fmt.Errorf("cannot load bucket[%d] from %q: %s", bucketNum, dataPath, err) - } - } -} - -func (b *bucket) Save(w io.Writer) error { - b.Clean() - - b.mu.RLock() - defer b.mu.RUnlock() - - // Store b.idx, b.gen and b.m to w. - - bIdx := b.idx - bGen := b.gen - chunksLen := 0 - for _, chunk := range b.chunks { - if chunk == nil { - break - } - chunksLen++ - } - kvs := make([]byte, 0, 2*8*len(b.m)) - var u64Buf [8]byte - for k, v := range b.m { - binary.LittleEndian.PutUint64(u64Buf[:], k) - kvs = append(kvs, u64Buf[:]...) - binary.LittleEndian.PutUint64(u64Buf[:], v) - kvs = append(kvs, u64Buf[:]...) - } - - if err := writeUint64(w, bIdx); err != nil { - return fmt.Errorf("cannot write b.idx: %s", err) - } - if err := writeUint64(w, bGen); err != nil { - return fmt.Errorf("cannot write b.gen: %s", err) - } - if err := writeUint64(w, uint64(len(kvs))/2/8); err != nil { - return fmt.Errorf("cannot write len(b.m): %s", err) - } - if _, err := w.Write(kvs); err != nil { - return fmt.Errorf("cannot write b.m: %s", err) - } - - // Store b.chunks to w. 
- if err := writeUint64(w, uint64(chunksLen)); err != nil { - return fmt.Errorf("cannot write len(b.chunks): %s", err) - } - for chunkIdx := 0; chunkIdx < chunksLen; chunkIdx++ { - chunk := b.chunks[chunkIdx][:chunkSize] - if _, err := w.Write(chunk); err != nil { - return fmt.Errorf("cannot write b.chunks[%d]: %s", chunkIdx, err) - } - } - - return nil -} - -func (b *bucket) Load(r io.Reader, maxChunks uint64) error { - if maxChunks == 0 { - return fmt.Errorf("the number of chunks per bucket cannot be zero") - } - bIdx, err := readUint64(r) - if err != nil { - return fmt.Errorf("cannot read b.idx: %s", err) - } - bGen, err := readUint64(r) - if err != nil { - return fmt.Errorf("cannot read b.gen: %s", err) - } - kvsLen, err := readUint64(r) - if err != nil { - return fmt.Errorf("cannot read len(b.m): %s", err) - } - kvsLen *= 2 * 8 - kvs := make([]byte, kvsLen) - if _, err := io.ReadFull(r, kvs); err != nil { - return fmt.Errorf("cannot read b.m: %s", err) - } - m := make(map[uint64]uint64, kvsLen/2/8) - for len(kvs) > 0 { - k := binary.LittleEndian.Uint64(kvs) - kvs = kvs[8:] - v := binary.LittleEndian.Uint64(kvs) - kvs = kvs[8:] - m[k] = v - } - - maxBytes := maxChunks * chunkSize - if maxBytes >= maxBucketSize { - return fmt.Errorf("too big maxBytes=%d; should be smaller than %d", maxBytes, maxBucketSize) - } - chunks := make([][]byte, maxChunks) - chunksLen, err := readUint64(r) - if err != nil { - return fmt.Errorf("cannot read len(b.chunks): %s", err) - } - if chunksLen > uint64(maxChunks) { - return fmt.Errorf("chunksLen=%d cannot exceed maxChunks=%d", chunksLen, maxChunks) - } - currChunkIdx := bIdx / chunkSize - if currChunkIdx > 0 && currChunkIdx >= chunksLen { - return fmt.Errorf("too big bIdx=%d; should be smaller than %d", bIdx, chunksLen*chunkSize) - } - for chunkIdx := uint64(0); chunkIdx < chunksLen; chunkIdx++ { - chunk := getChunk() - if _, err := io.ReadFull(r, chunk); err != nil { - return fmt.Errorf("cannot read b.chunks[%d]: %s", chunkIdx, 
err) - } - chunks[chunkIdx] = chunk - } - // Adjust len for the chunk pointed by currChunkIdx. - if chunksLen > 0 { - chunkLen := bIdx % chunkSize - chunks[currChunkIdx] = chunks[currChunkIdx][:chunkLen] - } - - b.mu.Lock() - for _, chunk := range b.chunks { - putChunk(chunk) - } - b.chunks = chunks - b.m = m - b.idx = bIdx - b.gen = bGen - b.mu.Unlock() - - return nil -} - -func writeUint64(w io.Writer, u uint64) error { - var u64Buf [8]byte - binary.LittleEndian.PutUint64(u64Buf[:], u) - _, err := w.Write(u64Buf[:]) - return err -} - -func readUint64(r io.Reader) (uint64, error) { - var u64Buf [8]byte - if _, err := io.ReadFull(r, u64Buf[:]); err != nil { - return 0, err - } - u := binary.LittleEndian.Uint64(u64Buf[:]) - return u, nil -} diff --git a/vendor/github.com/VictoriaMetrics/fastcache/go.mod b/vendor/github.com/VictoriaMetrics/fastcache/go.mod deleted file mode 100644 index 9b0693f..0000000 --- a/vendor/github.com/VictoriaMetrics/fastcache/go.mod +++ /dev/null @@ -1,11 +0,0 @@ -module github.com/VictoriaMetrics/fastcache - -require ( - github.com/OneOfOne/xxhash v1.2.5 // indirect - github.com/allegro/bigcache v1.2.1-0.20190218064605-e24eb225f156 - github.com/cespare/xxhash/v2 v2.0.1-0.20190104013014-3767db7a7e18 - github.com/davecgh/go-spew v1.1.1 // indirect - github.com/golang/snappy v0.0.1 - github.com/spaolacci/murmur3 v1.0.1-0.20190317074736-539464a789e9 // indirect - github.com/stretchr/testify v1.3.0 // indirect -) diff --git a/vendor/github.com/VictoriaMetrics/fastcache/go.sum b/vendor/github.com/VictoriaMetrics/fastcache/go.sum deleted file mode 100644 index 3ff9edb..0000000 --- a/vendor/github.com/VictoriaMetrics/fastcache/go.sum +++ /dev/null @@ -1,24 +0,0 @@ -github.com/OneOfOne/xxhash v1.2.2 h1:KMrpdQIwFcEqXDklaen+P1axHaj9BSKzvpUUfnHldSE= -github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU= -github.com/OneOfOne/xxhash v1.2.5 h1:zl/OfRA6nftbBK9qTohYBJ5xvw6C/oNKizR7cZGl3cI= -github.com/OneOfOne/xxhash 
v1.2.5/go.mod h1:eZbhyaAYD41SGSSsnmcpxVoRiQ/MPUTjUdIIOT9Um7Q= -github.com/allegro/bigcache v1.2.1-0.20190218064605-e24eb225f156 h1:eMwmnE/GDgah4HI848JfFxHt+iPb26b4zyfspmqY0/8= -github.com/allegro/bigcache v1.2.1-0.20190218064605-e24eb225f156/go.mod h1:Cb/ax3seSYIx7SuZdm2G2xzfwmv3TPSk2ucNfQESPXM= -github.com/cespare/xxhash v1.1.0 h1:a6HrQnmkObjyL+Gs60czilIUGqrzKutQD6XZog3p+ko= -github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= -github.com/cespare/xxhash/v2 v2.0.1-0.20190104013014-3767db7a7e18 h1:pl4eWIqvFe/Kg3zkn7NxevNzILnZYWDCG7qbA1CJik0= -github.com/cespare/xxhash/v2 v2.0.1-0.20190104013014-3767db7a7e18/go.mod h1:HD5P3vAIAh+Y2GAxg0PrPN1P8WkepXGpjbUPDHJqqKM= -github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= -github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/golang/snappy v0.0.1 h1:Qgr9rKW7uDUkrbSmQeiDsGa8SjGyCOGtuasMWwvp2P4= -github.com/golang/snappy v0.0.1/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= -github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= -github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= -github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72 h1:qLC7fQah7D6K1B0ujays3HV9gkFtllcxhzImRR7ArPQ= -github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= -github.com/spaolacci/murmur3 v1.0.1-0.20190317074736-539464a789e9 h1:5Cp3cVwpQP4aCQ6jx6dNLP3IarbYiuStmIzYu+BjQwY= -github.com/spaolacci/murmur3 v1.0.1-0.20190317074736-539464a789e9/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= -github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= -github.com/stretchr/testify v1.3.0 h1:TivCn/peBQ7UY8ooIcPgZFpTNSz0Q2U6UrFlUfqbe0Q= -github.com/stretchr/testify 
v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= diff --git a/vendor/github.com/VictoriaMetrics/fastcache/malloc_heap.go b/vendor/github.com/VictoriaMetrics/fastcache/malloc_heap.go deleted file mode 100644 index 79a7183..0000000 --- a/vendor/github.com/VictoriaMetrics/fastcache/malloc_heap.go +++ /dev/null @@ -1,11 +0,0 @@ -// +build appengine windows - -package fastcache - -func getChunk() []byte { - return make([]byte, chunkSize) -} - -func putChunk(chunk []byte) { - // No-op. -} diff --git a/vendor/github.com/VictoriaMetrics/fastcache/malloc_mmap.go b/vendor/github.com/VictoriaMetrics/fastcache/malloc_mmap.go deleted file mode 100644 index 424b79b..0000000 --- a/vendor/github.com/VictoriaMetrics/fastcache/malloc_mmap.go +++ /dev/null @@ -1,52 +0,0 @@ -// +build !appengine,!windows - -package fastcache - -import ( - "fmt" - "sync" - "syscall" - "unsafe" -) - -const chunksPerAlloc = 1024 - -var ( - freeChunks []*[chunkSize]byte - freeChunksLock sync.Mutex -) - -func getChunk() []byte { - freeChunksLock.Lock() - if len(freeChunks) == 0 { - // Allocate offheap memory, so GOGC won't take into account cache size. - // This should reduce free memory waste. 
- data, err := syscall.Mmap(-1, 0, chunkSize*chunksPerAlloc, syscall.PROT_READ|syscall.PROT_WRITE, syscall.MAP_ANON|syscall.MAP_PRIVATE) - if err != nil { - panic(fmt.Errorf("cannot allocate %d bytes via mmap: %s", chunkSize*chunksPerAlloc, err)) - } - for len(data) > 0 { - p := (*[chunkSize]byte)(unsafe.Pointer(&data[0])) - freeChunks = append(freeChunks, p) - data = data[chunkSize:] - } - } - n := len(freeChunks) - 1 - p := freeChunks[n] - freeChunks[n] = nil - freeChunks = freeChunks[:n] - freeChunksLock.Unlock() - return p[:] -} - -func putChunk(chunk []byte) { - if chunk == nil { - return - } - chunk = chunk[:chunkSize] - p := (*[chunkSize]byte)(unsafe.Pointer(&chunk[0])) - - freeChunksLock.Lock() - freeChunks = append(freeChunks, p) - freeChunksLock.Unlock() -} diff --git a/vendor/github.com/aristanetworks/goarista/AUTHORS b/vendor/github.com/aristanetworks/goarista/AUTHORS deleted file mode 100644 index 5bb93cb..0000000 --- a/vendor/github.com/aristanetworks/goarista/AUTHORS +++ /dev/null @@ -1,25 +0,0 @@ -All contributors are required to sign a "Contributor License Agreement" at - - -The following organizations and people have contributed code to this library. -(Please keep both lists sorted alphabetically.) - - -Arista Networks, Inc. - - -Benoit Sigoure -Fabrice Rabaute - - - -The list of individual contributors for code currently in HEAD can be obtained -at any time with the following script: - -find . 
-type f \ -| while read i; do \ - git blame -t $i 2>/dev/null; \ - done \ -| sed 's/^[0-9a-f]\{8\} [^(]*(\([^)]*\) [-+0-9 ]\{14,\}).*/\1/;s/ *$//' \ -| awk '{a[$0]++; t++} END{for(n in a) print n}' \ -| sort diff --git a/vendor/github.com/aristanetworks/goarista/COPYING b/vendor/github.com/aristanetworks/goarista/COPYING deleted file mode 100644 index f433b1a..0000000 --- a/vendor/github.com/aristanetworks/goarista/COPYING +++ /dev/null @@ -1,177 +0,0 @@ - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. 
- - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. 
Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. 
You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. 
- - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. 
In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS diff --git a/vendor/github.com/aristanetworks/goarista/monotime/issue15006.s b/vendor/github.com/aristanetworks/goarista/monotime/issue15006.s deleted file mode 100644 index 66109f4..0000000 --- a/vendor/github.com/aristanetworks/goarista/monotime/issue15006.s +++ /dev/null @@ -1,6 +0,0 @@ -// Copyright (C) 2016 Arista Networks, Inc. -// Use of this source code is governed by the Apache License 2.0 -// that can be found in the COPYING file. - -// This file is intentionally empty. 
-// It's a workaround for https://github.com/golang/go/issues/15006 diff --git a/vendor/github.com/aristanetworks/goarista/monotime/nanotime.go b/vendor/github.com/aristanetworks/goarista/monotime/nanotime.go deleted file mode 100644 index 5f5fbc7..0000000 --- a/vendor/github.com/aristanetworks/goarista/monotime/nanotime.go +++ /dev/null @@ -1,31 +0,0 @@ -// Copyright (C) 2016 Arista Networks, Inc. -// Use of this source code is governed by the Apache License 2.0 -// that can be found in the COPYING file. - -// Package monotime provides a fast monotonic clock source. -package monotime - -import ( - "time" - _ "unsafe" // required to use //go:linkname -) - -//go:noescape -//go:linkname nanotime runtime.nanotime -func nanotime() int64 - -// Now returns the current time in nanoseconds from a monotonic clock. -// The time returned is based on some arbitrary platform-specific point in the -// past. The time returned is guaranteed to increase monotonically at a -// constant rate, unlike time.Now() from the Go standard library, which may -// slow down, speed up, jump forward or backward, due to NTP activity or leap -// seconds. -func Now() uint64 { - return uint64(nanotime()) -} - -// Since returns the amount of time that has elapsed since t. t should be -// the result of a call to Now() on the same machine. 
-func Since(t uint64) time.Duration { - return time.Duration(Now() - t) -} diff --git a/vendor/github.com/beorn7/perks/LICENSE b/vendor/github.com/beorn7/perks/LICENSE deleted file mode 100644 index 339177b..0000000 --- a/vendor/github.com/beorn7/perks/LICENSE +++ /dev/null @@ -1,20 +0,0 @@ -Copyright (C) 2013 Blake Mizerany - -Permission is hereby granted, free of charge, to any person obtaining -a copy of this software and associated documentation files (the -"Software"), to deal in the Software without restriction, including -without limitation the rights to use, copy, modify, merge, publish, -distribute, sublicense, and/or sell copies of the Software, and to -permit persons to whom the Software is furnished to do so, subject to -the following conditions: - -The above copyright notice and this permission notice shall be -included in all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, -EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF -MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND -NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE -LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION -OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION -WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
diff --git a/vendor/github.com/beorn7/perks/quantile/exampledata.txt b/vendor/github.com/beorn7/perks/quantile/exampledata.txt deleted file mode 100644 index 1602287..0000000 --- a/vendor/github.com/beorn7/perks/quantile/exampledata.txt +++ /dev/null @@ -1,2388 +0,0 @@ -8 -5 -26 -12 -5 -235 -13 -6 -28 -30 -3 -3 -3 -3 -5 -2 -33 -7 -2 -4 -7 -12 -14 -5 -8 -3 -10 -4 -5 -3 -6 -6 -209 -20 -3 -10 -14 -3 -4 -6 -8 -5 -11 -7 -3 -2 -3 -3 -212 -5 -222 -4 -10 -10 -5 -6 -3 -8 -3 -10 -254 -220 -2 -3 -5 -24 -5 -4 -222 -7 -3 -3 -223 -8 -15 -12 -14 -14 -3 -2 -2 -3 -13 -3 -11 -4 -4 -6 -5 -7 -13 -5 -3 -5 -2 -5 -3 -5 -2 -7 -15 -17 -14 -3 -6 -6 -3 -17 -5 -4 -7 -6 -4 -4 -8 -6 -8 -3 -9 -3 -6 -3 -4 -5 -3 -3 -660 -4 -6 -10 -3 -6 -3 -2 -5 -13 -2 -4 -4 -10 -4 -8 -4 -3 -7 -9 -9 -3 -10 -37 -3 -13 -4 -12 -3 -6 -10 -8 -5 -21 -2 -3 -8 -3 -2 -3 -3 -4 -12 -2 -4 -8 -8 -4 -3 -2 -20 -1 -6 -32 -2 -11 -6 -18 -3 -8 -11 -3 -212 -3 -4 -2 -6 -7 -12 -11 -3 -2 -16 -10 -6 -4 -6 -3 -2 -7 -3 -2 -2 -2 -2 -5 -6 -4 -3 -10 -3 -4 -6 -5 -3 -4 -4 -5 -6 -4 -3 -4 -4 -5 -7 -5 -5 -3 -2 -7 -2 -4 -12 -4 -5 -6 -2 -4 -4 -8 -4 -15 -13 -7 -16 -5 -3 -23 -5 -5 -7 -3 -2 -9 -8 -7 -5 -8 -11 -4 -10 -76 -4 -47 -4 -3 -2 -7 -4 -2 -3 -37 -10 -4 -2 -20 -5 -4 -4 -10 -10 -4 -3 -7 -23 -240 -7 -13 -5 -5 -3 -3 -2 -5 -4 -2 -8 -7 -19 -2 -23 -8 -7 -2 -5 -3 -8 -3 -8 -13 -5 -5 -5 -2 -3 -23 -4 -9 -8 -4 -3 -3 -5 -220 -2 -3 -4 -6 -14 -3 -53 -6 -2 -5 -18 -6 -3 -219 -6 -5 -2 -5 -3 -6 -5 -15 -4 -3 -17 -3 -2 -4 -7 -2 -3 -3 -4 -4 -3 -2 -664 -6 -3 -23 -5 -5 -16 -5 -8 -2 -4 -2 -24 -12 -3 -2 -3 -5 -8 -3 -5 -4 -3 -14 -3 -5 -8 -2 -3 -7 -9 -4 -2 -3 -6 -8 -4 -3 -4 -6 -5 -3 -3 -6 -3 -19 -4 -4 -6 -3 -6 -3 -5 -22 -5 -4 -4 -3 -8 -11 -4 -9 -7 -6 -13 -4 -4 -4 -6 -17 -9 -3 -3 -3 -4 -3 -221 -5 -11 -3 -4 -2 -12 -6 -3 -5 -7 -5 -7 -4 -9 -7 -14 -37 -19 -217 -16 -3 -5 -2 -2 -7 -19 -7 -6 -7 -4 -24 -5 -11 -4 -7 -7 -9 -13 -3 -4 -3 -6 -28 -4 -4 -5 -5 -2 -5 -6 -4 -4 -6 -10 -5 -4 -3 -2 -3 -3 -6 -5 -5 -4 -3 -2 -3 -7 -4 -6 -18 -16 -8 -16 -4 -5 -8 -6 -9 -13 -1545 -6 -215 -6 -5 -6 -3 -45 
-31 -5 -2 -2 -4 -3 -3 -2 -5 -4 -3 -5 -7 -7 -4 -5 -8 -5 -4 -749 -2 -31 -9 -11 -2 -11 -5 -4 -4 -7 -9 -11 -4 -5 -4 -7 -3 -4 -6 -2 -15 -3 -4 -3 -4 -3 -5 -2 -13 -5 -5 -3 -3 -23 -4 -4 -5 -7 -4 -13 -2 -4 -3 -4 -2 -6 -2 -7 -3 -5 -5 -3 -29 -5 -4 -4 -3 -10 -2 -3 -79 -16 -6 -6 -7 -7 -3 -5 -5 -7 -4 -3 -7 -9 -5 -6 -5 -9 -6 -3 -6 -4 -17 -2 -10 -9 -3 -6 -2 -3 -21 -22 -5 -11 -4 -2 -17 -2 -224 -2 -14 -3 -4 -4 -2 -4 -4 -4 -4 -5 -3 -4 -4 -10 -2 -6 -3 -3 -5 -7 -2 -7 -5 -6 -3 -218 -2 -2 -5 -2 -6 -3 -5 -222 -14 -6 -33 -3 -2 -5 -3 -3 -3 -9 -5 -3 -3 -2 -7 -4 -3 -4 -3 -5 -6 -5 -26 -4 -13 -9 -7 -3 -221 -3 -3 -4 -4 -4 -4 -2 -18 -5 -3 -7 -9 -6 -8 -3 -10 -3 -11 -9 -5 -4 -17 -5 -5 -6 -6 -3 -2 -4 -12 -17 -6 -7 -218 -4 -2 -4 -10 -3 -5 -15 -3 -9 -4 -3 -3 -6 -29 -3 -3 -4 -5 -5 -3 -8 -5 -6 -6 -7 -5 -3 -5 -3 -29 -2 -31 -5 -15 -24 -16 -5 -207 -4 -3 -3 -2 -15 -4 -4 -13 -5 -5 -4 -6 -10 -2 -7 -8 -4 -6 -20 -5 -3 -4 -3 -12 -12 -5 -17 -7 -3 -3 -3 -6 -10 -3 -5 -25 -80 -4 -9 -3 -2 -11 -3 -3 -2 -3 -8 -7 -5 -5 -19 -5 -3 -3 -12 -11 -2 -6 -5 -5 -5 -3 -3 -3 -4 -209 -14 -3 -2 -5 -19 -4 -4 -3 -4 -14 -5 -6 -4 -13 -9 -7 -4 -7 -10 -2 -9 -5 -7 -2 -8 -4 -6 -5 -5 -222 -8 -7 -12 -5 -216 -3 -4 -4 -6 -3 -14 -8 -7 -13 -4 -3 -3 -3 -3 -17 -5 -4 -3 -33 -6 -6 -33 -7 -5 -3 -8 -7 -5 -2 -9 -4 -2 -233 -24 -7 -4 -8 -10 -3 -4 -15 -2 -16 -3 -3 -13 -12 -7 -5 -4 -207 -4 -2 -4 -27 -15 -2 -5 -2 -25 -6 -5 -5 -6 -13 -6 -18 -6 -4 -12 -225 -10 -7 -5 -2 -2 -11 -4 -14 -21 -8 -10 -3 -5 -4 -232 -2 -5 -5 -3 -7 -17 -11 -6 -6 -23 -4 -6 -3 -5 -4 -2 -17 -3 -6 -5 -8 -3 -2 -2 -14 -9 -4 -4 -2 -5 -5 -3 -7 -6 -12 -6 -10 -3 -6 -2 -2 -19 -5 -4 -4 -9 -2 -4 -13 -3 -5 -6 -3 -6 -5 -4 -9 -6 -3 -5 -7 -3 -6 -6 -4 -3 -10 -6 -3 -221 -3 -5 -3 -6 -4 -8 -5 -3 -6 -4 -4 -2 -54 -5 -6 -11 -3 -3 -4 -4 -4 -3 -7 -3 -11 -11 -7 -10 -6 -13 -223 -213 -15 -231 -7 -3 -7 -228 -2 -3 -4 -4 -5 -6 -7 -4 -13 -3 -4 -5 -3 -6 -4 -6 -7 -2 -4 -3 -4 -3 -3 -6 -3 -7 -3 -5 -18 -5 -6 -8 -10 -3 -3 -3 -2 -4 -2 -4 -4 -5 -6 -6 -4 -10 -13 -3 -12 -5 -12 -16 -8 -4 -19 -11 -2 -4 -5 -6 -8 -5 -6 -4 -18 -10 -4 
-2 -216 -6 -6 -6 -2 -4 -12 -8 -3 -11 -5 -6 -14 -5 -3 -13 -4 -5 -4 -5 -3 -28 -6 -3 -7 -219 -3 -9 -7 -3 -10 -6 -3 -4 -19 -5 -7 -11 -6 -15 -19 -4 -13 -11 -3 -7 -5 -10 -2 -8 -11 -2 -6 -4 -6 -24 -6 -3 -3 -3 -3 -6 -18 -4 -11 -4 -2 -5 -10 -8 -3 -9 -5 -3 -4 -5 -6 -2 -5 -7 -4 -4 -14 -6 -4 -4 -5 -5 -7 -2 -4 -3 -7 -3 -3 -6 -4 -5 -4 -4 -4 -3 -3 -3 -3 -8 -14 -2 -3 -5 -3 -2 -4 -5 -3 -7 -3 -3 -18 -3 -4 -4 -5 -7 -3 -3 -3 -13 -5 -4 -8 -211 -5 -5 -3 -5 -2 -5 -4 -2 -655 -6 -3 -5 -11 -2 -5 -3 -12 -9 -15 -11 -5 -12 -217 -2 -6 -17 -3 -3 -207 -5 -5 -4 -5 -9 -3 -2 -8 -5 -4 -3 -2 -5 -12 -4 -14 -5 -4 -2 -13 -5 -8 -4 -225 -4 -3 -4 -5 -4 -3 -3 -6 -23 -9 -2 -6 -7 -233 -4 -4 -6 -18 -3 -4 -6 -3 -4 -4 -2 -3 -7 -4 -13 -227 -4 -3 -5 -4 -2 -12 -9 -17 -3 -7 -14 -6 -4 -5 -21 -4 -8 -9 -2 -9 -25 -16 -3 -6 -4 -7 -8 -5 -2 -3 -5 -4 -3 -3 -5 -3 -3 -3 -2 -3 -19 -2 -4 -3 -4 -2 -3 -4 -4 -2 -4 -3 -3 -3 -2 -6 -3 -17 -5 -6 -4 -3 -13 -5 -3 -3 -3 -4 -9 -4 -2 -14 -12 -4 -5 -24 -4 -3 -37 -12 -11 -21 -3 -4 -3 -13 -4 -2 -3 -15 -4 -11 -4 -4 -3 -8 -3 -4 -4 -12 -8 -5 -3 -3 -4 -2 -220 -3 -5 -223 -3 -3 -3 -10 -3 -15 -4 -241 -9 -7 -3 -6 -6 -23 -4 -13 -7 -3 -4 -7 -4 -9 -3 -3 -4 -10 -5 -5 -1 -5 -24 -2 -4 -5 -5 -6 -14 -3 -8 -2 -3 -5 -13 -13 -3 -5 -2 -3 -15 -3 -4 -2 -10 -4 -4 -4 -5 -5 -3 -5 -3 -4 -7 -4 -27 -3 -6 -4 -15 -3 -5 -6 -6 -5 -4 -8 -3 -9 -2 -6 -3 -4 -3 -7 -4 -18 -3 -11 -3 -3 -8 -9 -7 -24 -3 -219 -7 -10 -4 -5 -9 -12 -2 -5 -4 -4 -4 -3 -3 -19 -5 -8 -16 -8 -6 -22 -3 -23 -3 -242 -9 -4 -3 -3 -5 -7 -3 -3 -5 -8 -3 -7 -5 -14 -8 -10 -3 -4 -3 -7 -4 -6 -7 -4 -10 -4 -3 -11 -3 -7 -10 -3 -13 -6 -8 -12 -10 -5 -7 -9 -3 -4 -7 -7 -10 -8 -30 -9 -19 -4 -3 -19 -15 -4 -13 -3 -215 -223 -4 -7 -4 -8 -17 -16 -3 -7 -6 -5 -5 -4 -12 -3 -7 -4 -4 -13 -4 -5 -2 -5 -6 -5 -6 -6 -7 -10 -18 -23 -9 -3 -3 -6 -5 -2 -4 -2 -7 -3 -3 -2 -5 -5 -14 -10 -224 -6 -3 -4 -3 -7 -5 -9 -3 -6 -4 -2 -5 -11 -4 -3 -3 -2 -8 -4 -7 -4 -10 -7 -3 -3 -18 -18 -17 -3 -3 -3 -4 -5 -3 -3 -4 -12 -7 -3 -11 -13 -5 -4 -7 -13 -5 -4 -11 -3 -12 -3 -6 -4 -4 -21 -4 -6 -9 -5 -3 -10 -8 -4 -6 -4 -4 -6 
-5 -4 -8 -6 -4 -6 -4 -4 -5 -9 -6 -3 -4 -2 -9 -3 -18 -2 -4 -3 -13 -3 -6 -6 -8 -7 -9 -3 -2 -16 -3 -4 -6 -3 -2 -33 -22 -14 -4 -9 -12 -4 -5 -6 -3 -23 -9 -4 -3 -5 -5 -3 -4 -5 -3 -5 -3 -10 -4 -5 -5 -8 -4 -4 -6 -8 -5 -4 -3 -4 -6 -3 -3 -3 -5 -9 -12 -6 -5 -9 -3 -5 -3 -2 -2 -2 -18 -3 -2 -21 -2 -5 -4 -6 -4 -5 -10 -3 -9 -3 -2 -10 -7 -3 -6 -6 -4 -4 -8 -12 -7 -3 -7 -3 -3 -9 -3 -4 -5 -4 -4 -5 -5 -10 -15 -4 -4 -14 -6 -227 -3 -14 -5 -216 -22 -5 -4 -2 -2 -6 -3 -4 -2 -9 -9 -4 -3 -28 -13 -11 -4 -5 -3 -3 -2 -3 -3 -5 -3 -4 -3 -5 -23 -26 -3 -4 -5 -6 -4 -6 -3 -5 -5 -3 -4 -3 -2 -2 -2 -7 -14 -3 -6 -7 -17 -2 -2 -15 -14 -16 -4 -6 -7 -13 -6 -4 -5 -6 -16 -3 -3 -28 -3 -6 -15 -3 -9 -2 -4 -6 -3 -3 -22 -4 -12 -6 -7 -2 -5 -4 -10 -3 -16 -6 -9 -2 -5 -12 -7 -5 -5 -5 -5 -2 -11 -9 -17 -4 -3 -11 -7 -3 -5 -15 -4 -3 -4 -211 -8 -7 -5 -4 -7 -6 -7 -6 -3 -6 -5 -6 -5 -3 -4 -4 -26 -4 -6 -10 -4 -4 -3 -2 -3 -3 -4 -5 -9 -3 -9 -4 -4 -5 -5 -8 -2 -4 -2 -3 -8 -4 -11 -19 -5 -8 -6 -3 -5 -6 -12 -3 -2 -4 -16 -12 -3 -4 -4 -8 -6 -5 -6 -6 -219 -8 -222 -6 -16 -3 -13 -19 -5 -4 -3 -11 -6 -10 -4 -7 -7 -12 -5 -3 -3 -5 -6 -10 -3 -8 -2 -5 -4 -7 -2 -4 -4 -2 -12 -9 -6 -4 -2 -40 -2 -4 -10 -4 -223 -4 -2 -20 -6 -7 -24 -5 -4 -5 -2 -20 -16 -6 -5 -13 -2 -3 -3 -19 -3 -2 -4 -5 -6 -7 -11 -12 -5 -6 -7 -7 -3 -5 -3 -5 -3 -14 -3 -4 -4 -2 -11 -1 -7 -3 -9 -6 -11 -12 -5 -8 -6 -221 -4 -2 -12 -4 -3 -15 -4 -5 -226 -7 -218 -7 -5 -4 -5 -18 -4 -5 -9 -4 -4 -2 -9 -18 -18 -9 -5 -6 -6 -3 -3 -7 -3 -5 -4 -4 -4 -12 -3 -6 -31 -5 -4 -7 -3 -6 -5 -6 -5 -11 -2 -2 -11 -11 -6 -7 -5 -8 -7 -10 -5 -23 -7 -4 -3 -5 -34 -2 -5 -23 -7 -3 -6 -8 -4 -4 -4 -2 -5 -3 -8 -5 -4 -8 -25 -2 -3 -17 -8 -3 -4 -8 -7 -3 -15 -6 -5 -7 -21 -9 -5 -6 -6 -5 -3 -2 -3 -10 -3 -6 -3 -14 -7 -4 -4 -8 -7 -8 -2 -6 -12 -4 -213 -6 -5 -21 -8 -2 -5 -23 -3 -11 -2 -3 -6 -25 -2 -3 -6 -7 -6 -6 -4 -4 -6 -3 -17 -9 -7 -6 -4 -3 -10 -7 -2 -3 -3 -3 -11 -8 -3 -7 -6 -4 -14 -36 -3 -4 -3 -3 -22 -13 -21 -4 -2 -7 -4 -4 -17 -15 -3 -7 -11 -2 -4 -7 -6 -209 -6 -3 -2 -2 -24 -4 -9 -4 -3 -3 -3 -29 -2 -2 -4 -3 -3 -5 -4 -6 -3 -3 -2 -4 
diff --git a/vendor/github.com/beorn7/perks/quantile/stream.go b/vendor/github.com/beorn7/perks/quantile/stream.go deleted file mode 100644 index d7d14f8..0000000 --- a/vendor/github.com/beorn7/perks/quantile/stream.go +++ /dev/null @@ -1,316 +0,0 @@ -// Package quantile computes approximate quantiles over an unbounded data -// stream within low memory and CPU bounds. -// -// A small amount of accuracy is traded to achieve the above properties. -// -// Multiple streams can be merged before calling Query to generate a single set -// of results. This is meaningful when the streams represent the same type of -// data. See Merge and Samples. -// -// For more detailed information about the algorithm used, see: -// -// Effective Computation of Biased Quantiles over Data Streams -// -// http://www.cs.rutgers.edu/~muthu/bquant.pdf -package quantile - -import ( - "math" - "sort" -) - -// Sample holds an observed value and meta information for compression. JSON -// tags have been added for convenience. -type Sample struct { - Value float64 `json:",string"` - Width float64 `json:",string"` - Delta float64 `json:",string"` -} - -// Samples represents a slice of samples. It implements sort.Interface. -type Samples []Sample - -func (a Samples) Len() int { return len(a) } -func (a Samples) Less(i, j int) bool { return a[i].Value < a[j].Value } -func (a Samples) Swap(i, j int) { a[i], a[j] = a[j], a[i] } - -type invariant func(s *stream, r float64) float64 - -// NewLowBiased returns an initialized Stream for low-biased quantiles -// (e.g. 0.01, 0.1, 0.5) where the needed quantiles are not known a priori, but -// error guarantees can still be given even for the lower ranks of the data -// distribution. -// -// The provided epsilon is a relative error, i.e. the true quantile of a value -// returned by a query is guaranteed to be within (1±Epsilon)*Quantile. -// -// See http://www.cs.rutgers.edu/~muthu/bquant.pdf for time, space, and error -// properties. 
-func NewLowBiased(epsilon float64) *Stream { - ƒ := func(s *stream, r float64) float64 { - return 2 * epsilon * r - } - return newStream(ƒ) -} - -// NewHighBiased returns an initialized Stream for high-biased quantiles -// (e.g. 0.01, 0.1, 0.5) where the needed quantiles are not known a priori, but -// error guarantees can still be given even for the higher ranks of the data -// distribution. -// -// The provided epsilon is a relative error, i.e. the true quantile of a value -// returned by a query is guaranteed to be within 1-(1±Epsilon)*(1-Quantile). -// -// See http://www.cs.rutgers.edu/~muthu/bquant.pdf for time, space, and error -// properties. -func NewHighBiased(epsilon float64) *Stream { - ƒ := func(s *stream, r float64) float64 { - return 2 * epsilon * (s.n - r) - } - return newStream(ƒ) -} - -// NewTargeted returns an initialized Stream concerned with a particular set of -// quantile values that are supplied a priori. Knowing these a priori reduces -// space and computation time. The targets map maps the desired quantiles to -// their absolute errors, i.e. the true quantile of a value returned by a query -// is guaranteed to be within (Quantile±Epsilon). -// -// See http://www.cs.rutgers.edu/~muthu/bquant.pdf for time, space, and error properties. -func NewTargeted(targetMap map[float64]float64) *Stream { - // Convert map to slice to avoid slow iterations on a map. - // ƒ is called on the hot path, so converting the map to a slice - // beforehand results in significant CPU savings. 
- targets := targetMapToSlice(targetMap) - - ƒ := func(s *stream, r float64) float64 { - var m = math.MaxFloat64 - var f float64 - for _, t := range targets { - if t.quantile*s.n <= r { - f = (2 * t.epsilon * r) / t.quantile - } else { - f = (2 * t.epsilon * (s.n - r)) / (1 - t.quantile) - } - if f < m { - m = f - } - } - return m - } - return newStream(ƒ) -} - -type target struct { - quantile float64 - epsilon float64 -} - -func targetMapToSlice(targetMap map[float64]float64) []target { - targets := make([]target, 0, len(targetMap)) - - for quantile, epsilon := range targetMap { - t := target{ - quantile: quantile, - epsilon: epsilon, - } - targets = append(targets, t) - } - - return targets -} - -// Stream computes quantiles for a stream of float64s. It is not thread-safe by -// design. Take care when using across multiple goroutines. -type Stream struct { - *stream - b Samples - sorted bool -} - -func newStream(ƒ invariant) *Stream { - x := &stream{ƒ: ƒ} - return &Stream{x, make(Samples, 0, 500), true} -} - -// Insert inserts v into the stream. -func (s *Stream) Insert(v float64) { - s.insert(Sample{Value: v, Width: 1}) -} - -func (s *Stream) insert(sample Sample) { - s.b = append(s.b, sample) - s.sorted = false - if len(s.b) == cap(s.b) { - s.flush() - } -} - -// Query returns the computed qth percentiles value. If s was created with -// NewTargeted, and q is not in the set of quantiles provided a priori, Query -// will return an unspecified result. -func (s *Stream) Query(q float64) float64 { - if !s.flushed() { - // Fast path when there hasn't been enough data for a flush; - // this also yields better accuracy for small sets of data. - l := len(s.b) - if l == 0 { - return 0 - } - i := int(math.Ceil(float64(l) * q)) - if i > 0 { - i -= 1 - } - s.maybeSort() - return s.b[i].Value - } - s.flush() - return s.stream.query(q) -} - -// Merge merges samples into the underlying streams samples. 
This is handy when -// merging multiple streams from separate threads, database shards, etc. -// -// ATTENTION: This method is broken and does not yield correct results. The -// underlying algorithm is not capable of merging streams correctly. -func (s *Stream) Merge(samples Samples) { - sort.Sort(samples) - s.stream.merge(samples) -} - -// Reset reinitializes and clears the list reusing the samples buffer memory. -func (s *Stream) Reset() { - s.stream.reset() - s.b = s.b[:0] -} - -// Samples returns stream samples held by s. -func (s *Stream) Samples() Samples { - if !s.flushed() { - return s.b - } - s.flush() - return s.stream.samples() -} - -// Count returns the total number of samples observed in the stream -// since initialization. -func (s *Stream) Count() int { - return len(s.b) + s.stream.count() -} - -func (s *Stream) flush() { - s.maybeSort() - s.stream.merge(s.b) - s.b = s.b[:0] -} - -func (s *Stream) maybeSort() { - if !s.sorted { - s.sorted = true - sort.Sort(s.b) - } -} - -func (s *Stream) flushed() bool { - return len(s.stream.l) > 0 -} - -type stream struct { - n float64 - l []Sample - ƒ invariant -} - -func (s *stream) reset() { - s.l = s.l[:0] - s.n = 0 -} - -func (s *stream) insert(v float64) { - s.merge(Samples{{v, 1, 0}}) -} - -func (s *stream) merge(samples Samples) { - // TODO(beorn7): This tries to merge not only individual samples, but - // whole summaries. The paper doesn't mention merging summaries at - // all. Unittests show that the merging is inaccurate. Find out how to - // do merges properly. - var r float64 - i := 0 - for _, sample := range samples { - for ; i < len(s.l); i++ { - c := s.l[i] - if c.Value > sample.Value { - // Insert at position i. - s.l = append(s.l, Sample{}) - copy(s.l[i+1:], s.l[i:]) - s.l[i] = Sample{ - sample.Value, - sample.Width, - math.Max(sample.Delta, math.Floor(s.ƒ(s, r))-1), - // TODO(beorn7): How to calculate delta correctly? 
- } - i++ - goto inserted - } - r += c.Width - } - s.l = append(s.l, Sample{sample.Value, sample.Width, 0}) - i++ - inserted: - s.n += sample.Width - r += sample.Width - } - s.compress() -} - -func (s *stream) count() int { - return int(s.n) -} - -func (s *stream) query(q float64) float64 { - t := math.Ceil(q * s.n) - t += math.Ceil(s.ƒ(s, t) / 2) - p := s.l[0] - var r float64 - for _, c := range s.l[1:] { - r += p.Width - if r+c.Width+c.Delta > t { - return p.Value - } - p = c - } - return p.Value -} - -func (s *stream) compress() { - if len(s.l) < 2 { - return - } - x := s.l[len(s.l)-1] - xi := len(s.l) - 1 - r := s.n - 1 - x.Width - - for i := len(s.l) - 2; i >= 0; i-- { - c := s.l[i] - if c.Width+x.Width+x.Delta <= s.ƒ(s, r) { - x.Width += c.Width - s.l[xi] = x - // Remove element at i. - copy(s.l[i:], s.l[i+1:]) - s.l = s.l[:len(s.l)-1] - xi -= 1 - } else { - x = c - xi = i - } - r -= c.Width - } -} - -func (s *stream) samples() Samples { - samples := make(Samples, len(s.l)) - copy(samples, s.l) - return samples -} diff --git a/vendor/github.com/blang/semver/.travis.yml b/vendor/github.com/blang/semver/.travis.yml deleted file mode 100644 index 102fb9a..0000000 --- a/vendor/github.com/blang/semver/.travis.yml +++ /dev/null @@ -1,21 +0,0 @@ -language: go -matrix: - include: - - go: 1.4.3 - - go: 1.5.4 - - go: 1.6.3 - - go: 1.7 - - go: tip - allow_failures: - - go: tip -install: -- go get golang.org/x/tools/cmd/cover -- go get github.com/mattn/goveralls -script: -- echo "Test and track coverage" ; $HOME/gopath/bin/goveralls -package "." -service=travis-ci - -repotoken $COVERALLS_TOKEN -- echo "Build examples" ; cd examples && go build -- echo "Check if gofmt'd" ; diff -u <(echo -n) <(gofmt -d -s .) 
-env: - global: - secure: HroGEAUQpVq9zX1b1VIkraLiywhGbzvNnTZq2TMxgK7JHP8xqNplAeF1izrR2i4QLL9nsY+9WtYss4QuPvEtZcVHUobw6XnL6radF7jS1LgfYZ9Y7oF+zogZ2I5QUMRLGA7rcxQ05s7mKq3XZQfeqaNts4bms/eZRefWuaFZbkw= diff --git a/vendor/github.com/blang/semver/LICENSE b/vendor/github.com/blang/semver/LICENSE deleted file mode 100644 index 5ba5c86..0000000 --- a/vendor/github.com/blang/semver/LICENSE +++ /dev/null @@ -1,22 +0,0 @@ -The MIT License - -Copyright (c) 2014 Benedikt Lang - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in -all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -THE SOFTWARE. 
- diff --git a/vendor/github.com/blang/semver/README.md b/vendor/github.com/blang/semver/README.md deleted file mode 100644 index 08b2e4a..0000000 --- a/vendor/github.com/blang/semver/README.md +++ /dev/null @@ -1,194 +0,0 @@ -semver for golang [![Build Status](https://travis-ci.org/blang/semver.svg?branch=master)](https://travis-ci.org/blang/semver) [![GoDoc](https://godoc.org/github.com/blang/semver?status.png)](https://godoc.org/github.com/blang/semver) [![Coverage Status](https://img.shields.io/coveralls/blang/semver.svg)](https://coveralls.io/r/blang/semver?branch=master) -====== - -semver is a [Semantic Versioning](http://semver.org/) library written in golang. It fully covers spec version `2.0.0`. - -Usage ------ -```bash -$ go get github.com/blang/semver -``` -Note: Always vendor your dependencies or fix on a specific version tag. - -```go -import github.com/blang/semver -v1, err := semver.Make("1.0.0-beta") -v2, err := semver.Make("2.0.0-beta") -v1.Compare(v2) -``` - -Also check the [GoDocs](http://godoc.org/github.com/blang/semver). - -Why should I use this lib? ------ - -- Fully spec compatible -- No reflection -- No regex -- Fully tested (Coverage >99%) -- Readable parsing/validation errors -- Fast (See [Benchmarks](#benchmarks)) -- Only Stdlib -- Uses values instead of pointers -- Many features, see below - - -Features ------ - -- Parsing and validation at all levels -- Comparator-like comparisons -- Compare Helper Methods -- InPlace manipulation -- Ranges `>=1.0.0 <2.0.0 || >=3.0.0 !3.0.1-beta.1` -- Wildcards `>=1.x`, `<=2.5.x` -- Sortable (implements sort.Interface) -- database/sql compatible (sql.Scanner/Valuer) -- encoding/json compatible (json.Marshaler/Unmarshaler) - -Ranges ------- - -A `Range` is a set of conditions which specify which versions satisfy the range. - -A condition is composed of an operator and a version. 
The supported operators are: - -- `<1.0.0` Less than `1.0.0` -- `<=1.0.0` Less than or equal to `1.0.0` -- `>1.0.0` Greater than `1.0.0` -- `>=1.0.0` Greater than or equal to `1.0.0` -- `1.0.0`, `=1.0.0`, `==1.0.0` Equal to `1.0.0` -- `!1.0.0`, `!=1.0.0` Not equal to `1.0.0`. Excludes version `1.0.0`. - -Note that spaces between the operator and the version will be gracefully tolerated. - -A `Range` can link multiple `Ranges` separated by space: - -Ranges can be linked by logical AND: - - - `>1.0.0 <2.0.0` would match between both ranges, so `1.1.1` and `1.8.7` but not `1.0.0` or `2.0.0` - - `>1.0.0 <3.0.0 !2.0.3-beta.2` would match every version between `1.0.0` and `3.0.0` except `2.0.3-beta.2` - -Ranges can also be linked by logical OR: - - - `<2.0.0 || >=3.0.0` would match `1.x.x` and `3.x.x` but not `2.x.x` - -AND has a higher precedence than OR. It's not possible to use brackets. - -Ranges can be combined by both AND and OR - - - `>1.0.0 <2.0.0 || >3.0.0 !4.2.1` would match `1.2.3`, `1.9.9`, `3.1.1`, but not `4.2.1`, `2.1.1` - -Range usage: - -``` -v, err := semver.Parse("1.2.3") -range, err := semver.ParseRange(">1.0.0 <2.0.0 || >=3.0.0") -if range(v) { - //valid -} - -``` - -Example ------ - -Have a look at full examples in [examples/main.go](examples/main.go) - -```go -import github.com/blang/semver - -v, err := semver.Make("0.0.1-alpha.preview+123.github") -fmt.Printf("Major: %d\n", v.Major) -fmt.Printf("Minor: %d\n", v.Minor) -fmt.Printf("Patch: %d\n", v.Patch) -fmt.Printf("Pre: %s\n", v.Pre) -fmt.Printf("Build: %s\n", v.Build) - -// Prerelease versions array -if len(v.Pre) > 0 { - fmt.Println("Prerelease versions:") - for i, pre := range v.Pre { - fmt.Printf("%d: %q\n", i, pre) - } -} - -// Build meta data array -if len(v.Build) > 0 { - fmt.Println("Build meta data:") - for i, build := range v.Build { - fmt.Printf("%d: %q\n", i, build) - } -} - -v001, err := semver.Make("0.0.1") -// Compare using helpers: v.GT(v2), v.LT, v.GTE, v.LTE -v001.GT(v) == true 
-v.LT(v001) == true -v.GTE(v) == true -v.LTE(v) == true - -// Or use v.Compare(v2) for comparisons (-1, 0, 1): -v001.Compare(v) == 1 -v.Compare(v001) == -1 -v.Compare(v) == 0 - -// Manipulate Version in place: -v.Pre[0], err = semver.NewPRVersion("beta") -if err != nil { - fmt.Printf("Error parsing pre release version: %q", err) -} - -fmt.Println("\nValidate versions:") -v.Build[0] = "?" - -err = v.Validate() -if err != nil { - fmt.Printf("Validation failed: %s\n", err) -} -``` - - -Benchmarks ------ - - BenchmarkParseSimple-4 5000000 390 ns/op 48 B/op 1 allocs/op - BenchmarkParseComplex-4 1000000 1813 ns/op 256 B/op 7 allocs/op - BenchmarkParseAverage-4 1000000 1171 ns/op 163 B/op 4 allocs/op - BenchmarkStringSimple-4 20000000 119 ns/op 16 B/op 1 allocs/op - BenchmarkStringLarger-4 10000000 206 ns/op 32 B/op 2 allocs/op - BenchmarkStringComplex-4 5000000 324 ns/op 80 B/op 3 allocs/op - BenchmarkStringAverage-4 5000000 273 ns/op 53 B/op 2 allocs/op - BenchmarkValidateSimple-4 200000000 9.33 ns/op 0 B/op 0 allocs/op - BenchmarkValidateComplex-4 3000000 469 ns/op 0 B/op 0 allocs/op - BenchmarkValidateAverage-4 5000000 256 ns/op 0 B/op 0 allocs/op - BenchmarkCompareSimple-4 100000000 11.8 ns/op 0 B/op 0 allocs/op - BenchmarkCompareComplex-4 50000000 30.8 ns/op 0 B/op 0 allocs/op - BenchmarkCompareAverage-4 30000000 41.5 ns/op 0 B/op 0 allocs/op - BenchmarkSort-4 3000000 419 ns/op 256 B/op 2 allocs/op - BenchmarkRangeParseSimple-4 2000000 850 ns/op 192 B/op 5 allocs/op - BenchmarkRangeParseAverage-4 1000000 1677 ns/op 400 B/op 10 allocs/op - BenchmarkRangeParseComplex-4 300000 5214 ns/op 1440 B/op 30 allocs/op - BenchmarkRangeMatchSimple-4 50000000 25.6 ns/op 0 B/op 0 allocs/op - BenchmarkRangeMatchAverage-4 30000000 56.4 ns/op 0 B/op 0 allocs/op - BenchmarkRangeMatchComplex-4 10000000 153 ns/op 0 B/op 0 allocs/op - -See benchmark cases at [semver_test.go](semver_test.go) - - -Motivation ------ - -I simply couldn't find any lib supporting the full spec. 
Others were just wrong or used reflection and regex which i don't like. - - -Contribution ------ - -Feel free to make a pull request. For bigger changes create a issue first to discuss about it. - - -License ------ - -See [LICENSE](LICENSE) file. diff --git a/vendor/github.com/blang/semver/json.go b/vendor/github.com/blang/semver/json.go deleted file mode 100644 index a74bf7c..0000000 --- a/vendor/github.com/blang/semver/json.go +++ /dev/null @@ -1,23 +0,0 @@ -package semver - -import ( - "encoding/json" -) - -// MarshalJSON implements the encoding/json.Marshaler interface. -func (v Version) MarshalJSON() ([]byte, error) { - return json.Marshal(v.String()) -} - -// UnmarshalJSON implements the encoding/json.Unmarshaler interface. -func (v *Version) UnmarshalJSON(data []byte) (err error) { - var versionString string - - if err = json.Unmarshal(data, &versionString); err != nil { - return - } - - *v, err = Parse(versionString) - - return -} diff --git a/vendor/github.com/blang/semver/package.json b/vendor/github.com/blang/semver/package.json deleted file mode 100644 index 1cf8ebd..0000000 --- a/vendor/github.com/blang/semver/package.json +++ /dev/null @@ -1,17 +0,0 @@ -{ - "author": "blang", - "bugs": { - "URL": "https://github.com/blang/semver/issues", - "url": "https://github.com/blang/semver/issues" - }, - "gx": { - "dvcsimport": "github.com/blang/semver" - }, - "gxVersion": "0.10.0", - "language": "go", - "license": "MIT", - "name": "semver", - "releaseCmd": "git commit -a -m \"gx publish $VERSION\"", - "version": "3.5.1" -} - diff --git a/vendor/github.com/blang/semver/range.go b/vendor/github.com/blang/semver/range.go deleted file mode 100644 index fca406d..0000000 --- a/vendor/github.com/blang/semver/range.go +++ /dev/null @@ -1,416 +0,0 @@ -package semver - -import ( - "fmt" - "strconv" - "strings" - "unicode" -) - -type wildcardType int - -const ( - noneWildcard wildcardType = iota - majorWildcard wildcardType = 1 - minorWildcard wildcardType = 2 - 
patchWildcard wildcardType = 3 -) - -func wildcardTypefromInt(i int) wildcardType { - switch i { - case 1: - return majorWildcard - case 2: - return minorWildcard - case 3: - return patchWildcard - default: - return noneWildcard - } -} - -type comparator func(Version, Version) bool - -var ( - compEQ comparator = func(v1 Version, v2 Version) bool { - return v1.Compare(v2) == 0 - } - compNE = func(v1 Version, v2 Version) bool { - return v1.Compare(v2) != 0 - } - compGT = func(v1 Version, v2 Version) bool { - return v1.Compare(v2) == 1 - } - compGE = func(v1 Version, v2 Version) bool { - return v1.Compare(v2) >= 0 - } - compLT = func(v1 Version, v2 Version) bool { - return v1.Compare(v2) == -1 - } - compLE = func(v1 Version, v2 Version) bool { - return v1.Compare(v2) <= 0 - } -) - -type versionRange struct { - v Version - c comparator -} - -// rangeFunc creates a Range from the given versionRange. -func (vr *versionRange) rangeFunc() Range { - return Range(func(v Version) bool { - return vr.c(v, vr.v) - }) -} - -// Range represents a range of versions. -// A Range can be used to check if a Version satisfies it: -// -// range, err := semver.ParseRange(">1.0.0 <2.0.0") -// range(semver.MustParse("1.1.1") // returns true -type Range func(Version) bool - -// OR combines the existing Range with another Range using logical OR. -func (rf Range) OR(f Range) Range { - return Range(func(v Version) bool { - return rf(v) || f(v) - }) -} - -// AND combines the existing Range with another Range using logical AND. -func (rf Range) AND(f Range) Range { - return Range(func(v Version) bool { - return rf(v) && f(v) - }) -} - -// ParseRange parses a range and returns a Range. -// If the range could not be parsed an error is returned. 
-// -// Valid ranges are: -// - "<1.0.0" -// - "<=1.0.0" -// - ">1.0.0" -// - ">=1.0.0" -// - "1.0.0", "=1.0.0", "==1.0.0" -// - "!1.0.0", "!=1.0.0" -// -// A Range can consist of multiple ranges separated by space: -// Ranges can be linked by logical AND: -// - ">1.0.0 <2.0.0" would match between both ranges, so "1.1.1" and "1.8.7" but not "1.0.0" or "2.0.0" -// - ">1.0.0 <3.0.0 !2.0.3-beta.2" would match every version between 1.0.0 and 3.0.0 except 2.0.3-beta.2 -// -// Ranges can also be linked by logical OR: -// - "<2.0.0 || >=3.0.0" would match "1.x.x" and "3.x.x" but not "2.x.x" -// -// AND has a higher precedence than OR. It's not possible to use brackets. -// -// Ranges can be combined by both AND and OR -// -// - `>1.0.0 <2.0.0 || >3.0.0 !4.2.1` would match `1.2.3`, `1.9.9`, `3.1.1`, but not `4.2.1`, `2.1.1` -func ParseRange(s string) (Range, error) { - parts := splitAndTrim(s) - orParts, err := splitORParts(parts) - if err != nil { - return nil, err - } - expandedParts, err := expandWildcardVersion(orParts) - if err != nil { - return nil, err - } - var orFn Range - for _, p := range expandedParts { - var andFn Range - for _, ap := range p { - opStr, vStr, err := splitComparatorVersion(ap) - if err != nil { - return nil, err - } - vr, err := buildVersionRange(opStr, vStr) - if err != nil { - return nil, fmt.Errorf("Could not parse Range %q: %s", ap, err) - } - rf := vr.rangeFunc() - - // Set function - if andFn == nil { - andFn = rf - } else { // Combine with existing function - andFn = andFn.AND(rf) - } - } - if orFn == nil { - orFn = andFn - } else { - orFn = orFn.OR(andFn) - } - - } - return orFn, nil -} - -// splitORParts splits the already cleaned parts by '||'. -// Checks for invalid positions of the operator and returns an -// error if found. 
-func splitORParts(parts []string) ([][]string, error) { - var ORparts [][]string - last := 0 - for i, p := range parts { - if p == "||" { - if i == 0 { - return nil, fmt.Errorf("First element in range is '||'") - } - ORparts = append(ORparts, parts[last:i]) - last = i + 1 - } - } - if last == len(parts) { - return nil, fmt.Errorf("Last element in range is '||'") - } - ORparts = append(ORparts, parts[last:]) - return ORparts, nil -} - -// buildVersionRange takes a slice of 2: operator and version -// and builds a versionRange, otherwise an error. -func buildVersionRange(opStr, vStr string) (*versionRange, error) { - c := parseComparator(opStr) - if c == nil { - return nil, fmt.Errorf("Could not parse comparator %q in %q", opStr, strings.Join([]string{opStr, vStr}, "")) - } - v, err := Parse(vStr) - if err != nil { - return nil, fmt.Errorf("Could not parse version %q in %q: %s", vStr, strings.Join([]string{opStr, vStr}, ""), err) - } - - return &versionRange{ - v: v, - c: c, - }, nil - -} - -// inArray checks if a byte is contained in an array of bytes -func inArray(s byte, list []byte) bool { - for _, el := range list { - if el == s { - return true - } - } - return false -} - -// splitAndTrim splits a range string by spaces and cleans whitespaces -func splitAndTrim(s string) (result []string) { - last := 0 - var lastChar byte - excludeFromSplit := []byte{'>', '<', '='} - for i := 0; i < len(s); i++ { - if s[i] == ' ' && !inArray(lastChar, excludeFromSplit) { - if last < i-1 { - result = append(result, s[last:i]) - } - last = i + 1 - } else if s[i] != ' ' { - lastChar = s[i] - } - } - if last < len(s)-1 { - result = append(result, s[last:]) - } - - for i, v := range result { - result[i] = strings.Replace(v, " ", "", -1) - } - - // parts := strings.Split(s, " ") - // for _, x := range parts { - // if s := strings.TrimSpace(x); len(s) != 0 { - // result = append(result, s) - // } - // } - return -} - -// splitComparatorVersion splits the comparator from the version. 
-// Input must be free of leading or trailing spaces. -func splitComparatorVersion(s string) (string, string, error) { - i := strings.IndexFunc(s, unicode.IsDigit) - if i == -1 { - return "", "", fmt.Errorf("Could not get version from string: %q", s) - } - return strings.TrimSpace(s[0:i]), s[i:], nil -} - -// getWildcardType will return the type of wildcard that the -// passed version contains -func getWildcardType(vStr string) wildcardType { - parts := strings.Split(vStr, ".") - nparts := len(parts) - wildcard := parts[nparts-1] - - possibleWildcardType := wildcardTypefromInt(nparts) - if wildcard == "x" { - return possibleWildcardType - } - - return noneWildcard -} - -// createVersionFromWildcard will convert a wildcard version -// into a regular version, replacing 'x's with '0's, handling -// special cases like '1.x.x' and '1.x' -func createVersionFromWildcard(vStr string) string { - // handle 1.x.x - vStr2 := strings.Replace(vStr, ".x.x", ".x", 1) - vStr2 = strings.Replace(vStr2, ".x", ".0", 1) - parts := strings.Split(vStr2, ".") - - // handle 1.x - if len(parts) == 2 { - return vStr2 + ".0" - } - - return vStr2 -} - -// incrementMajorVersion will increment the major version -// of the passed version -func incrementMajorVersion(vStr string) (string, error) { - parts := strings.Split(vStr, ".") - i, err := strconv.Atoi(parts[0]) - if err != nil { - return "", err - } - parts[0] = strconv.Itoa(i + 1) - - return strings.Join(parts, "."), nil -} - -// incrementMajorVersion will increment the minor version -// of the passed version -func incrementMinorVersion(vStr string) (string, error) { - parts := strings.Split(vStr, ".") - i, err := strconv.Atoi(parts[1]) - if err != nil { - return "", err - } - parts[1] = strconv.Itoa(i + 1) - - return strings.Join(parts, "."), nil -} - -// expandWildcardVersion will expand wildcards inside versions -// following these rules: -// -// * when dealing with patch wildcards: -// >= 1.2.x will become >= 1.2.0 -// <= 1.2.x will 
become < 1.3.0 -// > 1.2.x will become >= 1.3.0 -// < 1.2.x will become < 1.2.0 -// != 1.2.x will become < 1.2.0 >= 1.3.0 -// -// * when dealing with minor wildcards: -// >= 1.x will become >= 1.0.0 -// <= 1.x will become < 2.0.0 -// > 1.x will become >= 2.0.0 -// < 1.0 will become < 1.0.0 -// != 1.x will become < 1.0.0 >= 2.0.0 -// -// * when dealing with wildcards without -// version operator: -// 1.2.x will become >= 1.2.0 < 1.3.0 -// 1.x will become >= 1.0.0 < 2.0.0 -func expandWildcardVersion(parts [][]string) ([][]string, error) { - var expandedParts [][]string - for _, p := range parts { - var newParts []string - for _, ap := range p { - if strings.Index(ap, "x") != -1 { - opStr, vStr, err := splitComparatorVersion(ap) - if err != nil { - return nil, err - } - - versionWildcardType := getWildcardType(vStr) - flatVersion := createVersionFromWildcard(vStr) - - var resultOperator string - var shouldIncrementVersion bool - switch opStr { - case ">": - resultOperator = ">=" - shouldIncrementVersion = true - case ">=": - resultOperator = ">=" - case "<": - resultOperator = "<" - case "<=": - resultOperator = "<" - shouldIncrementVersion = true - case "", "=", "==": - newParts = append(newParts, ">="+flatVersion) - resultOperator = "<" - shouldIncrementVersion = true - case "!=", "!": - newParts = append(newParts, "<"+flatVersion) - resultOperator = ">=" - shouldIncrementVersion = true - } - - var resultVersion string - if shouldIncrementVersion { - switch versionWildcardType { - case patchWildcard: - resultVersion, _ = incrementMinorVersion(flatVersion) - case minorWildcard: - resultVersion, _ = incrementMajorVersion(flatVersion) - } - } else { - resultVersion = flatVersion - } - - ap = resultOperator + resultVersion - } - newParts = append(newParts, ap) - } - expandedParts = append(expandedParts, newParts) - } - - return expandedParts, nil -} - -func parseComparator(s string) comparator { - switch s { - case "==": - fallthrough - case "": - fallthrough - case 
"=": - return compEQ - case ">": - return compGT - case ">=": - return compGE - case "<": - return compLT - case "<=": - return compLE - case "!": - fallthrough - case "!=": - return compNE - } - - return nil -} - -// MustParseRange is like ParseRange but panics if the range cannot be parsed. -func MustParseRange(s string) Range { - r, err := ParseRange(s) - if err != nil { - panic(`semver: ParseRange(` + s + `): ` + err.Error()) - } - return r -} diff --git a/vendor/github.com/blang/semver/semver.go b/vendor/github.com/blang/semver/semver.go deleted file mode 100644 index 8ee0842..0000000 --- a/vendor/github.com/blang/semver/semver.go +++ /dev/null @@ -1,418 +0,0 @@ -package semver - -import ( - "errors" - "fmt" - "strconv" - "strings" -) - -const ( - numbers string = "0123456789" - alphas = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ-" - alphanum = alphas + numbers -) - -// SpecVersion is the latest fully supported spec version of semver -var SpecVersion = Version{ - Major: 2, - Minor: 0, - Patch: 0, -} - -// Version represents a semver compatible version -type Version struct { - Major uint64 - Minor uint64 - Patch uint64 - Pre []PRVersion - Build []string //No Precendence -} - -// Version to string -func (v Version) String() string { - b := make([]byte, 0, 5) - b = strconv.AppendUint(b, v.Major, 10) - b = append(b, '.') - b = strconv.AppendUint(b, v.Minor, 10) - b = append(b, '.') - b = strconv.AppendUint(b, v.Patch, 10) - - if len(v.Pre) > 0 { - b = append(b, '-') - b = append(b, v.Pre[0].String()...) - - for _, pre := range v.Pre[1:] { - b = append(b, '.') - b = append(b, pre.String()...) - } - } - - if len(v.Build) > 0 { - b = append(b, '+') - b = append(b, v.Build[0]...) - - for _, build := range v.Build[1:] { - b = append(b, '.') - b = append(b, build...) - } - } - - return string(b) -} - -// Equals checks if v is equal to o. -func (v Version) Equals(o Version) bool { - return (v.Compare(o) == 0) -} - -// EQ checks if v is equal to o. 
-func (v Version) EQ(o Version) bool { - return (v.Compare(o) == 0) -} - -// NE checks if v is not equal to o. -func (v Version) NE(o Version) bool { - return (v.Compare(o) != 0) -} - -// GT checks if v is greater than o. -func (v Version) GT(o Version) bool { - return (v.Compare(o) == 1) -} - -// GTE checks if v is greater than or equal to o. -func (v Version) GTE(o Version) bool { - return (v.Compare(o) >= 0) -} - -// GE checks if v is greater than or equal to o. -func (v Version) GE(o Version) bool { - return (v.Compare(o) >= 0) -} - -// LT checks if v is less than o. -func (v Version) LT(o Version) bool { - return (v.Compare(o) == -1) -} - -// LTE checks if v is less than or equal to o. -func (v Version) LTE(o Version) bool { - return (v.Compare(o) <= 0) -} - -// LE checks if v is less than or equal to o. -func (v Version) LE(o Version) bool { - return (v.Compare(o) <= 0) -} - -// Compare compares Versions v to o: -// -1 == v is less than o -// 0 == v is equal to o -// 1 == v is greater than o -func (v Version) Compare(o Version) int { - if v.Major != o.Major { - if v.Major > o.Major { - return 1 - } - return -1 - } - if v.Minor != o.Minor { - if v.Minor > o.Minor { - return 1 - } - return -1 - } - if v.Patch != o.Patch { - if v.Patch > o.Patch { - return 1 - } - return -1 - } - - // Quick comparison if a version has no prerelease versions - if len(v.Pre) == 0 && len(o.Pre) == 0 { - return 0 - } else if len(v.Pre) == 0 && len(o.Pre) > 0 { - return 1 - } else if len(v.Pre) > 0 && len(o.Pre) == 0 { - return -1 - } - - i := 0 - for ; i < len(v.Pre) && i < len(o.Pre); i++ { - if comp := v.Pre[i].Compare(o.Pre[i]); comp == 0 { - continue - } else if comp == 1 { - return 1 - } else { - return -1 - } - } - - // If all pr versions are the equal but one has further prversion, this one greater - if i == len(v.Pre) && i == len(o.Pre) { - return 0 - } else if i == len(v.Pre) && i < len(o.Pre) { - return -1 - } else { - return 1 - } - -} - -// Validate validates v and 
returns error in case -func (v Version) Validate() error { - // Major, Minor, Patch already validated using uint64 - - for _, pre := range v.Pre { - if !pre.IsNum { //Numeric prerelease versions already uint64 - if len(pre.VersionStr) == 0 { - return fmt.Errorf("Prerelease can not be empty %q", pre.VersionStr) - } - if !containsOnly(pre.VersionStr, alphanum) { - return fmt.Errorf("Invalid character(s) found in prerelease %q", pre.VersionStr) - } - } - } - - for _, build := range v.Build { - if len(build) == 0 { - return fmt.Errorf("Build meta data can not be empty %q", build) - } - if !containsOnly(build, alphanum) { - return fmt.Errorf("Invalid character(s) found in build meta data %q", build) - } - } - - return nil -} - -// New is an alias for Parse and returns a pointer, parses version string and returns a validated Version or error -func New(s string) (vp *Version, err error) { - v, err := Parse(s) - vp = &v - return -} - -// Make is an alias for Parse, parses version string and returns a validated Version or error -func Make(s string) (Version, error) { - return Parse(s) -} - -// ParseTolerant allows for certain version specifications that do not strictly adhere to semver -// specs to be parsed by this library. It does so by normalizing versions before passing them to -// Parse(). 
It currently trims spaces, removes a "v" prefix, and adds a 0 patch number to versions -// with only major and minor components specified -func ParseTolerant(s string) (Version, error) { - s = strings.TrimSpace(s) - s = strings.TrimPrefix(s, "v") - - // Split into major.minor.(patch+pr+meta) - parts := strings.SplitN(s, ".", 3) - if len(parts) < 3 { - if strings.ContainsAny(parts[len(parts)-1], "+-") { - return Version{}, errors.New("Short version cannot contain PreRelease/Build meta data") - } - for len(parts) < 3 { - parts = append(parts, "0") - } - s = strings.Join(parts, ".") - } - - return Parse(s) -} - -// Parse parses version string and returns a validated Version or error -func Parse(s string) (Version, error) { - if len(s) == 0 { - return Version{}, errors.New("Version string empty") - } - - // Split into major.minor.(patch+pr+meta) - parts := strings.SplitN(s, ".", 3) - if len(parts) != 3 { - return Version{}, errors.New("No Major.Minor.Patch elements found") - } - - // Major - if !containsOnly(parts[0], numbers) { - return Version{}, fmt.Errorf("Invalid character(s) found in major number %q", parts[0]) - } - if hasLeadingZeroes(parts[0]) { - return Version{}, fmt.Errorf("Major number must not contain leading zeroes %q", parts[0]) - } - major, err := strconv.ParseUint(parts[0], 10, 64) - if err != nil { - return Version{}, err - } - - // Minor - if !containsOnly(parts[1], numbers) { - return Version{}, fmt.Errorf("Invalid character(s) found in minor number %q", parts[1]) - } - if hasLeadingZeroes(parts[1]) { - return Version{}, fmt.Errorf("Minor number must not contain leading zeroes %q", parts[1]) - } - minor, err := strconv.ParseUint(parts[1], 10, 64) - if err != nil { - return Version{}, err - } - - v := Version{} - v.Major = major - v.Minor = minor - - var build, prerelease []string - patchStr := parts[2] - - if buildIndex := strings.IndexRune(patchStr, '+'); buildIndex != -1 { - build = strings.Split(patchStr[buildIndex+1:], ".") - patchStr = 
patchStr[:buildIndex] - } - - if preIndex := strings.IndexRune(patchStr, '-'); preIndex != -1 { - prerelease = strings.Split(patchStr[preIndex+1:], ".") - patchStr = patchStr[:preIndex] - } - - if !containsOnly(patchStr, numbers) { - return Version{}, fmt.Errorf("Invalid character(s) found in patch number %q", patchStr) - } - if hasLeadingZeroes(patchStr) { - return Version{}, fmt.Errorf("Patch number must not contain leading zeroes %q", patchStr) - } - patch, err := strconv.ParseUint(patchStr, 10, 64) - if err != nil { - return Version{}, err - } - - v.Patch = patch - - // Prerelease - for _, prstr := range prerelease { - parsedPR, err := NewPRVersion(prstr) - if err != nil { - return Version{}, err - } - v.Pre = append(v.Pre, parsedPR) - } - - // Build meta data - for _, str := range build { - if len(str) == 0 { - return Version{}, errors.New("Build meta data is empty") - } - if !containsOnly(str, alphanum) { - return Version{}, fmt.Errorf("Invalid character(s) found in build meta data %q", str) - } - v.Build = append(v.Build, str) - } - - return v, nil -} - -// MustParse is like Parse but panics if the version cannot be parsed. 
-func MustParse(s string) Version { - v, err := Parse(s) - if err != nil { - panic(`semver: Parse(` + s + `): ` + err.Error()) - } - return v -} - -// PRVersion represents a PreRelease Version -type PRVersion struct { - VersionStr string - VersionNum uint64 - IsNum bool -} - -// NewPRVersion creates a new valid prerelease version -func NewPRVersion(s string) (PRVersion, error) { - if len(s) == 0 { - return PRVersion{}, errors.New("Prerelease is empty") - } - v := PRVersion{} - if containsOnly(s, numbers) { - if hasLeadingZeroes(s) { - return PRVersion{}, fmt.Errorf("Numeric PreRelease version must not contain leading zeroes %q", s) - } - num, err := strconv.ParseUint(s, 10, 64) - - // Might never be hit, but just in case - if err != nil { - return PRVersion{}, err - } - v.VersionNum = num - v.IsNum = true - } else if containsOnly(s, alphanum) { - v.VersionStr = s - v.IsNum = false - } else { - return PRVersion{}, fmt.Errorf("Invalid character(s) found in prerelease %q", s) - } - return v, nil -} - -// IsNumeric checks if prerelease-version is numeric -func (v PRVersion) IsNumeric() bool { - return v.IsNum -} - -// Compare compares two PreRelease Versions v and o: -// -1 == v is less than o -// 0 == v is equal to o -// 1 == v is greater than o -func (v PRVersion) Compare(o PRVersion) int { - if v.IsNum && !o.IsNum { - return -1 - } else if !v.IsNum && o.IsNum { - return 1 - } else if v.IsNum && o.IsNum { - if v.VersionNum == o.VersionNum { - return 0 - } else if v.VersionNum > o.VersionNum { - return 1 - } else { - return -1 - } - } else { // both are Alphas - if v.VersionStr == o.VersionStr { - return 0 - } else if v.VersionStr > o.VersionStr { - return 1 - } else { - return -1 - } - } -} - -// PreRelease version to string -func (v PRVersion) String() string { - if v.IsNum { - return strconv.FormatUint(v.VersionNum, 10) - } - return v.VersionStr -} - -func containsOnly(s string, set string) bool { - return strings.IndexFunc(s, func(r rune) bool { - return 
!strings.ContainsRune(set, r) - }) == -1 -} - -func hasLeadingZeroes(s string) bool { - return len(s) > 1 && s[0] == '0' -} - -// NewBuildVersion creates a new valid build version -func NewBuildVersion(s string) (string, error) { - if len(s) == 0 { - return "", errors.New("Buildversion is empty") - } - if !containsOnly(s, alphanum) { - return "", fmt.Errorf("Invalid character(s) found in build meta data %q", s) - } - return s, nil -} diff --git a/vendor/github.com/blang/semver/sort.go b/vendor/github.com/blang/semver/sort.go deleted file mode 100644 index e18f880..0000000 --- a/vendor/github.com/blang/semver/sort.go +++ /dev/null @@ -1,28 +0,0 @@ -package semver - -import ( - "sort" -) - -// Versions represents multiple versions. -type Versions []Version - -// Len returns length of version collection -func (s Versions) Len() int { - return len(s) -} - -// Swap swaps two versions inside the collection by its indices -func (s Versions) Swap(i, j int) { - s[i], s[j] = s[j], s[i] -} - -// Less checks if version at index i is less than version at index j -func (s Versions) Less(i, j int) bool { - return s[i].LT(s[j]) -} - -// Sort sorts a slice of versions -func Sort(versions []Version) { - sort.Sort(Versions(versions)) -} diff --git a/vendor/github.com/blang/semver/sql.go b/vendor/github.com/blang/semver/sql.go deleted file mode 100644 index eb4d802..0000000 --- a/vendor/github.com/blang/semver/sql.go +++ /dev/null @@ -1,30 +0,0 @@ -package semver - -import ( - "database/sql/driver" - "fmt" -) - -// Scan implements the database/sql.Scanner interface. -func (v *Version) Scan(src interface{}) (err error) { - var str string - switch src := src.(type) { - case string: - str = src - case []byte: - str = string(src) - default: - return fmt.Errorf("Version.Scan: cannot convert %T to string.", src) - } - - if t, err := Parse(str); err == nil { - *v = t - } - - return -} - -// Value implements the database/sql/driver.Valuer interface. 
-func (v Version) Value() (driver.Value, error) { - return v.String(), nil -} diff --git a/vendor/github.com/boltdb/bolt/.gitignore b/vendor/github.com/boltdb/bolt/.gitignore deleted file mode 100644 index c7bd2b7..0000000 --- a/vendor/github.com/boltdb/bolt/.gitignore +++ /dev/null @@ -1,4 +0,0 @@ -*.prof -*.test -*.swp -/bin/ diff --git a/vendor/github.com/boltdb/bolt/LICENSE b/vendor/github.com/boltdb/bolt/LICENSE deleted file mode 100644 index 004e77f..0000000 --- a/vendor/github.com/boltdb/bolt/LICENSE +++ /dev/null @@ -1,20 +0,0 @@ -The MIT License (MIT) - -Copyright (c) 2013 Ben Johnson - -Permission is hereby granted, free of charge, to any person obtaining a copy of -this software and associated documentation files (the "Software"), to deal in -the Software without restriction, including without limitation the rights to -use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of -the Software, and to permit persons to whom the Software is furnished to do so, -subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS -FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR -COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER -IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
diff --git a/vendor/github.com/boltdb/bolt/Makefile b/vendor/github.com/boltdb/bolt/Makefile deleted file mode 100644 index e035e63..0000000 --- a/vendor/github.com/boltdb/bolt/Makefile +++ /dev/null @@ -1,18 +0,0 @@ -BRANCH=`git rev-parse --abbrev-ref HEAD` -COMMIT=`git rev-parse --short HEAD` -GOLDFLAGS="-X main.branch $(BRANCH) -X main.commit $(COMMIT)" - -default: build - -race: - @go test -v -race -test.run="TestSimulate_(100op|1000op)" - -# go get github.com/kisielk/errcheck -errcheck: - @errcheck -ignorepkg=bytes -ignore=os:Remove github.com/boltdb/bolt - -test: - @go test -v -cover . - @go test -v ./cmd/bolt - -.PHONY: fmt test diff --git a/vendor/github.com/boltdb/bolt/README.md b/vendor/github.com/boltdb/bolt/README.md deleted file mode 100644 index 7d43a15..0000000 --- a/vendor/github.com/boltdb/bolt/README.md +++ /dev/null @@ -1,916 +0,0 @@ -Bolt [![Coverage Status](https://coveralls.io/repos/boltdb/bolt/badge.svg?branch=master)](https://coveralls.io/r/boltdb/bolt?branch=master) [![GoDoc](https://godoc.org/github.com/boltdb/bolt?status.svg)](https://godoc.org/github.com/boltdb/bolt) ![Version](https://img.shields.io/badge/version-1.2.1-green.svg) -==== - -Bolt is a pure Go key/value store inspired by [Howard Chu's][hyc_symas] -[LMDB project][lmdb]. The goal of the project is to provide a simple, -fast, and reliable database for projects that don't require a full database -server such as Postgres or MySQL. - -Since Bolt is meant to be used as such a low-level piece of functionality, -simplicity is key. The API will be small and only focus on getting values -and setting values. That's it. - -[hyc_symas]: https://twitter.com/hyc_symas -[lmdb]: http://symas.com/mdb/ - -## Project Status - -Bolt is stable, the API is fixed, and the file format is fixed. Full unit -test coverage and randomized black box testing are used to ensure database -consistency and thread safety. 
Bolt is currently used in high-load production -environments serving databases as large as 1TB. Many companies such as -Shopify and Heroku use Bolt-backed services every day. - -## Table of Contents - -- [Getting Started](#getting-started) - - [Installing](#installing) - - [Opening a database](#opening-a-database) - - [Transactions](#transactions) - - [Read-write transactions](#read-write-transactions) - - [Read-only transactions](#read-only-transactions) - - [Batch read-write transactions](#batch-read-write-transactions) - - [Managing transactions manually](#managing-transactions-manually) - - [Using buckets](#using-buckets) - - [Using key/value pairs](#using-keyvalue-pairs) - - [Autoincrementing integer for the bucket](#autoincrementing-integer-for-the-bucket) - - [Iterating over keys](#iterating-over-keys) - - [Prefix scans](#prefix-scans) - - [Range scans](#range-scans) - - [ForEach()](#foreach) - - [Nested buckets](#nested-buckets) - - [Database backups](#database-backups) - - [Statistics](#statistics) - - [Read-Only Mode](#read-only-mode) - - [Mobile Use (iOS/Android)](#mobile-use-iosandroid) -- [Resources](#resources) -- [Comparison with other databases](#comparison-with-other-databases) - - [Postgres, MySQL, & other relational databases](#postgres-mysql--other-relational-databases) - - [LevelDB, RocksDB](#leveldb-rocksdb) - - [LMDB](#lmdb) -- [Caveats & Limitations](#caveats--limitations) -- [Reading the Source](#reading-the-source) -- [Other Projects Using Bolt](#other-projects-using-bolt) - -## Getting Started - -### Installing - -To start using Bolt, install Go and run `go get`: - -```sh -$ go get github.com/boltdb/bolt/... -``` - -This will retrieve the library and install the `bolt` command line utility into -your `$GOBIN` path. - - -### Opening a database - -The top-level object in Bolt is a `DB`. It is represented as a single file on -your disk and represents a consistent snapshot of your data. 
- -To open your database, simply use the `bolt.Open()` function: - -```go -package main - -import ( - "log" - - "github.com/boltdb/bolt" -) - -func main() { - // Open the my.db data file in your current directory. - // It will be created if it doesn't exist. - db, err := bolt.Open("my.db", 0600, nil) - if err != nil { - log.Fatal(err) - } - defer db.Close() - - ... -} -``` - -Please note that Bolt obtains a file lock on the data file so multiple processes -cannot open the same database at the same time. Opening an already open Bolt -database will cause it to hang until the other process closes it. To prevent -an indefinite wait you can pass a timeout option to the `Open()` function: - -```go -db, err := bolt.Open("my.db", 0600, &bolt.Options{Timeout: 1 * time.Second}) -``` - - -### Transactions - -Bolt allows only one read-write transaction at a time but allows as many -read-only transactions as you want at a time. Each transaction has a consistent -view of the data as it existed when the transaction started. - -Individual transactions and all objects created from them (e.g. buckets, keys) -are not thread safe. To work with data in multiple goroutines you must start -a transaction for each one or use locking to ensure only one goroutine accesses -a transaction at a time. Creating transaction from the `DB` is thread safe. - -Read-only transactions and read-write transactions should not depend on one -another and generally shouldn't be opened simultaneously in the same goroutine. -This can cause a deadlock as the read-write transaction needs to periodically -re-map the data file but it cannot do so while a read-only transaction is open. - - -#### Read-write transactions - -To start a read-write transaction, you can use the `DB.Update()` function: - -```go -err := db.Update(func(tx *bolt.Tx) error { - ... - return nil -}) -``` - -Inside the closure, you have a consistent view of the database. You commit the -transaction by returning `nil` at the end. 
You can also rollback the transaction -at any point by returning an error. All database operations are allowed inside -a read-write transaction. - -Always check the return error as it will report any disk failures that can cause -your transaction to not complete. If you return an error within your closure -it will be passed through. - - -#### Read-only transactions - -To start a read-only transaction, you can use the `DB.View()` function: - -```go -err := db.View(func(tx *bolt.Tx) error { - ... - return nil -}) -``` - -You also get a consistent view of the database within this closure, however, -no mutating operations are allowed within a read-only transaction. You can only -retrieve buckets, retrieve values, and copy the database within a read-only -transaction. - - -#### Batch read-write transactions - -Each `DB.Update()` waits for disk to commit the writes. This overhead -can be minimized by combining multiple updates with the `DB.Batch()` -function: - -```go -err := db.Batch(func(tx *bolt.Tx) error { - ... - return nil -}) -``` - -Concurrent Batch calls are opportunistically combined into larger -transactions. Batch is only useful when there are multiple goroutines -calling it. - -The trade-off is that `Batch` can call the given -function multiple times, if parts of the transaction fail. The -function must be idempotent and side effects must take effect only -after a successful return from `DB.Batch()`. - -For example: don't display messages from inside the function, instead -set variables in the enclosing scope: - -```go -var id uint64 -err := db.Batch(func(tx *bolt.Tx) error { - // Find last key in bucket, decode as bigendian uint64, increment - // by one, encode back to []byte, and add new key. - ... - id = newValue - return nil -}) -if err != nil { - return ... -} -fmt.Println("Allocated ID %d", id) -``` - - -#### Managing transactions manually - -The `DB.View()` and `DB.Update()` functions are wrappers around the `DB.Begin()` -function. 
These helper functions will start the transaction, execute a function, -and then safely close your transaction if an error is returned. This is the -recommended way to use Bolt transactions. - -However, sometimes you may want to manually start and end your transactions. -You can use the `DB.Begin()` function directly but **please** be sure to close -the transaction. - -```go -// Start a writable transaction. -tx, err := db.Begin(true) -if err != nil { - return err -} -defer tx.Rollback() - -// Use the transaction... -_, err := tx.CreateBucket([]byte("MyBucket")) -if err != nil { - return err -} - -// Commit the transaction and check for error. -if err := tx.Commit(); err != nil { - return err -} -``` - -The first argument to `DB.Begin()` is a boolean stating if the transaction -should be writable. - - -### Using buckets - -Buckets are collections of key/value pairs within the database. All keys in a -bucket must be unique. You can create a bucket using the `DB.CreateBucket()` -function: - -```go -db.Update(func(tx *bolt.Tx) error { - b, err := tx.CreateBucket([]byte("MyBucket")) - if err != nil { - return fmt.Errorf("create bucket: %s", err) - } - return nil -}) -``` - -You can also create a bucket only if it doesn't exist by using the -`Tx.CreateBucketIfNotExists()` function. It's a common pattern to call this -function for all your top-level buckets after you open your database so you can -guarantee that they exist for future transactions. - -To delete a bucket, simply call the `Tx.DeleteBucket()` function. - - -### Using key/value pairs - -To save a key/value pair to a bucket, use the `Bucket.Put()` function: - -```go -db.Update(func(tx *bolt.Tx) error { - b := tx.Bucket([]byte("MyBucket")) - err := b.Put([]byte("answer"), []byte("42")) - return err -}) -``` - -This will set the value of the `"answer"` key to `"42"` in the `MyBucket` -bucket. 
To retrieve this value, we can use the `Bucket.Get()` function: - -```go -db.View(func(tx *bolt.Tx) error { - b := tx.Bucket([]byte("MyBucket")) - v := b.Get([]byte("answer")) - fmt.Printf("The answer is: %s\n", v) - return nil -}) -``` - -The `Get()` function does not return an error because its operation is -guaranteed to work (unless there is some kind of system failure). If the key -exists then it will return its byte slice value. If it doesn't exist then it -will return `nil`. It's important to note that you can have a zero-length value -set to a key which is different than the key not existing. - -Use the `Bucket.Delete()` function to delete a key from the bucket. - -Please note that values returned from `Get()` are only valid while the -transaction is open. If you need to use a value outside of the transaction -then you must use `copy()` to copy it to another byte slice. - - -### Autoincrementing integer for the bucket -By using the `NextSequence()` function, you can let Bolt determine a sequence -which can be used as the unique identifier for your key/value pairs. See the -example below. - -```go -// CreateUser saves u to the store. The new user ID is set on u once the data is persisted. -func (s *Store) CreateUser(u *User) error { - return s.db.Update(func(tx *bolt.Tx) error { - // Retrieve the users bucket. - // This should be created when the DB is first opened. - b := tx.Bucket([]byte("users")) - - // Generate ID for the user. - // This returns an error only if the Tx is closed or not writeable. - // That can't happen in an Update() call so I ignore the error check. - id, _ := b.NextSequence() - u.ID = int(id) - - // Marshal user data into bytes. - buf, err := json.Marshal(u) - if err != nil { - return err - } - - // Persist bytes to users bucket. - return b.Put(itob(u.ID), buf) - }) -} - -// itob returns an 8-byte big endian representation of v. 
-func itob(v int) []byte { - b := make([]byte, 8) - binary.BigEndian.PutUint64(b, uint64(v)) - return b -} - -type User struct { - ID int - ... -} -``` - -### Iterating over keys - -Bolt stores its keys in byte-sorted order within a bucket. This makes sequential -iteration over these keys extremely fast. To iterate over keys we'll use a -`Cursor`: - -```go -db.View(func(tx *bolt.Tx) error { - // Assume bucket exists and has keys - b := tx.Bucket([]byte("MyBucket")) - - c := b.Cursor() - - for k, v := c.First(); k != nil; k, v = c.Next() { - fmt.Printf("key=%s, value=%s\n", k, v) - } - - return nil -}) -``` - -The cursor allows you to move to a specific point in the list of keys and move -forward or backward through the keys one at a time. - -The following functions are available on the cursor: - -``` -First() Move to the first key. -Last() Move to the last key. -Seek() Move to a specific key. -Next() Move to the next key. -Prev() Move to the previous key. -``` - -Each of those functions has a return signature of `(key []byte, value []byte)`. -When you have iterated to the end of the cursor then `Next()` will return a -`nil` key. You must seek to a position using `First()`, `Last()`, or `Seek()` -before calling `Next()` or `Prev()`. If you do not seek to a position then -these functions will return a `nil` key. - -During iteration, if the key is non-`nil` but the value is `nil`, that means -the key refers to a bucket rather than a value. Use `Bucket.Bucket()` to -access the sub-bucket. 
- - -#### Prefix scans - -To iterate over a key prefix, you can combine `Seek()` and `bytes.HasPrefix()`: - -```go -db.View(func(tx *bolt.Tx) error { - // Assume bucket exists and has keys - c := tx.Bucket([]byte("MyBucket")).Cursor() - - prefix := []byte("1234") - for k, v := c.Seek(prefix); k != nil && bytes.HasPrefix(k, prefix); k, v = c.Next() { - fmt.Printf("key=%s, value=%s\n", k, v) - } - - return nil -}) -``` - -#### Range scans - -Another common use case is scanning over a range such as a time range. If you -use a sortable time encoding such as RFC3339 then you can query a specific -date range like this: - -```go -db.View(func(tx *bolt.Tx) error { - // Assume our events bucket exists and has RFC3339 encoded time keys. - c := tx.Bucket([]byte("Events")).Cursor() - - // Our time range spans the 90's decade. - min := []byte("1990-01-01T00:00:00Z") - max := []byte("2000-01-01T00:00:00Z") - - // Iterate over the 90's. - for k, v := c.Seek(min); k != nil && bytes.Compare(k, max) <= 0; k, v = c.Next() { - fmt.Printf("%s: %s\n", k, v) - } - - return nil -}) -``` - -Note that, while RFC3339 is sortable, the Golang implementation of RFC3339Nano does not use a fixed number of digits after the decimal point and is therefore not sortable. - - -#### ForEach() - -You can also use the function `ForEach()` if you know you'll be iterating over -all the keys in a bucket: - -```go -db.View(func(tx *bolt.Tx) error { - // Assume bucket exists and has keys - b := tx.Bucket([]byte("MyBucket")) - - b.ForEach(func(k, v []byte) error { - fmt.Printf("key=%s, value=%s\n", k, v) - return nil - }) - return nil -}) -``` - -Please note that keys and values in `ForEach()` are only valid while -the transaction is open. If you need to use a key or value outside of -the transaction, you must use `copy()` to copy it to another byte -slice. - -### Nested buckets - -You can also store a bucket in a key to create nested buckets. 
The API is the -same as the bucket management API on the `DB` object: - -```go -func (*Bucket) CreateBucket(key []byte) (*Bucket, error) -func (*Bucket) CreateBucketIfNotExists(key []byte) (*Bucket, error) -func (*Bucket) DeleteBucket(key []byte) error -``` - -Say you had a multi-tenant application where the root level bucket was the account bucket. Inside of this bucket was a sequence of accounts which themselves are buckets. And inside the sequence bucket you could have many buckets pertaining to the Account itself (Users, Notes, etc) isolating the information into logical groupings. - -```go - -// createUser creates a new user in the given account. -func createUser(accountID int, u *User) error { - // Start the transaction. - tx, err := db.Begin(true) - if err != nil { - return err - } - defer tx.Rollback() - - // Retrieve the root bucket for the account. - // Assume this has already been created when the account was set up. - root := tx.Bucket([]byte(strconv.FormatUint(accountID, 10))) - - // Setup the users bucket. - bkt, err := root.CreateBucketIfNotExists([]byte("USERS")) - if err != nil { - return err - } - - // Generate an ID for the new user. - userID, err := bkt.NextSequence() - if err != nil { - return err - } - u.ID = userID - - // Marshal and save the encoded user. - if buf, err := json.Marshal(u); err != nil { - return err - } else if err := bkt.Put([]byte(strconv.FormatUint(u.ID, 10)), buf); err != nil { - return err - } - - // Commit the transaction. - if err := tx.Commit(); err != nil { - return err - } - - return nil -} - -``` - - - - -### Database backups - -Bolt is a single file so it's easy to backup. You can use the `Tx.WriteTo()` -function to write a consistent view of the database to a writer. If you call -this from a read-only transaction, it will perform a hot backup and not block -your other database reads and writes. - -By default, it will use a regular file handle which will utilize the operating -system's page cache. 
See the [`Tx`](https://godoc.org/github.com/boltdb/bolt#Tx) -documentation for information about optimizing for larger-than-RAM datasets. - -One common use case is to backup over HTTP so you can use tools like `cURL` to -do database backups: - -```go -func BackupHandleFunc(w http.ResponseWriter, req *http.Request) { - err := db.View(func(tx *bolt.Tx) error { - w.Header().Set("Content-Type", "application/octet-stream") - w.Header().Set("Content-Disposition", `attachment; filename="my.db"`) - w.Header().Set("Content-Length", strconv.Itoa(int(tx.Size()))) - _, err := tx.WriteTo(w) - return err - }) - if err != nil { - http.Error(w, err.Error(), http.StatusInternalServerError) - } -} -``` - -Then you can backup using this command: - -```sh -$ curl http://localhost/backup > my.db -``` - -Or you can open your browser to `http://localhost/backup` and it will download -automatically. - -If you want to backup to another file you can use the `Tx.CopyFile()` helper -function. - - -### Statistics - -The database keeps a running count of many of the internal operations it -performs so you can better understand what's going on. By grabbing a snapshot -of these stats at two points in time we can see what operations were performed -in that time range. - -For example, we could start a goroutine to log stats every 10 seconds: - -```go -go func() { - // Grab the initial stats. - prev := db.Stats() - - for { - // Wait for 10s. - time.Sleep(10 * time.Second) - - // Grab the current stats and diff them. - stats := db.Stats() - diff := stats.Sub(&prev) - - // Encode stats to JSON and print to STDERR. - json.NewEncoder(os.Stderr).Encode(diff) - - // Save stats for the next loop. - prev = stats - } -}() -``` - -It's also useful to pipe these stats to a service such as statsd for monitoring -or to provide an HTTP endpoint that will perform a fixed-length sample. - - -### Read-Only Mode - -Sometimes it is useful to create a shared, read-only Bolt database. 
To this, -set the `Options.ReadOnly` flag when opening your database. Read-only mode -uses a shared lock to allow multiple processes to read from the database but -it will block any processes from opening the database in read-write mode. - -```go -db, err := bolt.Open("my.db", 0666, &bolt.Options{ReadOnly: true}) -if err != nil { - log.Fatal(err) -} -``` - -### Mobile Use (iOS/Android) - -Bolt is able to run on mobile devices by leveraging the binding feature of the -[gomobile](https://github.com/golang/mobile) tool. Create a struct that will -contain your database logic and a reference to a `*bolt.DB` with a initializing -constructor that takes in a filepath where the database file will be stored. -Neither Android nor iOS require extra permissions or cleanup from using this method. - -```go -func NewBoltDB(filepath string) *BoltDB { - db, err := bolt.Open(filepath+"/demo.db", 0600, nil) - if err != nil { - log.Fatal(err) - } - - return &BoltDB{db} -} - -type BoltDB struct { - db *bolt.DB - ... -} - -func (b *BoltDB) Path() string { - return b.db.Path() -} - -func (b *BoltDB) Close() { - b.db.Close() -} -``` - -Database logic should be defined as methods on this wrapper struct. - -To initialize this struct from the native language (both platforms now sync -their local storage to the cloud. 
These snippets disable that functionality for the -database file): - -#### Android - -```java -String path; -if (android.os.Build.VERSION.SDK_INT >=android.os.Build.VERSION_CODES.LOLLIPOP){ - path = getNoBackupFilesDir().getAbsolutePath(); -} else{ - path = getFilesDir().getAbsolutePath(); -} -Boltmobiledemo.BoltDB boltDB = Boltmobiledemo.NewBoltDB(path) -``` - -#### iOS - -```objc -- (void)demo { - NSString* path = [NSSearchPathForDirectoriesInDomains(NSLibraryDirectory, - NSUserDomainMask, - YES) objectAtIndex:0]; - GoBoltmobiledemoBoltDB * demo = GoBoltmobiledemoNewBoltDB(path); - [self addSkipBackupAttributeToItemAtPath:demo.path]; - //Some DB Logic would go here - [demo close]; -} - -- (BOOL)addSkipBackupAttributeToItemAtPath:(NSString *) filePathString -{ - NSURL* URL= [NSURL fileURLWithPath: filePathString]; - assert([[NSFileManager defaultManager] fileExistsAtPath: [URL path]]); - - NSError *error = nil; - BOOL success = [URL setResourceValue: [NSNumber numberWithBool: YES] - forKey: NSURLIsExcludedFromBackupKey error: &error]; - if(!success){ - NSLog(@"Error excluding %@ from backup %@", [URL lastPathComponent], error); - } - return success; -} - -``` - -## Resources - -For more information on getting started with Bolt, check out the following articles: - -* [Intro to BoltDB: Painless Performant Persistence](http://npf.io/2014/07/intro-to-boltdb-painless-performant-persistence/) by [Nate Finch](https://github.com/natefinch). -* [Bolt -- an embedded key/value database for Go](https://www.progville.com/go/bolt-embedded-db-golang/) by Progville - - -## Comparison with other databases - -### Postgres, MySQL, & other relational databases - -Relational databases structure data into rows and are only accessible through -the use of SQL. This approach provides flexibility in how you store and query -your data but also incurs overhead in parsing and planning SQL statements. Bolt -accesses all data by a byte slice key. 
This makes Bolt fast to read and write -data by key but provides no built-in support for joining values together. - -Most relational databases (with the exception of SQLite) are standalone servers -that run separately from your application. This gives your systems -flexibility to connect multiple application servers to a single database -server but also adds overhead in serializing and transporting data over the -network. Bolt runs as a library included in your application so all data access -has to go through your application's process. This brings data closer to your -application but limits multi-process access to the data. - - -### LevelDB, RocksDB - -LevelDB and its derivatives (RocksDB, HyperLevelDB) are similar to Bolt in that -they are libraries bundled into the application, however, their underlying -structure is a log-structured merge-tree (LSM tree). An LSM tree optimizes -random writes by using a write ahead log and multi-tiered, sorted files called -SSTables. Bolt uses a B+tree internally and only a single file. Both approaches -have trade-offs. - -If you require a high random write throughput (>10,000 w/sec) or you need to use -spinning disks then LevelDB could be a good choice. If your application is -read-heavy or does a lot of range scans then Bolt could be a good choice. - -One other important consideration is that LevelDB does not have transactions. -It supports batch writing of key/values pairs and it supports read snapshots -but it will not give you the ability to do a compare-and-swap operation safely. -Bolt supports fully serializable ACID transactions. - - -### LMDB - -Bolt was originally a port of LMDB so it is architecturally similar. Both use -a B+tree, have ACID semantics with fully serializable transactions, and support -lock-free MVCC using a single writer and multiple readers. - -The two projects have somewhat diverged. LMDB heavily focuses on raw performance -while Bolt has focused on simplicity and ease of use. 
For example, LMDB allows -several unsafe actions such as direct writes for the sake of performance. Bolt -opts to disallow actions which can leave the database in a corrupted state. The -only exception to this in Bolt is `DB.NoSync`. - -There are also a few differences in API. LMDB requires a maximum mmap size when -opening an `mdb_env` whereas Bolt will handle incremental mmap resizing -automatically. LMDB overloads the getter and setter functions with multiple -flags whereas Bolt splits these specialized cases into their own functions. - - -## Caveats & Limitations - -It's important to pick the right tool for the job and Bolt is no exception. -Here are a few things to note when evaluating and using Bolt: - -* Bolt is good for read intensive workloads. Sequential write performance is - also fast but random writes can be slow. You can use `DB.Batch()` or add a - write-ahead log to help mitigate this issue. - -* Bolt uses a B+tree internally so there can be a lot of random page access. - SSDs provide a significant performance boost over spinning disks. - -* Try to avoid long running read transactions. Bolt uses copy-on-write so - old pages cannot be reclaimed while an old transaction is using them. - -* Byte slices returned from Bolt are only valid during a transaction. Once the - transaction has been committed or rolled back then the memory they point to - can be reused by a new page or can be unmapped from virtual memory and you'll - see an `unexpected fault address` panic when accessing it. - -* Bolt uses an exclusive write lock on the database file so it cannot be - shared by multiple processes. - -* Be careful when using `Bucket.FillPercent`. Setting a high fill percent for - buckets that have random inserts will cause your database to have very poor - page utilization. - -* Use larger buckets in general. Smaller buckets causes poor page utilization - once they become larger than the page size (typically 4KB). 
- -* Bulk loading a lot of random writes into a new bucket can be slow as the - page will not split until the transaction is committed. Randomly inserting - more than 100,000 key/value pairs into a single new bucket in a single - transaction is not advised. - -* Bolt uses a memory-mapped file so the underlying operating system handles the - caching of the data. Typically, the OS will cache as much of the file as it - can in memory and will release memory as needed to other processes. This means - that Bolt can show very high memory usage when working with large databases. - However, this is expected and the OS will release memory as needed. Bolt can - handle databases much larger than the available physical RAM, provided its - memory-map fits in the process virtual address space. It may be problematic - on 32-bits systems. - -* The data structures in the Bolt database are memory mapped so the data file - will be endian specific. This means that you cannot copy a Bolt file from a - little endian machine to a big endian machine and have it work. For most - users this is not a concern since most modern CPUs are little endian. - -* Because of the way pages are laid out on disk, Bolt cannot truncate data files - and return free pages back to the disk. Instead, Bolt maintains a free list - of unused pages within its data file. These free pages can be reused by later - transactions. This works well for many use cases as databases generally tend - to grow. However, it's important to note that deleting large chunks of data - will not allow you to reclaim that space on disk. - - For more information on page allocation, [see this comment][page-allocation]. - -[page-allocation]: https://github.com/boltdb/bolt/issues/308#issuecomment-74811638 - - -## Reading the Source - -Bolt is a relatively small code base (<3KLOC) for an embedded, serializable, -transactional key/value database so it can be a good starting point for people -interested in how databases work. 
- -The best places to start are the main entry points into Bolt: - -- `Open()` - Initializes the reference to the database. It's responsible for - creating the database if it doesn't exist, obtaining an exclusive lock on the - file, reading the meta pages, & memory-mapping the file. - -- `DB.Begin()` - Starts a read-only or read-write transaction depending on the - value of the `writable` argument. This requires briefly obtaining the "meta" - lock to keep track of open transactions. Only one read-write transaction can - exist at a time so the "rwlock" is acquired during the life of a read-write - transaction. - -- `Bucket.Put()` - Writes a key/value pair into a bucket. After validating the - arguments, a cursor is used to traverse the B+tree to the page and position - where they key & value will be written. Once the position is found, the bucket - materializes the underlying page and the page's parent pages into memory as - "nodes". These nodes are where mutations occur during read-write transactions. - These changes get flushed to disk during commit. - -- `Bucket.Get()` - Retrieves a key/value pair from a bucket. This uses a cursor - to move to the page & position of a key/value pair. During a read-only - transaction, the key and value data is returned as a direct reference to the - underlying mmap file so there's no allocation overhead. For read-write - transactions, this data may reference the mmap file or one of the in-memory - node values. - -- `Cursor` - This object is simply for traversing the B+tree of on-disk pages - or in-memory nodes. It can seek to a specific key, move to the first or last - value, or it can move forward or backward. The cursor handles the movement up - and down the B+tree transparently to the end user. - -- `Tx.Commit()` - Converts the in-memory dirty nodes and the list of free pages - into pages to be written to disk. Writing to disk then occurs in two phases. - First, the dirty pages are written to disk and an `fsync()` occurs. 
Second, a - new meta page with an incremented transaction ID is written and another - `fsync()` occurs. This two phase write ensures that partially written data - pages are ignored in the event of a crash since the meta page pointing to them - is never written. Partially written meta pages are invalidated because they - are written with a checksum. - -If you have additional notes that could be helpful for others, please submit -them via pull request. - - -## Other Projects Using Bolt - -Below is a list of public, open source projects that use Bolt: - -* [BoltDbWeb](https://github.com/evnix/boltdbweb) - A web based GUI for BoltDB files. -* [Operation Go: A Routine Mission](http://gocode.io) - An online programming game for Golang using Bolt for user accounts and a leaderboard. -* [Bazil](https://bazil.org/) - A file system that lets your data reside where it is most convenient for it to reside. -* [DVID](https://github.com/janelia-flyem/dvid) - Added Bolt as optional storage engine and testing it against Basho-tuned leveldb. -* [Skybox Analytics](https://github.com/skybox/skybox) - A standalone funnel analysis tool for web analytics. -* [Scuttlebutt](https://github.com/benbjohnson/scuttlebutt) - Uses Bolt to store and process all Twitter mentions of GitHub projects. -* [Wiki](https://github.com/peterhellberg/wiki) - A tiny wiki using Goji, BoltDB and Blackfriday. -* [ChainStore](https://github.com/pressly/chainstore) - Simple key-value interface to a variety of storage engines organized as a chain of operations. -* [MetricBase](https://github.com/msiebuhr/MetricBase) - Single-binary version of Graphite. -* [Gitchain](https://github.com/gitchain/gitchain) - Decentralized, peer-to-peer Git repositories aka "Git meets Bitcoin". -* [event-shuttle](https://github.com/sclasen/event-shuttle) - A Unix system service to collect and reliably deliver messages to Kafka. -* [ipxed](https://github.com/kelseyhightower/ipxed) - Web interface and api for ipxed. 
-* [BoltStore](https://github.com/yosssi/boltstore) - Session store using Bolt. -* [photosite/session](https://godoc.org/bitbucket.org/kardianos/photosite/session) - Sessions for a photo viewing site. -* [LedisDB](https://github.com/siddontang/ledisdb) - A high performance NoSQL, using Bolt as optional storage. -* [ipLocator](https://github.com/AndreasBriese/ipLocator) - A fast ip-geo-location-server using bolt with bloom filters. -* [cayley](https://github.com/google/cayley) - Cayley is an open-source graph database using Bolt as optional backend. -* [bleve](http://www.blevesearch.com/) - A pure Go search engine similar to ElasticSearch that uses Bolt as the default storage backend. -* [tentacool](https://github.com/optiflows/tentacool) - REST api server to manage system stuff (IP, DNS, Gateway...) on a linux server. -* [Seaweed File System](https://github.com/chrislusf/seaweedfs) - Highly scalable distributed key~file system with O(1) disk read. -* [InfluxDB](https://influxdata.com) - Scalable datastore for metrics, events, and real-time analytics. -* [Freehold](http://tshannon.bitbucket.org/freehold/) - An open, secure, and lightweight platform for your files and data. -* [Prometheus Annotation Server](https://github.com/oliver006/prom_annotation_server) - Annotation server for PromDash & Prometheus service monitoring system. -* [Consul](https://github.com/hashicorp/consul) - Consul is service discovery and configuration made easy. Distributed, highly available, and datacenter-aware. -* [Kala](https://github.com/ajvb/kala) - Kala is a modern job scheduler optimized to run on a single node. It is persistent, JSON over HTTP API, ISO 8601 duration notation, and dependent jobs. -* [drive](https://github.com/odeke-em/drive) - drive is an unofficial Google Drive command line client for \*NIX operating systems. -* [stow](https://github.com/djherbis/stow) - a persistence manager for objects - backed by boltdb. 
-* [buckets](https://github.com/joyrexus/buckets) - a bolt wrapper streamlining - simple tx and key scans. -* [mbuckets](https://github.com/abhigupta912/mbuckets) - A Bolt wrapper that allows easy operations on multi level (nested) buckets. -* [Request Baskets](https://github.com/darklynx/request-baskets) - A web service to collect arbitrary HTTP requests and inspect them via REST API or simple web UI, similar to [RequestBin](http://requestb.in/) service -* [Go Report Card](https://goreportcard.com/) - Go code quality report cards as a (free and open source) service. -* [Boltdb Boilerplate](https://github.com/bobintornado/boltdb-boilerplate) - Boilerplate wrapper around bolt aiming to make simple calls one-liners. -* [lru](https://github.com/crowdriff/lru) - Easy to use Bolt-backed Least-Recently-Used (LRU) read-through cache with chainable remote stores. -* [Storm](https://github.com/asdine/storm) - Simple and powerful ORM for BoltDB. -* [GoWebApp](https://github.com/josephspurrier/gowebapp) - A basic MVC web application in Go using BoltDB. -* [SimpleBolt](https://github.com/xyproto/simplebolt) - A simple way to use BoltDB. Deals mainly with strings. -* [Algernon](https://github.com/xyproto/algernon) - A HTTP/2 web server with built-in support for Lua. Uses BoltDB as the default database backend. -* [MuLiFS](https://github.com/dankomiocevic/mulifs) - Music Library Filesystem creates a filesystem to organise your music files. -* [GoShort](https://github.com/pankajkhairnar/goShort) - GoShort is a URL shortener written in Golang and BoltDB for persistent key/value storage and for routing it's using high performent HTTPRouter. -* [torrent](https://github.com/anacrolix/torrent) - Full-featured BitTorrent client package and utilities in Go. BoltDB is a storage backend in development. 
-* [gopherpit](https://github.com/gopherpit/gopherpit) - A web service to manage Go remote import paths with custom domains -* [bolter](https://github.com/hasit/bolter) - Command-line app for viewing BoltDB file in your terminal. -* [btcwallet](https://github.com/btcsuite/btcwallet) - A bitcoin wallet. -* [dcrwallet](https://github.com/decred/dcrwallet) - A wallet for the Decred cryptocurrency. -* [Ironsmith](https://github.com/timshannon/ironsmith) - A simple, script-driven continuous integration (build - > test -> release) tool, with no external dependencies -* [BoltHold](https://github.com/timshannon/bolthold) - An embeddable NoSQL store for Go types built on BoltDB -* [Ponzu CMS](https://ponzu-cms.org) - Headless CMS + automatic JSON API with auto-HTTPS, HTTP/2 Server Push, and flexible server framework. - -If you are using Bolt in a project please send a pull request to add it to the list. diff --git a/vendor/github.com/boltdb/bolt/appveyor.yml b/vendor/github.com/boltdb/bolt/appveyor.yml deleted file mode 100644 index 6e26e94..0000000 --- a/vendor/github.com/boltdb/bolt/appveyor.yml +++ /dev/null @@ -1,18 +0,0 @@ -version: "{build}" - -os: Windows Server 2012 R2 - -clone_folder: c:\gopath\src\github.com\boltdb\bolt - -environment: - GOPATH: c:\gopath - -install: - - echo %PATH% - - echo %GOPATH% - - go version - - go env - - go get -v -t ./... - -build_script: - - go test -v ./... diff --git a/vendor/github.com/boltdb/bolt/bolt_386.go b/vendor/github.com/boltdb/bolt/bolt_386.go deleted file mode 100644 index 820d533..0000000 --- a/vendor/github.com/boltdb/bolt/bolt_386.go +++ /dev/null @@ -1,10 +0,0 @@ -package bolt - -// maxMapSize represents the largest mmap size supported by Bolt. -const maxMapSize = 0x7FFFFFFF // 2GB - -// maxAllocSize is the size used when creating array pointers. -const maxAllocSize = 0xFFFFFFF - -// Are unaligned load/stores broken on this arch? 
-var brokenUnaligned = false diff --git a/vendor/github.com/boltdb/bolt/bolt_amd64.go b/vendor/github.com/boltdb/bolt/bolt_amd64.go deleted file mode 100644 index 98fafdb..0000000 --- a/vendor/github.com/boltdb/bolt/bolt_amd64.go +++ /dev/null @@ -1,10 +0,0 @@ -package bolt - -// maxMapSize represents the largest mmap size supported by Bolt. -const maxMapSize = 0xFFFFFFFFFFFF // 256TB - -// maxAllocSize is the size used when creating array pointers. -const maxAllocSize = 0x7FFFFFFF - -// Are unaligned load/stores broken on this arch? -var brokenUnaligned = false diff --git a/vendor/github.com/boltdb/bolt/bolt_arm.go b/vendor/github.com/boltdb/bolt/bolt_arm.go deleted file mode 100644 index 7e5cb4b..0000000 --- a/vendor/github.com/boltdb/bolt/bolt_arm.go +++ /dev/null @@ -1,28 +0,0 @@ -package bolt - -import "unsafe" - -// maxMapSize represents the largest mmap size supported by Bolt. -const maxMapSize = 0x7FFFFFFF // 2GB - -// maxAllocSize is the size used when creating array pointers. -const maxAllocSize = 0xFFFFFFF - -// Are unaligned load/stores broken on this arch? -var brokenUnaligned bool - -func init() { - // Simple check to see whether this arch handles unaligned load/stores - // correctly. - - // ARM9 and older devices require load/stores to be from/to aligned - // addresses. If not, the lower 2 bits are cleared and that address is - // read in a jumbled up order. - - // See http://infocenter.arm.com/help/index.jsp?topic=/com.arm.doc.faqs/ka15414.html - - raw := [6]byte{0xfe, 0xef, 0x11, 0x22, 0x22, 0x11} - val := *(*uint32)(unsafe.Pointer(uintptr(unsafe.Pointer(&raw)) + 2)) - - brokenUnaligned = val != 0x11222211 -} diff --git a/vendor/github.com/boltdb/bolt/bolt_arm64.go b/vendor/github.com/boltdb/bolt/bolt_arm64.go deleted file mode 100644 index b26d84f..0000000 --- a/vendor/github.com/boltdb/bolt/bolt_arm64.go +++ /dev/null @@ -1,12 +0,0 @@ -// +build arm64 - -package bolt - -// maxMapSize represents the largest mmap size supported by Bolt. 
-const maxMapSize = 0xFFFFFFFFFFFF // 256TB - -// maxAllocSize is the size used when creating array pointers. -const maxAllocSize = 0x7FFFFFFF - -// Are unaligned load/stores broken on this arch? -var brokenUnaligned = false diff --git a/vendor/github.com/boltdb/bolt/bolt_linux.go b/vendor/github.com/boltdb/bolt/bolt_linux.go deleted file mode 100644 index 2b67666..0000000 --- a/vendor/github.com/boltdb/bolt/bolt_linux.go +++ /dev/null @@ -1,10 +0,0 @@ -package bolt - -import ( - "syscall" -) - -// fdatasync flushes written data to a file descriptor. -func fdatasync(db *DB) error { - return syscall.Fdatasync(int(db.file.Fd())) -} diff --git a/vendor/github.com/boltdb/bolt/bolt_openbsd.go b/vendor/github.com/boltdb/bolt/bolt_openbsd.go deleted file mode 100644 index 7058c3d..0000000 --- a/vendor/github.com/boltdb/bolt/bolt_openbsd.go +++ /dev/null @@ -1,27 +0,0 @@ -package bolt - -import ( - "syscall" - "unsafe" -) - -const ( - msAsync = 1 << iota // perform asynchronous writes - msSync // perform synchronous writes - msInvalidate // invalidate cached data -) - -func msync(db *DB) error { - _, _, errno := syscall.Syscall(syscall.SYS_MSYNC, uintptr(unsafe.Pointer(db.data)), uintptr(db.datasz), msInvalidate) - if errno != 0 { - return errno - } - return nil -} - -func fdatasync(db *DB) error { - if db.data != nil { - return msync(db) - } - return db.file.Sync() -} diff --git a/vendor/github.com/boltdb/bolt/bolt_ppc.go b/vendor/github.com/boltdb/bolt/bolt_ppc.go deleted file mode 100644 index 645ddc3..0000000 --- a/vendor/github.com/boltdb/bolt/bolt_ppc.go +++ /dev/null @@ -1,9 +0,0 @@ -// +build ppc - -package bolt - -// maxMapSize represents the largest mmap size supported by Bolt. -const maxMapSize = 0x7FFFFFFF // 2GB - -// maxAllocSize is the size used when creating array pointers. 
-const maxAllocSize = 0xFFFFFFF diff --git a/vendor/github.com/boltdb/bolt/bolt_ppc64.go b/vendor/github.com/boltdb/bolt/bolt_ppc64.go deleted file mode 100644 index 9331d97..0000000 --- a/vendor/github.com/boltdb/bolt/bolt_ppc64.go +++ /dev/null @@ -1,12 +0,0 @@ -// +build ppc64 - -package bolt - -// maxMapSize represents the largest mmap size supported by Bolt. -const maxMapSize = 0xFFFFFFFFFFFF // 256TB - -// maxAllocSize is the size used when creating array pointers. -const maxAllocSize = 0x7FFFFFFF - -// Are unaligned load/stores broken on this arch? -var brokenUnaligned = false diff --git a/vendor/github.com/boltdb/bolt/bolt_ppc64le.go b/vendor/github.com/boltdb/bolt/bolt_ppc64le.go deleted file mode 100644 index 8c143bc..0000000 --- a/vendor/github.com/boltdb/bolt/bolt_ppc64le.go +++ /dev/null @@ -1,12 +0,0 @@ -// +build ppc64le - -package bolt - -// maxMapSize represents the largest mmap size supported by Bolt. -const maxMapSize = 0xFFFFFFFFFFFF // 256TB - -// maxAllocSize is the size used when creating array pointers. -const maxAllocSize = 0x7FFFFFFF - -// Are unaligned load/stores broken on this arch? -var brokenUnaligned = false diff --git a/vendor/github.com/boltdb/bolt/bolt_s390x.go b/vendor/github.com/boltdb/bolt/bolt_s390x.go deleted file mode 100644 index d7c39af..0000000 --- a/vendor/github.com/boltdb/bolt/bolt_s390x.go +++ /dev/null @@ -1,12 +0,0 @@ -// +build s390x - -package bolt - -// maxMapSize represents the largest mmap size supported by Bolt. -const maxMapSize = 0xFFFFFFFFFFFF // 256TB - -// maxAllocSize is the size used when creating array pointers. -const maxAllocSize = 0x7FFFFFFF - -// Are unaligned load/stores broken on this arch? 
-var brokenUnaligned = false diff --git a/vendor/github.com/boltdb/bolt/bolt_unix.go b/vendor/github.com/boltdb/bolt/bolt_unix.go deleted file mode 100644 index cad62dd..0000000 --- a/vendor/github.com/boltdb/bolt/bolt_unix.go +++ /dev/null @@ -1,89 +0,0 @@ -// +build !windows,!plan9,!solaris - -package bolt - -import ( - "fmt" - "os" - "syscall" - "time" - "unsafe" -) - -// flock acquires an advisory lock on a file descriptor. -func flock(db *DB, mode os.FileMode, exclusive bool, timeout time.Duration) error { - var t time.Time - for { - // If we're beyond our timeout then return an error. - // This can only occur after we've attempted a flock once. - if t.IsZero() { - t = time.Now() - } else if timeout > 0 && time.Since(t) > timeout { - return ErrTimeout - } - flag := syscall.LOCK_SH - if exclusive { - flag = syscall.LOCK_EX - } - - // Otherwise attempt to obtain an exclusive lock. - err := syscall.Flock(int(db.file.Fd()), flag|syscall.LOCK_NB) - if err == nil { - return nil - } else if err != syscall.EWOULDBLOCK { - return err - } - - // Wait for a bit and try again. - time.Sleep(50 * time.Millisecond) - } -} - -// funlock releases an advisory lock on a file descriptor. -func funlock(db *DB) error { - return syscall.Flock(int(db.file.Fd()), syscall.LOCK_UN) -} - -// mmap memory maps a DB's data file. -func mmap(db *DB, sz int) error { - // Map the data file to memory. - b, err := syscall.Mmap(int(db.file.Fd()), 0, sz, syscall.PROT_READ, syscall.MAP_SHARED|db.MmapFlags) - if err != nil { - return err - } - - // Advise the kernel that the mmap is accessed randomly. - if err := madvise(b, syscall.MADV_RANDOM); err != nil { - return fmt.Errorf("madvise: %s", err) - } - - // Save the original byte slice and convert to a byte array pointer. - db.dataref = b - db.data = (*[maxMapSize]byte)(unsafe.Pointer(&b[0])) - db.datasz = sz - return nil -} - -// munmap unmaps a DB's data file from memory. 
-func munmap(db *DB) error { - // Ignore the unmap if we have no mapped data. - if db.dataref == nil { - return nil - } - - // Unmap using the original byte slice. - err := syscall.Munmap(db.dataref) - db.dataref = nil - db.data = nil - db.datasz = 0 - return err -} - -// NOTE: This function is copied from stdlib because it is not available on darwin. -func madvise(b []byte, advice int) (err error) { - _, _, e1 := syscall.Syscall(syscall.SYS_MADVISE, uintptr(unsafe.Pointer(&b[0])), uintptr(len(b)), uintptr(advice)) - if e1 != 0 { - err = e1 - } - return -} diff --git a/vendor/github.com/boltdb/bolt/bolt_unix_solaris.go b/vendor/github.com/boltdb/bolt/bolt_unix_solaris.go deleted file mode 100644 index 307bf2b..0000000 --- a/vendor/github.com/boltdb/bolt/bolt_unix_solaris.go +++ /dev/null @@ -1,90 +0,0 @@ -package bolt - -import ( - "fmt" - "os" - "syscall" - "time" - "unsafe" - - "golang.org/x/sys/unix" -) - -// flock acquires an advisory lock on a file descriptor. -func flock(db *DB, mode os.FileMode, exclusive bool, timeout time.Duration) error { - var t time.Time - for { - // If we're beyond our timeout then return an error. - // This can only occur after we've attempted a flock once. - if t.IsZero() { - t = time.Now() - } else if timeout > 0 && time.Since(t) > timeout { - return ErrTimeout - } - var lock syscall.Flock_t - lock.Start = 0 - lock.Len = 0 - lock.Pid = 0 - lock.Whence = 0 - lock.Pid = 0 - if exclusive { - lock.Type = syscall.F_WRLCK - } else { - lock.Type = syscall.F_RDLCK - } - err := syscall.FcntlFlock(db.file.Fd(), syscall.F_SETLK, &lock) - if err == nil { - return nil - } else if err != syscall.EAGAIN { - return err - } - - // Wait for a bit and try again. - time.Sleep(50 * time.Millisecond) - } -} - -// funlock releases an advisory lock on a file descriptor. 
-func funlock(db *DB) error { - var lock syscall.Flock_t - lock.Start = 0 - lock.Len = 0 - lock.Type = syscall.F_UNLCK - lock.Whence = 0 - return syscall.FcntlFlock(uintptr(db.file.Fd()), syscall.F_SETLK, &lock) -} - -// mmap memory maps a DB's data file. -func mmap(db *DB, sz int) error { - // Map the data file to memory. - b, err := unix.Mmap(int(db.file.Fd()), 0, sz, syscall.PROT_READ, syscall.MAP_SHARED|db.MmapFlags) - if err != nil { - return err - } - - // Advise the kernel that the mmap is accessed randomly. - if err := unix.Madvise(b, syscall.MADV_RANDOM); err != nil { - return fmt.Errorf("madvise: %s", err) - } - - // Save the original byte slice and convert to a byte array pointer. - db.dataref = b - db.data = (*[maxMapSize]byte)(unsafe.Pointer(&b[0])) - db.datasz = sz - return nil -} - -// munmap unmaps a DB's data file from memory. -func munmap(db *DB) error { - // Ignore the unmap if we have no mapped data. - if db.dataref == nil { - return nil - } - - // Unmap using the original byte slice. 
- err := unix.Munmap(db.dataref) - db.dataref = nil - db.data = nil - db.datasz = 0 - return err -} diff --git a/vendor/github.com/boltdb/bolt/bolt_windows.go b/vendor/github.com/boltdb/bolt/bolt_windows.go deleted file mode 100644 index b00fb07..0000000 --- a/vendor/github.com/boltdb/bolt/bolt_windows.go +++ /dev/null @@ -1,144 +0,0 @@ -package bolt - -import ( - "fmt" - "os" - "syscall" - "time" - "unsafe" -) - -// LockFileEx code derived from golang build filemutex_windows.go @ v1.5.1 -var ( - modkernel32 = syscall.NewLazyDLL("kernel32.dll") - procLockFileEx = modkernel32.NewProc("LockFileEx") - procUnlockFileEx = modkernel32.NewProc("UnlockFileEx") -) - -const ( - lockExt = ".lock" - - // see https://msdn.microsoft.com/en-us/library/windows/desktop/aa365203(v=vs.85).aspx - flagLockExclusive = 2 - flagLockFailImmediately = 1 - - // see https://msdn.microsoft.com/en-us/library/windows/desktop/ms681382(v=vs.85).aspx - errLockViolation syscall.Errno = 0x21 -) - -func lockFileEx(h syscall.Handle, flags, reserved, locklow, lockhigh uint32, ol *syscall.Overlapped) (err error) { - r, _, err := procLockFileEx.Call(uintptr(h), uintptr(flags), uintptr(reserved), uintptr(locklow), uintptr(lockhigh), uintptr(unsafe.Pointer(ol))) - if r == 0 { - return err - } - return nil -} - -func unlockFileEx(h syscall.Handle, reserved, locklow, lockhigh uint32, ol *syscall.Overlapped) (err error) { - r, _, err := procUnlockFileEx.Call(uintptr(h), uintptr(reserved), uintptr(locklow), uintptr(lockhigh), uintptr(unsafe.Pointer(ol)), 0) - if r == 0 { - return err - } - return nil -} - -// fdatasync flushes written data to a file descriptor. -func fdatasync(db *DB) error { - return db.file.Sync() -} - -// flock acquires an advisory lock on a file descriptor. -func flock(db *DB, mode os.FileMode, exclusive bool, timeout time.Duration) error { - // Create a separate lock file on windows because a process - // cannot share an exclusive lock on the same file. 
This is - // needed during Tx.WriteTo(). - f, err := os.OpenFile(db.path+lockExt, os.O_CREATE, mode) - if err != nil { - return err - } - db.lockfile = f - - var t time.Time - for { - // If we're beyond our timeout then return an error. - // This can only occur after we've attempted a flock once. - if t.IsZero() { - t = time.Now() - } else if timeout > 0 && time.Since(t) > timeout { - return ErrTimeout - } - - var flag uint32 = flagLockFailImmediately - if exclusive { - flag |= flagLockExclusive - } - - err := lockFileEx(syscall.Handle(db.lockfile.Fd()), flag, 0, 1, 0, &syscall.Overlapped{}) - if err == nil { - return nil - } else if err != errLockViolation { - return err - } - - // Wait for a bit and try again. - time.Sleep(50 * time.Millisecond) - } -} - -// funlock releases an advisory lock on a file descriptor. -func funlock(db *DB) error { - err := unlockFileEx(syscall.Handle(db.lockfile.Fd()), 0, 1, 0, &syscall.Overlapped{}) - db.lockfile.Close() - os.Remove(db.path + lockExt) - return err -} - -// mmap memory maps a DB's data file. -// Based on: https://github.com/edsrzf/mmap-go -func mmap(db *DB, sz int) error { - if !db.readOnly { - // Truncate the database to the size of the mmap. - if err := db.file.Truncate(int64(sz)); err != nil { - return fmt.Errorf("truncate: %s", err) - } - } - - // Open a file mapping handle. - sizelo := uint32(sz >> 32) - sizehi := uint32(sz) & 0xffffffff - h, errno := syscall.CreateFileMapping(syscall.Handle(db.file.Fd()), nil, syscall.PAGE_READONLY, sizelo, sizehi, nil) - if h == 0 { - return os.NewSyscallError("CreateFileMapping", errno) - } - - // Create the memory map. - addr, errno := syscall.MapViewOfFile(h, syscall.FILE_MAP_READ, 0, 0, uintptr(sz)) - if addr == 0 { - return os.NewSyscallError("MapViewOfFile", errno) - } - - // Close mapping handle. - if err := syscall.CloseHandle(syscall.Handle(h)); err != nil { - return os.NewSyscallError("CloseHandle", err) - } - - // Convert to a byte array. 
- db.data = ((*[maxMapSize]byte)(unsafe.Pointer(addr))) - db.datasz = sz - - return nil -} - -// munmap unmaps a pointer from a file. -// Based on: https://github.com/edsrzf/mmap-go -func munmap(db *DB) error { - if db.data == nil { - return nil - } - - addr := (uintptr)(unsafe.Pointer(&db.data[0])) - if err := syscall.UnmapViewOfFile(addr); err != nil { - return os.NewSyscallError("UnmapViewOfFile", err) - } - return nil -} diff --git a/vendor/github.com/boltdb/bolt/boltsync_unix.go b/vendor/github.com/boltdb/bolt/boltsync_unix.go deleted file mode 100644 index f504425..0000000 --- a/vendor/github.com/boltdb/bolt/boltsync_unix.go +++ /dev/null @@ -1,8 +0,0 @@ -// +build !windows,!plan9,!linux,!openbsd - -package bolt - -// fdatasync flushes written data to a file descriptor. -func fdatasync(db *DB) error { - return db.file.Sync() -} diff --git a/vendor/github.com/boltdb/bolt/bucket.go b/vendor/github.com/boltdb/bolt/bucket.go deleted file mode 100644 index 0c5bf27..0000000 --- a/vendor/github.com/boltdb/bolt/bucket.go +++ /dev/null @@ -1,777 +0,0 @@ -package bolt - -import ( - "bytes" - "fmt" - "unsafe" -) - -const ( - // MaxKeySize is the maximum length of a key, in bytes. - MaxKeySize = 32768 - - // MaxValueSize is the maximum length of a value, in bytes. - MaxValueSize = (1 << 31) - 2 -) - -const ( - maxUint = ^uint(0) - minUint = 0 - maxInt = int(^uint(0) >> 1) - minInt = -maxInt - 1 -) - -const bucketHeaderSize = int(unsafe.Sizeof(bucket{})) - -const ( - minFillPercent = 0.1 - maxFillPercent = 1.0 -) - -// DefaultFillPercent is the percentage that split pages are filled. -// This value can be changed by setting Bucket.FillPercent. -const DefaultFillPercent = 0.5 - -// Bucket represents a collection of key/value pairs inside the database. -type Bucket struct { - *bucket - tx *Tx // the associated transaction - buckets map[string]*Bucket // subbucket cache - page *page // inline page reference - rootNode *node // materialized node for the root page. 
- nodes map[pgid]*node // node cache - - // Sets the threshold for filling nodes when they split. By default, - // the bucket will fill to 50% but it can be useful to increase this - // amount if you know that your write workloads are mostly append-only. - // - // This is non-persisted across transactions so it must be set in every Tx. - FillPercent float64 -} - -// bucket represents the on-file representation of a bucket. -// This is stored as the "value" of a bucket key. If the bucket is small enough, -// then its root page can be stored inline in the "value", after the bucket -// header. In the case of inline buckets, the "root" will be 0. -type bucket struct { - root pgid // page id of the bucket's root-level page - sequence uint64 // monotonically incrementing, used by NextSequence() -} - -// newBucket returns a new bucket associated with a transaction. -func newBucket(tx *Tx) Bucket { - var b = Bucket{tx: tx, FillPercent: DefaultFillPercent} - if tx.writable { - b.buckets = make(map[string]*Bucket) - b.nodes = make(map[pgid]*node) - } - return b -} - -// Tx returns the tx of the bucket. -func (b *Bucket) Tx() *Tx { - return b.tx -} - -// Root returns the root of the bucket. -func (b *Bucket) Root() pgid { - return b.root -} - -// Writable returns whether the bucket is writable. -func (b *Bucket) Writable() bool { - return b.tx.writable -} - -// Cursor creates a cursor associated with the bucket. -// The cursor is only valid as long as the transaction is open. -// Do not use a cursor after the transaction is closed. -func (b *Bucket) Cursor() *Cursor { - // Update transaction statistics. - b.tx.stats.CursorCount++ - - // Allocate and return a cursor. - return &Cursor{ - bucket: b, - stack: make([]elemRef, 0), - } -} - -// Bucket retrieves a nested bucket by name. -// Returns nil if the bucket does not exist. -// The bucket instance is only valid for the lifetime of the transaction. 
-func (b *Bucket) Bucket(name []byte) *Bucket { - if b.buckets != nil { - if child := b.buckets[string(name)]; child != nil { - return child - } - } - - // Move cursor to key. - c := b.Cursor() - k, v, flags := c.seek(name) - - // Return nil if the key doesn't exist or it is not a bucket. - if !bytes.Equal(name, k) || (flags&bucketLeafFlag) == 0 { - return nil - } - - // Otherwise create a bucket and cache it. - var child = b.openBucket(v) - if b.buckets != nil { - b.buckets[string(name)] = child - } - - return child -} - -// Helper method that re-interprets a sub-bucket value -// from a parent into a Bucket -func (b *Bucket) openBucket(value []byte) *Bucket { - var child = newBucket(b.tx) - - // If unaligned load/stores are broken on this arch and value is - // unaligned simply clone to an aligned byte array. - unaligned := brokenUnaligned && uintptr(unsafe.Pointer(&value[0]))&3 != 0 - - if unaligned { - value = cloneBytes(value) - } - - // If this is a writable transaction then we need to copy the bucket entry. - // Read-only transactions can point directly at the mmap entry. - if b.tx.writable && !unaligned { - child.bucket = &bucket{} - *child.bucket = *(*bucket)(unsafe.Pointer(&value[0])) - } else { - child.bucket = (*bucket)(unsafe.Pointer(&value[0])) - } - - // Save a reference to the inline page if the bucket is inline. - if child.root == 0 { - child.page = (*page)(unsafe.Pointer(&value[bucketHeaderSize])) - } - - return &child -} - -// CreateBucket creates a new bucket at the given key and returns the new bucket. -// Returns an error if the key already exists, if the bucket name is blank, or if the bucket name is too long. -// The bucket instance is only valid for the lifetime of the transaction. 
-func (b *Bucket) CreateBucket(key []byte) (*Bucket, error) { - if b.tx.db == nil { - return nil, ErrTxClosed - } else if !b.tx.writable { - return nil, ErrTxNotWritable - } else if len(key) == 0 { - return nil, ErrBucketNameRequired - } - - // Move cursor to correct position. - c := b.Cursor() - k, _, flags := c.seek(key) - - // Return an error if there is an existing key. - if bytes.Equal(key, k) { - if (flags & bucketLeafFlag) != 0 { - return nil, ErrBucketExists - } - return nil, ErrIncompatibleValue - } - - // Create empty, inline bucket. - var bucket = Bucket{ - bucket: &bucket{}, - rootNode: &node{isLeaf: true}, - FillPercent: DefaultFillPercent, - } - var value = bucket.write() - - // Insert into node. - key = cloneBytes(key) - c.node().put(key, key, value, 0, bucketLeafFlag) - - // Since subbuckets are not allowed on inline buckets, we need to - // dereference the inline page, if it exists. This will cause the bucket - // to be treated as a regular, non-inline bucket for the rest of the tx. - b.page = nil - - return b.Bucket(key), nil -} - -// CreateBucketIfNotExists creates a new bucket if it doesn't already exist and returns a reference to it. -// Returns an error if the bucket name is blank, or if the bucket name is too long. -// The bucket instance is only valid for the lifetime of the transaction. -func (b *Bucket) CreateBucketIfNotExists(key []byte) (*Bucket, error) { - child, err := b.CreateBucket(key) - if err == ErrBucketExists { - return b.Bucket(key), nil - } else if err != nil { - return nil, err - } - return child, nil -} - -// DeleteBucket deletes a bucket at the given key. -// Returns an error if the bucket does not exists, or if the key represents a non-bucket value. -func (b *Bucket) DeleteBucket(key []byte) error { - if b.tx.db == nil { - return ErrTxClosed - } else if !b.Writable() { - return ErrTxNotWritable - } - - // Move cursor to correct position. 
- c := b.Cursor() - k, _, flags := c.seek(key) - - // Return an error if bucket doesn't exist or is not a bucket. - if !bytes.Equal(key, k) { - return ErrBucketNotFound - } else if (flags & bucketLeafFlag) == 0 { - return ErrIncompatibleValue - } - - // Recursively delete all child buckets. - child := b.Bucket(key) - err := child.ForEach(func(k, v []byte) error { - if v == nil { - if err := child.DeleteBucket(k); err != nil { - return fmt.Errorf("delete bucket: %s", err) - } - } - return nil - }) - if err != nil { - return err - } - - // Remove cached copy. - delete(b.buckets, string(key)) - - // Release all bucket pages to freelist. - child.nodes = nil - child.rootNode = nil - child.free() - - // Delete the node if we have a matching key. - c.node().del(key) - - return nil -} - -// Get retrieves the value for a key in the bucket. -// Returns a nil value if the key does not exist or if the key is a nested bucket. -// The returned value is only valid for the life of the transaction. -func (b *Bucket) Get(key []byte) []byte { - k, v, flags := b.Cursor().seek(key) - - // Return nil if this is a bucket. - if (flags & bucketLeafFlag) != 0 { - return nil - } - - // If our target node isn't the same key as what's passed in then return nil. - if !bytes.Equal(key, k) { - return nil - } - return v -} - -// Put sets the value for a key in the bucket. -// If the key exist then its previous value will be overwritten. -// Supplied value must remain valid for the life of the transaction. -// Returns an error if the bucket was created from a read-only transaction, if the key is blank, if the key is too large, or if the value is too large. 
-func (b *Bucket) Put(key []byte, value []byte) error { - if b.tx.db == nil { - return ErrTxClosed - } else if !b.Writable() { - return ErrTxNotWritable - } else if len(key) == 0 { - return ErrKeyRequired - } else if len(key) > MaxKeySize { - return ErrKeyTooLarge - } else if int64(len(value)) > MaxValueSize { - return ErrValueTooLarge - } - - // Move cursor to correct position. - c := b.Cursor() - k, _, flags := c.seek(key) - - // Return an error if there is an existing key with a bucket value. - if bytes.Equal(key, k) && (flags&bucketLeafFlag) != 0 { - return ErrIncompatibleValue - } - - // Insert into node. - key = cloneBytes(key) - c.node().put(key, key, value, 0, 0) - - return nil -} - -// Delete removes a key from the bucket. -// If the key does not exist then nothing is done and a nil error is returned. -// Returns an error if the bucket was created from a read-only transaction. -func (b *Bucket) Delete(key []byte) error { - if b.tx.db == nil { - return ErrTxClosed - } else if !b.Writable() { - return ErrTxNotWritable - } - - // Move cursor to correct position. - c := b.Cursor() - _, _, flags := c.seek(key) - - // Return an error if there is already existing bucket value. - if (flags & bucketLeafFlag) != 0 { - return ErrIncompatibleValue - } - - // Delete the node if we have a matching key. - c.node().del(key) - - return nil -} - -// Sequence returns the current integer for the bucket without incrementing it. -func (b *Bucket) Sequence() uint64 { return b.bucket.sequence } - -// SetSequence updates the sequence number for the bucket. -func (b *Bucket) SetSequence(v uint64) error { - if b.tx.db == nil { - return ErrTxClosed - } else if !b.Writable() { - return ErrTxNotWritable - } - - // Materialize the root node if it hasn't been already so that the - // bucket will be saved during commit. - if b.rootNode == nil { - _ = b.node(b.root, nil) - } - - // Increment and return the sequence. 
- b.bucket.sequence = v - return nil -} - -// NextSequence returns an autoincrementing integer for the bucket. -func (b *Bucket) NextSequence() (uint64, error) { - if b.tx.db == nil { - return 0, ErrTxClosed - } else if !b.Writable() { - return 0, ErrTxNotWritable - } - - // Materialize the root node if it hasn't been already so that the - // bucket will be saved during commit. - if b.rootNode == nil { - _ = b.node(b.root, nil) - } - - // Increment and return the sequence. - b.bucket.sequence++ - return b.bucket.sequence, nil -} - -// ForEach executes a function for each key/value pair in a bucket. -// If the provided function returns an error then the iteration is stopped and -// the error is returned to the caller. The provided function must not modify -// the bucket; this will result in undefined behavior. -func (b *Bucket) ForEach(fn func(k, v []byte) error) error { - if b.tx.db == nil { - return ErrTxClosed - } - c := b.Cursor() - for k, v := c.First(); k != nil; k, v = c.Next() { - if err := fn(k, v); err != nil { - return err - } - } - return nil -} - -// Stat returns stats on a bucket. -func (b *Bucket) Stats() BucketStats { - var s, subStats BucketStats - pageSize := b.tx.db.pageSize - s.BucketN += 1 - if b.root == 0 { - s.InlineBucketN += 1 - } - b.forEachPage(func(p *page, depth int) { - if (p.flags & leafPageFlag) != 0 { - s.KeyN += int(p.count) - - // used totals the used bytes for the page - used := pageHeaderSize - - if p.count != 0 { - // If page has any elements, add all element headers. - used += leafPageElementSize * int(p.count-1) - - // Add all element key, value sizes. - // The computation takes advantage of the fact that the position - // of the last element's key/value equals to the total of the sizes - // of all previous elements' keys and values. - // It also includes the last element's header. 
- lastElement := p.leafPageElement(p.count - 1) - used += int(lastElement.pos + lastElement.ksize + lastElement.vsize) - } - - if b.root == 0 { - // For inlined bucket just update the inline stats - s.InlineBucketInuse += used - } else { - // For non-inlined bucket update all the leaf stats - s.LeafPageN++ - s.LeafInuse += used - s.LeafOverflowN += int(p.overflow) - - // Collect stats from sub-buckets. - // Do that by iterating over all element headers - // looking for the ones with the bucketLeafFlag. - for i := uint16(0); i < p.count; i++ { - e := p.leafPageElement(i) - if (e.flags & bucketLeafFlag) != 0 { - // For any bucket element, open the element value - // and recursively call Stats on the contained bucket. - subStats.Add(b.openBucket(e.value()).Stats()) - } - } - } - } else if (p.flags & branchPageFlag) != 0 { - s.BranchPageN++ - lastElement := p.branchPageElement(p.count - 1) - - // used totals the used bytes for the page - // Add header and all element headers. - used := pageHeaderSize + (branchPageElementSize * int(p.count-1)) - - // Add size of all keys and values. - // Again, use the fact that last element's position equals to - // the total of key, value sizes of all previous elements. - used += int(lastElement.pos + lastElement.ksize) - s.BranchInuse += used - s.BranchOverflowN += int(p.overflow) - } - - // Keep track of maximum page depth. - if depth+1 > s.Depth { - s.Depth = (depth + 1) - } - }) - - // Alloc stats can be computed from page counts and pageSize. - s.BranchAlloc = (s.BranchPageN + s.BranchOverflowN) * pageSize - s.LeafAlloc = (s.LeafPageN + s.LeafOverflowN) * pageSize - - // Add the max depth of sub-buckets to get total nested depth. - s.Depth += subStats.Depth - // Add the stats for all sub-buckets - s.Add(subStats) - return s -} - -// forEachPage iterates over every page in a bucket, including inline pages. -func (b *Bucket) forEachPage(fn func(*page, int)) { - // If we have an inline page then just use that. 
- if b.page != nil { - fn(b.page, 0) - return - } - - // Otherwise traverse the page hierarchy. - b.tx.forEachPage(b.root, 0, fn) -} - -// forEachPageNode iterates over every page (or node) in a bucket. -// This also includes inline pages. -func (b *Bucket) forEachPageNode(fn func(*page, *node, int)) { - // If we have an inline page or root node then just use that. - if b.page != nil { - fn(b.page, nil, 0) - return - } - b._forEachPageNode(b.root, 0, fn) -} - -func (b *Bucket) _forEachPageNode(pgid pgid, depth int, fn func(*page, *node, int)) { - var p, n = b.pageNode(pgid) - - // Execute function. - fn(p, n, depth) - - // Recursively loop over children. - if p != nil { - if (p.flags & branchPageFlag) != 0 { - for i := 0; i < int(p.count); i++ { - elem := p.branchPageElement(uint16(i)) - b._forEachPageNode(elem.pgid, depth+1, fn) - } - } - } else { - if !n.isLeaf { - for _, inode := range n.inodes { - b._forEachPageNode(inode.pgid, depth+1, fn) - } - } - } -} - -// spill writes all the nodes for this bucket to dirty pages. -func (b *Bucket) spill() error { - // Spill all child buckets first. - for name, child := range b.buckets { - // If the child bucket is small enough and it has no child buckets then - // write it inline into the parent bucket's page. Otherwise spill it - // like a normal bucket and make the parent value a pointer to the page. - var value []byte - if child.inlineable() { - child.free() - value = child.write() - } else { - if err := child.spill(); err != nil { - return err - } - - // Update the child bucket header in this bucket. - value = make([]byte, unsafe.Sizeof(bucket{})) - var bucket = (*bucket)(unsafe.Pointer(&value[0])) - *bucket = *child.bucket - } - - // Skip writing the bucket if there are no materialized nodes. - if child.rootNode == nil { - continue - } - - // Update parent node. 
- var c = b.Cursor() - k, _, flags := c.seek([]byte(name)) - if !bytes.Equal([]byte(name), k) { - panic(fmt.Sprintf("misplaced bucket header: %x -> %x", []byte(name), k)) - } - if flags&bucketLeafFlag == 0 { - panic(fmt.Sprintf("unexpected bucket header flag: %x", flags)) - } - c.node().put([]byte(name), []byte(name), value, 0, bucketLeafFlag) - } - - // Ignore if there's not a materialized root node. - if b.rootNode == nil { - return nil - } - - // Spill nodes. - if err := b.rootNode.spill(); err != nil { - return err - } - b.rootNode = b.rootNode.root() - - // Update the root node for this bucket. - if b.rootNode.pgid >= b.tx.meta.pgid { - panic(fmt.Sprintf("pgid (%d) above high water mark (%d)", b.rootNode.pgid, b.tx.meta.pgid)) - } - b.root = b.rootNode.pgid - - return nil -} - -// inlineable returns true if a bucket is small enough to be written inline -// and if it contains no subbuckets. Otherwise returns false. -func (b *Bucket) inlineable() bool { - var n = b.rootNode - - // Bucket must only contain a single leaf node. - if n == nil || !n.isLeaf { - return false - } - - // Bucket is not inlineable if it contains subbuckets or if it goes beyond - // our threshold for inline bucket size. - var size = pageHeaderSize - for _, inode := range n.inodes { - size += leafPageElementSize + len(inode.key) + len(inode.value) - - if inode.flags&bucketLeafFlag != 0 { - return false - } else if size > b.maxInlineBucketSize() { - return false - } - } - - return true -} - -// Returns the maximum total size of a bucket to make it a candidate for inlining. -func (b *Bucket) maxInlineBucketSize() int { - return b.tx.db.pageSize / 4 -} - -// write allocates and writes a bucket to a byte slice. -func (b *Bucket) write() []byte { - // Allocate the appropriate size. - var n = b.rootNode - var value = make([]byte, bucketHeaderSize+n.size()) - - // Write a bucket header. 
- var bucket = (*bucket)(unsafe.Pointer(&value[0])) - *bucket = *b.bucket - - // Convert byte slice to a fake page and write the root node. - var p = (*page)(unsafe.Pointer(&value[bucketHeaderSize])) - n.write(p) - - return value -} - -// rebalance attempts to balance all nodes. -func (b *Bucket) rebalance() { - for _, n := range b.nodes { - n.rebalance() - } - for _, child := range b.buckets { - child.rebalance() - } -} - -// node creates a node from a page and associates it with a given parent. -func (b *Bucket) node(pgid pgid, parent *node) *node { - _assert(b.nodes != nil, "nodes map expected") - - // Retrieve node if it's already been created. - if n := b.nodes[pgid]; n != nil { - return n - } - - // Otherwise create a node and cache it. - n := &node{bucket: b, parent: parent} - if parent == nil { - b.rootNode = n - } else { - parent.children = append(parent.children, n) - } - - // Use the inline page if this is an inline bucket. - var p = b.page - if p == nil { - p = b.tx.page(pgid) - } - - // Read the page into the node and cache it. - n.read(p) - b.nodes[pgid] = n - - // Update statistics. - b.tx.stats.NodeCount++ - - return n -} - -// free recursively frees all pages in the bucket. -func (b *Bucket) free() { - if b.root == 0 { - return - } - - var tx = b.tx - b.forEachPageNode(func(p *page, n *node, _ int) { - if p != nil { - tx.db.freelist.free(tx.meta.txid, p) - } else { - n.free() - } - }) - b.root = 0 -} - -// dereference removes all references to the old mmap. -func (b *Bucket) dereference() { - if b.rootNode != nil { - b.rootNode.root().dereference() - } - - for _, child := range b.buckets { - child.dereference() - } -} - -// pageNode returns the in-memory node, if it exists. -// Otherwise returns the underlying page. -func (b *Bucket) pageNode(id pgid) (*page, *node) { - // Inline buckets have a fake page embedded in their value so treat them - // differently. We'll return the rootNode (if available) or the fake page. 
- if b.root == 0 { - if id != 0 { - panic(fmt.Sprintf("inline bucket non-zero page access(2): %d != 0", id)) - } - if b.rootNode != nil { - return nil, b.rootNode - } - return b.page, nil - } - - // Check the node cache for non-inline buckets. - if b.nodes != nil { - if n := b.nodes[id]; n != nil { - return nil, n - } - } - - // Finally lookup the page from the transaction if no node is materialized. - return b.tx.page(id), nil -} - -// BucketStats records statistics about resources used by a bucket. -type BucketStats struct { - // Page count statistics. - BranchPageN int // number of logical branch pages - BranchOverflowN int // number of physical branch overflow pages - LeafPageN int // number of logical leaf pages - LeafOverflowN int // number of physical leaf overflow pages - - // Tree statistics. - KeyN int // number of keys/value pairs - Depth int // number of levels in B+tree - - // Page size utilization. - BranchAlloc int // bytes allocated for physical branch pages - BranchInuse int // bytes actually used for branch data - LeafAlloc int // bytes allocated for physical leaf pages - LeafInuse int // bytes actually used for leaf data - - // Bucket statistics - BucketN int // total number of buckets including the top bucket - InlineBucketN int // total number on inlined buckets - InlineBucketInuse int // bytes used for inlined buckets (also accounted for in LeafInuse) -} - -func (s *BucketStats) Add(other BucketStats) { - s.BranchPageN += other.BranchPageN - s.BranchOverflowN += other.BranchOverflowN - s.LeafPageN += other.LeafPageN - s.LeafOverflowN += other.LeafOverflowN - s.KeyN += other.KeyN - if s.Depth < other.Depth { - s.Depth = other.Depth - } - s.BranchAlloc += other.BranchAlloc - s.BranchInuse += other.BranchInuse - s.LeafAlloc += other.LeafAlloc - s.LeafInuse += other.LeafInuse - - s.BucketN += other.BucketN - s.InlineBucketN += other.InlineBucketN - s.InlineBucketInuse += other.InlineBucketInuse -} - -// cloneBytes returns a copy of a given slice. 
-func cloneBytes(v []byte) []byte { - var clone = make([]byte, len(v)) - copy(clone, v) - return clone -} diff --git a/vendor/github.com/boltdb/bolt/cursor.go b/vendor/github.com/boltdb/bolt/cursor.go deleted file mode 100644 index 1be9f35..0000000 --- a/vendor/github.com/boltdb/bolt/cursor.go +++ /dev/null @@ -1,400 +0,0 @@ -package bolt - -import ( - "bytes" - "fmt" - "sort" -) - -// Cursor represents an iterator that can traverse over all key/value pairs in a bucket in sorted order. -// Cursors see nested buckets with value == nil. -// Cursors can be obtained from a transaction and are valid as long as the transaction is open. -// -// Keys and values returned from the cursor are only valid for the life of the transaction. -// -// Changing data while traversing with a cursor may cause it to be invalidated -// and return unexpected keys and/or values. You must reposition your cursor -// after mutating data. -type Cursor struct { - bucket *Bucket - stack []elemRef -} - -// Bucket returns the bucket that this cursor was created from. -func (c *Cursor) Bucket() *Bucket { - return c.bucket -} - -// First moves the cursor to the first item in the bucket and returns its key and value. -// If the bucket is empty then a nil key and value are returned. -// The returned key and value are only valid for the life of the transaction. -func (c *Cursor) First() (key []byte, value []byte) { - _assert(c.bucket.tx.db != nil, "tx closed") - c.stack = c.stack[:0] - p, n := c.bucket.pageNode(c.bucket.root) - c.stack = append(c.stack, elemRef{page: p, node: n, index: 0}) - c.first() - - // If we land on an empty page then move to the next value. - // https://github.com/boltdb/bolt/issues/450 - if c.stack[len(c.stack)-1].count() == 0 { - c.next() - } - - k, v, flags := c.keyValue() - if (flags & uint32(bucketLeafFlag)) != 0 { - return k, nil - } - return k, v - -} - -// Last moves the cursor to the last item in the bucket and returns its key and value. 
-// If the bucket is empty then a nil key and value are returned. -// The returned key and value are only valid for the life of the transaction. -func (c *Cursor) Last() (key []byte, value []byte) { - _assert(c.bucket.tx.db != nil, "tx closed") - c.stack = c.stack[:0] - p, n := c.bucket.pageNode(c.bucket.root) - ref := elemRef{page: p, node: n} - ref.index = ref.count() - 1 - c.stack = append(c.stack, ref) - c.last() - k, v, flags := c.keyValue() - if (flags & uint32(bucketLeafFlag)) != 0 { - return k, nil - } - return k, v -} - -// Next moves the cursor to the next item in the bucket and returns its key and value. -// If the cursor is at the end of the bucket then a nil key and value are returned. -// The returned key and value are only valid for the life of the transaction. -func (c *Cursor) Next() (key []byte, value []byte) { - _assert(c.bucket.tx.db != nil, "tx closed") - k, v, flags := c.next() - if (flags & uint32(bucketLeafFlag)) != 0 { - return k, nil - } - return k, v -} - -// Prev moves the cursor to the previous item in the bucket and returns its key and value. -// If the cursor is at the beginning of the bucket then a nil key and value are returned. -// The returned key and value are only valid for the life of the transaction. -func (c *Cursor) Prev() (key []byte, value []byte) { - _assert(c.bucket.tx.db != nil, "tx closed") - - // Attempt to move back one element until we're successful. - // Move up the stack as we hit the beginning of each page in our stack. - for i := len(c.stack) - 1; i >= 0; i-- { - elem := &c.stack[i] - if elem.index > 0 { - elem.index-- - break - } - c.stack = c.stack[:i] - } - - // If we've hit the end then return nil. - if len(c.stack) == 0 { - return nil, nil - } - - // Move down the stack to find the last element of the last leaf under this branch. 
- c.last() - k, v, flags := c.keyValue() - if (flags & uint32(bucketLeafFlag)) != 0 { - return k, nil - } - return k, v -} - -// Seek moves the cursor to a given key and returns it. -// If the key does not exist then the next key is used. If no keys -// follow, a nil key is returned. -// The returned key and value are only valid for the life of the transaction. -func (c *Cursor) Seek(seek []byte) (key []byte, value []byte) { - k, v, flags := c.seek(seek) - - // If we ended up after the last element of a page then move to the next one. - if ref := &c.stack[len(c.stack)-1]; ref.index >= ref.count() { - k, v, flags = c.next() - } - - if k == nil { - return nil, nil - } else if (flags & uint32(bucketLeafFlag)) != 0 { - return k, nil - } - return k, v -} - -// Delete removes the current key/value under the cursor from the bucket. -// Delete fails if current key/value is a bucket or if the transaction is not writable. -func (c *Cursor) Delete() error { - if c.bucket.tx.db == nil { - return ErrTxClosed - } else if !c.bucket.Writable() { - return ErrTxNotWritable - } - - key, _, flags := c.keyValue() - // Return an error if current value is a bucket. - if (flags & bucketLeafFlag) != 0 { - return ErrIncompatibleValue - } - c.node().del(key) - - return nil -} - -// seek moves the cursor to a given key and returns it. -// If the key does not exist then the next key is used. -func (c *Cursor) seek(seek []byte) (key []byte, value []byte, flags uint32) { - _assert(c.bucket.tx.db != nil, "tx closed") - - // Start from root page/node and traverse to correct page. - c.stack = c.stack[:0] - c.search(seek, c.bucket.root) - ref := &c.stack[len(c.stack)-1] - - // If the cursor is pointing to the end of page/node then return nil. - if ref.index >= ref.count() { - return nil, nil, 0 - } - - // If this is a bucket then return a nil value. - return c.keyValue() -} - -// first moves the cursor to the first leaf element under the last page in the stack. 
-func (c *Cursor) first() { - for { - // Exit when we hit a leaf page. - var ref = &c.stack[len(c.stack)-1] - if ref.isLeaf() { - break - } - - // Keep adding pages pointing to the first element to the stack. - var pgid pgid - if ref.node != nil { - pgid = ref.node.inodes[ref.index].pgid - } else { - pgid = ref.page.branchPageElement(uint16(ref.index)).pgid - } - p, n := c.bucket.pageNode(pgid) - c.stack = append(c.stack, elemRef{page: p, node: n, index: 0}) - } -} - -// last moves the cursor to the last leaf element under the last page in the stack. -func (c *Cursor) last() { - for { - // Exit when we hit a leaf page. - ref := &c.stack[len(c.stack)-1] - if ref.isLeaf() { - break - } - - // Keep adding pages pointing to the last element in the stack. - var pgid pgid - if ref.node != nil { - pgid = ref.node.inodes[ref.index].pgid - } else { - pgid = ref.page.branchPageElement(uint16(ref.index)).pgid - } - p, n := c.bucket.pageNode(pgid) - - var nextRef = elemRef{page: p, node: n} - nextRef.index = nextRef.count() - 1 - c.stack = append(c.stack, nextRef) - } -} - -// next moves to the next leaf element and returns the key and value. -// If the cursor is at the last leaf element then it stays there and returns nil. -func (c *Cursor) next() (key []byte, value []byte, flags uint32) { - for { - // Attempt to move over one element until we're successful. - // Move up the stack as we hit the end of each page in our stack. - var i int - for i = len(c.stack) - 1; i >= 0; i-- { - elem := &c.stack[i] - if elem.index < elem.count()-1 { - elem.index++ - break - } - } - - // If we've hit the root page then stop and return. This will leave the - // cursor on the last element of the last page. - if i == -1 { - return nil, nil, 0 - } - - // Otherwise start from where we left off in the stack and find the - // first element of the first leaf page. - c.stack = c.stack[:i+1] - c.first() - - // If this is an empty page then restart and move back up the stack. 
- // https://github.com/boltdb/bolt/issues/450 - if c.stack[len(c.stack)-1].count() == 0 { - continue - } - - return c.keyValue() - } -} - -// search recursively performs a binary search against a given page/node until it finds a given key. -func (c *Cursor) search(key []byte, pgid pgid) { - p, n := c.bucket.pageNode(pgid) - if p != nil && (p.flags&(branchPageFlag|leafPageFlag)) == 0 { - panic(fmt.Sprintf("invalid page type: %d: %x", p.id, p.flags)) - } - e := elemRef{page: p, node: n} - c.stack = append(c.stack, e) - - // If we're on a leaf page/node then find the specific node. - if e.isLeaf() { - c.nsearch(key) - return - } - - if n != nil { - c.searchNode(key, n) - return - } - c.searchPage(key, p) -} - -func (c *Cursor) searchNode(key []byte, n *node) { - var exact bool - index := sort.Search(len(n.inodes), func(i int) bool { - // TODO(benbjohnson): Optimize this range search. It's a bit hacky right now. - // sort.Search() finds the lowest index where f() != -1 but we need the highest index. - ret := bytes.Compare(n.inodes[i].key, key) - if ret == 0 { - exact = true - } - return ret != -1 - }) - if !exact && index > 0 { - index-- - } - c.stack[len(c.stack)-1].index = index - - // Recursively search to the next page. - c.search(key, n.inodes[index].pgid) -} - -func (c *Cursor) searchPage(key []byte, p *page) { - // Binary search for the correct range. - inodes := p.branchPageElements() - - var exact bool - index := sort.Search(int(p.count), func(i int) bool { - // TODO(benbjohnson): Optimize this range search. It's a bit hacky right now. - // sort.Search() finds the lowest index where f() != -1 but we need the highest index. - ret := bytes.Compare(inodes[i].key(), key) - if ret == 0 { - exact = true - } - return ret != -1 - }) - if !exact && index > 0 { - index-- - } - c.stack[len(c.stack)-1].index = index - - // Recursively search to the next page. - c.search(key, inodes[index].pgid) -} - -// nsearch searches the leaf node on the top of the stack for a key. 
-func (c *Cursor) nsearch(key []byte) { - e := &c.stack[len(c.stack)-1] - p, n := e.page, e.node - - // If we have a node then search its inodes. - if n != nil { - index := sort.Search(len(n.inodes), func(i int) bool { - return bytes.Compare(n.inodes[i].key, key) != -1 - }) - e.index = index - return - } - - // If we have a page then search its leaf elements. - inodes := p.leafPageElements() - index := sort.Search(int(p.count), func(i int) bool { - return bytes.Compare(inodes[i].key(), key) != -1 - }) - e.index = index -} - -// keyValue returns the key and value of the current leaf element. -func (c *Cursor) keyValue() ([]byte, []byte, uint32) { - ref := &c.stack[len(c.stack)-1] - if ref.count() == 0 || ref.index >= ref.count() { - return nil, nil, 0 - } - - // Retrieve value from node. - if ref.node != nil { - inode := &ref.node.inodes[ref.index] - return inode.key, inode.value, inode.flags - } - - // Or retrieve value from page. - elem := ref.page.leafPageElement(uint16(ref.index)) - return elem.key(), elem.value(), elem.flags -} - -// node returns the node that the cursor is currently positioned on. -func (c *Cursor) node() *node { - _assert(len(c.stack) > 0, "accessing a node with a zero-length cursor stack") - - // If the top of the stack is a leaf node then just return it. - if ref := &c.stack[len(c.stack)-1]; ref.node != nil && ref.isLeaf() { - return ref.node - } - - // Start from root and traverse down the hierarchy. - var n = c.stack[0].node - if n == nil { - n = c.bucket.node(c.stack[0].page.id, nil) - } - for _, ref := range c.stack[:len(c.stack)-1] { - _assert(!n.isLeaf, "expected branch node") - n = n.childAt(int(ref.index)) - } - _assert(n.isLeaf, "expected leaf node") - return n -} - -// elemRef represents a reference to an element on a given page/node. -type elemRef struct { - page *page - node *node - index int -} - -// isLeaf returns whether the ref is pointing at a leaf page/node. 
-func (r *elemRef) isLeaf() bool { - if r.node != nil { - return r.node.isLeaf - } - return (r.page.flags & leafPageFlag) != 0 -} - -// count returns the number of inodes or page elements. -func (r *elemRef) count() int { - if r.node != nil { - return len(r.node.inodes) - } - return int(r.page.count) -} diff --git a/vendor/github.com/boltdb/bolt/db.go b/vendor/github.com/boltdb/bolt/db.go deleted file mode 100644 index f352ff1..0000000 --- a/vendor/github.com/boltdb/bolt/db.go +++ /dev/null @@ -1,1039 +0,0 @@ -package bolt - -import ( - "errors" - "fmt" - "hash/fnv" - "log" - "os" - "runtime" - "runtime/debug" - "strings" - "sync" - "time" - "unsafe" -) - -// The largest step that can be taken when remapping the mmap. -const maxMmapStep = 1 << 30 // 1GB - -// The data file format version. -const version = 2 - -// Represents a marker value to indicate that a file is a Bolt DB. -const magic uint32 = 0xED0CDAED - -// IgnoreNoSync specifies whether the NoSync field of a DB is ignored when -// syncing changes to a file. This is required as some operating systems, -// such as OpenBSD, do not have a unified buffer cache (UBC) and writes -// must be synchronized using the msync(2) syscall. -const IgnoreNoSync = runtime.GOOS == "openbsd" - -// Default values if not set in a DB instance. -const ( - DefaultMaxBatchSize int = 1000 - DefaultMaxBatchDelay = 10 * time.Millisecond - DefaultAllocSize = 16 * 1024 * 1024 -) - -// default page size for db is set to the OS page size. -var defaultPageSize = os.Getpagesize() - -// DB represents a collection of buckets persisted to a file on disk. -// All data access is performed through transactions which can be obtained through the DB. -// All the functions on DB will return a ErrDatabaseNotOpen if accessed before Open() is called. -type DB struct { - // When enabled, the database will perform a Check() after every commit. - // A panic is issued if the database is in an inconsistent state. 
This - // flag has a large performance impact so it should only be used for - // debugging purposes. - StrictMode bool - - // Setting the NoSync flag will cause the database to skip fsync() - // calls after each commit. This can be useful when bulk loading data - // into a database and you can restart the bulk load in the event of - // a system failure or database corruption. Do not set this flag for - // normal use. - // - // If the package global IgnoreNoSync constant is true, this value is - // ignored. See the comment on that constant for more details. - // - // THIS IS UNSAFE. PLEASE USE WITH CAUTION. - NoSync bool - - // When true, skips the truncate call when growing the database. - // Setting this to true is only safe on non-ext3/ext4 systems. - // Skipping truncation avoids preallocation of hard drive space and - // bypasses a truncate() and fsync() syscall on remapping. - // - // https://github.com/boltdb/bolt/issues/284 - NoGrowSync bool - - // If you want to read the entire database fast, you can set MmapFlag to - // syscall.MAP_POPULATE on Linux 2.6.23+ for sequential read-ahead. - MmapFlags int - - // MaxBatchSize is the maximum size of a batch. Default value is - // copied from DefaultMaxBatchSize in Open. - // - // If <=0, disables batching. - // - // Do not change concurrently with calls to Batch. - MaxBatchSize int - - // MaxBatchDelay is the maximum delay before a batch starts. - // Default value is copied from DefaultMaxBatchDelay in Open. - // - // If <=0, effectively disables batching. - // - // Do not change concurrently with calls to Batch. - MaxBatchDelay time.Duration - - // AllocSize is the amount of space allocated when the database - // needs to create new pages. This is done to amortize the cost - // of truncate() and fsync() when growing the data file. 
- AllocSize int - - path string - file *os.File - lockfile *os.File // windows only - dataref []byte // mmap'ed readonly, write throws SEGV - data *[maxMapSize]byte - datasz int - filesz int // current on disk file size - meta0 *meta - meta1 *meta - pageSize int - opened bool - rwtx *Tx - txs []*Tx - freelist *freelist - stats Stats - - pagePool sync.Pool - - batchMu sync.Mutex - batch *batch - - rwlock sync.Mutex // Allows only one writer at a time. - metalock sync.Mutex // Protects meta page access. - mmaplock sync.RWMutex // Protects mmap access during remapping. - statlock sync.RWMutex // Protects stats access. - - ops struct { - writeAt func(b []byte, off int64) (n int, err error) - } - - // Read only mode. - // When true, Update() and Begin(true) return ErrDatabaseReadOnly immediately. - readOnly bool -} - -// Path returns the path to currently open database file. -func (db *DB) Path() string { - return db.path -} - -// GoString returns the Go string representation of the database. -func (db *DB) GoString() string { - return fmt.Sprintf("bolt.DB{path:%q}", db.path) -} - -// String returns the string representation of the database. -func (db *DB) String() string { - return fmt.Sprintf("DB<%q>", db.path) -} - -// Open creates and opens a database at the given path. -// If the file does not exist then it will be created automatically. -// Passing in nil options will cause Bolt to open the database with the default options. -func Open(path string, mode os.FileMode, options *Options) (*DB, error) { - var db = &DB{opened: true} - - // Set default options if no options are provided. - if options == nil { - options = DefaultOptions - } - db.NoGrowSync = options.NoGrowSync - db.MmapFlags = options.MmapFlags - - // Set default values for later DB operations. 
- db.MaxBatchSize = DefaultMaxBatchSize - db.MaxBatchDelay = DefaultMaxBatchDelay - db.AllocSize = DefaultAllocSize - - flag := os.O_RDWR - if options.ReadOnly { - flag = os.O_RDONLY - db.readOnly = true - } - - // Open data file and separate sync handler for metadata writes. - db.path = path - var err error - if db.file, err = os.OpenFile(db.path, flag|os.O_CREATE, mode); err != nil { - _ = db.close() - return nil, err - } - - // Lock file so that other processes using Bolt in read-write mode cannot - // use the database at the same time. This would cause corruption since - // the two processes would write meta pages and free pages separately. - // The database file is locked exclusively (only one process can grab the lock) - // if !options.ReadOnly. - // The database file is locked using the shared lock (more than one process may - // hold a lock at the same time) otherwise (options.ReadOnly is set). - if err := flock(db, mode, !db.readOnly, options.Timeout); err != nil { - _ = db.close() - return nil, err - } - - // Default values for test hooks - db.ops.writeAt = db.file.WriteAt - - // Initialize the database if it doesn't exist. - if info, err := db.file.Stat(); err != nil { - return nil, err - } else if info.Size() == 0 { - // Initialize new files with meta pages. - if err := db.init(); err != nil { - return nil, err - } - } else { - // Read the first meta page to determine the page size. - var buf [0x1000]byte - if _, err := db.file.ReadAt(buf[:], 0); err == nil { - m := db.pageInBuffer(buf[:], 0).meta() - if err := m.validate(); err != nil { - // If we can't read the page size, we can assume it's the same - // as the OS -- since that's how the page size was chosen in the - // first place. - // - // If the first page is invalid and this OS uses a different - // page size than what the database was created with then we - // are out of luck and cannot access the database. 
- db.pageSize = os.Getpagesize() - } else { - db.pageSize = int(m.pageSize) - } - } - } - - // Initialize page pool. - db.pagePool = sync.Pool{ - New: func() interface{} { - return make([]byte, db.pageSize) - }, - } - - // Memory map the data file. - if err := db.mmap(options.InitialMmapSize); err != nil { - _ = db.close() - return nil, err - } - - // Read in the freelist. - db.freelist = newFreelist() - db.freelist.read(db.page(db.meta().freelist)) - - // Mark the database as opened and return. - return db, nil -} - -// mmap opens the underlying memory-mapped file and initializes the meta references. -// minsz is the minimum size that the new mmap can be. -func (db *DB) mmap(minsz int) error { - db.mmaplock.Lock() - defer db.mmaplock.Unlock() - - info, err := db.file.Stat() - if err != nil { - return fmt.Errorf("mmap stat error: %s", err) - } else if int(info.Size()) < db.pageSize*2 { - return fmt.Errorf("file size too small") - } - - // Ensure the size is at least the minimum size. - var size = int(info.Size()) - if size < minsz { - size = minsz - } - size, err = db.mmapSize(size) - if err != nil { - return err - } - - // Dereference all mmap references before unmapping. - if db.rwtx != nil { - db.rwtx.root.dereference() - } - - // Unmap existing data before continuing. - if err := db.munmap(); err != nil { - return err - } - - // Memory-map the data file as a byte slice. - if err := mmap(db, size); err != nil { - return err - } - - // Save references to the meta pages. - db.meta0 = db.page(0).meta() - db.meta1 = db.page(1).meta() - - // Validate the meta pages. We only return an error if both meta pages fail - // validation, since meta0 failing validation means that it wasn't saved - // properly -- but we can recover using meta1. And vice-versa. - err0 := db.meta0.validate() - err1 := db.meta1.validate() - if err0 != nil && err1 != nil { - return err0 - } - - return nil -} - -// munmap unmaps the data file from memory. 
-func (db *DB) munmap() error { - if err := munmap(db); err != nil { - return fmt.Errorf("unmap error: " + err.Error()) - } - return nil -} - -// mmapSize determines the appropriate size for the mmap given the current size -// of the database. The minimum size is 32KB and doubles until it reaches 1GB. -// Returns an error if the new mmap size is greater than the max allowed. -func (db *DB) mmapSize(size int) (int, error) { - // Double the size from 32KB until 1GB. - for i := uint(15); i <= 30; i++ { - if size <= 1< maxMapSize { - return 0, fmt.Errorf("mmap too large") - } - - // If larger than 1GB then grow by 1GB at a time. - sz := int64(size) - if remainder := sz % int64(maxMmapStep); remainder > 0 { - sz += int64(maxMmapStep) - remainder - } - - // Ensure that the mmap size is a multiple of the page size. - // This should always be true since we're incrementing in MBs. - pageSize := int64(db.pageSize) - if (sz % pageSize) != 0 { - sz = ((sz / pageSize) + 1) * pageSize - } - - // If we've exceeded the max size then only grow up to the max size. - if sz > maxMapSize { - sz = maxMapSize - } - - return int(sz), nil -} - -// init creates a new database file and initializes its meta pages. -func (db *DB) init() error { - // Set the page size to the OS page size. - db.pageSize = os.Getpagesize() - - // Create two meta pages on a buffer. - buf := make([]byte, db.pageSize*4) - for i := 0; i < 2; i++ { - p := db.pageInBuffer(buf[:], pgid(i)) - p.id = pgid(i) - p.flags = metaPageFlag - - // Initialize the meta page. - m := p.meta() - m.magic = magic - m.version = version - m.pageSize = uint32(db.pageSize) - m.freelist = 2 - m.root = bucket{root: 3} - m.pgid = 4 - m.txid = txid(i) - m.checksum = m.sum64() - } - - // Write an empty freelist at page 3. - p := db.pageInBuffer(buf[:], pgid(2)) - p.id = pgid(2) - p.flags = freelistPageFlag - p.count = 0 - - // Write an empty leaf page at page 4. 
- p = db.pageInBuffer(buf[:], pgid(3)) - p.id = pgid(3) - p.flags = leafPageFlag - p.count = 0 - - // Write the buffer to our data file. - if _, err := db.ops.writeAt(buf, 0); err != nil { - return err - } - if err := fdatasync(db); err != nil { - return err - } - - return nil -} - -// Close releases all database resources. -// All transactions must be closed before closing the database. -func (db *DB) Close() error { - db.rwlock.Lock() - defer db.rwlock.Unlock() - - db.metalock.Lock() - defer db.metalock.Unlock() - - db.mmaplock.RLock() - defer db.mmaplock.RUnlock() - - return db.close() -} - -func (db *DB) close() error { - if !db.opened { - return nil - } - - db.opened = false - - db.freelist = nil - - // Clear ops. - db.ops.writeAt = nil - - // Close the mmap. - if err := db.munmap(); err != nil { - return err - } - - // Close file handles. - if db.file != nil { - // No need to unlock read-only file. - if !db.readOnly { - // Unlock the file. - if err := funlock(db); err != nil { - log.Printf("bolt.Close(): funlock error: %s", err) - } - } - - // Close the file descriptor. - if err := db.file.Close(); err != nil { - return fmt.Errorf("db file close: %s", err) - } - db.file = nil - } - - db.path = "" - return nil -} - -// Begin starts a new transaction. -// Multiple read-only transactions can be used concurrently but only one -// write transaction can be used at a time. Starting multiple write transactions -// will cause the calls to block and be serialized until the current write -// transaction finishes. -// -// Transactions should not be dependent on one another. Opening a read -// transaction and a write transaction in the same goroutine can cause the -// writer to deadlock because the database periodically needs to re-mmap itself -// as it grows and it cannot do that while a read transaction is open. 
-// -// If a long running read transaction (for example, a snapshot transaction) is -// needed, you might want to set DB.InitialMmapSize to a large enough value -// to avoid potential blocking of write transaction. -// -// IMPORTANT: You must close read-only transactions after you are finished or -// else the database will not reclaim old pages. -func (db *DB) Begin(writable bool) (*Tx, error) { - if writable { - return db.beginRWTx() - } - return db.beginTx() -} - -func (db *DB) beginTx() (*Tx, error) { - // Lock the meta pages while we initialize the transaction. We obtain - // the meta lock before the mmap lock because that's the order that the - // write transaction will obtain them. - db.metalock.Lock() - - // Obtain a read-only lock on the mmap. When the mmap is remapped it will - // obtain a write lock so all transactions must finish before it can be - // remapped. - db.mmaplock.RLock() - - // Exit if the database is not open yet. - if !db.opened { - db.mmaplock.RUnlock() - db.metalock.Unlock() - return nil, ErrDatabaseNotOpen - } - - // Create a transaction associated with the database. - t := &Tx{} - t.init(db) - - // Keep track of transaction until it closes. - db.txs = append(db.txs, t) - n := len(db.txs) - - // Unlock the meta pages. - db.metalock.Unlock() - - // Update the transaction stats. - db.statlock.Lock() - db.stats.TxN++ - db.stats.OpenTxN = n - db.statlock.Unlock() - - return t, nil -} - -func (db *DB) beginRWTx() (*Tx, error) { - // If the database was opened with Options.ReadOnly, return an error. - if db.readOnly { - return nil, ErrDatabaseReadOnly - } - - // Obtain writer lock. This is released by the transaction when it closes. - // This enforces only one writer transaction at a time. - db.rwlock.Lock() - - // Once we have the writer lock then we can lock the meta pages so that - // we can set up the transaction. - db.metalock.Lock() - defer db.metalock.Unlock() - - // Exit if the database is not open yet. 
- if !db.opened { - db.rwlock.Unlock() - return nil, ErrDatabaseNotOpen - } - - // Create a transaction associated with the database. - t := &Tx{writable: true} - t.init(db) - db.rwtx = t - - // Free any pages associated with closed read-only transactions. - var minid txid = 0xFFFFFFFFFFFFFFFF - for _, t := range db.txs { - if t.meta.txid < minid { - minid = t.meta.txid - } - } - if minid > 0 { - db.freelist.release(minid - 1) - } - - return t, nil -} - -// removeTx removes a transaction from the database. -func (db *DB) removeTx(tx *Tx) { - // Release the read lock on the mmap. - db.mmaplock.RUnlock() - - // Use the meta lock to restrict access to the DB object. - db.metalock.Lock() - - // Remove the transaction. - for i, t := range db.txs { - if t == tx { - last := len(db.txs) - 1 - db.txs[i] = db.txs[last] - db.txs[last] = nil - db.txs = db.txs[:last] - break - } - } - n := len(db.txs) - - // Unlock the meta pages. - db.metalock.Unlock() - - // Merge statistics. - db.statlock.Lock() - db.stats.OpenTxN = n - db.stats.TxStats.add(&tx.stats) - db.statlock.Unlock() -} - -// Update executes a function within the context of a read-write managed transaction. -// If no error is returned from the function then the transaction is committed. -// If an error is returned then the entire transaction is rolled back. -// Any error that is returned from the function or returned from the commit is -// returned from the Update() method. -// -// Attempting to manually commit or rollback within the function will cause a panic. -func (db *DB) Update(fn func(*Tx) error) error { - t, err := db.Begin(true) - if err != nil { - return err - } - - // Make sure the transaction rolls back in the event of a panic. - defer func() { - if t.db != nil { - t.rollback() - } - }() - - // Mark as a managed tx so that the inner function cannot manually commit. - t.managed = true - - // If an error is returned from the function then rollback and return error. 
- err = fn(t) - t.managed = false - if err != nil { - _ = t.Rollback() - return err - } - - return t.Commit() -} - -// View executes a function within the context of a managed read-only transaction. -// Any error that is returned from the function is returned from the View() method. -// -// Attempting to manually rollback within the function will cause a panic. -func (db *DB) View(fn func(*Tx) error) error { - t, err := db.Begin(false) - if err != nil { - return err - } - - // Make sure the transaction rolls back in the event of a panic. - defer func() { - if t.db != nil { - t.rollback() - } - }() - - // Mark as a managed tx so that the inner function cannot manually rollback. - t.managed = true - - // If an error is returned from the function then pass it through. - err = fn(t) - t.managed = false - if err != nil { - _ = t.Rollback() - return err - } - - if err := t.Rollback(); err != nil { - return err - } - - return nil -} - -// Batch calls fn as part of a batch. It behaves similar to Update, -// except: -// -// 1. concurrent Batch calls can be combined into a single Bolt -// transaction. -// -// 2. the function passed to Batch may be called multiple times, -// regardless of whether it returns error or not. -// -// This means that Batch function side effects must be idempotent and -// take permanent effect only after a successful return is seen in -// caller. -// -// The maximum batch size and delay can be adjusted with DB.MaxBatchSize -// and DB.MaxBatchDelay, respectively. -// -// Batch is only useful when there are multiple goroutines calling it. -func (db *DB) Batch(fn func(*Tx) error) error { - errCh := make(chan error, 1) - - db.batchMu.Lock() - if (db.batch == nil) || (db.batch != nil && len(db.batch.calls) >= db.MaxBatchSize) { - // There is no existing batch, or the existing batch is full; start a new one. 
- db.batch = &batch{ - db: db, - } - db.batch.timer = time.AfterFunc(db.MaxBatchDelay, db.batch.trigger) - } - db.batch.calls = append(db.batch.calls, call{fn: fn, err: errCh}) - if len(db.batch.calls) >= db.MaxBatchSize { - // wake up batch, it's ready to run - go db.batch.trigger() - } - db.batchMu.Unlock() - - err := <-errCh - if err == trySolo { - err = db.Update(fn) - } - return err -} - -type call struct { - fn func(*Tx) error - err chan<- error -} - -type batch struct { - db *DB - timer *time.Timer - start sync.Once - calls []call -} - -// trigger runs the batch if it hasn't already been run. -func (b *batch) trigger() { - b.start.Do(b.run) -} - -// run performs the transactions in the batch and communicates results -// back to DB.Batch. -func (b *batch) run() { - b.db.batchMu.Lock() - b.timer.Stop() - // Make sure no new work is added to this batch, but don't break - // other batches. - if b.db.batch == b { - b.db.batch = nil - } - b.db.batchMu.Unlock() - -retry: - for len(b.calls) > 0 { - var failIdx = -1 - err := b.db.Update(func(tx *Tx) error { - for i, c := range b.calls { - if err := safelyCall(c.fn, tx); err != nil { - failIdx = i - return err - } - } - return nil - }) - - if failIdx >= 0 { - // take the failing transaction out of the batch. it's - // safe to shorten b.calls here because db.batch no longer - // points to us, and we hold the mutex anyway. - c := b.calls[failIdx] - b.calls[failIdx], b.calls = b.calls[len(b.calls)-1], b.calls[:len(b.calls)-1] - // tell the submitter re-run it solo, continue with the rest of the batch - c.err <- trySolo - continue retry - } - - // pass success, or bolt internal errors, to all callers - for _, c := range b.calls { - if c.err != nil { - c.err <- err - } - } - break retry - } -} - -// trySolo is a special sentinel error value used for signaling that a -// transaction function should be re-run. It should never be seen by -// callers. 
-var trySolo = errors.New("batch function returned an error and should be re-run solo") - -type panicked struct { - reason interface{} -} - -func (p panicked) Error() string { - if err, ok := p.reason.(error); ok { - return err.Error() - } - return fmt.Sprintf("panic: %v", p.reason) -} - -func safelyCall(fn func(*Tx) error, tx *Tx) (err error) { - defer func() { - if p := recover(); p != nil { - err = panicked{p} - } - }() - return fn(tx) -} - -// Sync executes fdatasync() against the database file handle. -// -// This is not necessary under normal operation, however, if you use NoSync -// then it allows you to force the database file to sync against the disk. -func (db *DB) Sync() error { return fdatasync(db) } - -// Stats retrieves ongoing performance stats for the database. -// This is only updated when a transaction closes. -func (db *DB) Stats() Stats { - db.statlock.RLock() - defer db.statlock.RUnlock() - return db.stats -} - -// This is for internal access to the raw data bytes from the C cursor, use -// carefully, or not at all. -func (db *DB) Info() *Info { - return &Info{uintptr(unsafe.Pointer(&db.data[0])), db.pageSize} -} - -// page retrieves a page reference from the mmap based on the current page size. -func (db *DB) page(id pgid) *page { - pos := id * pgid(db.pageSize) - return (*page)(unsafe.Pointer(&db.data[pos])) -} - -// pageInBuffer retrieves a page reference from a given byte array based on the current page size. -func (db *DB) pageInBuffer(b []byte, id pgid) *page { - return (*page)(unsafe.Pointer(&b[id*pgid(db.pageSize)])) -} - -// meta retrieves the current meta page reference. -func (db *DB) meta() *meta { - // We have to return the meta with the highest txid which doesn't fail - // validation. Otherwise, we can cause errors when in fact the database is - // in a consistent state. metaA is the one with the higher txid. 
- metaA := db.meta0 - metaB := db.meta1 - if db.meta1.txid > db.meta0.txid { - metaA = db.meta1 - metaB = db.meta0 - } - - // Use higher meta page if valid. Otherwise fallback to previous, if valid. - if err := metaA.validate(); err == nil { - return metaA - } else if err := metaB.validate(); err == nil { - return metaB - } - - // This should never be reached, because both meta1 and meta0 were validated - // on mmap() and we do fsync() on every write. - panic("bolt.DB.meta(): invalid meta pages") -} - -// allocate returns a contiguous block of memory starting at a given page. -func (db *DB) allocate(count int) (*page, error) { - // Allocate a temporary buffer for the page. - var buf []byte - if count == 1 { - buf = db.pagePool.Get().([]byte) - } else { - buf = make([]byte, count*db.pageSize) - } - p := (*page)(unsafe.Pointer(&buf[0])) - p.overflow = uint32(count - 1) - - // Use pages from the freelist if they are available. - if p.id = db.freelist.allocate(count); p.id != 0 { - return p, nil - } - - // Resize mmap() if we're at the end. - p.id = db.rwtx.meta.pgid - var minsz = int((p.id+pgid(count))+1) * db.pageSize - if minsz >= db.datasz { - if err := db.mmap(minsz); err != nil { - return nil, fmt.Errorf("mmap allocate error: %s", err) - } - } - - // Move the page id high water mark. - db.rwtx.meta.pgid += pgid(count) - - return p, nil -} - -// grow grows the size of the database to the given sz. -func (db *DB) grow(sz int) error { - // Ignore if the new size is less than available file size. - if sz <= db.filesz { - return nil - } - - // If the data is smaller than the alloc size then only allocate what's needed. - // Once it goes over the allocation size then allocate in chunks. - if db.datasz < db.AllocSize { - sz = db.datasz - } else { - sz += db.AllocSize - } - - // Truncate and fsync to ensure file size metadata is flushed. 
- // https://github.com/boltdb/bolt/issues/284 - if !db.NoGrowSync && !db.readOnly { - if runtime.GOOS != "windows" { - if err := db.file.Truncate(int64(sz)); err != nil { - return fmt.Errorf("file resize error: %s", err) - } - } - if err := db.file.Sync(); err != nil { - return fmt.Errorf("file sync error: %s", err) - } - } - - db.filesz = sz - return nil -} - -func (db *DB) IsReadOnly() bool { - return db.readOnly -} - -// Options represents the options that can be set when opening a database. -type Options struct { - // Timeout is the amount of time to wait to obtain a file lock. - // When set to zero it will wait indefinitely. This option is only - // available on Darwin and Linux. - Timeout time.Duration - - // Sets the DB.NoGrowSync flag before memory mapping the file. - NoGrowSync bool - - // Open database in read-only mode. Uses flock(..., LOCK_SH |LOCK_NB) to - // grab a shared lock (UNIX). - ReadOnly bool - - // Sets the DB.MmapFlags flag before memory mapping the file. - MmapFlags int - - // InitialMmapSize is the initial mmap size of the database - // in bytes. Read transactions won't block write transaction - // if the InitialMmapSize is large enough to hold database mmap - // size. (See DB.Begin for more information) - // - // If <=0, the initial map size is 0. - // If initialMmapSize is smaller than the previous database size, - // it takes no effect. - InitialMmapSize int -} - -// DefaultOptions represent the options used if nil options are passed into Open(). -// No timeout is used which will cause Bolt to wait indefinitely for a lock. -var DefaultOptions = &Options{ - Timeout: 0, - NoGrowSync: false, -} - -// Stats represents statistics about the database. 
-type Stats struct { - // Freelist stats - FreePageN int // total number of free pages on the freelist - PendingPageN int // total number of pending pages on the freelist - FreeAlloc int // total bytes allocated in free pages - FreelistInuse int // total bytes used by the freelist - - // Transaction stats - TxN int // total number of started read transactions - OpenTxN int // number of currently open read transactions - - TxStats TxStats // global, ongoing stats. -} - -// Sub calculates and returns the difference between two sets of database stats. -// This is useful when obtaining stats at two different points and time and -// you need the performance counters that occurred within that time span. -func (s *Stats) Sub(other *Stats) Stats { - if other == nil { - return *s - } - var diff Stats - diff.FreePageN = s.FreePageN - diff.PendingPageN = s.PendingPageN - diff.FreeAlloc = s.FreeAlloc - diff.FreelistInuse = s.FreelistInuse - diff.TxN = s.TxN - other.TxN - diff.TxStats = s.TxStats.Sub(&other.TxStats) - return diff -} - -func (s *Stats) add(other *Stats) { - s.TxStats.add(&other.TxStats) -} - -type Info struct { - Data uintptr - PageSize int -} - -type meta struct { - magic uint32 - version uint32 - pageSize uint32 - flags uint32 - root bucket - freelist pgid - pgid pgid - txid txid - checksum uint64 -} - -// validate checks the marker bytes and version of the meta page to ensure it matches this binary. -func (m *meta) validate() error { - if m.magic != magic { - return ErrInvalid - } else if m.version != version { - return ErrVersionMismatch - } else if m.checksum != 0 && m.checksum != m.sum64() { - return ErrChecksum - } - return nil -} - -// copy copies one meta object to another. -func (m *meta) copy(dest *meta) { - *dest = *m -} - -// write writes the meta onto a page. 
-func (m *meta) write(p *page) { - if m.root.root >= m.pgid { - panic(fmt.Sprintf("root bucket pgid (%d) above high water mark (%d)", m.root.root, m.pgid)) - } else if m.freelist >= m.pgid { - panic(fmt.Sprintf("freelist pgid (%d) above high water mark (%d)", m.freelist, m.pgid)) - } - - // Page id is either going to be 0 or 1 which we can determine by the transaction ID. - p.id = pgid(m.txid % 2) - p.flags |= metaPageFlag - - // Calculate the checksum. - m.checksum = m.sum64() - - m.copy(p.meta()) -} - -// generates the checksum for the meta. -func (m *meta) sum64() uint64 { - var h = fnv.New64a() - _, _ = h.Write((*[unsafe.Offsetof(meta{}.checksum)]byte)(unsafe.Pointer(m))[:]) - return h.Sum64() -} - -// _assert will panic with a given formatted message if the given condition is false. -func _assert(condition bool, msg string, v ...interface{}) { - if !condition { - panic(fmt.Sprintf("assertion failed: "+msg, v...)) - } -} - -func warn(v ...interface{}) { fmt.Fprintln(os.Stderr, v...) } -func warnf(msg string, v ...interface{}) { fmt.Fprintf(os.Stderr, msg+"\n", v...) } - -func printstack() { - stack := strings.Join(strings.Split(string(debug.Stack()), "\n")[2:], "\n") - fmt.Fprintln(os.Stderr, stack) -} diff --git a/vendor/github.com/boltdb/bolt/doc.go b/vendor/github.com/boltdb/bolt/doc.go deleted file mode 100644 index cc93784..0000000 --- a/vendor/github.com/boltdb/bolt/doc.go +++ /dev/null @@ -1,44 +0,0 @@ -/* -Package bolt implements a low-level key/value store in pure Go. It supports -fully serializable transactions, ACID semantics, and lock-free MVCC with -multiple readers and a single writer. Bolt can be used for projects that -want a simple data store without the need to add large dependencies such as -Postgres or MySQL. - -Bolt is a single-level, zero-copy, B+tree data store. This means that Bolt is -optimized for fast read access and does not require recovery in the event of a -system crash. 
Transactions which have not finished committing will simply be -rolled back in the event of a crash. - -The design of Bolt is based on Howard Chu's LMDB database project. - -Bolt currently works on Windows, Mac OS X, and Linux. - - -Basics - -There are only a few types in Bolt: DB, Bucket, Tx, and Cursor. The DB is -a collection of buckets and is represented by a single file on disk. A bucket is -a collection of unique keys that are associated with values. - -Transactions provide either read-only or read-write access to the database. -Read-only transactions can retrieve key/value pairs and can use Cursors to -iterate over the dataset sequentially. Read-write transactions can create and -delete buckets and can insert and remove keys. Only one read-write transaction -is allowed at a time. - - -Caveats - -The database uses a read-only, memory-mapped data file to ensure that -applications cannot corrupt the database, however, this means that keys and -values returned from Bolt cannot be changed. Writing to a read-only byte slice -will cause Go to panic. - -Keys and values retrieved from the database are only valid for the life of -the transaction. When used outside the transaction, these byte slices can -point to different data or can point to invalid memory which will cause a panic. - - -*/ -package bolt diff --git a/vendor/github.com/boltdb/bolt/errors.go b/vendor/github.com/boltdb/bolt/errors.go deleted file mode 100644 index a3620a3..0000000 --- a/vendor/github.com/boltdb/bolt/errors.go +++ /dev/null @@ -1,71 +0,0 @@ -package bolt - -import "errors" - -// These errors can be returned when opening or calling methods on a DB. -var ( - // ErrDatabaseNotOpen is returned when a DB instance is accessed before it - // is opened or after it is closed. - ErrDatabaseNotOpen = errors.New("database not open") - - // ErrDatabaseOpen is returned when opening a database that is - // already open. 
- ErrDatabaseOpen = errors.New("database already open") - - // ErrInvalid is returned when both meta pages on a database are invalid. - // This typically occurs when a file is not a bolt database. - ErrInvalid = errors.New("invalid database") - - // ErrVersionMismatch is returned when the data file was created with a - // different version of Bolt. - ErrVersionMismatch = errors.New("version mismatch") - - // ErrChecksum is returned when either meta page checksum does not match. - ErrChecksum = errors.New("checksum error") - - // ErrTimeout is returned when a database cannot obtain an exclusive lock - // on the data file after the timeout passed to Open(). - ErrTimeout = errors.New("timeout") -) - -// These errors can occur when beginning or committing a Tx. -var ( - // ErrTxNotWritable is returned when performing a write operation on a - // read-only transaction. - ErrTxNotWritable = errors.New("tx not writable") - - // ErrTxClosed is returned when committing or rolling back a transaction - // that has already been committed or rolled back. - ErrTxClosed = errors.New("tx closed") - - // ErrDatabaseReadOnly is returned when a mutating transaction is started on a - // read-only database. - ErrDatabaseReadOnly = errors.New("database is in read-only mode") -) - -// These errors can occur when putting or deleting a value or a bucket. -var ( - // ErrBucketNotFound is returned when trying to access a bucket that has - // not been created yet. - ErrBucketNotFound = errors.New("bucket not found") - - // ErrBucketExists is returned when creating a bucket that already exists. - ErrBucketExists = errors.New("bucket already exists") - - // ErrBucketNameRequired is returned when creating a bucket with a blank name. - ErrBucketNameRequired = errors.New("bucket name required") - - // ErrKeyRequired is returned when inserting a zero-length key. - ErrKeyRequired = errors.New("key required") - - // ErrKeyTooLarge is returned when inserting a key that is larger than MaxKeySize. 
- ErrKeyTooLarge = errors.New("key too large") - - // ErrValueTooLarge is returned when inserting a value that is larger than MaxValueSize. - ErrValueTooLarge = errors.New("value too large") - - // ErrIncompatibleValue is returned when trying create or delete a bucket - // on an existing non-bucket key or when trying to create or delete a - // non-bucket key on an existing bucket key. - ErrIncompatibleValue = errors.New("incompatible value") -) diff --git a/vendor/github.com/boltdb/bolt/freelist.go b/vendor/github.com/boltdb/bolt/freelist.go deleted file mode 100644 index aba48f5..0000000 --- a/vendor/github.com/boltdb/bolt/freelist.go +++ /dev/null @@ -1,252 +0,0 @@ -package bolt - -import ( - "fmt" - "sort" - "unsafe" -) - -// freelist represents a list of all pages that are available for allocation. -// It also tracks pages that have been freed but are still in use by open transactions. -type freelist struct { - ids []pgid // all free and available free page ids. - pending map[txid][]pgid // mapping of soon-to-be free page ids by tx. - cache map[pgid]bool // fast lookup of all free and pending page ids. -} - -// newFreelist returns an empty, initialized freelist. -func newFreelist() *freelist { - return &freelist{ - pending: make(map[txid][]pgid), - cache: make(map[pgid]bool), - } -} - -// size returns the size of the page after serialization. -func (f *freelist) size() int { - n := f.count() - if n >= 0xFFFF { - // The first element will be used to store the count. See freelist.write. 
- n++ - } - return pageHeaderSize + (int(unsafe.Sizeof(pgid(0))) * n) -} - -// count returns count of pages on the freelist -func (f *freelist) count() int { - return f.free_count() + f.pending_count() -} - -// free_count returns count of free pages -func (f *freelist) free_count() int { - return len(f.ids) -} - -// pending_count returns count of pending pages -func (f *freelist) pending_count() int { - var count int - for _, list := range f.pending { - count += len(list) - } - return count -} - -// copyall copies into dst a list of all free ids and all pending ids in one sorted list. -// f.count returns the minimum length required for dst. -func (f *freelist) copyall(dst []pgid) { - m := make(pgids, 0, f.pending_count()) - for _, list := range f.pending { - m = append(m, list...) - } - sort.Sort(m) - mergepgids(dst, f.ids, m) -} - -// allocate returns the starting page id of a contiguous list of pages of a given size. -// If a contiguous block cannot be found then 0 is returned. -func (f *freelist) allocate(n int) pgid { - if len(f.ids) == 0 { - return 0 - } - - var initial, previd pgid - for i, id := range f.ids { - if id <= 1 { - panic(fmt.Sprintf("invalid page allocation: %d", id)) - } - - // Reset initial page if this is not contiguous. - if previd == 0 || id-previd != 1 { - initial = id - } - - // If we found a contiguous block then remove it and return it. - if (id-initial)+1 == pgid(n) { - // If we're allocating off the beginning then take the fast path - // and just adjust the existing slice. This will use extra memory - // temporarily but the append() in free() will realloc the slice - // as is necessary. - if (i + 1) == n { - f.ids = f.ids[i+1:] - } else { - copy(f.ids[i-n+1:], f.ids[i+1:]) - f.ids = f.ids[:len(f.ids)-n] - } - - // Remove from the free cache. 
- for i := pgid(0); i < pgid(n); i++ { - delete(f.cache, initial+i) - } - - return initial - } - - previd = id - } - return 0 -} - -// free releases a page and its overflow for a given transaction id. -// If the page is already free then a panic will occur. -func (f *freelist) free(txid txid, p *page) { - if p.id <= 1 { - panic(fmt.Sprintf("cannot free page 0 or 1: %d", p.id)) - } - - // Free page and all its overflow pages. - var ids = f.pending[txid] - for id := p.id; id <= p.id+pgid(p.overflow); id++ { - // Verify that page is not already free. - if f.cache[id] { - panic(fmt.Sprintf("page %d already freed", id)) - } - - // Add to the freelist and cache. - ids = append(ids, id) - f.cache[id] = true - } - f.pending[txid] = ids -} - -// release moves all page ids for a transaction id (or older) to the freelist. -func (f *freelist) release(txid txid) { - m := make(pgids, 0) - for tid, ids := range f.pending { - if tid <= txid { - // Move transaction's pending pages to the available freelist. - // Don't remove from the cache since the page is still free. - m = append(m, ids...) - delete(f.pending, tid) - } - } - sort.Sort(m) - f.ids = pgids(f.ids).merge(m) -} - -// rollback removes the pages from a given pending tx. -func (f *freelist) rollback(txid txid) { - // Remove page ids from cache. - for _, id := range f.pending[txid] { - delete(f.cache, id) - } - - // Remove pages from pending list. - delete(f.pending, txid) -} - -// freed returns whether a given page is in the free list. -func (f *freelist) freed(pgid pgid) bool { - return f.cache[pgid] -} - -// read initializes the freelist from a freelist page. -func (f *freelist) read(p *page) { - // If the page.count is at the max uint16 value (64k) then it's considered - // an overflow and the size of the freelist is stored as the first element. 
- idx, count := 0, int(p.count) - if count == 0xFFFF { - idx = 1 - count = int(((*[maxAllocSize]pgid)(unsafe.Pointer(&p.ptr)))[0]) - } - - // Copy the list of page ids from the freelist. - if count == 0 { - f.ids = nil - } else { - ids := ((*[maxAllocSize]pgid)(unsafe.Pointer(&p.ptr)))[idx:count] - f.ids = make([]pgid, len(ids)) - copy(f.ids, ids) - - // Make sure they're sorted. - sort.Sort(pgids(f.ids)) - } - - // Rebuild the page cache. - f.reindex() -} - -// write writes the page ids onto a freelist page. All free and pending ids are -// saved to disk since in the event of a program crash, all pending ids will -// become free. -func (f *freelist) write(p *page) error { - // Combine the old free pgids and pgids waiting on an open transaction. - - // Update the header flag. - p.flags |= freelistPageFlag - - // The page.count can only hold up to 64k elements so if we overflow that - // number then we handle it by putting the size in the first element. - lenids := f.count() - if lenids == 0 { - p.count = uint16(lenids) - } else if lenids < 0xFFFF { - p.count = uint16(lenids) - f.copyall(((*[maxAllocSize]pgid)(unsafe.Pointer(&p.ptr)))[:]) - } else { - p.count = 0xFFFF - ((*[maxAllocSize]pgid)(unsafe.Pointer(&p.ptr)))[0] = pgid(lenids) - f.copyall(((*[maxAllocSize]pgid)(unsafe.Pointer(&p.ptr)))[1:]) - } - - return nil -} - -// reload reads the freelist from a page and filters out pending items. -func (f *freelist) reload(p *page) { - f.read(p) - - // Build a cache of only pending pages. - pcache := make(map[pgid]bool) - for _, pendingIDs := range f.pending { - for _, pendingID := range pendingIDs { - pcache[pendingID] = true - } - } - - // Check each page in the freelist and build a new available freelist - // with any pages not in the pending lists. 
- var a []pgid - for _, id := range f.ids { - if !pcache[id] { - a = append(a, id) - } - } - f.ids = a - - // Once the available list is rebuilt then rebuild the free cache so that - // it includes the available and pending free pages. - f.reindex() -} - -// reindex rebuilds the free cache based on available and pending free lists. -func (f *freelist) reindex() { - f.cache = make(map[pgid]bool, len(f.ids)) - for _, id := range f.ids { - f.cache[id] = true - } - for _, pendingIDs := range f.pending { - for _, pendingID := range pendingIDs { - f.cache[pendingID] = true - } - } -} diff --git a/vendor/github.com/boltdb/bolt/node.go b/vendor/github.com/boltdb/bolt/node.go deleted file mode 100644 index 159318b..0000000 --- a/vendor/github.com/boltdb/bolt/node.go +++ /dev/null @@ -1,604 +0,0 @@ -package bolt - -import ( - "bytes" - "fmt" - "sort" - "unsafe" -) - -// node represents an in-memory, deserialized page. -type node struct { - bucket *Bucket - isLeaf bool - unbalanced bool - spilled bool - key []byte - pgid pgid - parent *node - children nodes - inodes inodes -} - -// root returns the top-level node this node is attached to. -func (n *node) root() *node { - if n.parent == nil { - return n - } - return n.parent.root() -} - -// minKeys returns the minimum number of inodes this node should have. -func (n *node) minKeys() int { - if n.isLeaf { - return 1 - } - return 2 -} - -// size returns the size of the node after serialization. -func (n *node) size() int { - sz, elsz := pageHeaderSize, n.pageElementSize() - for i := 0; i < len(n.inodes); i++ { - item := &n.inodes[i] - sz += elsz + len(item.key) + len(item.value) - } - return sz -} - -// sizeLessThan returns true if the node is less than a given size. -// This is an optimization to avoid calculating a large node when we only need -// to know if it fits inside a certain page size. 
-func (n *node) sizeLessThan(v int) bool { - sz, elsz := pageHeaderSize, n.pageElementSize() - for i := 0; i < len(n.inodes); i++ { - item := &n.inodes[i] - sz += elsz + len(item.key) + len(item.value) - if sz >= v { - return false - } - } - return true -} - -// pageElementSize returns the size of each page element based on the type of node. -func (n *node) pageElementSize() int { - if n.isLeaf { - return leafPageElementSize - } - return branchPageElementSize -} - -// childAt returns the child node at a given index. -func (n *node) childAt(index int) *node { - if n.isLeaf { - panic(fmt.Sprintf("invalid childAt(%d) on a leaf node", index)) - } - return n.bucket.node(n.inodes[index].pgid, n) -} - -// childIndex returns the index of a given child node. -func (n *node) childIndex(child *node) int { - index := sort.Search(len(n.inodes), func(i int) bool { return bytes.Compare(n.inodes[i].key, child.key) != -1 }) - return index -} - -// numChildren returns the number of children. -func (n *node) numChildren() int { - return len(n.inodes) -} - -// nextSibling returns the next node with the same parent. -func (n *node) nextSibling() *node { - if n.parent == nil { - return nil - } - index := n.parent.childIndex(n) - if index >= n.parent.numChildren()-1 { - return nil - } - return n.parent.childAt(index + 1) -} - -// prevSibling returns the previous node with the same parent. -func (n *node) prevSibling() *node { - if n.parent == nil { - return nil - } - index := n.parent.childIndex(n) - if index == 0 { - return nil - } - return n.parent.childAt(index - 1) -} - -// put inserts a key/value. -func (n *node) put(oldKey, newKey, value []byte, pgid pgid, flags uint32) { - if pgid >= n.bucket.tx.meta.pgid { - panic(fmt.Sprintf("pgid (%d) above high water mark (%d)", pgid, n.bucket.tx.meta.pgid)) - } else if len(oldKey) <= 0 { - panic("put: zero-length old key") - } else if len(newKey) <= 0 { - panic("put: zero-length new key") - } - - // Find insertion index. 
- index := sort.Search(len(n.inodes), func(i int) bool { return bytes.Compare(n.inodes[i].key, oldKey) != -1 }) - - // Add capacity and shift nodes if we don't have an exact match and need to insert. - exact := (len(n.inodes) > 0 && index < len(n.inodes) && bytes.Equal(n.inodes[index].key, oldKey)) - if !exact { - n.inodes = append(n.inodes, inode{}) - copy(n.inodes[index+1:], n.inodes[index:]) - } - - inode := &n.inodes[index] - inode.flags = flags - inode.key = newKey - inode.value = value - inode.pgid = pgid - _assert(len(inode.key) > 0, "put: zero-length inode key") -} - -// del removes a key from the node. -func (n *node) del(key []byte) { - // Find index of key. - index := sort.Search(len(n.inodes), func(i int) bool { return bytes.Compare(n.inodes[i].key, key) != -1 }) - - // Exit if the key isn't found. - if index >= len(n.inodes) || !bytes.Equal(n.inodes[index].key, key) { - return - } - - // Delete inode from the node. - n.inodes = append(n.inodes[:index], n.inodes[index+1:]...) - - // Mark the node as needing rebalancing. - n.unbalanced = true -} - -// read initializes the node from a page. -func (n *node) read(p *page) { - n.pgid = p.id - n.isLeaf = ((p.flags & leafPageFlag) != 0) - n.inodes = make(inodes, int(p.count)) - - for i := 0; i < int(p.count); i++ { - inode := &n.inodes[i] - if n.isLeaf { - elem := p.leafPageElement(uint16(i)) - inode.flags = elem.flags - inode.key = elem.key() - inode.value = elem.value() - } else { - elem := p.branchPageElement(uint16(i)) - inode.pgid = elem.pgid - inode.key = elem.key() - } - _assert(len(inode.key) > 0, "read: zero-length inode key") - } - - // Save first key so we can find the node in the parent when we spill. - if len(n.inodes) > 0 { - n.key = n.inodes[0].key - _assert(len(n.key) > 0, "read: zero-length node key") - } else { - n.key = nil - } -} - -// write writes the items onto one or more pages. -func (n *node) write(p *page) { - // Initialize page. 
- if n.isLeaf { - p.flags |= leafPageFlag - } else { - p.flags |= branchPageFlag - } - - if len(n.inodes) >= 0xFFFF { - panic(fmt.Sprintf("inode overflow: %d (pgid=%d)", len(n.inodes), p.id)) - } - p.count = uint16(len(n.inodes)) - - // Stop here if there are no items to write. - if p.count == 0 { - return - } - - // Loop over each item and write it to the page. - b := (*[maxAllocSize]byte)(unsafe.Pointer(&p.ptr))[n.pageElementSize()*len(n.inodes):] - for i, item := range n.inodes { - _assert(len(item.key) > 0, "write: zero-length inode key") - - // Write the page element. - if n.isLeaf { - elem := p.leafPageElement(uint16(i)) - elem.pos = uint32(uintptr(unsafe.Pointer(&b[0])) - uintptr(unsafe.Pointer(elem))) - elem.flags = item.flags - elem.ksize = uint32(len(item.key)) - elem.vsize = uint32(len(item.value)) - } else { - elem := p.branchPageElement(uint16(i)) - elem.pos = uint32(uintptr(unsafe.Pointer(&b[0])) - uintptr(unsafe.Pointer(elem))) - elem.ksize = uint32(len(item.key)) - elem.pgid = item.pgid - _assert(elem.pgid != p.id, "write: circular dependency occurred") - } - - // If the length of key+value is larger than the max allocation size - // then we need to reallocate the byte array pointer. - // - // See: https://github.com/boltdb/bolt/pull/335 - klen, vlen := len(item.key), len(item.value) - if len(b) < klen+vlen { - b = (*[maxAllocSize]byte)(unsafe.Pointer(&b[0]))[:] - } - - // Write data for the element to the end of the page. - copy(b[0:], item.key) - b = b[klen:] - copy(b[0:], item.value) - b = b[vlen:] - } - - // DEBUG ONLY: n.dump() -} - -// split breaks up a node into multiple smaller nodes, if appropriate. -// This should only be called from the spill() function. -func (n *node) split(pageSize int) []*node { - var nodes []*node - - node := n - for { - // Split node into two. - a, b := node.splitTwo(pageSize) - nodes = append(nodes, a) - - // If we can't split then exit the loop. 
- if b == nil { - break - } - - // Set node to b so it gets split on the next iteration. - node = b - } - - return nodes -} - -// splitTwo breaks up a node into two smaller nodes, if appropriate. -// This should only be called from the split() function. -func (n *node) splitTwo(pageSize int) (*node, *node) { - // Ignore the split if the page doesn't have at least enough nodes for - // two pages or if the nodes can fit in a single page. - if len(n.inodes) <= (minKeysPerPage*2) || n.sizeLessThan(pageSize) { - return n, nil - } - - // Determine the threshold before starting a new node. - var fillPercent = n.bucket.FillPercent - if fillPercent < minFillPercent { - fillPercent = minFillPercent - } else if fillPercent > maxFillPercent { - fillPercent = maxFillPercent - } - threshold := int(float64(pageSize) * fillPercent) - - // Determine split position and sizes of the two pages. - splitIndex, _ := n.splitIndex(threshold) - - // Split node into two separate nodes. - // If there's no parent then we'll need to create one. - if n.parent == nil { - n.parent = &node{bucket: n.bucket, children: []*node{n}} - } - - // Create a new node and add it to the parent. - next := &node{bucket: n.bucket, isLeaf: n.isLeaf, parent: n.parent} - n.parent.children = append(n.parent.children, next) - - // Split inodes across two nodes. - next.inodes = n.inodes[splitIndex:] - n.inodes = n.inodes[:splitIndex] - - // Update the statistics. - n.bucket.tx.stats.Split++ - - return n, next -} - -// splitIndex finds the position where a page will fill a given threshold. -// It returns the index as well as the size of the first page. -// This is only be called from split(). -func (n *node) splitIndex(threshold int) (index, sz int) { - sz = pageHeaderSize - - // Loop until we only have the minimum number of keys required for the second page. 
- for i := 0; i < len(n.inodes)-minKeysPerPage; i++ { - index = i - inode := n.inodes[i] - elsize := n.pageElementSize() + len(inode.key) + len(inode.value) - - // If we have at least the minimum number of keys and adding another - // node would put us over the threshold then exit and return. - if i >= minKeysPerPage && sz+elsize > threshold { - break - } - - // Add the element size to the total size. - sz += elsize - } - - return -} - -// spill writes the nodes to dirty pages and splits nodes as it goes. -// Returns an error if dirty pages cannot be allocated. -func (n *node) spill() error { - var tx = n.bucket.tx - if n.spilled { - return nil - } - - // Spill child nodes first. Child nodes can materialize sibling nodes in - // the case of split-merge so we cannot use a range loop. We have to check - // the children size on every loop iteration. - sort.Sort(n.children) - for i := 0; i < len(n.children); i++ { - if err := n.children[i].spill(); err != nil { - return err - } - } - - // We no longer need the child list because it's only used for spill tracking. - n.children = nil - - // Split nodes into appropriate sizes. The first node will always be n. - var nodes = n.split(tx.db.pageSize) - for _, node := range nodes { - // Add node's page to the freelist if it's not new. - if node.pgid > 0 { - tx.db.freelist.free(tx.meta.txid, tx.page(node.pgid)) - node.pgid = 0 - } - - // Allocate contiguous space for the node. - p, err := tx.allocate((node.size() / tx.db.pageSize) + 1) - if err != nil { - return err - } - - // Write the node. - if p.id >= tx.meta.pgid { - panic(fmt.Sprintf("pgid (%d) above high water mark (%d)", p.id, tx.meta.pgid)) - } - node.pgid = p.id - node.write(p) - node.spilled = true - - // Insert into parent inodes. 
- if node.parent != nil { - var key = node.key - if key == nil { - key = node.inodes[0].key - } - - node.parent.put(key, node.inodes[0].key, nil, node.pgid, 0) - node.key = node.inodes[0].key - _assert(len(node.key) > 0, "spill: zero-length node key") - } - - // Update the statistics. - tx.stats.Spill++ - } - - // If the root node split and created a new root then we need to spill that - // as well. We'll clear out the children to make sure it doesn't try to respill. - if n.parent != nil && n.parent.pgid == 0 { - n.children = nil - return n.parent.spill() - } - - return nil -} - -// rebalance attempts to combine the node with sibling nodes if the node fill -// size is below a threshold or if there are not enough keys. -func (n *node) rebalance() { - if !n.unbalanced { - return - } - n.unbalanced = false - - // Update statistics. - n.bucket.tx.stats.Rebalance++ - - // Ignore if node is above threshold (25%) and has enough keys. - var threshold = n.bucket.tx.db.pageSize / 4 - if n.size() > threshold && len(n.inodes) > n.minKeys() { - return - } - - // Root node has special handling. - if n.parent == nil { - // If root node is a branch and only has one node then collapse it. - if !n.isLeaf && len(n.inodes) == 1 { - // Move root's child up. - child := n.bucket.node(n.inodes[0].pgid, n) - n.isLeaf = child.isLeaf - n.inodes = child.inodes[:] - n.children = child.children - - // Reparent all child nodes being moved. - for _, inode := range n.inodes { - if child, ok := n.bucket.nodes[inode.pgid]; ok { - child.parent = n - } - } - - // Remove old child. - child.parent = nil - delete(n.bucket.nodes, child.pgid) - child.free() - } - - return - } - - // If node has no keys then just remove it. 
- if n.numChildren() == 0 { - n.parent.del(n.key) - n.parent.removeChild(n) - delete(n.bucket.nodes, n.pgid) - n.free() - n.parent.rebalance() - return - } - - _assert(n.parent.numChildren() > 1, "parent must have at least 2 children") - - // Destination node is right sibling if idx == 0, otherwise left sibling. - var target *node - var useNextSibling = (n.parent.childIndex(n) == 0) - if useNextSibling { - target = n.nextSibling() - } else { - target = n.prevSibling() - } - - // If both this node and the target node are too small then merge them. - if useNextSibling { - // Reparent all child nodes being moved. - for _, inode := range target.inodes { - if child, ok := n.bucket.nodes[inode.pgid]; ok { - child.parent.removeChild(child) - child.parent = n - child.parent.children = append(child.parent.children, child) - } - } - - // Copy over inodes from target and remove target. - n.inodes = append(n.inodes, target.inodes...) - n.parent.del(target.key) - n.parent.removeChild(target) - delete(n.bucket.nodes, target.pgid) - target.free() - } else { - // Reparent all child nodes being moved. - for _, inode := range n.inodes { - if child, ok := n.bucket.nodes[inode.pgid]; ok { - child.parent.removeChild(child) - child.parent = target - child.parent.children = append(child.parent.children, child) - } - } - - // Copy over inodes to target and remove node. - target.inodes = append(target.inodes, n.inodes...) - n.parent.del(n.key) - n.parent.removeChild(n) - delete(n.bucket.nodes, n.pgid) - n.free() - } - - // Either this node or the target node was deleted from the parent so rebalance it. - n.parent.rebalance() -} - -// removes a node from the list of in-memory children. -// This does not affect the inodes. -func (n *node) removeChild(target *node) { - for i, child := range n.children { - if child == target { - n.children = append(n.children[:i], n.children[i+1:]...) 
- return - } - } -} - -// dereference causes the node to copy all its inode key/value references to heap memory. -// This is required when the mmap is reallocated so inodes are not pointing to stale data. -func (n *node) dereference() { - if n.key != nil { - key := make([]byte, len(n.key)) - copy(key, n.key) - n.key = key - _assert(n.pgid == 0 || len(n.key) > 0, "dereference: zero-length node key on existing node") - } - - for i := range n.inodes { - inode := &n.inodes[i] - - key := make([]byte, len(inode.key)) - copy(key, inode.key) - inode.key = key - _assert(len(inode.key) > 0, "dereference: zero-length inode key") - - value := make([]byte, len(inode.value)) - copy(value, inode.value) - inode.value = value - } - - // Recursively dereference children. - for _, child := range n.children { - child.dereference() - } - - // Update statistics. - n.bucket.tx.stats.NodeDeref++ -} - -// free adds the node's underlying page to the freelist. -func (n *node) free() { - if n.pgid != 0 { - n.bucket.tx.db.freelist.free(n.bucket.tx.meta.txid, n.bucket.tx.page(n.pgid)) - n.pgid = 0 - } -} - -// dump writes the contents of the node to STDERR for debugging purposes. -/* -func (n *node) dump() { - // Write node header. - var typ = "branch" - if n.isLeaf { - typ = "leaf" - } - warnf("[NODE %d {type=%s count=%d}]", n.pgid, typ, len(n.inodes)) - - // Write out abbreviated version of each item. 
- for _, item := range n.inodes { - if n.isLeaf { - if item.flags&bucketLeafFlag != 0 { - bucket := (*bucket)(unsafe.Pointer(&item.value[0])) - warnf("+L %08x -> (bucket root=%d)", trunc(item.key, 4), bucket.root) - } else { - warnf("+L %08x -> %08x", trunc(item.key, 4), trunc(item.value, 4)) - } - } else { - warnf("+B %08x -> pgid=%d", trunc(item.key, 4), item.pgid) - } - } - warn("") -} -*/ - -type nodes []*node - -func (s nodes) Len() int { return len(s) } -func (s nodes) Swap(i, j int) { s[i], s[j] = s[j], s[i] } -func (s nodes) Less(i, j int) bool { return bytes.Compare(s[i].inodes[0].key, s[j].inodes[0].key) == -1 } - -// inode represents an internal node inside of a node. -// It can be used to point to elements in a page or point -// to an element which hasn't been added to a page yet. -type inode struct { - flags uint32 - pgid pgid - key []byte - value []byte -} - -type inodes []inode diff --git a/vendor/github.com/boltdb/bolt/page.go b/vendor/github.com/boltdb/bolt/page.go deleted file mode 100644 index cde403a..0000000 --- a/vendor/github.com/boltdb/bolt/page.go +++ /dev/null @@ -1,197 +0,0 @@ -package bolt - -import ( - "fmt" - "os" - "sort" - "unsafe" -) - -const pageHeaderSize = int(unsafe.Offsetof(((*page)(nil)).ptr)) - -const minKeysPerPage = 2 - -const branchPageElementSize = int(unsafe.Sizeof(branchPageElement{})) -const leafPageElementSize = int(unsafe.Sizeof(leafPageElement{})) - -const ( - branchPageFlag = 0x01 - leafPageFlag = 0x02 - metaPageFlag = 0x04 - freelistPageFlag = 0x10 -) - -const ( - bucketLeafFlag = 0x01 -) - -type pgid uint64 - -type page struct { - id pgid - flags uint16 - count uint16 - overflow uint32 - ptr uintptr -} - -// typ returns a human readable page type string used for debugging. 
-func (p *page) typ() string { - if (p.flags & branchPageFlag) != 0 { - return "branch" - } else if (p.flags & leafPageFlag) != 0 { - return "leaf" - } else if (p.flags & metaPageFlag) != 0 { - return "meta" - } else if (p.flags & freelistPageFlag) != 0 { - return "freelist" - } - return fmt.Sprintf("unknown<%02x>", p.flags) -} - -// meta returns a pointer to the metadata section of the page. -func (p *page) meta() *meta { - return (*meta)(unsafe.Pointer(&p.ptr)) -} - -// leafPageElement retrieves the leaf node by index -func (p *page) leafPageElement(index uint16) *leafPageElement { - n := &((*[0x7FFFFFF]leafPageElement)(unsafe.Pointer(&p.ptr)))[index] - return n -} - -// leafPageElements retrieves a list of leaf nodes. -func (p *page) leafPageElements() []leafPageElement { - if p.count == 0 { - return nil - } - return ((*[0x7FFFFFF]leafPageElement)(unsafe.Pointer(&p.ptr)))[:] -} - -// branchPageElement retrieves the branch node by index -func (p *page) branchPageElement(index uint16) *branchPageElement { - return &((*[0x7FFFFFF]branchPageElement)(unsafe.Pointer(&p.ptr)))[index] -} - -// branchPageElements retrieves a list of branch nodes. -func (p *page) branchPageElements() []branchPageElement { - if p.count == 0 { - return nil - } - return ((*[0x7FFFFFF]branchPageElement)(unsafe.Pointer(&p.ptr)))[:] -} - -// dump writes n bytes of the page to STDERR as hex output. -func (p *page) hexdump(n int) { - buf := (*[maxAllocSize]byte)(unsafe.Pointer(p))[:n] - fmt.Fprintf(os.Stderr, "%x\n", buf) -} - -type pages []*page - -func (s pages) Len() int { return len(s) } -func (s pages) Swap(i, j int) { s[i], s[j] = s[j], s[i] } -func (s pages) Less(i, j int) bool { return s[i].id < s[j].id } - -// branchPageElement represents a node on a branch page. -type branchPageElement struct { - pos uint32 - ksize uint32 - pgid pgid -} - -// key returns a byte slice of the node key. 
-func (n *branchPageElement) key() []byte { - buf := (*[maxAllocSize]byte)(unsafe.Pointer(n)) - return (*[maxAllocSize]byte)(unsafe.Pointer(&buf[n.pos]))[:n.ksize] -} - -// leafPageElement represents a node on a leaf page. -type leafPageElement struct { - flags uint32 - pos uint32 - ksize uint32 - vsize uint32 -} - -// key returns a byte slice of the node key. -func (n *leafPageElement) key() []byte { - buf := (*[maxAllocSize]byte)(unsafe.Pointer(n)) - return (*[maxAllocSize]byte)(unsafe.Pointer(&buf[n.pos]))[:n.ksize:n.ksize] -} - -// value returns a byte slice of the node value. -func (n *leafPageElement) value() []byte { - buf := (*[maxAllocSize]byte)(unsafe.Pointer(n)) - return (*[maxAllocSize]byte)(unsafe.Pointer(&buf[n.pos+n.ksize]))[:n.vsize:n.vsize] -} - -// PageInfo represents human readable information about a page. -type PageInfo struct { - ID int - Type string - Count int - OverflowCount int -} - -type pgids []pgid - -func (s pgids) Len() int { return len(s) } -func (s pgids) Swap(i, j int) { s[i], s[j] = s[j], s[i] } -func (s pgids) Less(i, j int) bool { return s[i] < s[j] } - -// merge returns the sorted union of a and b. -func (a pgids) merge(b pgids) pgids { - // Return the opposite slice if one is nil. - if len(a) == 0 { - return b - } - if len(b) == 0 { - return a - } - merged := make(pgids, len(a)+len(b)) - mergepgids(merged, a, b) - return merged -} - -// mergepgids copies the sorted union of a and b into dst. -// If dst is too small, it panics. -func mergepgids(dst, a, b pgids) { - if len(dst) < len(a)+len(b) { - panic(fmt.Errorf("mergepgids bad len %d < %d + %d", len(dst), len(a), len(b))) - } - // Copy in the opposite slice if one is nil. - if len(a) == 0 { - copy(dst, b) - return - } - if len(b) == 0 { - copy(dst, a) - return - } - - // Merged will hold all elements from both lists. - merged := dst[:0] - - // Assign lead to the slice with a lower starting value, follow to the higher value. 
- lead, follow := a, b - if b[0] < a[0] { - lead, follow = b, a - } - - // Continue while there are elements in the lead. - for len(lead) > 0 { - // Merge largest prefix of lead that is ahead of follow[0]. - n := sort.Search(len(lead), func(i int) bool { return lead[i] > follow[0] }) - merged = append(merged, lead[:n]...) - if n >= len(lead) { - break - } - - // Swap lead and follow. - lead, follow = follow, lead[n:] - } - - // Append what's left in follow. - _ = append(merged, follow...) -} diff --git a/vendor/github.com/boltdb/bolt/tx.go b/vendor/github.com/boltdb/bolt/tx.go deleted file mode 100644 index 6700308..0000000 --- a/vendor/github.com/boltdb/bolt/tx.go +++ /dev/null @@ -1,684 +0,0 @@ -package bolt - -import ( - "fmt" - "io" - "os" - "sort" - "strings" - "time" - "unsafe" -) - -// txid represents the internal transaction identifier. -type txid uint64 - -// Tx represents a read-only or read/write transaction on the database. -// Read-only transactions can be used for retrieving values for keys and creating cursors. -// Read/write transactions can create and remove buckets and create and remove keys. -// -// IMPORTANT: You must commit or rollback transactions when you are done with -// them. Pages can not be reclaimed by the writer until no more transactions -// are using them. A long running read transaction can cause the database to -// quickly grow. -type Tx struct { - writable bool - managed bool - db *DB - meta *meta - root Bucket - pages map[pgid]*page - stats TxStats - commitHandlers []func() - - // WriteFlag specifies the flag for write-related methods like WriteTo(). - // Tx opens the database file with the specified flag to copy the data. - // - // By default, the flag is unset, which works well for mostly in-memory - // workloads. For databases that are much larger than available RAM, - // set the flag to syscall.O_DIRECT to avoid trashing the page cache. - WriteFlag int -} - -// init initializes the transaction. 
-func (tx *Tx) init(db *DB) { - tx.db = db - tx.pages = nil - - // Copy the meta page since it can be changed by the writer. - tx.meta = &meta{} - db.meta().copy(tx.meta) - - // Copy over the root bucket. - tx.root = newBucket(tx) - tx.root.bucket = &bucket{} - *tx.root.bucket = tx.meta.root - - // Increment the transaction id and add a page cache for writable transactions. - if tx.writable { - tx.pages = make(map[pgid]*page) - tx.meta.txid += txid(1) - } -} - -// ID returns the transaction id. -func (tx *Tx) ID() int { - return int(tx.meta.txid) -} - -// DB returns a reference to the database that created the transaction. -func (tx *Tx) DB() *DB { - return tx.db -} - -// Size returns current database size in bytes as seen by this transaction. -func (tx *Tx) Size() int64 { - return int64(tx.meta.pgid) * int64(tx.db.pageSize) -} - -// Writable returns whether the transaction can perform write operations. -func (tx *Tx) Writable() bool { - return tx.writable -} - -// Cursor creates a cursor associated with the root bucket. -// All items in the cursor will return a nil value because all root bucket keys point to buckets. -// The cursor is only valid as long as the transaction is open. -// Do not use a cursor after the transaction is closed. -func (tx *Tx) Cursor() *Cursor { - return tx.root.Cursor() -} - -// Stats retrieves a copy of the current transaction statistics. -func (tx *Tx) Stats() TxStats { - return tx.stats -} - -// Bucket retrieves a bucket by name. -// Returns nil if the bucket does not exist. -// The bucket instance is only valid for the lifetime of the transaction. -func (tx *Tx) Bucket(name []byte) *Bucket { - return tx.root.Bucket(name) -} - -// CreateBucket creates a new bucket. -// Returns an error if the bucket already exists, if the bucket name is blank, or if the bucket name is too long. -// The bucket instance is only valid for the lifetime of the transaction. 
-func (tx *Tx) CreateBucket(name []byte) (*Bucket, error) { - return tx.root.CreateBucket(name) -} - -// CreateBucketIfNotExists creates a new bucket if it doesn't already exist. -// Returns an error if the bucket name is blank, or if the bucket name is too long. -// The bucket instance is only valid for the lifetime of the transaction. -func (tx *Tx) CreateBucketIfNotExists(name []byte) (*Bucket, error) { - return tx.root.CreateBucketIfNotExists(name) -} - -// DeleteBucket deletes a bucket. -// Returns an error if the bucket cannot be found or if the key represents a non-bucket value. -func (tx *Tx) DeleteBucket(name []byte) error { - return tx.root.DeleteBucket(name) -} - -// ForEach executes a function for each bucket in the root. -// If the provided function returns an error then the iteration is stopped and -// the error is returned to the caller. -func (tx *Tx) ForEach(fn func(name []byte, b *Bucket) error) error { - return tx.root.ForEach(func(k, v []byte) error { - if err := fn(k, tx.root.Bucket(k)); err != nil { - return err - } - return nil - }) -} - -// OnCommit adds a handler function to be executed after the transaction successfully commits. -func (tx *Tx) OnCommit(fn func()) { - tx.commitHandlers = append(tx.commitHandlers, fn) -} - -// Commit writes all changes to disk and updates the meta page. -// Returns an error if a disk write error occurs, or if Commit is -// called on a read-only transaction. -func (tx *Tx) Commit() error { - _assert(!tx.managed, "managed tx commit not allowed") - if tx.db == nil { - return ErrTxClosed - } else if !tx.writable { - return ErrTxNotWritable - } - - // TODO(benbjohnson): Use vectorized I/O to write out dirty pages. - - // Rebalance nodes which have had deletions. - var startTime = time.Now() - tx.root.rebalance() - if tx.stats.Rebalance > 0 { - tx.stats.RebalanceTime += time.Since(startTime) - } - - // spill data onto dirty pages. 
- startTime = time.Now() - if err := tx.root.spill(); err != nil { - tx.rollback() - return err - } - tx.stats.SpillTime += time.Since(startTime) - - // Free the old root bucket. - tx.meta.root.root = tx.root.root - - opgid := tx.meta.pgid - - // Free the freelist and allocate new pages for it. This will overestimate - // the size of the freelist but not underestimate the size (which would be bad). - tx.db.freelist.free(tx.meta.txid, tx.db.page(tx.meta.freelist)) - p, err := tx.allocate((tx.db.freelist.size() / tx.db.pageSize) + 1) - if err != nil { - tx.rollback() - return err - } - if err := tx.db.freelist.write(p); err != nil { - tx.rollback() - return err - } - tx.meta.freelist = p.id - - // If the high water mark has moved up then attempt to grow the database. - if tx.meta.pgid > opgid { - if err := tx.db.grow(int(tx.meta.pgid+1) * tx.db.pageSize); err != nil { - tx.rollback() - return err - } - } - - // Write dirty pages to disk. - startTime = time.Now() - if err := tx.write(); err != nil { - tx.rollback() - return err - } - - // If strict mode is enabled then perform a consistency check. - // Only the first consistency error is reported in the panic. - if tx.db.StrictMode { - ch := tx.Check() - var errs []string - for { - err, ok := <-ch - if !ok { - break - } - errs = append(errs, err.Error()) - } - if len(errs) > 0 { - panic("check fail: " + strings.Join(errs, "\n")) - } - } - - // Write meta to disk. - if err := tx.writeMeta(); err != nil { - tx.rollback() - return err - } - tx.stats.WriteTime += time.Since(startTime) - - // Finalize the transaction. - tx.close() - - // Execute commit handlers now that the locks have been removed. - for _, fn := range tx.commitHandlers { - fn() - } - - return nil -} - -// Rollback closes the transaction and ignores all previous updates. Read-only -// transactions must be rolled back and not committed. 
-func (tx *Tx) Rollback() error { - _assert(!tx.managed, "managed tx rollback not allowed") - if tx.db == nil { - return ErrTxClosed - } - tx.rollback() - return nil -} - -func (tx *Tx) rollback() { - if tx.db == nil { - return - } - if tx.writable { - tx.db.freelist.rollback(tx.meta.txid) - tx.db.freelist.reload(tx.db.page(tx.db.meta().freelist)) - } - tx.close() -} - -func (tx *Tx) close() { - if tx.db == nil { - return - } - if tx.writable { - // Grab freelist stats. - var freelistFreeN = tx.db.freelist.free_count() - var freelistPendingN = tx.db.freelist.pending_count() - var freelistAlloc = tx.db.freelist.size() - - // Remove transaction ref & writer lock. - tx.db.rwtx = nil - tx.db.rwlock.Unlock() - - // Merge statistics. - tx.db.statlock.Lock() - tx.db.stats.FreePageN = freelistFreeN - tx.db.stats.PendingPageN = freelistPendingN - tx.db.stats.FreeAlloc = (freelistFreeN + freelistPendingN) * tx.db.pageSize - tx.db.stats.FreelistInuse = freelistAlloc - tx.db.stats.TxStats.add(&tx.stats) - tx.db.statlock.Unlock() - } else { - tx.db.removeTx(tx) - } - - // Clear all references. - tx.db = nil - tx.meta = nil - tx.root = Bucket{tx: tx} - tx.pages = nil -} - -// Copy writes the entire database to a writer. -// This function exists for backwards compatibility. Use WriteTo() instead. -func (tx *Tx) Copy(w io.Writer) error { - _, err := tx.WriteTo(w) - return err -} - -// WriteTo writes the entire database to a writer. -// If err == nil then exactly tx.Size() bytes will be written into the writer. -func (tx *Tx) WriteTo(w io.Writer) (n int64, err error) { - // Attempt to open reader with WriteFlag - f, err := os.OpenFile(tx.db.path, os.O_RDONLY|tx.WriteFlag, 0) - if err != nil { - return 0, err - } - defer func() { _ = f.Close() }() - - // Generate a meta page. We use the same page data for both meta pages. - buf := make([]byte, tx.db.pageSize) - page := (*page)(unsafe.Pointer(&buf[0])) - page.flags = metaPageFlag - *page.meta() = *tx.meta - - // Write meta 0. 
- page.id = 0 - page.meta().checksum = page.meta().sum64() - nn, err := w.Write(buf) - n += int64(nn) - if err != nil { - return n, fmt.Errorf("meta 0 copy: %s", err) - } - - // Write meta 1 with a lower transaction id. - page.id = 1 - page.meta().txid -= 1 - page.meta().checksum = page.meta().sum64() - nn, err = w.Write(buf) - n += int64(nn) - if err != nil { - return n, fmt.Errorf("meta 1 copy: %s", err) - } - - // Move past the meta pages in the file. - if _, err := f.Seek(int64(tx.db.pageSize*2), os.SEEK_SET); err != nil { - return n, fmt.Errorf("seek: %s", err) - } - - // Copy data pages. - wn, err := io.CopyN(w, f, tx.Size()-int64(tx.db.pageSize*2)) - n += wn - if err != nil { - return n, err - } - - return n, f.Close() -} - -// CopyFile copies the entire database to file at the given path. -// A reader transaction is maintained during the copy so it is safe to continue -// using the database while a copy is in progress. -func (tx *Tx) CopyFile(path string, mode os.FileMode) error { - f, err := os.OpenFile(path, os.O_RDWR|os.O_CREATE|os.O_TRUNC, mode) - if err != nil { - return err - } - - err = tx.Copy(f) - if err != nil { - _ = f.Close() - return err - } - return f.Close() -} - -// Check performs several consistency checks on the database for this transaction. -// An error is returned if any inconsistency is found. -// -// It can be safely run concurrently on a writable transaction. However, this -// incurs a high cost for large databases and databases with a lot of subbuckets -// because of caching. This overhead can be removed if running on a read-only -// transaction, however, it is not safe to execute other writer transactions at -// the same time. -func (tx *Tx) Check() <-chan error { - ch := make(chan error) - go tx.check(ch) - return ch -} - -func (tx *Tx) check(ch chan error) { - // Check if any pages are double freed. 
- freed := make(map[pgid]bool) - all := make([]pgid, tx.db.freelist.count()) - tx.db.freelist.copyall(all) - for _, id := range all { - if freed[id] { - ch <- fmt.Errorf("page %d: already freed", id) - } - freed[id] = true - } - - // Track every reachable page. - reachable := make(map[pgid]*page) - reachable[0] = tx.page(0) // meta0 - reachable[1] = tx.page(1) // meta1 - for i := uint32(0); i <= tx.page(tx.meta.freelist).overflow; i++ { - reachable[tx.meta.freelist+pgid(i)] = tx.page(tx.meta.freelist) - } - - // Recursively check buckets. - tx.checkBucket(&tx.root, reachable, freed, ch) - - // Ensure all pages below high water mark are either reachable or freed. - for i := pgid(0); i < tx.meta.pgid; i++ { - _, isReachable := reachable[i] - if !isReachable && !freed[i] { - ch <- fmt.Errorf("page %d: unreachable unfreed", int(i)) - } - } - - // Close the channel to signal completion. - close(ch) -} - -func (tx *Tx) checkBucket(b *Bucket, reachable map[pgid]*page, freed map[pgid]bool, ch chan error) { - // Ignore inline buckets. - if b.root == 0 { - return - } - - // Check every page used by this bucket. - b.tx.forEachPage(b.root, 0, func(p *page, _ int) { - if p.id > tx.meta.pgid { - ch <- fmt.Errorf("page %d: out of bounds: %d", int(p.id), int(b.tx.meta.pgid)) - } - - // Ensure each page is only referenced once. - for i := pgid(0); i <= pgid(p.overflow); i++ { - var id = p.id + i - if _, ok := reachable[id]; ok { - ch <- fmt.Errorf("page %d: multiple references", int(id)) - } - reachable[id] = p - } - - // We should only encounter un-freed leaf and branch pages. - if freed[p.id] { - ch <- fmt.Errorf("page %d: reachable freed", int(p.id)) - } else if (p.flags&branchPageFlag) == 0 && (p.flags&leafPageFlag) == 0 { - ch <- fmt.Errorf("page %d: invalid type: %s", int(p.id), p.typ()) - } - }) - - // Check each bucket within this bucket. 
- _ = b.ForEach(func(k, v []byte) error { - if child := b.Bucket(k); child != nil { - tx.checkBucket(child, reachable, freed, ch) - } - return nil - }) -} - -// allocate returns a contiguous block of memory starting at a given page. -func (tx *Tx) allocate(count int) (*page, error) { - p, err := tx.db.allocate(count) - if err != nil { - return nil, err - } - - // Save to our page cache. - tx.pages[p.id] = p - - // Update statistics. - tx.stats.PageCount++ - tx.stats.PageAlloc += count * tx.db.pageSize - - return p, nil -} - -// write writes any dirty pages to disk. -func (tx *Tx) write() error { - // Sort pages by id. - pages := make(pages, 0, len(tx.pages)) - for _, p := range tx.pages { - pages = append(pages, p) - } - // Clear out page cache early. - tx.pages = make(map[pgid]*page) - sort.Sort(pages) - - // Write pages to disk in order. - for _, p := range pages { - size := (int(p.overflow) + 1) * tx.db.pageSize - offset := int64(p.id) * int64(tx.db.pageSize) - - // Write out page in "max allocation" sized chunks. - ptr := (*[maxAllocSize]byte)(unsafe.Pointer(p)) - for { - // Limit our write to our max allocation size. - sz := size - if sz > maxAllocSize-1 { - sz = maxAllocSize - 1 - } - - // Write chunk to disk. - buf := ptr[:sz] - if _, err := tx.db.ops.writeAt(buf, offset); err != nil { - return err - } - - // Update statistics. - tx.stats.Write++ - - // Exit inner for loop if we've written all the chunks. - size -= sz - if size == 0 { - break - } - - // Otherwise move offset forward and move pointer to next chunk. - offset += int64(sz) - ptr = (*[maxAllocSize]byte)(unsafe.Pointer(&ptr[sz])) - } - } - - // Ignore file sync if flag is set on DB. - if !tx.db.NoSync || IgnoreNoSync { - if err := fdatasync(tx.db); err != nil { - return err - } - } - - // Put small pages back to page pool. - for _, p := range pages { - // Ignore page sizes over 1 page. - // These are allocated using make() instead of the page pool. 
- if int(p.overflow) != 0 { - continue - } - - buf := (*[maxAllocSize]byte)(unsafe.Pointer(p))[:tx.db.pageSize] - - // See https://go.googlesource.com/go/+/f03c9202c43e0abb130669852082117ca50aa9b1 - for i := range buf { - buf[i] = 0 - } - tx.db.pagePool.Put(buf) - } - - return nil -} - -// writeMeta writes the meta to the disk. -func (tx *Tx) writeMeta() error { - // Create a temporary buffer for the meta page. - buf := make([]byte, tx.db.pageSize) - p := tx.db.pageInBuffer(buf, 0) - tx.meta.write(p) - - // Write the meta page to file. - if _, err := tx.db.ops.writeAt(buf, int64(p.id)*int64(tx.db.pageSize)); err != nil { - return err - } - if !tx.db.NoSync || IgnoreNoSync { - if err := fdatasync(tx.db); err != nil { - return err - } - } - - // Update statistics. - tx.stats.Write++ - - return nil -} - -// page returns a reference to the page with a given id. -// If page has been written to then a temporary buffered page is returned. -func (tx *Tx) page(id pgid) *page { - // Check the dirty pages first. - if tx.pages != nil { - if p, ok := tx.pages[id]; ok { - return p - } - } - - // Otherwise return directly from the mmap. - return tx.db.page(id) -} - -// forEachPage iterates over every page within a given page and executes a function. -func (tx *Tx) forEachPage(pgid pgid, depth int, fn func(*page, int)) { - p := tx.page(pgid) - - // Execute function. - fn(p, depth) - - // Recursively loop over children. - if (p.flags & branchPageFlag) != 0 { - for i := 0; i < int(p.count); i++ { - elem := p.branchPageElement(uint16(i)) - tx.forEachPage(elem.pgid, depth+1, fn) - } - } -} - -// Page returns page information for a given page number. -// This is only safe for concurrent use when used by a writable transaction. -func (tx *Tx) Page(id int) (*PageInfo, error) { - if tx.db == nil { - return nil, ErrTxClosed - } else if pgid(id) >= tx.meta.pgid { - return nil, nil - } - - // Build the page info. 
- p := tx.db.page(pgid(id)) - info := &PageInfo{ - ID: id, - Count: int(p.count), - OverflowCount: int(p.overflow), - } - - // Determine the type (or if it's free). - if tx.db.freelist.freed(pgid(id)) { - info.Type = "free" - } else { - info.Type = p.typ() - } - - return info, nil -} - -// TxStats represents statistics about the actions performed by the transaction. -type TxStats struct { - // Page statistics. - PageCount int // number of page allocations - PageAlloc int // total bytes allocated - - // Cursor statistics. - CursorCount int // number of cursors created - - // Node statistics - NodeCount int // number of node allocations - NodeDeref int // number of node dereferences - - // Rebalance statistics. - Rebalance int // number of node rebalances - RebalanceTime time.Duration // total time spent rebalancing - - // Split/Spill statistics. - Split int // number of nodes split - Spill int // number of nodes spilled - SpillTime time.Duration // total time spent spilling - - // Write statistics. - Write int // number of writes performed - WriteTime time.Duration // total time spent writing to disk -} - -func (s *TxStats) add(other *TxStats) { - s.PageCount += other.PageCount - s.PageAlloc += other.PageAlloc - s.CursorCount += other.CursorCount - s.NodeCount += other.NodeCount - s.NodeDeref += other.NodeDeref - s.Rebalance += other.Rebalance - s.RebalanceTime += other.RebalanceTime - s.Split += other.Split - s.Spill += other.Spill - s.SpillTime += other.SpillTime - s.Write += other.Write - s.WriteTime += other.WriteTime -} - -// Sub calculates and returns the difference between two sets of transaction stats. -// This is useful when obtaining stats at two different points and time and -// you need the performance counters that occurred within that time span. 
-func (s *TxStats) Sub(other *TxStats) TxStats { - var diff TxStats - diff.PageCount = s.PageCount - other.PageCount - diff.PageAlloc = s.PageAlloc - other.PageAlloc - diff.CursorCount = s.CursorCount - other.CursorCount - diff.NodeCount = s.NodeCount - other.NodeCount - diff.NodeDeref = s.NodeDeref - other.NodeDeref - diff.Rebalance = s.Rebalance - other.Rebalance - diff.RebalanceTime = s.RebalanceTime - other.RebalanceTime - diff.Split = s.Split - other.Split - diff.Spill = s.Spill - other.Spill - diff.SpillTime = s.SpillTime - other.SpillTime - diff.Write = s.Write - other.Write - diff.WriteTime = s.WriteTime - other.WriteTime - return diff -} diff --git a/vendor/github.com/cenkalti/backoff/.gitignore b/vendor/github.com/cenkalti/backoff/.gitignore deleted file mode 100644 index 0026861..0000000 --- a/vendor/github.com/cenkalti/backoff/.gitignore +++ /dev/null @@ -1,22 +0,0 @@ -# Compiled Object files, Static and Dynamic libs (Shared Objects) -*.o -*.a -*.so - -# Folders -_obj -_test - -# Architecture specific extensions/prefixes -*.[568vq] -[568vq].out - -*.cgo1.go -*.cgo2.c -_cgo_defun.c -_cgo_gotypes.go -_cgo_export.* - -_testmain.go - -*.exe diff --git a/vendor/github.com/cenkalti/backoff/.travis.yml b/vendor/github.com/cenkalti/backoff/.travis.yml deleted file mode 100644 index 47a6a46..0000000 --- a/vendor/github.com/cenkalti/backoff/.travis.yml +++ /dev/null @@ -1,10 +0,0 @@ -language: go -go: - - 1.7 - - 1.x - - tip -before_install: - - go get github.com/mattn/goveralls - - go get golang.org/x/tools/cmd/cover -script: - - $HOME/gopath/bin/goveralls -service=travis-ci diff --git a/vendor/github.com/cenkalti/backoff/LICENSE b/vendor/github.com/cenkalti/backoff/LICENSE deleted file mode 100644 index 89b8179..0000000 --- a/vendor/github.com/cenkalti/backoff/LICENSE +++ /dev/null @@ -1,20 +0,0 @@ -The MIT License (MIT) - -Copyright (c) 2014 Cenk Altı - -Permission is hereby granted, free of charge, to any person obtaining a copy of -this software and 
associated documentation files (the "Software"), to deal in -the Software without restriction, including without limitation the rights to -use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of -the Software, and to permit persons to whom the Software is furnished to do so, -subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS -FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR -COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER -IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/vendor/github.com/cenkalti/backoff/README.md b/vendor/github.com/cenkalti/backoff/README.md deleted file mode 100644 index 55ebc98..0000000 --- a/vendor/github.com/cenkalti/backoff/README.md +++ /dev/null @@ -1,30 +0,0 @@ -# Exponential Backoff [![GoDoc][godoc image]][godoc] [![Build Status][travis image]][travis] [![Coverage Status][coveralls image]][coveralls] - -This is a Go port of the exponential backoff algorithm from [Google's HTTP Client Library for Java][google-http-java-client]. - -[Exponential backoff][exponential backoff wiki] -is an algorithm that uses feedback to multiplicatively decrease the rate of some process, -in order to gradually find an acceptable rate. -The retries exponentially increase and stop increasing when a certain threshold is met. - -## Usage - -See https://godoc.org/github.com/cenkalti/backoff#pkg-examples - -## Contributing - -* I would like to keep this library as small as possible. -* Please don't send a PR without opening an issue and discussing it first. 
-* If proposed change is not a common use case, I will probably not accept it. - -[godoc]: https://godoc.org/github.com/cenkalti/backoff -[godoc image]: https://godoc.org/github.com/cenkalti/backoff?status.png -[travis]: https://travis-ci.org/cenkalti/backoff -[travis image]: https://travis-ci.org/cenkalti/backoff.png?branch=master -[coveralls]: https://coveralls.io/github/cenkalti/backoff?branch=master -[coveralls image]: https://coveralls.io/repos/github/cenkalti/backoff/badge.svg?branch=master - -[google-http-java-client]: https://github.com/google/google-http-java-client/blob/da1aa993e90285ec18579f1553339b00e19b3ab5/google-http-client/src/main/java/com/google/api/client/util/ExponentialBackOff.java -[exponential backoff wiki]: http://en.wikipedia.org/wiki/Exponential_backoff - -[advanced example]: https://godoc.org/github.com/cenkalti/backoff#example_ diff --git a/vendor/github.com/cenkalti/backoff/backoff.go b/vendor/github.com/cenkalti/backoff/backoff.go deleted file mode 100644 index 3676ee4..0000000 --- a/vendor/github.com/cenkalti/backoff/backoff.go +++ /dev/null @@ -1,66 +0,0 @@ -// Package backoff implements backoff algorithms for retrying operations. -// -// Use Retry function for retrying operations that may fail. -// If Retry does not meet your needs, -// copy/paste the function into your project and modify as you wish. -// -// There is also Ticker type similar to time.Ticker. -// You can use it if you need to work with channels. -// -// See Examples section below for usage examples. -package backoff - -import "time" - -// BackOff is a backoff policy for retrying an operation. -type BackOff interface { - // NextBackOff returns the duration to wait before retrying the operation, - // or backoff. Stop to indicate that no more retries should be made. - // - // Example usage: - // - // duration := backoff.NextBackOff(); - // if (duration == backoff.Stop) { - // // Do not retry operation. - // } else { - // // Sleep for duration and retry operation. 
- // } - // - NextBackOff() time.Duration - - // Reset to initial state. - Reset() -} - -// Stop indicates that no more retries should be made for use in NextBackOff(). -const Stop time.Duration = -1 - -// ZeroBackOff is a fixed backoff policy whose backoff time is always zero, -// meaning that the operation is retried immediately without waiting, indefinitely. -type ZeroBackOff struct{} - -func (b *ZeroBackOff) Reset() {} - -func (b *ZeroBackOff) NextBackOff() time.Duration { return 0 } - -// StopBackOff is a fixed backoff policy that always returns backoff.Stop for -// NextBackOff(), meaning that the operation should never be retried. -type StopBackOff struct{} - -func (b *StopBackOff) Reset() {} - -func (b *StopBackOff) NextBackOff() time.Duration { return Stop } - -// ConstantBackOff is a backoff policy that always returns the same backoff delay. -// This is in contrast to an exponential backoff policy, -// which returns a delay that grows longer as you call NextBackOff() over and over again. -type ConstantBackOff struct { - Interval time.Duration -} - -func (b *ConstantBackOff) Reset() {} -func (b *ConstantBackOff) NextBackOff() time.Duration { return b.Interval } - -func NewConstantBackOff(d time.Duration) *ConstantBackOff { - return &ConstantBackOff{Interval: d} -} diff --git a/vendor/github.com/cenkalti/backoff/context.go b/vendor/github.com/cenkalti/backoff/context.go deleted file mode 100644 index 7706faa..0000000 --- a/vendor/github.com/cenkalti/backoff/context.go +++ /dev/null @@ -1,63 +0,0 @@ -package backoff - -import ( - "context" - "time" -) - -// BackOffContext is a backoff policy that stops retrying after the context -// is canceled. 
-type BackOffContext interface { - BackOff - Context() context.Context -} - -type backOffContext struct { - BackOff - ctx context.Context -} - -// WithContext returns a BackOffContext with context ctx -// -// ctx must not be nil -func WithContext(b BackOff, ctx context.Context) BackOffContext { - if ctx == nil { - panic("nil context") - } - - if b, ok := b.(*backOffContext); ok { - return &backOffContext{ - BackOff: b.BackOff, - ctx: ctx, - } - } - - return &backOffContext{ - BackOff: b, - ctx: ctx, - } -} - -func ensureContext(b BackOff) BackOffContext { - if cb, ok := b.(BackOffContext); ok { - return cb - } - return WithContext(b, context.Background()) -} - -func (b *backOffContext) Context() context.Context { - return b.ctx -} - -func (b *backOffContext) NextBackOff() time.Duration { - select { - case <-b.ctx.Done(): - return Stop - default: - } - next := b.BackOff.NextBackOff() - if deadline, ok := b.ctx.Deadline(); ok && deadline.Sub(time.Now()) < next { - return Stop - } - return next -} diff --git a/vendor/github.com/cenkalti/backoff/exponential.go b/vendor/github.com/cenkalti/backoff/exponential.go deleted file mode 100644 index a031a65..0000000 --- a/vendor/github.com/cenkalti/backoff/exponential.go +++ /dev/null @@ -1,153 +0,0 @@ -package backoff - -import ( - "math/rand" - "time" -) - -/* -ExponentialBackOff is a backoff implementation that increases the backoff -period for each retry attempt using a randomization function that grows exponentially. - -NextBackOff() is calculated using the following formula: - - randomized interval = - RetryInterval * (random value in range [1 - RandomizationFactor, 1 + RandomizationFactor]) - -In other words NextBackOff() will range between the randomization factor -percentage below and above the retry interval. 
- -For example, given the following parameters: - - RetryInterval = 2 - RandomizationFactor = 0.5 - Multiplier = 2 - -the actual backoff period used in the next retry attempt will range between 1 and 3 seconds, -multiplied by the exponential, that is, between 2 and 6 seconds. - -Note: MaxInterval caps the RetryInterval and not the randomized interval. - -If the time elapsed since an ExponentialBackOff instance is created goes past the -MaxElapsedTime, then the method NextBackOff() starts returning backoff.Stop. - -The elapsed time can be reset by calling Reset(). - -Example: Given the following default arguments, for 10 tries the sequence will be, -and assuming we go over the MaxElapsedTime on the 10th try: - - Request # RetryInterval (seconds) Randomized Interval (seconds) - - 1 0.5 [0.25, 0.75] - 2 0.75 [0.375, 1.125] - 3 1.125 [0.562, 1.687] - 4 1.687 [0.8435, 2.53] - 5 2.53 [1.265, 3.795] - 6 3.795 [1.897, 5.692] - 7 5.692 [2.846, 8.538] - 8 8.538 [4.269, 12.807] - 9 12.807 [6.403, 19.210] - 10 19.210 backoff.Stop - -Note: Implementation is not thread-safe. -*/ -type ExponentialBackOff struct { - InitialInterval time.Duration - RandomizationFactor float64 - Multiplier float64 - MaxInterval time.Duration - // After MaxElapsedTime the ExponentialBackOff stops. - // It never stops if MaxElapsedTime == 0. - MaxElapsedTime time.Duration - Clock Clock - - currentInterval time.Duration - startTime time.Time -} - -// Clock is an interface that returns current time for BackOff. -type Clock interface { - Now() time.Time -} - -// Default values for ExponentialBackOff. -const ( - DefaultInitialInterval = 500 * time.Millisecond - DefaultRandomizationFactor = 0.5 - DefaultMultiplier = 1.5 - DefaultMaxInterval = 60 * time.Second - DefaultMaxElapsedTime = 15 * time.Minute -) - -// NewExponentialBackOff creates an instance of ExponentialBackOff using default values. 
-func NewExponentialBackOff() *ExponentialBackOff { - b := &ExponentialBackOff{ - InitialInterval: DefaultInitialInterval, - RandomizationFactor: DefaultRandomizationFactor, - Multiplier: DefaultMultiplier, - MaxInterval: DefaultMaxInterval, - MaxElapsedTime: DefaultMaxElapsedTime, - Clock: SystemClock, - } - b.Reset() - return b -} - -type systemClock struct{} - -func (t systemClock) Now() time.Time { - return time.Now() -} - -// SystemClock implements Clock interface that uses time.Now(). -var SystemClock = systemClock{} - -// Reset the interval back to the initial retry interval and restarts the timer. -func (b *ExponentialBackOff) Reset() { - b.currentInterval = b.InitialInterval - b.startTime = b.Clock.Now() -} - -// NextBackOff calculates the next backoff interval using the formula: -// Randomized interval = RetryInterval +/- (RandomizationFactor * RetryInterval) -func (b *ExponentialBackOff) NextBackOff() time.Duration { - // Make sure we have not gone over the maximum elapsed time. - if b.MaxElapsedTime != 0 && b.GetElapsedTime() > b.MaxElapsedTime { - return Stop - } - defer b.incrementCurrentInterval() - return getRandomValueFromInterval(b.RandomizationFactor, rand.Float64(), b.currentInterval) -} - -// GetElapsedTime returns the elapsed time since an ExponentialBackOff instance -// is created and is reset when Reset() is called. -// -// The elapsed time is computed using time.Now().UnixNano(). It is -// safe to call even while the backoff policy is used by a running -// ticker. -func (b *ExponentialBackOff) GetElapsedTime() time.Duration { - return b.Clock.Now().Sub(b.startTime) -} - -// Increments the current interval by multiplying it with the multiplier. -func (b *ExponentialBackOff) incrementCurrentInterval() { - // Check for overflow, if overflow is detected set the current interval to the max interval. 
- if float64(b.currentInterval) >= float64(b.MaxInterval)/b.Multiplier { - b.currentInterval = b.MaxInterval - } else { - b.currentInterval = time.Duration(float64(b.currentInterval) * b.Multiplier) - } -} - -// Returns a random value from the following interval: -// [randomizationFactor * currentInterval, randomizationFactor * currentInterval]. -func getRandomValueFromInterval(randomizationFactor, random float64, currentInterval time.Duration) time.Duration { - var delta = randomizationFactor * float64(currentInterval) - var minInterval = float64(currentInterval) - delta - var maxInterval = float64(currentInterval) + delta - - // Get a random value from the range [minInterval, maxInterval]. - // The formula used below has a +1 because if the minInterval is 1 and the maxInterval is 3 then - // we want a 33% chance for selecting either 1, 2 or 3. - return time.Duration(minInterval + (random * (maxInterval - minInterval + 1))) -} diff --git a/vendor/github.com/cenkalti/backoff/retry.go b/vendor/github.com/cenkalti/backoff/retry.go deleted file mode 100644 index e936a50..0000000 --- a/vendor/github.com/cenkalti/backoff/retry.go +++ /dev/null @@ -1,82 +0,0 @@ -package backoff - -import "time" - -// An Operation is executing by Retry() or RetryNotify(). -// The operation will be retried using a backoff policy if it returns an error. -type Operation func() error - -// Notify is a notify-on-error function. It receives an operation error and -// backoff delay if the operation failed (with an error). -// -// NOTE that if the backoff policy stated to stop retrying, -// the notify function isn't called. -type Notify func(error, time.Duration) - -// Retry the operation o until it does not return error or BackOff stops. -// o is guaranteed to be run at least once. -// -// If o returns a *PermanentError, the operation is not retried, and the -// wrapped error is returned. 
-// -// Retry sleeps the goroutine for the duration returned by BackOff after a -// failed operation returns. -func Retry(o Operation, b BackOff) error { return RetryNotify(o, b, nil) } - -// RetryNotify calls notify function with the error and wait duration -// for each failed attempt before sleep. -func RetryNotify(operation Operation, b BackOff, notify Notify) error { - var err error - var next time.Duration - var t *time.Timer - - cb := ensureContext(b) - - b.Reset() - for { - if err = operation(); err == nil { - return nil - } - - if permanent, ok := err.(*PermanentError); ok { - return permanent.Err - } - - if next = cb.NextBackOff(); next == Stop { - return err - } - - if notify != nil { - notify(err, next) - } - - if t == nil { - t = time.NewTimer(next) - defer t.Stop() - } else { - t.Reset(next) - } - - select { - case <-cb.Context().Done(): - return err - case <-t.C: - } - } -} - -// PermanentError signals that the operation should not be retried. -type PermanentError struct { - Err error -} - -func (e *PermanentError) Error() string { - return e.Err.Error() -} - -// Permanent wraps the given err in a *PermanentError. -func Permanent(err error) *PermanentError { - return &PermanentError{ - Err: err, - } -} diff --git a/vendor/github.com/cenkalti/backoff/ticker.go b/vendor/github.com/cenkalti/backoff/ticker.go deleted file mode 100644 index e41084b..0000000 --- a/vendor/github.com/cenkalti/backoff/ticker.go +++ /dev/null @@ -1,82 +0,0 @@ -package backoff - -import ( - "sync" - "time" -) - -// Ticker holds a channel that delivers `ticks' of a clock at times reported by a BackOff. -// -// Ticks will continue to arrive when the previous operation is still running, -// so operations that take a while to fail could run in quick succession. 
-type Ticker struct { - C <-chan time.Time - c chan time.Time - b BackOffContext - stop chan struct{} - stopOnce sync.Once -} - -// NewTicker returns a new Ticker containing a channel that will send -// the time at times specified by the BackOff argument. Ticker is -// guaranteed to tick at least once. The channel is closed when Stop -// method is called or BackOff stops. It is not safe to manipulate the -// provided backoff policy (notably calling NextBackOff or Reset) -// while the ticker is running. -func NewTicker(b BackOff) *Ticker { - c := make(chan time.Time) - t := &Ticker{ - C: c, - c: c, - b: ensureContext(b), - stop: make(chan struct{}), - } - t.b.Reset() - go t.run() - return t -} - -// Stop turns off a ticker. After Stop, no more ticks will be sent. -func (t *Ticker) Stop() { - t.stopOnce.Do(func() { close(t.stop) }) -} - -func (t *Ticker) run() { - c := t.c - defer close(c) - - // Ticker is guaranteed to tick at least once. - afterC := t.send(time.Now()) - - for { - if afterC == nil { - return - } - - select { - case tick := <-afterC: - afterC = t.send(tick) - case <-t.stop: - t.c = nil // Prevent future ticks from being sent to the channel. - return - case <-t.b.Context().Done(): - return - } - } -} - -func (t *Ticker) send(tick time.Time) <-chan time.Time { - select { - case t.c <- tick: - case <-t.stop: - return nil - } - - next := t.b.NextBackOff() - if next == Stop { - t.Stop() - return nil - } - - return time.After(next) -} diff --git a/vendor/github.com/cenkalti/backoff/tries.go b/vendor/github.com/cenkalti/backoff/tries.go deleted file mode 100644 index cfeefd9..0000000 --- a/vendor/github.com/cenkalti/backoff/tries.go +++ /dev/null @@ -1,35 +0,0 @@ -package backoff - -import "time" - -/* -WithMaxRetries creates a wrapper around another BackOff, which will -return Stop if NextBackOff() has been called too many times since -the last time Reset() was called - -Note: Implementation is not thread-safe. 
-*/ -func WithMaxRetries(b BackOff, max uint64) BackOff { - return &backOffTries{delegate: b, maxTries: max} -} - -type backOffTries struct { - delegate BackOff - maxTries uint64 - numTries uint64 -} - -func (b *backOffTries) NextBackOff() time.Duration { - if b.maxTries > 0 { - if b.maxTries <= b.numTries { - return Stop - } - b.numTries++ - } - return b.delegate.NextBackOff() -} - -func (b *backOffTries) Reset() { - b.numTries = 0 - b.delegate.Reset() -} diff --git a/vendor/github.com/cespare/xxhash/v2/.travis.yml b/vendor/github.com/cespare/xxhash/v2/.travis.yml deleted file mode 100644 index c516ea8..0000000 --- a/vendor/github.com/cespare/xxhash/v2/.travis.yml +++ /dev/null @@ -1,8 +0,0 @@ -language: go -go: - - "1.x" - - master -env: - - TAGS="" - - TAGS="-tags purego" -script: go test $TAGS -v ./... diff --git a/vendor/github.com/cespare/xxhash/v2/LICENSE.txt b/vendor/github.com/cespare/xxhash/v2/LICENSE.txt deleted file mode 100644 index 24b5306..0000000 --- a/vendor/github.com/cespare/xxhash/v2/LICENSE.txt +++ /dev/null @@ -1,22 +0,0 @@ -Copyright (c) 2016 Caleb Spare - -MIT License - -Permission is hereby granted, free of charge, to any person obtaining -a copy of this software and associated documentation files (the -"Software"), to deal in the Software without restriction, including -without limitation the rights to use, copy, modify, merge, publish, -distribute, sublicense, and/or sell copies of the Software, and to -permit persons to whom the Software is furnished to do so, subject to -the following conditions: - -The above copyright notice and this permission notice shall be -included in all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, -EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF -MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND -NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE -LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION -OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION -WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/vendor/github.com/cespare/xxhash/v2/README.md b/vendor/github.com/cespare/xxhash/v2/README.md deleted file mode 100644 index 2fd8693..0000000 --- a/vendor/github.com/cespare/xxhash/v2/README.md +++ /dev/null @@ -1,67 +0,0 @@ -# xxhash - -[![GoDoc](https://godoc.org/github.com/cespare/xxhash?status.svg)](https://godoc.org/github.com/cespare/xxhash) -[![Build Status](https://travis-ci.org/cespare/xxhash.svg?branch=master)](https://travis-ci.org/cespare/xxhash) - -xxhash is a Go implementation of the 64-bit -[xxHash](http://cyan4973.github.io/xxHash/) algorithm, XXH64. This is a -high-quality hashing algorithm that is much faster than anything in the Go -standard library. - -This package provides a straightforward API: - -``` -func Sum64(b []byte) uint64 -func Sum64String(s string) uint64 -type Digest struct{ ... } - func New() *Digest -``` - -The `Digest` type implements hash.Hash64. Its key methods are: - -``` -func (*Digest) Write([]byte) (int, error) -func (*Digest) WriteString(string) (int, error) -func (*Digest) Sum64() uint64 -``` - -This implementation provides a fast pure-Go implementation and an even faster -assembly implementation for amd64. - -## Compatibility - -This package is in a module and the latest code is in version 2 of the module. -You need a version of Go with at least "minimal module compatibility" to use -github.com/cespare/xxhash/v2: - -* 1.9.7+ for Go 1.9 -* 1.10.3+ for Go 1.10 -* Go 1.11 or later - -I recommend using the latest release of Go. - -## Benchmarks - -Here are some quick benchmarks comparing the pure-Go and assembly -implementations of Sum64. 
- -| input size | purego | asm | -| --- | --- | --- | -| 5 B | 979.66 MB/s | 1291.17 MB/s | -| 100 B | 7475.26 MB/s | 7973.40 MB/s | -| 4 KB | 17573.46 MB/s | 17602.65 MB/s | -| 10 MB | 17131.46 MB/s | 17142.16 MB/s | - -These numbers were generated on Ubuntu 18.04 with an Intel i7-8700K CPU using -the following commands under Go 1.11.2: - -``` -$ go test -tags purego -benchtime 10s -bench '/xxhash,direct,bytes' -$ go test -benchtime 10s -bench '/xxhash,direct,bytes' -``` - -## Projects using this package - -- [InfluxDB](https://github.com/influxdata/influxdb) -- [Prometheus](https://github.com/prometheus/prometheus) -- [FreeCache](https://github.com/coocood/freecache) diff --git a/vendor/github.com/cespare/xxhash/v2/go.mod b/vendor/github.com/cespare/xxhash/v2/go.mod deleted file mode 100644 index 49f6760..0000000 --- a/vendor/github.com/cespare/xxhash/v2/go.mod +++ /dev/null @@ -1,3 +0,0 @@ -module github.com/cespare/xxhash/v2 - -go 1.11 diff --git a/vendor/github.com/cespare/xxhash/v2/go.sum b/vendor/github.com/cespare/xxhash/v2/go.sum deleted file mode 100644 index e69de29..0000000 diff --git a/vendor/github.com/cespare/xxhash/v2/xxhash.go b/vendor/github.com/cespare/xxhash/v2/xxhash.go deleted file mode 100644 index db0b35f..0000000 --- a/vendor/github.com/cespare/xxhash/v2/xxhash.go +++ /dev/null @@ -1,236 +0,0 @@ -// Package xxhash implements the 64-bit variant of xxHash (XXH64) as described -// at http://cyan4973.github.io/xxHash/. -package xxhash - -import ( - "encoding/binary" - "errors" - "math/bits" -) - -const ( - prime1 uint64 = 11400714785074694791 - prime2 uint64 = 14029467366897019727 - prime3 uint64 = 1609587929392839161 - prime4 uint64 = 9650029242287828579 - prime5 uint64 = 2870177450012600261 -) - -// NOTE(caleb): I'm using both consts and vars of the primes. Using consts where -// possible in the Go code is worth a small (but measurable) performance boost -// by avoiding some MOVQs. 
Vars are needed for the asm and also are useful for -// convenience in the Go code in a few places where we need to intentionally -// avoid constant arithmetic (e.g., v1 := prime1 + prime2 fails because the -// result overflows a uint64). -var ( - prime1v = prime1 - prime2v = prime2 - prime3v = prime3 - prime4v = prime4 - prime5v = prime5 -) - -// Digest implements hash.Hash64. -type Digest struct { - v1 uint64 - v2 uint64 - v3 uint64 - v4 uint64 - total uint64 - mem [32]byte - n int // how much of mem is used -} - -// New creates a new Digest that computes the 64-bit xxHash algorithm. -func New() *Digest { - var d Digest - d.Reset() - return &d -} - -// Reset clears the Digest's state so that it can be reused. -func (d *Digest) Reset() { - d.v1 = prime1v + prime2 - d.v2 = prime2 - d.v3 = 0 - d.v4 = -prime1v - d.total = 0 - d.n = 0 -} - -// Size always returns 8 bytes. -func (d *Digest) Size() int { return 8 } - -// BlockSize always returns 32 bytes. -func (d *Digest) BlockSize() int { return 32 } - -// Write adds more data to d. It always returns len(b), nil. -func (d *Digest) Write(b []byte) (n int, err error) { - n = len(b) - d.total += uint64(n) - - if d.n+n < 32 { - // This new data doesn't even fill the current block. - copy(d.mem[d.n:], b) - d.n += n - return - } - - if d.n > 0 { - // Finish off the partial block. - copy(d.mem[d.n:], b) - d.v1 = round(d.v1, u64(d.mem[0:8])) - d.v2 = round(d.v2, u64(d.mem[8:16])) - d.v3 = round(d.v3, u64(d.mem[16:24])) - d.v4 = round(d.v4, u64(d.mem[24:32])) - b = b[32-d.n:] - d.n = 0 - } - - if len(b) >= 32 { - // One or more full blocks left. - nw := writeBlocks(d, b) - b = b[nw:] - } - - // Store any remaining partial block. - copy(d.mem[:], b) - d.n = len(b) - - return -} - -// Sum appends the current hash to b and returns the resulting slice. 
-func (d *Digest) Sum(b []byte) []byte { - s := d.Sum64() - return append( - b, - byte(s>>56), - byte(s>>48), - byte(s>>40), - byte(s>>32), - byte(s>>24), - byte(s>>16), - byte(s>>8), - byte(s), - ) -} - -// Sum64 returns the current hash. -func (d *Digest) Sum64() uint64 { - var h uint64 - - if d.total >= 32 { - v1, v2, v3, v4 := d.v1, d.v2, d.v3, d.v4 - h = rol1(v1) + rol7(v2) + rol12(v3) + rol18(v4) - h = mergeRound(h, v1) - h = mergeRound(h, v2) - h = mergeRound(h, v3) - h = mergeRound(h, v4) - } else { - h = d.v3 + prime5 - } - - h += d.total - - i, end := 0, d.n - for ; i+8 <= end; i += 8 { - k1 := round(0, u64(d.mem[i:i+8])) - h ^= k1 - h = rol27(h)*prime1 + prime4 - } - if i+4 <= end { - h ^= uint64(u32(d.mem[i:i+4])) * prime1 - h = rol23(h)*prime2 + prime3 - i += 4 - } - for i < end { - h ^= uint64(d.mem[i]) * prime5 - h = rol11(h) * prime1 - i++ - } - - h ^= h >> 33 - h *= prime2 - h ^= h >> 29 - h *= prime3 - h ^= h >> 32 - - return h -} - -const ( - magic = "xxh\x06" - marshaledSize = len(magic) + 8*5 + 32 -) - -// MarshalBinary implements the encoding.BinaryMarshaler interface. -func (d *Digest) MarshalBinary() ([]byte, error) { - b := make([]byte, 0, marshaledSize) - b = append(b, magic...) - b = appendUint64(b, d.v1) - b = appendUint64(b, d.v2) - b = appendUint64(b, d.v3) - b = appendUint64(b, d.v4) - b = appendUint64(b, d.total) - b = append(b, d.mem[:d.n]...) - b = b[:len(b)+len(d.mem)-d.n] - return b, nil -} - -// UnmarshalBinary implements the encoding.BinaryUnmarshaler interface. 
-func (d *Digest) UnmarshalBinary(b []byte) error { - if len(b) < len(magic) || string(b[:len(magic)]) != magic { - return errors.New("xxhash: invalid hash state identifier") - } - if len(b) != marshaledSize { - return errors.New("xxhash: invalid hash state size") - } - b = b[len(magic):] - b, d.v1 = consumeUint64(b) - b, d.v2 = consumeUint64(b) - b, d.v3 = consumeUint64(b) - b, d.v4 = consumeUint64(b) - b, d.total = consumeUint64(b) - copy(d.mem[:], b) - b = b[len(d.mem):] - d.n = int(d.total % uint64(len(d.mem))) - return nil -} - -func appendUint64(b []byte, x uint64) []byte { - var a [8]byte - binary.LittleEndian.PutUint64(a[:], x) - return append(b, a[:]...) -} - -func consumeUint64(b []byte) ([]byte, uint64) { - x := u64(b) - return b[8:], x -} - -func u64(b []byte) uint64 { return binary.LittleEndian.Uint64(b) } -func u32(b []byte) uint32 { return binary.LittleEndian.Uint32(b) } - -func round(acc, input uint64) uint64 { - acc += input * prime2 - acc = rol31(acc) - acc *= prime1 - return acc -} - -func mergeRound(acc, val uint64) uint64 { - val = round(0, val) - acc ^= val - acc = acc*prime1 + prime4 - return acc -} - -func rol1(x uint64) uint64 { return bits.RotateLeft64(x, 1) } -func rol7(x uint64) uint64 { return bits.RotateLeft64(x, 7) } -func rol11(x uint64) uint64 { return bits.RotateLeft64(x, 11) } -func rol12(x uint64) uint64 { return bits.RotateLeft64(x, 12) } -func rol18(x uint64) uint64 { return bits.RotateLeft64(x, 18) } -func rol23(x uint64) uint64 { return bits.RotateLeft64(x, 23) } -func rol27(x uint64) uint64 { return bits.RotateLeft64(x, 27) } -func rol31(x uint64) uint64 { return bits.RotateLeft64(x, 31) } diff --git a/vendor/github.com/cespare/xxhash/v2/xxhash_amd64.go b/vendor/github.com/cespare/xxhash/v2/xxhash_amd64.go deleted file mode 100644 index ad14b80..0000000 --- a/vendor/github.com/cespare/xxhash/v2/xxhash_amd64.go +++ /dev/null @@ -1,13 +0,0 @@ -// +build !appengine -// +build gc -// +build !purego - -package xxhash - -// Sum64 
computes the 64-bit xxHash digest of b. -// -//go:noescape -func Sum64(b []byte) uint64 - -//go:noescape -func writeBlocks(d *Digest, b []byte) int diff --git a/vendor/github.com/cespare/xxhash/v2/xxhash_amd64.s b/vendor/github.com/cespare/xxhash/v2/xxhash_amd64.s deleted file mode 100644 index d580e32..0000000 --- a/vendor/github.com/cespare/xxhash/v2/xxhash_amd64.s +++ /dev/null @@ -1,215 +0,0 @@ -// +build !appengine -// +build gc -// +build !purego - -#include "textflag.h" - -// Register allocation: -// AX h -// CX pointer to advance through b -// DX n -// BX loop end -// R8 v1, k1 -// R9 v2 -// R10 v3 -// R11 v4 -// R12 tmp -// R13 prime1v -// R14 prime2v -// R15 prime4v - -// round reads from and advances the buffer pointer in CX. -// It assumes that R13 has prime1v and R14 has prime2v. -#define round(r) \ - MOVQ (CX), R12 \ - ADDQ $8, CX \ - IMULQ R14, R12 \ - ADDQ R12, r \ - ROLQ $31, r \ - IMULQ R13, r - -// mergeRound applies a merge round on the two registers acc and val. -// It assumes that R13 has prime1v, R14 has prime2v, and R15 has prime4v. -#define mergeRound(acc, val) \ - IMULQ R14, val \ - ROLQ $31, val \ - IMULQ R13, val \ - XORQ val, acc \ - IMULQ R13, acc \ - ADDQ R15, acc - -// func Sum64(b []byte) uint64 -TEXT ·Sum64(SB), NOSPLIT, $0-32 - // Load fixed primes. - MOVQ ·prime1v(SB), R13 - MOVQ ·prime2v(SB), R14 - MOVQ ·prime4v(SB), R15 - - // Load slice. - MOVQ b_base+0(FP), CX - MOVQ b_len+8(FP), DX - LEAQ (CX)(DX*1), BX - - // The first loop limit will be len(b)-32. - SUBQ $32, BX - - // Check whether we have at least one block. - CMPQ DX, $32 - JLT noBlocks - - // Set up initial state (v1, v2, v3, v4). - MOVQ R13, R8 - ADDQ R14, R8 - MOVQ R14, R9 - XORQ R10, R10 - XORQ R11, R11 - SUBQ R13, R11 - - // Loop until CX > BX. 
-blockLoop: - round(R8) - round(R9) - round(R10) - round(R11) - - CMPQ CX, BX - JLE blockLoop - - MOVQ R8, AX - ROLQ $1, AX - MOVQ R9, R12 - ROLQ $7, R12 - ADDQ R12, AX - MOVQ R10, R12 - ROLQ $12, R12 - ADDQ R12, AX - MOVQ R11, R12 - ROLQ $18, R12 - ADDQ R12, AX - - mergeRound(AX, R8) - mergeRound(AX, R9) - mergeRound(AX, R10) - mergeRound(AX, R11) - - JMP afterBlocks - -noBlocks: - MOVQ ·prime5v(SB), AX - -afterBlocks: - ADDQ DX, AX - - // Right now BX has len(b)-32, and we want to loop until CX > len(b)-8. - ADDQ $24, BX - - CMPQ CX, BX - JG fourByte - -wordLoop: - // Calculate k1. - MOVQ (CX), R8 - ADDQ $8, CX - IMULQ R14, R8 - ROLQ $31, R8 - IMULQ R13, R8 - - XORQ R8, AX - ROLQ $27, AX - IMULQ R13, AX - ADDQ R15, AX - - CMPQ CX, BX - JLE wordLoop - -fourByte: - ADDQ $4, BX - CMPQ CX, BX - JG singles - - MOVL (CX), R8 - ADDQ $4, CX - IMULQ R13, R8 - XORQ R8, AX - - ROLQ $23, AX - IMULQ R14, AX - ADDQ ·prime3v(SB), AX - -singles: - ADDQ $4, BX - CMPQ CX, BX - JGE finalize - -singlesLoop: - MOVBQZX (CX), R12 - ADDQ $1, CX - IMULQ ·prime5v(SB), R12 - XORQ R12, AX - - ROLQ $11, AX - IMULQ R13, AX - - CMPQ CX, BX - JL singlesLoop - -finalize: - MOVQ AX, R12 - SHRQ $33, R12 - XORQ R12, AX - IMULQ R14, AX - MOVQ AX, R12 - SHRQ $29, R12 - XORQ R12, AX - IMULQ ·prime3v(SB), AX - MOVQ AX, R12 - SHRQ $32, R12 - XORQ R12, AX - - MOVQ AX, ret+24(FP) - RET - -// writeBlocks uses the same registers as above except that it uses AX to store -// the d pointer. - -// func writeBlocks(d *Digest, b []byte) int -TEXT ·writeBlocks(SB), NOSPLIT, $0-40 - // Load fixed primes needed for round. - MOVQ ·prime1v(SB), R13 - MOVQ ·prime2v(SB), R14 - - // Load slice. - MOVQ b_base+8(FP), CX - MOVQ b_len+16(FP), DX - LEAQ (CX)(DX*1), BX - SUBQ $32, BX - - // Load vN from d. 
- MOVQ d+0(FP), AX - MOVQ 0(AX), R8 // v1 - MOVQ 8(AX), R9 // v2 - MOVQ 16(AX), R10 // v3 - MOVQ 24(AX), R11 // v4 - - // We don't need to check the loop condition here; this function is - // always called with at least one block of data to process. -blockLoop: - round(R8) - round(R9) - round(R10) - round(R11) - - CMPQ CX, BX - JLE blockLoop - - // Copy vN back to d. - MOVQ R8, 0(AX) - MOVQ R9, 8(AX) - MOVQ R10, 16(AX) - MOVQ R11, 24(AX) - - // The number of bytes written is CX minus the old base pointer. - SUBQ b_base+8(FP), CX - MOVQ CX, ret+32(FP) - - RET diff --git a/vendor/github.com/cespare/xxhash/v2/xxhash_other.go b/vendor/github.com/cespare/xxhash/v2/xxhash_other.go deleted file mode 100644 index 4a5a821..0000000 --- a/vendor/github.com/cespare/xxhash/v2/xxhash_other.go +++ /dev/null @@ -1,76 +0,0 @@ -// +build !amd64 appengine !gc purego - -package xxhash - -// Sum64 computes the 64-bit xxHash digest of b. -func Sum64(b []byte) uint64 { - // A simpler version would be - // d := New() - // d.Write(b) - // return d.Sum64() - // but this is faster, particularly for small inputs. 
- - n := len(b) - var h uint64 - - if n >= 32 { - v1 := prime1v + prime2 - v2 := prime2 - v3 := uint64(0) - v4 := -prime1v - for len(b) >= 32 { - v1 = round(v1, u64(b[0:8:len(b)])) - v2 = round(v2, u64(b[8:16:len(b)])) - v3 = round(v3, u64(b[16:24:len(b)])) - v4 = round(v4, u64(b[24:32:len(b)])) - b = b[32:len(b):len(b)] - } - h = rol1(v1) + rol7(v2) + rol12(v3) + rol18(v4) - h = mergeRound(h, v1) - h = mergeRound(h, v2) - h = mergeRound(h, v3) - h = mergeRound(h, v4) - } else { - h = prime5 - } - - h += uint64(n) - - i, end := 0, len(b) - for ; i+8 <= end; i += 8 { - k1 := round(0, u64(b[i:i+8:len(b)])) - h ^= k1 - h = rol27(h)*prime1 + prime4 - } - if i+4 <= end { - h ^= uint64(u32(b[i:i+4:len(b)])) * prime1 - h = rol23(h)*prime2 + prime3 - i += 4 - } - for ; i < end; i++ { - h ^= uint64(b[i]) * prime5 - h = rol11(h) * prime1 - } - - h ^= h >> 33 - h *= prime2 - h ^= h >> 29 - h *= prime3 - h ^= h >> 32 - - return h -} - -func writeBlocks(d *Digest, b []byte) int { - v1, v2, v3, v4 := d.v1, d.v2, d.v3, d.v4 - n := len(b) - for len(b) >= 32 { - v1 = round(v1, u64(b[0:8:len(b)])) - v2 = round(v2, u64(b[8:16:len(b)])) - v3 = round(v3, u64(b[16:24:len(b)])) - v4 = round(v4, u64(b[24:32:len(b)])) - b = b[32:len(b):len(b)] - } - d.v1, d.v2, d.v3, d.v4 = v1, v2, v3, v4 - return n - len(b) -} diff --git a/vendor/github.com/cespare/xxhash/v2/xxhash_safe.go b/vendor/github.com/cespare/xxhash/v2/xxhash_safe.go deleted file mode 100644 index fc9bea7..0000000 --- a/vendor/github.com/cespare/xxhash/v2/xxhash_safe.go +++ /dev/null @@ -1,15 +0,0 @@ -// +build appengine - -// This file contains the safe implementations of otherwise unsafe-using code. - -package xxhash - -// Sum64String computes the 64-bit xxHash digest of s. -func Sum64String(s string) uint64 { - return Sum64([]byte(s)) -} - -// WriteString adds more data to d. It always returns len(s), nil. 
-func (d *Digest) WriteString(s string) (n int, err error) { - return d.Write([]byte(s)) -} diff --git a/vendor/github.com/cespare/xxhash/v2/xxhash_unsafe.go b/vendor/github.com/cespare/xxhash/v2/xxhash_unsafe.go deleted file mode 100644 index 53bf76e..0000000 --- a/vendor/github.com/cespare/xxhash/v2/xxhash_unsafe.go +++ /dev/null @@ -1,46 +0,0 @@ -// +build !appengine - -// This file encapsulates usage of unsafe. -// xxhash_safe.go contains the safe implementations. - -package xxhash - -import ( - "reflect" - "unsafe" -) - -// Notes: -// -// See https://groups.google.com/d/msg/golang-nuts/dcjzJy-bSpw/tcZYBzQqAQAJ -// for some discussion about these unsafe conversions. -// -// In the future it's possible that compiler optimizations will make these -// unsafe operations unnecessary: https://golang.org/issue/2205. -// -// Both of these wrapper functions still incur function call overhead since they -// will not be inlined. We could write Go/asm copies of Sum64 and Digest.Write -// for strings to squeeze out a bit more speed. Mid-stack inlining should -// eventually fix this. - -// Sum64String computes the 64-bit xxHash digest of s. -// It may be faster than Sum64([]byte(s)) by avoiding a copy. -func Sum64String(s string) uint64 { - var b []byte - bh := (*reflect.SliceHeader)(unsafe.Pointer(&b)) - bh.Data = (*reflect.StringHeader)(unsafe.Pointer(&s)).Data - bh.Len = len(s) - bh.Cap = len(s) - return Sum64(b) -} - -// WriteString adds more data to d. It always returns len(s), nil. -// It may be faster than Write([]byte(s)) by avoiding a copy. 
-func (d *Digest) WriteString(s string) (n int, err error) { - var b []byte - bh := (*reflect.SliceHeader)(unsafe.Pointer(&b)) - bh.Data = (*reflect.StringHeader)(unsafe.Pointer(&s)).Data - bh.Len = len(s) - bh.Cap = len(s) - return d.Write(b) -} diff --git a/vendor/github.com/elastic/gosigar/.appveyor.yml b/vendor/github.com/elastic/gosigar/.appveyor.yml deleted file mode 100644 index ff46708..0000000 --- a/vendor/github.com/elastic/gosigar/.appveyor.yml +++ /dev/null @@ -1,84 +0,0 @@ -# Version format -version: "{build}" - -# Operating system (build VM template) -os: Windows Server 2012 R2 - -# Environment variables -environment: - GOPATH: c:\gopath - GVM_GO_VERSION: 1.8.3 - GVM_DL: https://github.com/andrewkroh/gvm/releases/download/v0.0.1/gvm-windows-amd64.exe - -# Custom clone folder (variables are not expanded here). -clone_folder: c:\gopath\src\github.com\elastic\gosigar - -# Cache mingw install until appveyor.yml is modified. -cache: -- C:\ProgramData\chocolatey\bin -> .appveyor.yml -- C:\ProgramData\chocolatey\lib -> .appveyor.yml -- C:\Users\appveyor\.gvm -> .appveyor.yml -- C:\Windows\System32\gvm.exe -> .appveyor.yml -- C:\tools\mingw64 -> .appveyor.yml - -# Scripts that run after cloning repository -install: - - ps: >- - if(!(Test-Path "C:\Windows\System32\gvm.exe")) { - wget "$env:GVM_DL" -Outfile C:\Windows\System32\gvm.exe - } - - ps: gvm --format=powershell "$env:GVM_GO_VERSION" | Invoke-Expression - # AppVeyor installed mingw is 32-bit only so install 64-bit version. 
- - ps: >- - if(!(Test-Path "C:\tools\mingw64\bin\gcc.exe")) { - cinst mingw > mingw-install.txt - Push-AppveyorArtifact mingw-install.txt - } - - set PATH=C:\tools\mingw64\bin;%GOROOT%\bin;%PATH% - - set PATH=%GOPATH%\bin;%PATH% - - go version - - go env - - python --version - - go get github.com/elastic/beats/vendor/github.com/pierrre/gotestcover - -# To run your custom scripts instead of automatic MSBuild -build_script: - # Compile - - appveyor AddCompilationMessage "Starting Compile" - - cd c:\gopath\src\github.com\elastic\gosigar - - go get -v -t -d ./... - - go build - - go build -o examples/df/df.exe ./examples/df - - go build -o examples/free/free.exe ./examples/free - - go build -o examples/ps/ps.exe ./examples/ps - - go build -o examples/uptime/uptime.exe ./examples/uptime - - appveyor AddCompilationMessage "Compile Success" - -# To run your custom scripts instead of automatic tests -test_script: - # Unit tests - - ps: Add-AppveyorTest "Unit Tests" -Outcome Running - - mkdir build\coverage - - gotestcover -v -coverprofile=build/coverage/unit.cov github.com/elastic/gosigar/... - - ps: Update-AppveyorTest "Unit Tests" -Outcome Passed - - ps: Add-AppveyorTest "Running Examples" -Outcome Running - - .\examples\df\df.exe - - .\examples\free\free.exe - - .\examples\ps\ps.exe - - .\examples\uptime\uptime.exe - - ps: Update-AppveyorTest "Running Examples" -Outcome Passed - -after_test: - - go tool cover -html=build\coverage\unit.cov -o build\coverage\unit.html - - ps: Push-AppveyorArtifact build\coverage\unit.cov - - ps: Push-AppveyorArtifact build\coverage\unit.html - # Upload coverage report. - - "SET PATH=C:\\Python34;C:\\Python34\\Scripts;%PATH%" - - pip install codecov - - codecov -X gcov -f "build\coverage\unit.cov" - -# To disable deployment -deploy: off - -# Notifications should only be setup using the AppVeyor UI so that -# forks can be created without inheriting the settings. 
diff --git a/vendor/github.com/elastic/gosigar/.gitignore b/vendor/github.com/elastic/gosigar/.gitignore deleted file mode 100644 index 6f14505..0000000 --- a/vendor/github.com/elastic/gosigar/.gitignore +++ /dev/null @@ -1,41 +0,0 @@ -# Directories -/.vagrant -/.idea -/build - -# Files -.DS_Store -/*.iml -*.h - -# Editor swap files -*.swp -*.swo -*.swn - -# Compiled Object files, Static and Dynamic libs (Shared Objects) -*.o -*.a -*.so -*.exe -*.test -*.prof -*.pyc -*.swp - -# Example binaries -examples/df/df -examples/df/df.exe -examples/free/free -examples/free/free.exe -examples/ps/ps -examples/ps/ps.exe -examples/ss/ss -examples/ss/ss.exe -examples/uptime/uptime -examples/uptime/uptime.exe - -# Test Data -cgroup/testdata/* -!cgroup/testdata/*.zip - diff --git a/vendor/github.com/elastic/gosigar/.travis.yml b/vendor/github.com/elastic/gosigar/.travis.yml deleted file mode 100644 index 30f58bf..0000000 --- a/vendor/github.com/elastic/gosigar/.travis.yml +++ /dev/null @@ -1,36 +0,0 @@ -language: go - -os: - - linux - - osx - -go: - - 1.8.3 - -env: - global: - - PROJ="github.com/elastic/gosigar" - -sudo: false - -before_install: - # Put project into proper GOPATH location (important for forks). - - mkdir -p $HOME/gopath/src/${PROJ} - - rsync -az ${TRAVIS_BUILD_DIR}/ $HOME/gopath/src/${PROJ}/ - - export TRAVIS_BUILD_DIR=$HOME/gopath/src/${PROJ} - - cd $HOME/gopath/src/${PROJ} - -install: - - go get -v -t -d ./... - - go get github.com/elastic/beats/vendor/github.com/pierrre/gotestcover - -script: - - gofmt -l . | read && echo "Code differs from gofmt's style. Run 'gofmt -w .'" 1>&2 && exit 1 || true - - go vet - - go build - - mkdir -p build/coverage - - gotestcover -v -coverprofile=build/coverage/unit.cov github.com/elastic/gosigar/... 
- - for i in $(ls examples); do go build -o examples/$i/$i ./examples/$i; ./examples/$i/$i; done - -after_success: - - bash <(curl -s https://codecov.io/bash) -f build/coverage/unit.cov diff --git a/vendor/github.com/elastic/gosigar/CHANGELOG.md b/vendor/github.com/elastic/gosigar/CHANGELOG.md deleted file mode 100644 index 45262e7..0000000 --- a/vendor/github.com/elastic/gosigar/CHANGELOG.md +++ /dev/null @@ -1,113 +0,0 @@ -# Change Log -All notable changes to this project will be documented in this file. -This project adheres to [Semantic Versioning](http://semver.org/). - -## [Unreleased] - -### Added - -### Fixed - -- Added missing runtime import for FreeBSD. #104 - -### Changed - -### Deprecated - -## [0.9.0] - -### Added -- Added support for huge TLB pages on Linux #97 -- Added support for big endian platform #100 - -### Fixed -- Add missing method for OpenBSD #99 - -## [0.8.0] - -### Added -- Added partial `getrusage` support for Windows to retrieve system CPU time and user CPU time. #95 -- Added full `getrusage` support for Unix. #95 - -## [0.7.0] - -### Added -- Added method stubs for process handling for operating system that are not supported - by gosigar. All methods return `ErrNotImplemented` on such systems. #88 - -### Fixed -- Fix freebsd build by using the common version of Get(pid). #91 - -### Changed -- Fixed issues in cgroup package by adding missing error checks and closing - file handles. #92 - -## [0.6.0] - -### Added -- Added method stubs to enable compilation for operating systems that are not - supported by gosigar. All methods return `ErrNotImplemented` on these unsupported - operating systems. #83 -- FreeBSD returns `ErrNotImplemented` for `ProcTime.Get`. #83 - -### Changed -- OpenBSD returns `ErrNotImplemented` for `ProcTime.Get` instead of `nil`. #83 -- Fixed incorrect `Mem.Used` calculation under linux. #82 -- Fixed `ProcState` on Linux and FreeBSD when process names contain parentheses. 
#81 - -### Removed -- Remove NetBSD build from sigar_unix.go as it is not supported by gosigar. #83 - -## [0.5.0] - -### Changed -- Fixed Trim environment variables when comparing values in the test suite. #79 -- Make `kern_procargs` more robust under darwin when we cannot retrieve - all the information about a process. #78 - -## [0.4.0] - -### Changed -- Fixed Windows issue that caused a hang during `init()` if WMI wasn't ready. #74 - -## [0.3.0] - -### Added -- Read `MemAvailable` value for kernel 3.14+ #71 - -## [0.2.0] - -### Added -- Added `ErrCgroupsMissing` to indicate that /proc/cgroups is missing which is - an indicator that cgroups were disabled at compile time. #64 - -### Changed -- Changed `cgroup.SupportedSubsystems()` to honor the "enabled" column in the - /proc/cgroups file. #64 - -## [0.1.0] - -### Added -- Added `CpuList` implementation for Windows that returns CPU timing information - on a per CPU basis. #55 -- Added `Uptime` implementation for Windows. #55 -- Added `Swap` implementation for Windows based on page file metrics. #55 -- Added support to `github.com/gosigar/sys/windows` for querying and enabling - privileges in a process token. -- Added utility code for interfacing with linux NETLINK_INET_DIAG. #60 -- Added `ProcEnv` for getting a process's environment variables. #61 - -### Changed -- Changed several `OpenProcess` calls on Windows to request the lowest possible - access privileges. #50 -- Removed cgo usage from Windows code. -- Added OS version checks to `ProcArgs.Get` on Windows because the - `Win32_Process` WMI query is not available prior to Windows vista. On XP and - Windows 2003, this method returns `ErrNotImplemented`. #55 - -### Fixed -- Fixed value of `Mem.ActualFree` and `Mem.ActualUsed` on Windows. #49 -- Fixed `ProcTime.StartTime` on Windows to report value in milliseconds since - Unix epoch. #51 -- Fixed `ProcStatus.PPID` value is wrong on Windows. 
#55 -- Fixed `ProcStatus.Username` error on Windows XP #56 diff --git a/vendor/github.com/elastic/gosigar/LICENSE b/vendor/github.com/elastic/gosigar/LICENSE deleted file mode 100644 index 11069ed..0000000 --- a/vendor/github.com/elastic/gosigar/LICENSE +++ /dev/null @@ -1,201 +0,0 @@ - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - -TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - -1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). 
- - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - -2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - -3. 
Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - -4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, 
in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - -5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - -6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - -7. Disclaimer of Warranty. 
Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - -8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - -9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. 
- -END OF TERMS AND CONDITIONS - -APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - -Copyright [yyyy] [name of copyright owner] - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. diff --git a/vendor/github.com/elastic/gosigar/NOTICE b/vendor/github.com/elastic/gosigar/NOTICE deleted file mode 100644 index fda553b..0000000 --- a/vendor/github.com/elastic/gosigar/NOTICE +++ /dev/null @@ -1,9 +0,0 @@ -Copyright (c) [2009-2011] VMware, Inc. All Rights Reserved. - -This product is licensed to you under the Apache License, Version 2.0 (the "License"). -You may not use this product except in compliance with the License. - -This product includes a number of subcomponents with -separate copyright notices and license terms. Your use of these -subcomponents is subject to the terms and conditions of the -subcomponent's license, as noted in the LICENSE file. 
\ No newline at end of file diff --git a/vendor/github.com/elastic/gosigar/README.md b/vendor/github.com/elastic/gosigar/README.md deleted file mode 100644 index ecdfc1c..0000000 --- a/vendor/github.com/elastic/gosigar/README.md +++ /dev/null @@ -1,58 +0,0 @@ -# Go sigar [![Build Status](https://travis-ci.org/elastic/gosigar.svg?branch=master)](https://travis-ci.org/elastic/gosigar) [![Build status](https://ci.appveyor.com/api/projects/status/4yh6sa7u97ek5uib/branch/master?svg=true)](https://ci.appveyor.com/project/elastic-beats/gosigar/branch/master) - - -## Overview - -Go sigar is a golang implementation of the -[sigar API](https://github.com/hyperic/sigar). The Go version of -sigar has a very similar interface, but is being written from scratch -in pure go/cgo, rather than cgo bindings for libsigar. - -## Test drive - - $ go get github.com/elastic/gosigar - $ cd $GOPATH/src/github.com/elastic/gosigar/examples/ps - $ go build - $ ./ps - -## Supported platforms - -The features vary by operating system. - -| Feature | Linux | Darwin | Windows | OpenBSD | FreeBSD | -|-----------------|:-----:|:------:|:-------:|:-------:|:-------:| -| Cpu | X | X | X | X | X | -| CpuList | X | X | | X | X | -| FDUsage | X | | | | X | -| FileSystemList | X | X | X | X | X | -| FileSystemUsage | X | X | X | X | X | -| HugeTLBPages | X | | | | | -| LoadAverage | X | X | | X | X | -| Mem | X | X | X | X | X | -| ProcArgs | X | X | X | | X | -| ProcEnv | X | X | | | X | -| ProcExe | X | X | | | X | -| ProcFDUsage | X | | | | X | -| ProcList | X | X | X | | X | -| ProcMem | X | X | X | | X | -| ProcState | X | X | X | | X | -| ProcTime | X | X | X | | X | -| Swap | X | X | | X | X | -| Uptime | X | X | | X | X | - -## OS Specific Notes - -### FreeBSD - -Mount both `linprocfs` and `procfs` for compatability. Consider adding these -mounts to your `/etc/fstab` file so they are mounted automatically at boot. 
- -``` -sudo mount -t procfs proc /proc -sudo mkdir -p /compat/linux/proc -sudo mount -t linprocfs /dev/null /compat/linux/proc -``` - -## License - -Apache 2.0 diff --git a/vendor/github.com/elastic/gosigar/Vagrantfile b/vendor/github.com/elastic/gosigar/Vagrantfile deleted file mode 100644 index 6fd990c..0000000 --- a/vendor/github.com/elastic/gosigar/Vagrantfile +++ /dev/null @@ -1,25 +0,0 @@ -# Vagrantfile API/syntax version. Don't touch unless you know what you're doing! -VAGRANTFILE_API_VERSION = "2" - -Vagrant.configure(VAGRANTFILE_API_VERSION) do |config| - config.vm.box = "hashicorp/precise64" - config.vm.provision "shell", inline: "mkdir -p /home/vagrant/go" - config.vm.synced_folder ".", "/home/vagrant/go/src/github.com/cloudfoundry/gosigar" - config.vm.provision "shell", inline: "chown -R vagrant:vagrant /home/vagrant/go" - install_go = <<-BASH - set -e - -if [ ! -d "/usr/local/go" ]; then - cd /tmp && wget https://storage.googleapis.com/golang/go1.3.3.linux-amd64.tar.gz - cd /usr/local - tar xvzf /tmp/go1.3.3.linux-amd64.tar.gz - echo 'export GOPATH=/home/vagrant/go; export PATH=/usr/local/go/bin:$PATH:$GOPATH/bin' >> /home/vagrant/.bashrc -fi -export GOPATH=/home/vagrant/go -export PATH=/usr/local/go/bin:$PATH:$GOPATH/bin -/usr/local/go/bin/go get -u github.com/onsi/ginkgo/ginkgo -/usr/local/go/bin/go get -u github.com/onsi/gomega; -BASH - config.vm.provision "shell", inline: 'apt-get install -y git-core' - config.vm.provision "shell", inline: install_go -end diff --git a/vendor/github.com/elastic/gosigar/codecov.yml b/vendor/github.com/elastic/gosigar/codecov.yml deleted file mode 100644 index 76ade0f..0000000 --- a/vendor/github.com/elastic/gosigar/codecov.yml +++ /dev/null @@ -1,21 +0,0 @@ -# Enable coverage report message for diff on commit -coverage: - status: - project: off - patch: - default: - # basic - target: auto - threshold: null - base: auto - # advanced - branches: null - if_no_uploads: error - if_not_found: success - if_ci_failed: error 
- only_pulls: false - flags: null - paths: null - -# Disable comments on Pull Requests -comment: false diff --git a/vendor/github.com/elastic/gosigar/concrete_sigar.go b/vendor/github.com/elastic/gosigar/concrete_sigar.go deleted file mode 100644 index e3ee80a..0000000 --- a/vendor/github.com/elastic/gosigar/concrete_sigar.go +++ /dev/null @@ -1,89 +0,0 @@ -package gosigar - -import ( - "time" -) - -type ConcreteSigar struct{} - -func (c *ConcreteSigar) CollectCpuStats(collectionInterval time.Duration) (<-chan Cpu, chan<- struct{}) { - // samplesCh is buffered to 1 value to immediately return first CPU sample - samplesCh := make(chan Cpu, 1) - - stopCh := make(chan struct{}) - - go func() { - var cpuUsage Cpu - - // Immediately provide non-delta value. - // samplesCh is buffered to 1 value, so it will not block. - cpuUsage.Get() - samplesCh <- cpuUsage - - ticker := time.NewTicker(collectionInterval) - - for { - select { - case <-ticker.C: - previousCpuUsage := cpuUsage - - cpuUsage.Get() - - select { - case samplesCh <- cpuUsage.Delta(previousCpuUsage): - default: - // Include default to avoid channel blocking - } - - case <-stopCh: - return - } - } - }() - - return samplesCh, stopCh -} - -func (c *ConcreteSigar) GetLoadAverage() (LoadAverage, error) { - l := LoadAverage{} - err := l.Get() - return l, err -} - -func (c *ConcreteSigar) GetMem() (Mem, error) { - m := Mem{} - err := m.Get() - return m, err -} - -func (c *ConcreteSigar) GetSwap() (Swap, error) { - s := Swap{} - err := s.Get() - return s, err -} - -func (c *ConcreteSigar) GetHugeTLBPages() (HugeTLBPages, error) { - p := HugeTLBPages{} - err := p.Get() - return p, err -} - -func (c *ConcreteSigar) GetFileSystemUsage(path string) (FileSystemUsage, error) { - f := FileSystemUsage{} - err := f.Get(path) - return f, err -} - -func (c *ConcreteSigar) GetFDUsage() (FDUsage, error) { - fd := FDUsage{} - err := fd.Get() - return fd, err -} - -// GetRusage return the resource usage of the process -// Possible 
params: 0 = RUSAGE_SELF, 1 = RUSAGE_CHILDREN, 2 = RUSAGE_THREAD -func (c *ConcreteSigar) GetRusage(who int) (Rusage, error) { - r := Rusage{} - err := r.Get(who) - return r, err -} diff --git a/vendor/github.com/elastic/gosigar/sigar_darwin.go b/vendor/github.com/elastic/gosigar/sigar_darwin.go deleted file mode 100644 index a90b998..0000000 --- a/vendor/github.com/elastic/gosigar/sigar_darwin.go +++ /dev/null @@ -1,498 +0,0 @@ -// Copyright (c) 2012 VMware, Inc. - -package gosigar - -/* -#include -#include -#include -#include -#include -#include -#include -#include -#include -*/ -import "C" - -import ( - "bytes" - "encoding/binary" - "fmt" - "io" - "os/user" - "runtime" - "strconv" - "syscall" - "time" - "unsafe" -) - -func (self *LoadAverage) Get() error { - avg := []C.double{0, 0, 0} - - C.getloadavg(&avg[0], C.int(len(avg))) - - self.One = float64(avg[0]) - self.Five = float64(avg[1]) - self.Fifteen = float64(avg[2]) - - return nil -} - -func (self *Uptime) Get() error { - tv := syscall.Timeval32{} - - if err := sysctlbyname("kern.boottime", &tv); err != nil { - return err - } - - self.Length = time.Since(time.Unix(int64(tv.Sec), int64(tv.Usec)*1000)).Seconds() - - return nil -} - -func (self *Mem) Get() error { - var vmstat C.vm_statistics_data_t - - if err := sysctlbyname("hw.memsize", &self.Total); err != nil { - return err - } - - if err := vm_info(&vmstat); err != nil { - return err - } - - kern := uint64(vmstat.inactive_count) << 12 - self.Free = uint64(vmstat.free_count) << 12 - - self.Used = self.Total - self.Free - self.ActualFree = self.Free + kern - self.ActualUsed = self.Used - kern - - return nil -} - -type xsw_usage struct { - Total, Avail, Used uint64 -} - -func (self *Swap) Get() error { - sw_usage := xsw_usage{} - - if err := sysctlbyname("vm.swapusage", &sw_usage); err != nil { - return err - } - - self.Total = sw_usage.Total - self.Used = sw_usage.Used - self.Free = sw_usage.Avail - - return nil -} - -func (self *HugeTLBPages) Get() error { - 
return ErrNotImplemented{runtime.GOOS} -} - -func (self *Cpu) Get() error { - var count C.mach_msg_type_number_t = C.HOST_CPU_LOAD_INFO_COUNT - var cpuload C.host_cpu_load_info_data_t - - status := C.host_statistics(C.host_t(C.mach_host_self()), - C.HOST_CPU_LOAD_INFO, - C.host_info_t(unsafe.Pointer(&cpuload)), - &count) - - if status != C.KERN_SUCCESS { - return fmt.Errorf("host_statistics error=%d", status) - } - - self.User = uint64(cpuload.cpu_ticks[C.CPU_STATE_USER]) - self.Sys = uint64(cpuload.cpu_ticks[C.CPU_STATE_SYSTEM]) - self.Idle = uint64(cpuload.cpu_ticks[C.CPU_STATE_IDLE]) - self.Nice = uint64(cpuload.cpu_ticks[C.CPU_STATE_NICE]) - - return nil -} - -func (self *CpuList) Get() error { - var count C.mach_msg_type_number_t - var cpuload *C.processor_cpu_load_info_data_t - var ncpu C.natural_t - - status := C.host_processor_info(C.host_t(C.mach_host_self()), - C.PROCESSOR_CPU_LOAD_INFO, - &ncpu, - (*C.processor_info_array_t)(unsafe.Pointer(&cpuload)), - &count) - - if status != C.KERN_SUCCESS { - return fmt.Errorf("host_processor_info error=%d", status) - } - - // jump through some cgo casting hoops and ensure we properly free - // the memory that cpuload points to - target := C.vm_map_t(C.mach_task_self_) - address := C.vm_address_t(uintptr(unsafe.Pointer(cpuload))) - defer C.vm_deallocate(target, address, C.vm_size_t(ncpu)) - - // the body of struct processor_cpu_load_info - // aka processor_cpu_load_info_data_t - var cpu_ticks [C.CPU_STATE_MAX]uint32 - - // copy the cpuload array to a []byte buffer - // where we can binary.Read the data - size := int(ncpu) * binary.Size(cpu_ticks) - buf := C.GoBytes(unsafe.Pointer(cpuload), C.int(size)) - - bbuf := bytes.NewBuffer(buf) - - self.List = make([]Cpu, 0, ncpu) - - for i := 0; i < int(ncpu); i++ { - cpu := Cpu{} - - err := binary.Read(bbuf, binary.LittleEndian, &cpu_ticks) - if err != nil { - return err - } - - cpu.User = uint64(cpu_ticks[C.CPU_STATE_USER]) - cpu.Sys = uint64(cpu_ticks[C.CPU_STATE_SYSTEM]) 
- cpu.Idle = uint64(cpu_ticks[C.CPU_STATE_IDLE]) - cpu.Nice = uint64(cpu_ticks[C.CPU_STATE_NICE]) - - self.List = append(self.List, cpu) - } - - return nil -} - -func (self *FDUsage) Get() error { - return ErrNotImplemented{runtime.GOOS} -} - -func (self *FileSystemList) Get() error { - num, err := syscall.Getfsstat(nil, C.MNT_NOWAIT) - if err != nil { - return err - } - - buf := make([]syscall.Statfs_t, num) - - _, err = syscall.Getfsstat(buf, C.MNT_NOWAIT) - if err != nil { - return err - } - - fslist := make([]FileSystem, 0, num) - - for i := 0; i < num; i++ { - fs := FileSystem{} - - fs.DirName = bytePtrToString(&buf[i].Mntonname[0]) - fs.DevName = bytePtrToString(&buf[i].Mntfromname[0]) - fs.SysTypeName = bytePtrToString(&buf[i].Fstypename[0]) - - fslist = append(fslist, fs) - } - - self.List = fslist - - return err -} - -func (self *ProcList) Get() error { - n := C.proc_listpids(C.PROC_ALL_PIDS, 0, nil, 0) - if n <= 0 { - return syscall.EINVAL - } - buf := make([]byte, n) - n = C.proc_listpids(C.PROC_ALL_PIDS, 0, unsafe.Pointer(&buf[0]), n) - if n <= 0 { - return syscall.ENOMEM - } - - var pid int32 - num := int(n) / binary.Size(pid) - list := make([]int, 0, num) - bbuf := bytes.NewBuffer(buf) - - for i := 0; i < num; i++ { - if err := binary.Read(bbuf, binary.LittleEndian, &pid); err != nil { - return err - } - if pid == 0 { - continue - } - - list = append(list, int(pid)) - } - - self.List = list - - return nil -} - -func (self *ProcState) Get(pid int) error { - info := C.struct_proc_taskallinfo{} - - if err := task_info(pid, &info); err != nil { - return err - } - - self.Name = C.GoString(&info.pbsd.pbi_comm[0]) - - switch info.pbsd.pbi_status { - case C.SIDL: - self.State = RunStateIdle - case C.SRUN: - self.State = RunStateRun - case C.SSLEEP: - self.State = RunStateSleep - case C.SSTOP: - self.State = RunStateStop - case C.SZOMB: - self.State = RunStateZombie - default: - self.State = RunStateUnknown - } - - self.Ppid = int(info.pbsd.pbi_ppid) - - 
self.Pgid = int(info.pbsd.pbi_pgid) - - self.Tty = int(info.pbsd.e_tdev) - - self.Priority = int(info.ptinfo.pti_priority) - - self.Nice = int(info.pbsd.pbi_nice) - - // Get process username. Fallback to UID if username is not available. - uid := strconv.Itoa(int(info.pbsd.pbi_uid)) - user, err := user.LookupId(uid) - if err == nil && user.Username != "" { - self.Username = user.Username - } else { - self.Username = uid - } - - return nil -} - -func (self *ProcMem) Get(pid int) error { - info := C.struct_proc_taskallinfo{} - - if err := task_info(pid, &info); err != nil { - return err - } - - self.Size = uint64(info.ptinfo.pti_virtual_size) - self.Resident = uint64(info.ptinfo.pti_resident_size) - self.PageFaults = uint64(info.ptinfo.pti_faults) - - return nil -} - -func (self *ProcTime) Get(pid int) error { - info := C.struct_proc_taskallinfo{} - - if err := task_info(pid, &info); err != nil { - return err - } - - self.User = - uint64(info.ptinfo.pti_total_user) / uint64(time.Millisecond) - - self.Sys = - uint64(info.ptinfo.pti_total_system) / uint64(time.Millisecond) - - self.Total = self.User + self.Sys - - self.StartTime = (uint64(info.pbsd.pbi_start_tvsec) * 1000) + - (uint64(info.pbsd.pbi_start_tvusec) / 1000) - - return nil -} - -func (self *ProcArgs) Get(pid int) error { - var args []string - - argv := func(arg string) { - args = append(args, arg) - } - - err := kern_procargs(pid, nil, argv, nil) - - self.List = args - - return err -} - -func (self *ProcEnv) Get(pid int) error { - if self.Vars == nil { - self.Vars = map[string]string{} - } - - env := func(k, v string) { - self.Vars[k] = v - } - - return kern_procargs(pid, nil, nil, env) -} - -func (self *ProcExe) Get(pid int) error { - exe := func(arg string) { - self.Name = arg - } - - return kern_procargs(pid, exe, nil, nil) -} - -func (self *ProcFDUsage) Get(pid int) error { - return ErrNotImplemented{runtime.GOOS} -} - -// wrapper around sysctl KERN_PROCARGS2 -// callbacks params are optional, -// up to 
the caller as to which pieces of data they want -func kern_procargs(pid int, - exe func(string), - argv func(string), - env func(string, string)) error { - - mib := []C.int{C.CTL_KERN, C.KERN_PROCARGS2, C.int(pid)} - argmax := uintptr(C.ARG_MAX) - buf := make([]byte, argmax) - err := sysctl(mib, &buf[0], &argmax, nil, 0) - if err != nil { - return nil - } - - bbuf := bytes.NewBuffer(buf) - bbuf.Truncate(int(argmax)) - - var argc int32 - binary.Read(bbuf, binary.LittleEndian, &argc) - - path, err := bbuf.ReadBytes(0) - if err != nil { - return fmt.Errorf("Error reading the argv[0]: %v", err) - } - if exe != nil { - exe(string(chop(path))) - } - - // skip trailing \0's - for { - c, err := bbuf.ReadByte() - if err != nil { - return fmt.Errorf("Error skipping nils: %v", err) - } - if c != 0 { - bbuf.UnreadByte() - break // start of argv[0] - } - } - - for i := 0; i < int(argc); i++ { - arg, err := bbuf.ReadBytes(0) - if err == io.EOF { - break - } - if err != nil { - return fmt.Errorf("Error reading args: %v", err) - } - if argv != nil { - argv(string(chop(arg))) - } - } - - if env == nil { - return nil - } - - delim := []byte{61} // "=" - - for { - line, err := bbuf.ReadBytes(0) - if err == io.EOF || line[0] == 0 { - break - } - if err != nil { - return fmt.Errorf("Error reading args: %v", err) - } - pair := bytes.SplitN(chop(line), delim, 2) - - if len(pair) != 2 { - return fmt.Errorf("Error reading process information for PID: %d", pid) - } - - env(string(pair[0]), string(pair[1])) - } - - return nil -} - -// XXX copied from zsyscall_darwin_amd64.go -func sysctl(mib []C.int, old *byte, oldlen *uintptr, - new *byte, newlen uintptr) (err error) { - var p0 unsafe.Pointer - p0 = unsafe.Pointer(&mib[0]) - _, _, e1 := syscall.Syscall6(syscall.SYS___SYSCTL, uintptr(p0), - uintptr(len(mib)), - uintptr(unsafe.Pointer(old)), uintptr(unsafe.Pointer(oldlen)), - uintptr(unsafe.Pointer(new)), uintptr(newlen)) - if e1 != 0 { - err = e1 - } - return -} - -func vm_info(vmstat 
*C.vm_statistics_data_t) error { - var count C.mach_msg_type_number_t = C.HOST_VM_INFO_COUNT - - status := C.host_statistics( - C.host_t(C.mach_host_self()), - C.HOST_VM_INFO, - C.host_info_t(unsafe.Pointer(vmstat)), - &count) - - if status != C.KERN_SUCCESS { - return fmt.Errorf("host_statistics=%d", status) - } - - return nil -} - -// generic Sysctl buffer unmarshalling -func sysctlbyname(name string, data interface{}) (err error) { - val, err := syscall.Sysctl(name) - if err != nil { - return err - } - - buf := []byte(val) - - switch v := data.(type) { - case *uint64: - *v = *(*uint64)(unsafe.Pointer(&buf[0])) - return - } - - bbuf := bytes.NewBuffer([]byte(val)) - return binary.Read(bbuf, binary.LittleEndian, data) -} - -func task_info(pid int, info *C.struct_proc_taskallinfo) error { - size := C.int(unsafe.Sizeof(*info)) - ptr := unsafe.Pointer(info) - - n := C.proc_pidinfo(C.int(pid), C.PROC_PIDTASKALLINFO, 0, ptr, size) - if n != size { - return fmt.Errorf("Could not read process info for pid %d", pid) - } - - return nil -} diff --git a/vendor/github.com/elastic/gosigar/sigar_format.go b/vendor/github.com/elastic/gosigar/sigar_format.go deleted file mode 100644 index ac56c98..0000000 --- a/vendor/github.com/elastic/gosigar/sigar_format.go +++ /dev/null @@ -1,126 +0,0 @@ -// Copyright (c) 2012 VMware, Inc. 
- -package gosigar - -import ( - "bufio" - "bytes" - "fmt" - "strconv" - "time" -) - -// Go version of apr_strfsize -func FormatSize(size uint64) string { - ord := []string{"K", "M", "G", "T", "P", "E"} - o := 0 - buf := new(bytes.Buffer) - w := bufio.NewWriter(buf) - - if size < 973 { - fmt.Fprintf(w, "%3d ", size) - w.Flush() - return buf.String() - } - - for { - remain := size & 1023 - size >>= 10 - - if size >= 973 { - o++ - continue - } - - if size < 9 || (size == 9 && remain < 973) { - remain = ((remain * 5) + 256) / 512 - if remain >= 10 { - size++ - remain = 0 - } - - fmt.Fprintf(w, "%d.%d%s", size, remain, ord[o]) - break - } - - if remain >= 512 { - size++ - } - - fmt.Fprintf(w, "%3d%s", size, ord[o]) - break - } - - w.Flush() - return buf.String() -} - -func FormatPercent(percent float64) string { - return strconv.FormatFloat(percent, 'f', -1, 64) + "%" -} - -func (self *FileSystemUsage) UsePercent() float64 { - b_used := (self.Total - self.Free) / 1024 - b_avail := self.Avail / 1024 - utotal := b_used + b_avail - used := b_used - - if utotal != 0 { - u100 := used * 100 - pct := u100 / utotal - if u100%utotal != 0 { - pct += 1 - } - return (float64(pct) / float64(100)) * 100.0 - } - - return 0.0 -} - -func (self *Uptime) Format() string { - buf := new(bytes.Buffer) - w := bufio.NewWriter(buf) - uptime := uint64(self.Length) - - days := uptime / (60 * 60 * 24) - - if days != 0 { - s := "" - if days > 1 { - s = "s" - } - fmt.Fprintf(w, "%d day%s, ", days, s) - } - - minutes := uptime / 60 - hours := minutes / 60 - hours %= 24 - minutes %= 60 - - fmt.Fprintf(w, "%2d:%02d", hours, minutes) - - w.Flush() - return buf.String() -} - -func (self *ProcTime) FormatStartTime() string { - if self.StartTime == 0 { - return "00:00" - } - start := time.Unix(int64(self.StartTime)/1000, 0) - format := "Jan02" - if time.Since(start).Seconds() < (60 * 60 * 24) { - format = "15:04" - } - return start.Format(format) -} - -func (self *ProcTime) FormatTotal() string { - t := 
self.Total / 1000 - ss := t % 60 - t /= 60 - mm := t % 60 - t /= 60 - hh := t % 24 - return fmt.Sprintf("%02d:%02d:%02d", hh, mm, ss) -} diff --git a/vendor/github.com/elastic/gosigar/sigar_freebsd.go b/vendor/github.com/elastic/gosigar/sigar_freebsd.go deleted file mode 100644 index 9b2af63..0000000 --- a/vendor/github.com/elastic/gosigar/sigar_freebsd.go +++ /dev/null @@ -1,113 +0,0 @@ -// Copied and modified from sigar_linux.go. - -package gosigar - -import ( - "io/ioutil" - "runtime" - "strconv" - "strings" - "unsafe" -) - -/* -#include -#include -#include -#include -#include -#include -#include -#include -#include -*/ -import "C" - -func init() { - system.ticks = uint64(C.sysconf(C._SC_CLK_TCK)) - - Procd = "/compat/linux/proc" - - getLinuxBootTime() -} - -func getMountTableFileName() string { - return Procd + "/mtab" -} - -func (self *Uptime) Get() error { - ts := C.struct_timespec{} - - if _, err := C.clock_gettime(C.CLOCK_UPTIME, &ts); err != nil { - return err - } - - self.Length = float64(ts.tv_sec) + 1e-9*float64(ts.tv_nsec) - - return nil -} - -func (self *FDUsage) Get() error { - val := C.uint32_t(0) - sc := C.size_t(4) - - name := C.CString("kern.openfiles") - _, err := C.sysctlbyname(name, unsafe.Pointer(&val), &sc, nil, 0) - C.free(unsafe.Pointer(name)) - if err != nil { - return err - } - self.Open = uint64(val) - - name = C.CString("kern.maxfiles") - _, err = C.sysctlbyname(name, unsafe.Pointer(&val), &sc, nil, 0) - C.free(unsafe.Pointer(name)) - if err != nil { - return err - } - self.Max = uint64(val) - - self.Unused = self.Max - self.Open - - return nil -} - -func (self *ProcFDUsage) Get(pid int) error { - err := readFile("/proc/"+strconv.Itoa(pid)+"/rlimit", func(line string) bool { - if strings.HasPrefix(line, "nofile") { - fields := strings.Fields(line) - if len(fields) == 3 { - self.SoftLimit, _ = strconv.ParseUint(fields[1], 10, 64) - self.HardLimit, _ = strconv.ParseUint(fields[2], 10, 64) - } - return false - } - return true - }) - if 
err != nil { - return err - } - - // linprocfs only provides this information for this process (self). - fds, err := ioutil.ReadDir(procFileName(pid, "fd")) - if err != nil { - return err - } - self.Open = uint64(len(fds)) - - return nil -} - -func (self *HugeTLBPages) Get() error { - return ErrNotImplemented{runtime.GOOS} -} - -func parseCpuStat(self *Cpu, line string) error { - fields := strings.Fields(line) - - self.User, _ = strtoull(fields[1]) - self.Nice, _ = strtoull(fields[2]) - self.Sys, _ = strtoull(fields[3]) - self.Idle, _ = strtoull(fields[4]) - return nil -} diff --git a/vendor/github.com/elastic/gosigar/sigar_interface.go b/vendor/github.com/elastic/gosigar/sigar_interface.go deleted file mode 100644 index df79ae0..0000000 --- a/vendor/github.com/elastic/gosigar/sigar_interface.go +++ /dev/null @@ -1,207 +0,0 @@ -package gosigar - -import ( - "time" -) - -type ErrNotImplemented struct { - OS string -} - -func (e ErrNotImplemented) Error() string { - return "not implemented on " + e.OS -} - -func IsNotImplemented(err error) bool { - switch err.(type) { - case ErrNotImplemented, *ErrNotImplemented: - return true - default: - return false - } -} - -type Sigar interface { - CollectCpuStats(collectionInterval time.Duration) (<-chan Cpu, chan<- struct{}) - GetLoadAverage() (LoadAverage, error) - GetMem() (Mem, error) - GetSwap() (Swap, error) - GetHugeTLBPages(HugeTLBPages, error) - GetFileSystemUsage(string) (FileSystemUsage, error) - GetFDUsage() (FDUsage, error) - GetRusage(who int) (Rusage, error) -} - -type Cpu struct { - User uint64 - Nice uint64 - Sys uint64 - Idle uint64 - Wait uint64 - Irq uint64 - SoftIrq uint64 - Stolen uint64 -} - -func (cpu *Cpu) Total() uint64 { - return cpu.User + cpu.Nice + cpu.Sys + cpu.Idle + - cpu.Wait + cpu.Irq + cpu.SoftIrq + cpu.Stolen -} - -func (cpu Cpu) Delta(other Cpu) Cpu { - return Cpu{ - User: cpu.User - other.User, - Nice: cpu.Nice - other.Nice, - Sys: cpu.Sys - other.Sys, - Idle: cpu.Idle - other.Idle, - 
Wait: cpu.Wait - other.Wait, - Irq: cpu.Irq - other.Irq, - SoftIrq: cpu.SoftIrq - other.SoftIrq, - Stolen: cpu.Stolen - other.Stolen, - } -} - -type LoadAverage struct { - One, Five, Fifteen float64 -} - -type Uptime struct { - Length float64 -} - -type Mem struct { - Total uint64 - Used uint64 - Free uint64 - ActualFree uint64 - ActualUsed uint64 -} - -type Swap struct { - Total uint64 - Used uint64 - Free uint64 -} - -type HugeTLBPages struct { - Total uint64 - Free uint64 - Reserved uint64 - Surplus uint64 - DefaultSize uint64 - TotalAllocatedSize uint64 -} - -type CpuList struct { - List []Cpu -} - -type FDUsage struct { - Open uint64 - Unused uint64 - Max uint64 -} - -type FileSystem struct { - DirName string - DevName string - TypeName string - SysTypeName string - Options string - Flags uint32 -} - -type FileSystemList struct { - List []FileSystem -} - -type FileSystemUsage struct { - Total uint64 - Used uint64 - Free uint64 - Avail uint64 - Files uint64 - FreeFiles uint64 -} - -type ProcList struct { - List []int -} - -type RunState byte - -const ( - RunStateSleep = 'S' - RunStateRun = 'R' - RunStateStop = 'T' - RunStateZombie = 'Z' - RunStateIdle = 'D' - RunStateUnknown = '?' 
-) - -type ProcState struct { - Name string - Username string - State RunState - Ppid int - Pgid int - Tty int - Priority int - Nice int - Processor int -} - -type ProcMem struct { - Size uint64 - Resident uint64 - Share uint64 - MinorFaults uint64 - MajorFaults uint64 - PageFaults uint64 -} - -type ProcTime struct { - StartTime uint64 - User uint64 - Sys uint64 - Total uint64 -} - -type ProcArgs struct { - List []string -} - -type ProcEnv struct { - Vars map[string]string -} - -type ProcExe struct { - Name string - Cwd string - Root string -} - -type ProcFDUsage struct { - Open uint64 - SoftLimit uint64 - HardLimit uint64 -} - -type Rusage struct { - Utime time.Duration - Stime time.Duration - Maxrss int64 - Ixrss int64 - Idrss int64 - Isrss int64 - Minflt int64 - Majflt int64 - Nswap int64 - Inblock int64 - Oublock int64 - Msgsnd int64 - Msgrcv int64 - Nsignals int64 - Nvcsw int64 - Nivcsw int64 -} diff --git a/vendor/github.com/elastic/gosigar/sigar_linux.go b/vendor/github.com/elastic/gosigar/sigar_linux.go deleted file mode 100644 index 09f2e30..0000000 --- a/vendor/github.com/elastic/gosigar/sigar_linux.go +++ /dev/null @@ -1,108 +0,0 @@ -// Copyright (c) 2012 VMware, Inc. 
- -package gosigar - -import ( - "io/ioutil" - "strconv" - "strings" - "syscall" -) - -func init() { - system.ticks = 100 // C.sysconf(C._SC_CLK_TCK) - - Procd = "/proc" - - getLinuxBootTime() -} - -func getMountTableFileName() string { - return "/etc/mtab" -} - -func (self *Uptime) Get() error { - sysinfo := syscall.Sysinfo_t{} - - if err := syscall.Sysinfo(&sysinfo); err != nil { - return err - } - - self.Length = float64(sysinfo.Uptime) - - return nil -} - -func (self *FDUsage) Get() error { - return readFile(Procd+"/sys/fs/file-nr", func(line string) bool { - fields := strings.Fields(line) - if len(fields) == 3 { - self.Open, _ = strconv.ParseUint(fields[0], 10, 64) - self.Unused, _ = strconv.ParseUint(fields[1], 10, 64) - self.Max, _ = strconv.ParseUint(fields[2], 10, 64) - } - return false - }) -} - -func (self *HugeTLBPages) Get() error { - table, err := parseMeminfo() - if err != nil { - return err - } - - self.Total, _ = table["HugePages_Total"] - self.Free, _ = table["HugePages_Free"] - self.Reserved, _ = table["HugePages_Rsvd"] - self.Surplus, _ = table["HugePages_Surp"] - self.DefaultSize, _ = table["Hugepagesize"] - - if totalSize, found := table["Hugetlb"]; found { - self.TotalAllocatedSize = totalSize - } else { - // If Hugetlb is not present, or huge pages of different sizes - // are used, this figure can be unaccurate. 
- // TODO (jsoriano): Extract information from /sys/kernel/mm/hugepages too - self.TotalAllocatedSize = (self.Total - self.Free + self.Reserved) * self.DefaultSize - } - - return nil -} - -func (self *ProcFDUsage) Get(pid int) error { - err := readFile(procFileName(pid, "limits"), func(line string) bool { - if strings.HasPrefix(line, "Max open files") { - fields := strings.Fields(line) - if len(fields) == 6 { - self.SoftLimit, _ = strconv.ParseUint(fields[3], 10, 64) - self.HardLimit, _ = strconv.ParseUint(fields[4], 10, 64) - } - return false - } - return true - }) - if err != nil { - return err - } - fds, err := ioutil.ReadDir(procFileName(pid, "fd")) - if err != nil { - return err - } - self.Open = uint64(len(fds)) - return nil -} - -func parseCpuStat(self *Cpu, line string) error { - fields := strings.Fields(line) - - self.User, _ = strtoull(fields[1]) - self.Nice, _ = strtoull(fields[2]) - self.Sys, _ = strtoull(fields[3]) - self.Idle, _ = strtoull(fields[4]) - self.Wait, _ = strtoull(fields[5]) - self.Irq, _ = strtoull(fields[6]) - self.SoftIrq, _ = strtoull(fields[7]) - self.Stolen, _ = strtoull(fields[8]) - - return nil -} diff --git a/vendor/github.com/elastic/gosigar/sigar_linux_common.go b/vendor/github.com/elastic/gosigar/sigar_linux_common.go deleted file mode 100644 index 7ca6497..0000000 --- a/vendor/github.com/elastic/gosigar/sigar_linux_common.go +++ /dev/null @@ -1,482 +0,0 @@ -// Copyright (c) 2012 VMware, Inc. 
- -// +build freebsd linux - -package gosigar - -import ( - "bufio" - "bytes" - "fmt" - "io" - "io/ioutil" - "os" - "os/user" - "path/filepath" - "strconv" - "strings" - "syscall" -) - -var system struct { - ticks uint64 - btime uint64 -} - -var Procd string - -func getLinuxBootTime() { - // grab system boot time - readFile(Procd+"/stat", func(line string) bool { - if strings.HasPrefix(line, "btime") { - system.btime, _ = strtoull(line[6:]) - return false // stop reading - } - return true - }) -} - -func (self *LoadAverage) Get() error { - line, err := ioutil.ReadFile(Procd + "/loadavg") - if err != nil { - return nil - } - - fields := strings.Fields(string(line)) - - self.One, _ = strconv.ParseFloat(fields[0], 64) - self.Five, _ = strconv.ParseFloat(fields[1], 64) - self.Fifteen, _ = strconv.ParseFloat(fields[2], 64) - - return nil -} - -func (self *Mem) Get() error { - - table, err := parseMeminfo() - if err != nil { - return err - } - - self.Total, _ = table["MemTotal"] - self.Free, _ = table["MemFree"] - buffers, _ := table["Buffers"] - cached, _ := table["Cached"] - - if available, ok := table["MemAvailable"]; ok { - // MemAvailable is in /proc/meminfo (kernel 3.14+) - self.ActualFree = available - } else { - self.ActualFree = self.Free + buffers + cached - } - - self.Used = self.Total - self.Free - self.ActualUsed = self.Total - self.ActualFree - - return nil -} - -func (self *Swap) Get() error { - - table, err := parseMeminfo() - if err != nil { - return err - } - self.Total, _ = table["SwapTotal"] - self.Free, _ = table["SwapFree"] - - self.Used = self.Total - self.Free - return nil -} - -func (self *Cpu) Get() error { - return readFile(Procd+"/stat", func(line string) bool { - if len(line) > 4 && line[0:4] == "cpu " { - parseCpuStat(self, line) - return false - } - return true - - }) -} - -func (self *CpuList) Get() error { - capacity := len(self.List) - if capacity == 0 { - capacity = 4 - } - list := make([]Cpu, 0, capacity) - - err := 
readFile(Procd+"/stat", func(line string) bool { - if len(line) > 3 && line[0:3] == "cpu" && line[3] != ' ' { - cpu := Cpu{} - parseCpuStat(&cpu, line) - list = append(list, cpu) - } - return true - }) - - self.List = list - - return err -} - -func (self *FileSystemList) Get() error { - capacity := len(self.List) - if capacity == 0 { - capacity = 10 - } - fslist := make([]FileSystem, 0, capacity) - - err := readFile(getMountTableFileName(), func(line string) bool { - fields := strings.Fields(line) - - fs := FileSystem{} - fs.DevName = fields[0] - fs.DirName = fields[1] - fs.SysTypeName = fields[2] - fs.Options = fields[3] - - fslist = append(fslist, fs) - - return true - }) - - self.List = fslist - - return err -} - -func (self *ProcList) Get() error { - dir, err := os.Open(Procd) - if err != nil { - return err - } - defer dir.Close() - - const readAllDirnames = -1 // see os.File.Readdirnames doc - - names, err := dir.Readdirnames(readAllDirnames) - if err != nil { - return err - } - - capacity := len(names) - list := make([]int, 0, capacity) - - for _, name := range names { - if name[0] < '0' || name[0] > '9' { - continue - } - pid, err := strconv.Atoi(name) - if err == nil { - list = append(list, pid) - } - } - - self.List = list - - return nil -} - -func (self *ProcState) Get(pid int) error { - data, err := readProcFile(pid, "stat") - if err != nil { - return err - } - - // Extract the comm value with is surrounded by parentheses. - lIdx := bytes.Index(data, []byte("(")) - rIdx := bytes.LastIndex(data, []byte(")")) - if lIdx < 0 || rIdx < 0 || lIdx >= rIdx || rIdx+2 >= len(data) { - return fmt.Errorf("failed to extract comm for pid %d from '%v'", pid, string(data)) - } - self.Name = string(data[lIdx+1 : rIdx]) - - // Extract the rest of the fields that we are interested in. 
- fields := bytes.Fields(data[rIdx+2:]) - if len(fields) <= 36 { - return fmt.Errorf("expected more stat fields for pid %d from '%v'", pid, string(data)) - } - - interests := bytes.Join([][]byte{ - fields[0], // state - fields[1], // ppid - fields[2], // pgrp - fields[4], // tty_nr - fields[15], // priority - fields[16], // nice - fields[36], // processor (last processor executed on) - }, []byte(" ")) - - var state string - _, err = fmt.Fscan(bytes.NewBuffer(interests), - &state, - &self.Ppid, - &self.Pgid, - &self.Tty, - &self.Priority, - &self.Nice, - &self.Processor, - ) - if err != nil { - return fmt.Errorf("failed to parse stat fields for pid %d from '%v': %v", pid, string(data), err) - } - self.State = RunState(state[0]) - - // Read /proc/[pid]/status to get the uid, then lookup uid to get username. - status, err := getProcStatus(pid) - if err != nil { - return fmt.Errorf("failed to read process status for pid %d: %v", pid, err) - } - uids, err := getUIDs(status) - if err != nil { - return fmt.Errorf("failed to read process status for pid %d: %v", pid, err) - } - user, err := user.LookupId(uids[0]) - if err == nil { - self.Username = user.Username - } else { - self.Username = uids[0] - } - - return nil -} - -func (self *ProcMem) Get(pid int) error { - contents, err := readProcFile(pid, "statm") - if err != nil { - return err - } - - fields := strings.Fields(string(contents)) - - size, _ := strtoull(fields[0]) - self.Size = size << 12 - - rss, _ := strtoull(fields[1]) - self.Resident = rss << 12 - - share, _ := strtoull(fields[2]) - self.Share = share << 12 - - contents, err = readProcFile(pid, "stat") - if err != nil { - return err - } - - fields = strings.Fields(string(contents)) - - self.MinorFaults, _ = strtoull(fields[10]) - self.MajorFaults, _ = strtoull(fields[12]) - self.PageFaults = self.MinorFaults + self.MajorFaults - - return nil -} - -func (self *ProcTime) Get(pid int) error { - contents, err := readProcFile(pid, "stat") - if err != nil { - return 
err - } - - fields := strings.Fields(string(contents)) - - user, _ := strtoull(fields[13]) - sys, _ := strtoull(fields[14]) - // convert to millis - self.User = user * (1000 / system.ticks) - self.Sys = sys * (1000 / system.ticks) - self.Total = self.User + self.Sys - - // convert to millis - self.StartTime, _ = strtoull(fields[21]) - self.StartTime /= system.ticks - self.StartTime += system.btime - self.StartTime *= 1000 - - return nil -} - -func (self *ProcArgs) Get(pid int) error { - contents, err := readProcFile(pid, "cmdline") - if err != nil { - return err - } - - bbuf := bytes.NewBuffer(contents) - - var args []string - - for { - arg, err := bbuf.ReadBytes(0) - if err == io.EOF { - break - } - args = append(args, string(chop(arg))) - } - - self.List = args - - return nil -} - -func (self *ProcEnv) Get(pid int) error { - contents, err := readProcFile(pid, "environ") - if err != nil { - return err - } - - if self.Vars == nil { - self.Vars = map[string]string{} - } - - pairs := bytes.Split(contents, []byte{0}) - for _, kv := range pairs { - parts := bytes.SplitN(kv, []byte{'='}, 2) - if len(parts) != 2 { - continue - } - - key := string(bytes.TrimSpace(parts[0])) - if key == "" { - continue - } - - self.Vars[key] = string(bytes.TrimSpace(parts[1])) - } - - return nil -} - -func (self *ProcExe) Get(pid int) error { - fields := map[string]*string{ - "exe": &self.Name, - "cwd": &self.Cwd, - "root": &self.Root, - } - - for name, field := range fields { - val, err := os.Readlink(procFileName(pid, name)) - - if err != nil { - return err - } - - *field = val - } - - return nil -} - -func parseMeminfo() (map[string]uint64, error) { - table := map[string]uint64{} - - err := readFile(Procd+"/meminfo", func(line string) bool { - fields := strings.Split(line, ":") - - if len(fields) != 2 { - return true // skip on errors - } - - valueUnit := strings.Fields(fields[1]) - value, err := strtoull(valueUnit[0]) - if err != nil { - return true // skip on errors - } - - if 
len(valueUnit) > 1 && valueUnit[1] == "kB" { - value *= 1024 - } - table[fields[0]] = value - - return true - }) - return table, err -} - -func readFile(file string, handler func(string) bool) error { - contents, err := ioutil.ReadFile(file) - if err != nil { - return err - } - - reader := bufio.NewReader(bytes.NewBuffer(contents)) - - for { - line, _, err := reader.ReadLine() - if err == io.EOF { - break - } - if !handler(string(line)) { - break - } - } - - return nil -} - -func strtoull(val string) (uint64, error) { - return strconv.ParseUint(val, 10, 64) -} - -func procFileName(pid int, name string) string { - return Procd + "/" + strconv.Itoa(pid) + "/" + name -} - -func readProcFile(pid int, name string) (content []byte, err error) { - path := procFileName(pid, name) - - // Panics have been reported when reading proc files, let's recover and - // report the path if this happens - // See https://github.com/elastic/beats/issues/6692 - defer func() { - if r := recover(); r != nil { - content = nil - err = fmt.Errorf("recovered panic when reading proc file '%s': %v", path, r) - } - }() - contents, err := ioutil.ReadFile(path) - - if err != nil { - if perr, ok := err.(*os.PathError); ok { - if perr.Err == syscall.ENOENT { - return nil, syscall.ESRCH - } - } - } - - return contents, err -} - -// getProcStatus reads /proc/[pid]/status which contains process status -// information in human readable form. -func getProcStatus(pid int) (map[string]string, error) { - status := make(map[string]string, 42) - path := filepath.Join(Procd, strconv.Itoa(pid), "status") - err := readFile(path, func(line string) bool { - fields := strings.SplitN(line, ":", 2) - if len(fields) == 2 { - status[fields[0]] = strings.TrimSpace(fields[1]) - } - - return true - }) - return status, err -} - -// getUIDs reads the "Uid" value from status and splits it into four values -- -// real, effective, saved set, and file system UIDs. 
-func getUIDs(status map[string]string) ([]string, error) { - uidLine, ok := status["Uid"] - if !ok { - return nil, fmt.Errorf("Uid not found in proc status") - } - - uidStrs := strings.Fields(uidLine) - if len(uidStrs) != 4 { - return nil, fmt.Errorf("Uid line ('%s') did not contain four values", uidLine) - } - - return uidStrs, nil -} diff --git a/vendor/github.com/elastic/gosigar/sigar_openbsd.go b/vendor/github.com/elastic/gosigar/sigar_openbsd.go deleted file mode 100644 index e4371b8..0000000 --- a/vendor/github.com/elastic/gosigar/sigar_openbsd.go +++ /dev/null @@ -1,426 +0,0 @@ -// Copyright (c) 2016 Jasper Lievisse Adriaanse . - -// +build openbsd - -package gosigar - -/* -#include -#include -#include -#include -#include -#include -#include -#include -*/ -import "C" - -//import "github.com/davecgh/go-spew/spew" - -import ( - "runtime" - "syscall" - "time" - "unsafe" -) - -type Uvmexp struct { - pagesize uint32 - pagemask uint32 - pageshift uint32 - npages uint32 - free uint32 - active uint32 - inactive uint32 - paging uint32 - wired uint32 - zeropages uint32 - reserve_pagedaemon uint32 - reserve_kernel uint32 - anonpages uint32 - vnodepages uint32 - vtextpages uint32 - freemin uint32 - freetarg uint32 - inactarg uint32 - wiredmax uint32 - anonmin uint32 - vtextmin uint32 - vnodemin uint32 - anonminpct uint32 - vtextmi uint32 - npct uint32 - vnodeminpct uint32 - nswapdev uint32 - swpages uint32 - swpginuse uint32 - swpgonly uint32 - nswget uint32 - nanon uint32 - nanonneeded uint32 - nfreeanon uint32 - faults uint32 - traps uint32 - intrs uint32 - swtch uint32 - softs uint32 - syscalls uint32 - pageins uint32 - obsolete_swapins uint32 - obsolete_swapouts uint32 - pgswapin uint32 - pgswapout uint32 - forks uint32 - forks_ppwait uint32 - forks_sharevm uint32 - pga_zerohit uint32 - pga_zeromiss uint32 - zeroaborts uint32 - fltnoram uint32 - fltnoanon uint32 - fltpgwait uint32 - fltpgrele uint32 - fltrelck uint32 - fltrelckok uint32 - fltanget uint32 - 
fltanretry uint32 - fltamcopy uint32 - fltnamap uint32 - fltnomap uint32 - fltlget uint32 - fltget uint32 - flt_anon uint32 - flt_acow uint32 - flt_obj uint32 - flt_prcopy uint32 - flt_przero uint32 - pdwoke uint32 - pdrevs uint32 - pdswout uint32 - pdfreed uint32 - pdscans uint32 - pdanscan uint32 - pdobscan uint32 - pdreact uint32 - pdbusy uint32 - pdpageouts uint32 - pdpending uint32 - pddeact uint32 - pdreanon uint32 - pdrevnode uint32 - pdrevtext uint32 - fpswtch uint32 - kmapent uint32 -} - -type Bcachestats struct { - numbufs uint64 - numbufpages uint64 - numdirtypages uint64 - numcleanpages uint64 - pendingwrites uint64 - pendingreads uint64 - numwrites uint64 - numreads uint64 - cachehits uint64 - busymapped uint64 - dmapages uint64 - highpages uint64 - delwribufs uint64 - kvaslots uint64 - kvaslots_avail uint64 -} - -type Swapent struct { - se_dev C.dev_t - se_flags int32 - se_nblks int32 - se_inuse int32 - se_priority int32 - sw_path []byte -} - -func (self *FileSystemList) Get() error { - num, err := syscall.Getfsstat(nil, C.MNT_NOWAIT) - if err != nil { - return err - } - - buf := make([]syscall.Statfs_t, num) - - _, err = syscall.Getfsstat(buf, C.MNT_NOWAIT) - if err != nil { - return err - } - - fslist := make([]FileSystem, 0, num) - - for i := 0; i < num; i++ { - fs := FileSystem{} - - fs.DirName = bytePtrToString(&buf[i].F_mntonname[0]) - fs.DevName = bytePtrToString(&buf[i].F_mntfromname[0]) - fs.SysTypeName = bytePtrToString(&buf[i].F_fstypename[0]) - - fslist = append(fslist, fs) - } - - self.List = fslist - - return err -} - -func (self *FileSystemUsage) Get(path string) error { - stat := syscall.Statfs_t{} - err := syscall.Statfs(path, &stat) - if err != nil { - return err - } - - self.Total = uint64(stat.F_blocks) * uint64(stat.F_bsize) - self.Free = uint64(stat.F_bfree) * uint64(stat.F_bsize) - self.Avail = uint64(stat.F_bavail) * uint64(stat.F_bsize) - self.Used = self.Total - self.Free - self.Files = stat.F_files - self.FreeFiles = 
stat.F_ffree - - return nil -} - -func (self *FDUsage) Get() error { - return ErrNotImplemented{runtime.GOOS} -} - -func (self *LoadAverage) Get() error { - avg := []C.double{0, 0, 0} - - C.getloadavg(&avg[0], C.int(len(avg))) - - self.One = float64(avg[0]) - self.Five = float64(avg[1]) - self.Fifteen = float64(avg[2]) - - return nil -} - -func (self *Uptime) Get() error { - tv := syscall.Timeval{} - mib := [2]int32{C.CTL_KERN, C.KERN_BOOTTIME} - - n := uintptr(0) - // First we determine how much memory we'll need to pass later on (via `n`) - _, _, errno := syscall.Syscall6(syscall.SYS___SYSCTL, uintptr(unsafe.Pointer(&mib[0])), 2, 0, uintptr(unsafe.Pointer(&n)), 0, 0) - - if errno != 0 || n == 0 { - return nil - } - - // Now perform the actual sysctl(3) call, storing the result in tv - _, _, errno = syscall.Syscall6(syscall.SYS___SYSCTL, uintptr(unsafe.Pointer(&mib[0])), 2, uintptr(unsafe.Pointer(&tv)), uintptr(unsafe.Pointer(&n)), 0, 0) - - if errno != 0 || n == 0 { - return nil - } - - self.Length = time.Since(time.Unix(int64(tv.Sec), int64(tv.Usec)*1000)).Seconds() - - return nil -} - -func (self *Mem) Get() error { - n := uintptr(0) - - var uvmexp Uvmexp - mib := [2]int32{C.CTL_VM, C.VM_UVMEXP} - n = uintptr(0) - // First we determine how much memory we'll need to pass later on (via `n`) - _, _, errno := syscall.Syscall6(syscall.SYS___SYSCTL, uintptr(unsafe.Pointer(&mib[0])), 2, 0, uintptr(unsafe.Pointer(&n)), 0, 0) - if errno != 0 || n == 0 { - return nil - } - - _, _, errno = syscall.Syscall6(syscall.SYS___SYSCTL, uintptr(unsafe.Pointer(&mib[0])), 2, uintptr(unsafe.Pointer(&uvmexp)), uintptr(unsafe.Pointer(&n)), 0, 0) - if errno != 0 || n == 0 { - return nil - } - - var bcachestats Bcachestats - mib3 := [3]int32{C.CTL_VFS, C.VFS_GENERIC, C.VFS_BCACHESTAT} - n = uintptr(0) - _, _, errno = syscall.Syscall6(syscall.SYS___SYSCTL, uintptr(unsafe.Pointer(&mib3[0])), 3, 0, uintptr(unsafe.Pointer(&n)), 0, 0) - if errno != 0 || n == 0 { - return nil - } - _, _, errno 
= syscall.Syscall6(syscall.SYS___SYSCTL, uintptr(unsafe.Pointer(&mib3[0])), 3, uintptr(unsafe.Pointer(&bcachestats)), uintptr(unsafe.Pointer(&n)), 0, 0) - if errno != 0 || n == 0 { - return nil - } - - self.Total = uint64(uvmexp.npages) << uvmexp.pageshift - self.Used = uint64(uvmexp.npages-uvmexp.free) << uvmexp.pageshift - self.Free = uint64(uvmexp.free) << uvmexp.pageshift - - self.ActualFree = self.Free + (uint64(bcachestats.numbufpages) << uvmexp.pageshift) - self.ActualUsed = self.Used - (uint64(bcachestats.numbufpages) << uvmexp.pageshift) - - return nil -} - -func (self *Swap) Get() error { - nswap := C.swapctl(C.SWAP_NSWAP, unsafe.Pointer(uintptr(0)), 0) - - // If there are no swap devices, nothing to do here. - if nswap == 0 { - return nil - } - - swdev := make([]Swapent, nswap) - - rnswap := C.swapctl(C.SWAP_STATS, unsafe.Pointer(&swdev[0]), nswap) - if rnswap == 0 { - return nil - } - - for i := 0; i < int(nswap); i++ { - if swdev[i].se_flags&C.SWF_ENABLE == 2 { - self.Used = self.Used + uint64(swdev[i].se_inuse/(1024/C.DEV_BSIZE)) - self.Total = self.Total + uint64(swdev[i].se_nblks/(1024/C.DEV_BSIZE)) - } - } - - self.Free = self.Total - self.Used - - return nil -} - -func (self *HugeTLBPages) Get() error { - return ErrNotImplemented{runtime.GOOS} -} - -func (self *Cpu) Get() error { - load := [C.CPUSTATES]C.long{C.CP_USER, C.CP_NICE, C.CP_SYS, C.CP_INTR, C.CP_IDLE} - - mib := [2]int32{C.CTL_KERN, C.KERN_CPTIME} - n := uintptr(0) - // First we determine how much memory we'll need to pass later on (via `n`) - _, _, errno := syscall.Syscall6(syscall.SYS___SYSCTL, uintptr(unsafe.Pointer(&mib[0])), 2, 0, uintptr(unsafe.Pointer(&n)), 0, 0) - if errno != 0 || n == 0 { - return nil - } - - _, _, errno = syscall.Syscall6(syscall.SYS___SYSCTL, uintptr(unsafe.Pointer(&mib[0])), 2, uintptr(unsafe.Pointer(&load)), uintptr(unsafe.Pointer(&n)), 0, 0) - if errno != 0 || n == 0 { - return nil - } - - self.User = uint64(load[0]) - self.Nice = uint64(load[1]) - 
self.Sys = uint64(load[2]) - self.Irq = uint64(load[3]) - self.Idle = uint64(load[4]) - - return nil -} - -func (self *CpuList) Get() error { - mib := [2]int32{C.CTL_HW, C.HW_NCPU} - var ncpu int - - n := uintptr(0) - // First we determine how much memory we'll need to pass later on (via `n`) - _, _, errno := syscall.Syscall6(syscall.SYS___SYSCTL, uintptr(unsafe.Pointer(&mib[0])), 2, 0, uintptr(unsafe.Pointer(&n)), 0, 0) - - if errno != 0 || n == 0 { - return nil - } - - // Now perform the actual sysctl(3) call, storing the result in ncpu - _, _, errno = syscall.Syscall6(syscall.SYS___SYSCTL, uintptr(unsafe.Pointer(&mib[0])), 2, uintptr(unsafe.Pointer(&ncpu)), uintptr(unsafe.Pointer(&n)), 0, 0) - - if errno != 0 || n == 0 { - return nil - } - - load := [C.CPUSTATES]C.long{C.CP_USER, C.CP_NICE, C.CP_SYS, C.CP_INTR, C.CP_IDLE} - - self.List = make([]Cpu, ncpu) - for curcpu := range self.List { - sysctlCptime(ncpu, curcpu, &load) - fillCpu(&self.List[curcpu], load) - } - - return nil -} - -func (self *ProcList) Get() error { - return nil -} - -func (self *ProcArgs) Get(pid int) error { - return nil -} - -func (self *ProcEnv) Get(pid int) error { - return ErrNotImplemented{runtime.GOOS} -} - -func (self *ProcState) Get(pid int) error { - return nil -} - -func (self *ProcMem) Get(pid int) error { - return nil -} - -func (self *ProcTime) Get(pid int) error { - return ErrNotImplemented{runtime.GOOS} -} - -func (self *ProcExe) Get(pid int) error { - return nil -} - -func (self *ProcFDUsage) Get(pid int) error { - return ErrNotImplemented{runtime.GOOS} -} - -func (self *Rusage) Get(pid int) error { - return ErrNotImplemented{runtime.GOOS} -} - -func fillCpu(cpu *Cpu, load [C.CPUSTATES]C.long) { - cpu.User = uint64(load[0]) - cpu.Nice = uint64(load[1]) - cpu.Sys = uint64(load[2]) - cpu.Irq = uint64(load[3]) - cpu.Idle = uint64(load[4]) -} - -func sysctlCptime(ncpu int, curcpu int, load *[C.CPUSTATES]C.long) error { - var mib []int32 - - // Use the correct mib based on the 
number of CPUs and fill out the - // current CPU number in case of SMP. (0 indexed cf. self.List) - if ncpu == 0 { - mib = []int32{C.CTL_KERN, C.KERN_CPTIME} - } else { - mib = []int32{C.CTL_KERN, C.KERN_CPTIME2, int32(curcpu)} - } - - len := len(mib) - - n := uintptr(0) - // First we determine how much memory we'll need to pass later on (via `n`) - _, _, errno := syscall.Syscall6(syscall.SYS___SYSCTL, uintptr(unsafe.Pointer(&mib[0])), uintptr(len), 0, uintptr(unsafe.Pointer(&n)), 0, 0) - if errno != 0 || n == 0 { - return nil - } - - _, _, errno = syscall.Syscall6(syscall.SYS___SYSCTL, uintptr(unsafe.Pointer(&mib[0])), uintptr(len), uintptr(unsafe.Pointer(load)), uintptr(unsafe.Pointer(&n)), 0, 0) - if errno != 0 || n == 0 { - return nil - } - - return nil -} diff --git a/vendor/github.com/elastic/gosigar/sigar_stub.go b/vendor/github.com/elastic/gosigar/sigar_stub.go deleted file mode 100644 index de9565a..0000000 --- a/vendor/github.com/elastic/gosigar/sigar_stub.go +++ /dev/null @@ -1,75 +0,0 @@ -// +build !darwin,!freebsd,!linux,!openbsd,!windows - -package gosigar - -import ( - "runtime" -) - -func (c *Cpu) Get() error { - return ErrNotImplemented{runtime.GOOS} -} - -func (l *LoadAverage) Get() error { - return ErrNotImplemented{runtime.GOOS} -} - -func (m *Mem) Get() error { - return ErrNotImplemented{runtime.GOOS} -} - -func (s *Swap) Get() error { - return ErrNotImplemented{runtime.GOOS} -} - -func (s *HugeTLBPages) Get() error { - return ErrNotImplemented{runtime.GOOS} -} - -func (f *FDUsage) Get() error { - return ErrNotImplemented{runtime.GOOS} -} - -func (p *ProcTime) Get(int) error { - return ErrNotImplemented{runtime.GOOS} -} - -func (self *FileSystemUsage) Get(path string) error { - return ErrNotImplemented{runtime.GOOS} -} - -func (self *CpuList) Get() error { - return ErrNotImplemented{runtime.GOOS} -} - -func (p *ProcState) Get(int) error { - return ErrNotImplemented{runtime.GOOS} -} - -func (p *ProcExe) Get(int) error { - return 
ErrNotImplemented{runtime.GOOS} -} - -func (p *ProcMem) Get(int) error { - return ErrNotImplemented{runtime.GOOS} -} - -func (p *ProcFDUsage) Get(int) error { - return ErrNotImplemented{runtime.GOOS} -} - -func (p *ProcEnv) Get(int) error { - return ErrNotImplemented{runtime.GOOS} -} - -func (p *ProcList) Get() error { - return ErrNotImplemented{runtime.GOOS} -} - -func (p *ProcArgs) Get(int) error { - return ErrNotImplemented{runtime.GOOS} -} - -func (self *Rusage) Get(int) error { - return ErrNotImplemented{runtime.GOOS} -} diff --git a/vendor/github.com/elastic/gosigar/sigar_unix.go b/vendor/github.com/elastic/gosigar/sigar_unix.go deleted file mode 100644 index 3f3a9f7..0000000 --- a/vendor/github.com/elastic/gosigar/sigar_unix.go +++ /dev/null @@ -1,69 +0,0 @@ -// Copyright (c) 2012 VMware, Inc. - -// +build darwin freebsd linux - -package gosigar - -import ( - "syscall" - "time" - - "golang.org/x/sys/unix" -) - -func (self *FileSystemUsage) Get(path string) error { - stat := syscall.Statfs_t{} - err := syscall.Statfs(path, &stat) - if err != nil { - return err - } - - self.Total = uint64(stat.Blocks) * uint64(stat.Bsize) - self.Free = uint64(stat.Bfree) * uint64(stat.Bsize) - self.Avail = uint64(stat.Bavail) * uint64(stat.Bsize) - self.Used = self.Total - self.Free - self.Files = stat.Files - self.FreeFiles = uint64(stat.Ffree) - - return nil -} - -func (r *Rusage) Get(who int) error { - ru, err := getResourceUsage(who) - if err != nil { - return err - } - - uTime := convertRtimeToDur(ru.Utime) - sTime := convertRtimeToDur(ru.Stime) - - r.Utime = uTime - r.Stime = sTime - r.Maxrss = int64(ru.Maxrss) - r.Ixrss = int64(ru.Ixrss) - r.Idrss = int64(ru.Idrss) - r.Isrss = int64(ru.Isrss) - r.Minflt = int64(ru.Minflt) - r.Majflt = int64(ru.Majflt) - r.Nswap = int64(ru.Nswap) - r.Inblock = int64(ru.Inblock) - r.Oublock = int64(ru.Oublock) - r.Msgsnd = int64(ru.Msgsnd) - r.Msgrcv = int64(ru.Msgrcv) - r.Nsignals = int64(ru.Nsignals) - r.Nvcsw = int64(ru.Nvcsw) - 
r.Nivcsw = int64(ru.Nivcsw) - - return nil -} - -func getResourceUsage(who int) (unix.Rusage, error) { - r := unix.Rusage{} - err := unix.Getrusage(who, &r) - - return r, err -} - -func convertRtimeToDur(t unix.Timeval) time.Duration { - return time.Duration(t.Nano()) -} diff --git a/vendor/github.com/elastic/gosigar/sigar_util.go b/vendor/github.com/elastic/gosigar/sigar_util.go deleted file mode 100644 index bf93b02..0000000 --- a/vendor/github.com/elastic/gosigar/sigar_util.go +++ /dev/null @@ -1,22 +0,0 @@ -// Copyright (c) 2012 VMware, Inc. - -package gosigar - -import ( - "unsafe" -) - -func bytePtrToString(ptr *int8) string { - bytes := (*[10000]byte)(unsafe.Pointer(ptr)) - - n := 0 - for bytes[n] != 0 { - n++ - } - - return string(bytes[0:n]) -} - -func chop(buf []byte) []byte { - return buf[0 : len(buf)-1] -} diff --git a/vendor/github.com/elastic/gosigar/sigar_windows.go b/vendor/github.com/elastic/gosigar/sigar_windows.go deleted file mode 100644 index c2b54d8..0000000 --- a/vendor/github.com/elastic/gosigar/sigar_windows.go +++ /dev/null @@ -1,441 +0,0 @@ -// Copyright (c) 2012 VMware, Inc. - -package gosigar - -import ( - "fmt" - "os" - "path/filepath" - "runtime" - "strings" - "sync" - "syscall" - "time" - - "github.com/StackExchange/wmi" - "github.com/elastic/gosigar/sys/windows" - "github.com/pkg/errors" -) - -// Win32_Process represents a process on the Windows operating system. If -// additional fields are added here (that match the Windows struct) they will -// automatically be populated when calling getWin32Process. -// https://msdn.microsoft.com/en-us/library/windows/desktop/aa394372(v=vs.85).aspx -type Win32_Process struct { - CommandLine string -} - -// Win32_OperatingSystem WMI class represents a Windows-based operating system -// installed on a computer. 
-// https://msdn.microsoft.com/en-us/library/windows/desktop/aa394239(v=vs.85).aspx -type Win32_OperatingSystem struct { - LastBootUpTime time.Time -} - -var ( - // version is Windows version of the host OS. - version = windows.GetWindowsVersion() - - // processQueryLimitedInfoAccess is set to PROCESS_QUERY_INFORMATION for Windows - // 2003 and XP where PROCESS_QUERY_LIMITED_INFORMATION is unknown. For all newer - // OS versions it is set to PROCESS_QUERY_LIMITED_INFORMATION. - processQueryLimitedInfoAccess = windows.PROCESS_QUERY_LIMITED_INFORMATION - - // bootTime is the time when the OS was last booted. This value may be nil - // on operating systems that do not support the WMI query used to obtain it. - bootTime *time.Time - bootTimeLock sync.Mutex -) - -func init() { - if !version.IsWindowsVistaOrGreater() { - // PROCESS_QUERY_LIMITED_INFORMATION cannot be used on 2003 or XP. - processQueryLimitedInfoAccess = syscall.PROCESS_QUERY_INFORMATION - } -} - -func (self *LoadAverage) Get() error { - return ErrNotImplemented{runtime.GOOS} -} - -func (self *FDUsage) Get() error { - return ErrNotImplemented{runtime.GOOS} -} - -func (self *ProcEnv) Get(pid int) error { - return ErrNotImplemented{runtime.GOOS} -} - -func (self *ProcExe) Get(pid int) error { - return ErrNotImplemented{runtime.GOOS} -} - -func (self *ProcFDUsage) Get(pid int) error { - return ErrNotImplemented{runtime.GOOS} -} - -func (self *Uptime) Get() error { - // Minimum supported OS is Windows Vista. 
- if !version.IsWindowsVistaOrGreater() { - return ErrNotImplemented{runtime.GOOS} - } - - bootTimeLock.Lock() - defer bootTimeLock.Unlock() - if bootTime == nil { - os, err := getWin32OperatingSystem() - if err != nil { - return errors.Wrap(err, "failed to get boot time using WMI") - } - bootTime = &os.LastBootUpTime - } - - self.Length = time.Since(*bootTime).Seconds() - return nil -} - -func (self *Mem) Get() error { - memoryStatusEx, err := windows.GlobalMemoryStatusEx() - if err != nil { - return errors.Wrap(err, "GlobalMemoryStatusEx failed") - } - - self.Total = memoryStatusEx.TotalPhys - self.Free = memoryStatusEx.AvailPhys - self.Used = self.Total - self.Free - self.ActualFree = self.Free - self.ActualUsed = self.Used - return nil -} - -func (self *Swap) Get() error { - memoryStatusEx, err := windows.GlobalMemoryStatusEx() - if err != nil { - return errors.Wrap(err, "GlobalMemoryStatusEx failed") - } - - self.Total = memoryStatusEx.TotalPageFile - self.Free = memoryStatusEx.AvailPageFile - self.Used = self.Total - self.Free - return nil -} - -func (self *HugeTLBPages) Get() error { - return ErrNotImplemented{runtime.GOOS} -} - -func (self *Cpu) Get() error { - idle, kernel, user, err := windows.GetSystemTimes() - if err != nil { - return errors.Wrap(err, "GetSystemTimes failed") - } - - // CPU times are reported in milliseconds by gosigar. 
- self.Idle = uint64(idle / time.Millisecond) - self.Sys = uint64(kernel / time.Millisecond) - self.User = uint64(user / time.Millisecond) - return nil -} - -func (self *CpuList) Get() error { - cpus, err := windows.NtQuerySystemProcessorPerformanceInformation() - if err != nil { - return errors.Wrap(err, "NtQuerySystemProcessorPerformanceInformation failed") - } - - self.List = make([]Cpu, 0, len(cpus)) - for _, cpu := range cpus { - self.List = append(self.List, Cpu{ - Idle: uint64(cpu.IdleTime / time.Millisecond), - Sys: uint64(cpu.KernelTime / time.Millisecond), - User: uint64(cpu.UserTime / time.Millisecond), - }) - } - return nil -} - -func (self *FileSystemList) Get() error { - drives, err := windows.GetLogicalDriveStrings() - if err != nil { - return errors.Wrap(err, "GetLogicalDriveStrings failed") - } - - for _, drive := range drives { - dt, err := windows.GetDriveType(drive) - if err != nil { - return errors.Wrapf(err, "GetDriveType failed") - } - - self.List = append(self.List, FileSystem{ - DirName: drive, - DevName: drive, - TypeName: dt.String(), - }) - } - return nil -} - -// Get retrieves a list of all process identifiers (PIDs) in the system. -func (self *ProcList) Get() error { - pids, err := windows.EnumProcesses() - if err != nil { - return errors.Wrap(err, "EnumProcesses failed") - } - - // Convert uint32 PIDs to int. 
- self.List = make([]int, 0, len(pids)) - for _, pid := range pids { - self.List = append(self.List, int(pid)) - } - return nil -} - -func (self *ProcState) Get(pid int) error { - var errs []error - - var err error - self.Name, err = getProcName(pid) - if err != nil { - errs = append(errs, errors.Wrap(err, "getProcName failed")) - } - - self.State, err = getProcStatus(pid) - if err != nil { - errs = append(errs, errors.Wrap(err, "getProcStatus failed")) - } - - self.Ppid, err = getParentPid(pid) - if err != nil { - errs = append(errs, errors.Wrap(err, "getParentPid failed")) - } - - self.Username, err = getProcCredName(pid) - if err != nil { - errs = append(errs, errors.Wrap(err, "getProcCredName failed")) - } - - if len(errs) > 0 { - errStrs := make([]string, 0, len(errs)) - for _, e := range errs { - errStrs = append(errStrs, e.Error()) - } - return errors.New(strings.Join(errStrs, "; ")) - } - return nil -} - -// getProcName returns the process name associated with the PID. -func getProcName(pid int) (string, error) { - handle, err := syscall.OpenProcess(processQueryLimitedInfoAccess, false, uint32(pid)) - if err != nil { - return "", errors.Wrapf(err, "OpenProcess failed for pid=%v", pid) - } - defer syscall.CloseHandle(handle) - - filename, err := windows.GetProcessImageFileName(handle) - if err != nil { - return "", errors.Wrapf(err, "GetProcessImageFileName failed for pid=%v", pid) - } - - return filepath.Base(filename), nil -} - -// getProcStatus returns the status of a process. 
-func getProcStatus(pid int) (RunState, error) { - handle, err := syscall.OpenProcess(processQueryLimitedInfoAccess, false, uint32(pid)) - if err != nil { - return RunStateUnknown, errors.Wrapf(err, "OpenProcess failed for pid=%v", pid) - } - defer syscall.CloseHandle(handle) - - var exitCode uint32 - err = syscall.GetExitCodeProcess(handle, &exitCode) - if err != nil { - return RunStateUnknown, errors.Wrapf(err, "GetExitCodeProcess failed for pid=%v") - } - - if exitCode == 259 { //still active - return RunStateRun, nil - } - return RunStateSleep, nil -} - -// getParentPid returns the parent process ID of a process. -func getParentPid(pid int) (int, error) { - handle, err := syscall.OpenProcess(processQueryLimitedInfoAccess, false, uint32(pid)) - if err != nil { - return RunStateUnknown, errors.Wrapf(err, "OpenProcess failed for pid=%v", pid) - } - defer syscall.CloseHandle(handle) - - procInfo, err := windows.NtQueryProcessBasicInformation(handle) - if err != nil { - return 0, errors.Wrapf(err, "NtQueryProcessBasicInformation failed for pid=%v", pid) - } - - return int(procInfo.InheritedFromUniqueProcessID), nil -} - -func getProcCredName(pid int) (string, error) { - handle, err := syscall.OpenProcess(syscall.PROCESS_QUERY_INFORMATION, false, uint32(pid)) - if err != nil { - return "", errors.Wrapf(err, "OpenProcess failed for pid=%v", pid) - } - defer syscall.CloseHandle(handle) - - // Find process token via win32. - var token syscall.Token - err = syscall.OpenProcessToken(handle, syscall.TOKEN_QUERY, &token) - if err != nil { - return "", errors.Wrapf(err, "OpenProcessToken failed for pid=%v", pid) - } - - // Find the token user. - tokenUser, err := token.GetTokenUser() - if err != nil { - return "", errors.Wrapf(err, "GetTokenInformation failed for pid=%v", pid) - } - - // Close token to prevent handle leaks. 
- err = token.Close() - if err != nil { - return "", errors.Wrapf(err, "failed while closing process token handle for pid=%v", pid) - } - - // Look up domain account by SID. - account, domain, _, err := tokenUser.User.Sid.LookupAccount("") - if err != nil { - sid, sidErr := tokenUser.User.Sid.String() - if sidErr != nil { - return "", errors.Wrapf(err, "failed while looking up account name for pid=%v", pid) - } - return "", errors.Wrapf(err, "failed while looking up account name for SID=%v of pid=%v", sid, pid) - } - - return fmt.Sprintf(`%s\%s`, domain, account), nil -} - -func (self *ProcMem) Get(pid int) error { - handle, err := syscall.OpenProcess(processQueryLimitedInfoAccess|windows.PROCESS_VM_READ, false, uint32(pid)) - if err != nil { - return errors.Wrapf(err, "OpenProcess failed for pid=%v", pid) - } - defer syscall.CloseHandle(handle) - - counters, err := windows.GetProcessMemoryInfo(handle) - if err != nil { - return errors.Wrapf(err, "GetProcessMemoryInfo failed for pid=%v", pid) - } - - self.Resident = uint64(counters.WorkingSetSize) - self.Size = uint64(counters.PrivateUsage) - return nil -} - -func (self *ProcTime) Get(pid int) error { - cpu, err := getProcTimes(pid) - if err != nil { - return err - } - - // Windows epoch times are expressed as time elapsed since midnight on - // January 1, 1601 at Greenwich, England. This converts the Filetime to - // unix epoch in milliseconds. - self.StartTime = uint64(cpu.CreationTime.Nanoseconds() / 1e6) - - // Convert to millis. 
- self.User = uint64(windows.FiletimeToDuration(&cpu.UserTime).Nanoseconds() / 1e6) - self.Sys = uint64(windows.FiletimeToDuration(&cpu.KernelTime).Nanoseconds() / 1e6) - self.Total = self.User + self.Sys - - return nil -} - -func getProcTimes(pid int) (*syscall.Rusage, error) { - handle, err := syscall.OpenProcess(processQueryLimitedInfoAccess, false, uint32(pid)) - if err != nil { - return nil, errors.Wrapf(err, "OpenProcess failed for pid=%v", pid) - } - defer syscall.CloseHandle(handle) - - var cpu syscall.Rusage - if err := syscall.GetProcessTimes(handle, &cpu.CreationTime, &cpu.ExitTime, &cpu.KernelTime, &cpu.UserTime); err != nil { - return nil, errors.Wrapf(err, "GetProcessTimes failed for pid=%v", pid) - } - - return &cpu, nil -} - -func (self *ProcArgs) Get(pid int) error { - // The minimum supported client for Win32_Process is Windows Vista. - if !version.IsWindowsVistaOrGreater() { - return ErrNotImplemented{runtime.GOOS} - } - - process, err := getWin32Process(int32(pid)) - if err != nil { - return errors.Wrapf(err, "ProcArgs failed for pid=%v", pid) - } - - self.List = []string{process.CommandLine} - return nil -} - -func (self *FileSystemUsage) Get(path string) error { - freeBytesAvailable, totalNumberOfBytes, totalNumberOfFreeBytes, err := windows.GetDiskFreeSpaceEx(path) - if err != nil { - return errors.Wrap(err, "GetDiskFreeSpaceEx failed") - } - - self.Total = totalNumberOfBytes - self.Free = totalNumberOfFreeBytes - self.Used = self.Total - self.Free - self.Avail = freeBytesAvailable - return nil -} - -// getWin32Process gets information about the process with the given process ID. -// It uses a WMI query to get the information from the local system. 
-func getWin32Process(pid int32) (Win32_Process, error) { - var dst []Win32_Process - query := fmt.Sprintf("WHERE ProcessId = %d", pid) - q := wmi.CreateQuery(&dst, query) - err := wmi.Query(q, &dst) - if err != nil { - return Win32_Process{}, fmt.Errorf("could not get Win32_Process %s: %v", query, err) - } - if len(dst) < 1 { - return Win32_Process{}, fmt.Errorf("could not get Win32_Process %s: Process not found", query) - } - return dst[0], nil -} - -func getWin32OperatingSystem() (Win32_OperatingSystem, error) { - var dst []Win32_OperatingSystem - q := wmi.CreateQuery(&dst, "") - err := wmi.Query(q, &dst) - if err != nil { - return Win32_OperatingSystem{}, errors.Wrap(err, "wmi query for Win32_OperatingSystem failed") - } - if len(dst) != 1 { - return Win32_OperatingSystem{}, errors.New("wmi query for Win32_OperatingSystem failed") - } - return dst[0], nil -} - -func (self *Rusage) Get(who int) error { - if who != 0 { - return ErrNotImplemented{runtime.GOOS} - } - - pid := os.Getpid() - cpu, err := getProcTimes(pid) - if err != nil { - return err - } - - self.Utime = windows.FiletimeToDuration(&cpu.UserTime) - self.Stime = windows.FiletimeToDuration(&cpu.KernelTime) - - return nil -} diff --git a/vendor/github.com/elastic/gosigar/sys/windows/doc.go b/vendor/github.com/elastic/gosigar/sys/windows/doc.go deleted file mode 100644 index dda57aa..0000000 --- a/vendor/github.com/elastic/gosigar/sys/windows/doc.go +++ /dev/null @@ -1,2 +0,0 @@ -// Package windows contains various Windows system call. 
-package windows diff --git a/vendor/github.com/elastic/gosigar/sys/windows/ntquery.go b/vendor/github.com/elastic/gosigar/sys/windows/ntquery.go deleted file mode 100644 index 85de365..0000000 --- a/vendor/github.com/elastic/gosigar/sys/windows/ntquery.go +++ /dev/null @@ -1,132 +0,0 @@ -// +build windows - -package windows - -import ( - "bytes" - "encoding/binary" - "io" - "runtime" - "syscall" - "time" - "unsafe" - - "github.com/pkg/errors" -) - -// On both 32-bit and 64-bit systems NtQuerySystemInformation expects the -// size of SYSTEM_PROCESSOR_PERFORMANCE_INFORMATION to be 48. -const sizeofSystemProcessorPerformanceInformation = 48 - -// ProcessBasicInformation is an equivalent representation of -// PROCESS_BASIC_INFORMATION in the Windows API. -// https://msdn.microsoft.com/en-us/library/windows/desktop/ms684280(v=vs.85).aspx -type ProcessBasicInformation struct { - ExitStatus uint - PebBaseAddress uintptr - AffinityMask uint - BasePriority uint - UniqueProcessID uint - InheritedFromUniqueProcessID uint -} - -// NtQueryProcessBasicInformation queries basic information about the process -// associated with the given handle (provided by OpenProcess). It uses the -// NtQueryInformationProcess function to collect the data. -// https://msdn.microsoft.com/en-us/library/windows/desktop/ms684280(v=vs.85).aspx -func NtQueryProcessBasicInformation(handle syscall.Handle) (ProcessBasicInformation, error) { - var processBasicInfo ProcessBasicInformation - processBasicInfoPtr := (*byte)(unsafe.Pointer(&processBasicInfo)) - size := uint32(unsafe.Sizeof(processBasicInfo)) - ntStatus, _ := _NtQueryInformationProcess(handle, 0, processBasicInfoPtr, size, nil) - if ntStatus != 0 { - return ProcessBasicInformation{}, errors.Errorf("NtQueryInformationProcess failed, NTSTATUS=0x%X", ntStatus) - } - - return processBasicInfo, nil -} - -// SystemProcessorPerformanceInformation contains CPU performance information -// for a single CPU. 
-type SystemProcessorPerformanceInformation struct { - IdleTime time.Duration // Amount of time spent idle. - KernelTime time.Duration // Kernel time does NOT include time spent in idle. - UserTime time.Duration // Amount of time spent executing in user mode. -} - -// _SYSTEM_PROCESSOR_PERFORMANCE_INFORMATION is an equivalent representation of -// SYSTEM_PROCESSOR_PERFORMANCE_INFORMATION in the Windows API. This struct is -// used internally with NtQuerySystemInformation call and is not exported. The -// exported equivalent is SystemProcessorPerformanceInformation. -// https://msdn.microsoft.com/en-us/library/windows/desktop/ms724509(v=vs.85).aspx -type _SYSTEM_PROCESSOR_PERFORMANCE_INFORMATION struct { - IdleTime int64 - KernelTime int64 - UserTime int64 - Reserved1 [2]int64 - Reserved2 uint32 -} - -// NtQuerySystemProcessorPerformanceInformation queries CPU performance -// information for each CPU. It uses the NtQuerySystemInformation function to -// collect the SystemProcessorPerformanceInformation. -// https://msdn.microsoft.com/en-us/library/windows/desktop/ms724509(v=vs.85).aspx -func NtQuerySystemProcessorPerformanceInformation() ([]SystemProcessorPerformanceInformation, error) { - // NTSTATUS code for success. - // https://msdn.microsoft.com/en-us/library/cc704588.aspx - const STATUS_SUCCESS = 0 - - // From the _SYSTEM_INFORMATION_CLASS enum. - // http://processhacker.sourceforge.net/doc/ntexapi_8h.html#ad5d815b48e8f4da1ef2eb7a2f18a54e0 - const systemProcessorPerformanceInformation = 8 - - // Create a buffer large enough to hold an entry for each processor. - b := make([]byte, runtime.NumCPU()*sizeofSystemProcessorPerformanceInformation) - - // Query the performance information. Note that this function uses 0 to - // indicate success. Most other Windows functions use non-zero for success. 
- var returnLength uint32 - ntStatus, _ := _NtQuerySystemInformation(systemProcessorPerformanceInformation, &b[0], uint32(len(b)), &returnLength) - if ntStatus != STATUS_SUCCESS { - return nil, errors.Errorf("NtQuerySystemInformation failed, NTSTATUS=0x%X, bufLength=%v, returnLength=%v", ntStatus, len(b), returnLength) - } - - return readSystemProcessorPerformanceInformationBuffer(b) -} - -// readSystemProcessorPerformanceInformationBuffer reads from a buffer -// containing SYSTEM_PROCESSOR_PERFORMANCE_INFORMATION data. The buffer should -// contain one entry for each CPU. -// https://msdn.microsoft.com/en-us/library/windows/desktop/ms724509(v=vs.85).aspx -func readSystemProcessorPerformanceInformationBuffer(b []byte) ([]SystemProcessorPerformanceInformation, error) { - n := len(b) / sizeofSystemProcessorPerformanceInformation - r := bytes.NewReader(b) - - rtn := make([]SystemProcessorPerformanceInformation, 0, n) - for i := 0; i < n; i++ { - _, err := r.Seek(int64(i*sizeofSystemProcessorPerformanceInformation), io.SeekStart) - if err != nil { - return nil, errors.Wrapf(err, "failed to seek to cpuN=%v in buffer", i) - } - - times := make([]uint64, 3) - for j := range times { - err := binary.Read(r, binary.LittleEndian, ×[j]) - if err != nil { - return nil, errors.Wrapf(err, "failed reading cpu times for cpuN=%v", i) - } - } - - idleTime := time.Duration(times[0] * 100) - kernelTime := time.Duration(times[1] * 100) - userTime := time.Duration(times[2] * 100) - - rtn = append(rtn, SystemProcessorPerformanceInformation{ - IdleTime: idleTime, - KernelTime: kernelTime - idleTime, // Subtract out idle time from kernel time. 
- UserTime: userTime, - }) - } - - return rtn, nil -} diff --git a/vendor/github.com/elastic/gosigar/sys/windows/privileges.go b/vendor/github.com/elastic/gosigar/sys/windows/privileges.go deleted file mode 100644 index 28c78fd..0000000 --- a/vendor/github.com/elastic/gosigar/sys/windows/privileges.go +++ /dev/null @@ -1,272 +0,0 @@ -// +build windows - -package windows - -import ( - "bytes" - "encoding/binary" - "encoding/json" - "fmt" - "runtime" - "strings" - "sync" - "syscall" - - "github.com/pkg/errors" - "golang.org/x/sys/windows" -) - -// Cache of privilege names to LUIDs. -var ( - privNames = make(map[string]int64) - privNameMutex sync.Mutex -) - -const ( - // SeDebugPrivilege is the name of the privilege used to debug programs. - SeDebugPrivilege = "SeDebugPrivilege" -) - -// Errors returned by AdjustTokenPrivileges. -const ( - ERROR_NOT_ALL_ASSIGNED syscall.Errno = 1300 -) - -// Attribute bits for privileges. -const ( - _SE_PRIVILEGE_ENABLED_BY_DEFAULT uint32 = 0x00000001 - _SE_PRIVILEGE_ENABLED uint32 = 0x00000002 - _SE_PRIVILEGE_REMOVED uint32 = 0x00000004 - _SE_PRIVILEGE_USED_FOR_ACCESS uint32 = 0x80000000 -) - -// Privilege contains information about a single privilege associated with a -// Token. -type Privilege struct { - LUID int64 `json:"-"` // Locally unique identifier (guaranteed only until the system is restarted). 
- Name string `json:"-"` - EnabledByDefault bool `json:"enabled_by_default,omitempty"` - Enabled bool `json:"enabled"` - Removed bool `json:"removed,omitempty"` - Used bool `json:"used,omitempty"` -} - -func (p Privilege) String() string { - var buf bytes.Buffer - buf.WriteString(p.Name) - buf.WriteString("=(") - - opts := make([]string, 0, 4) - if p.EnabledByDefault { - opts = append(opts, "Default") - } - if p.Enabled { - opts = append(opts, "Enabled") - } - if !p.EnabledByDefault && !p.Enabled { - opts = append(opts, "Disabled") - } - if p.Removed { - opts = append(opts, "Removed") - } - if p.Used { - opts = append(opts, "Used") - } - - buf.WriteString(strings.Join(opts, ", ")) - buf.WriteString(")") - - // Example: SeDebugPrivilege=(Default, Enabled) - return buf.String() -} - -// User represent the information about a Windows account. -type User struct { - SID string - Account string - Domain string - Type uint32 -} - -func (u User) String() string { - return fmt.Sprintf(`User:%v\%v, SID:%v, Type:%v`, u.Domain, u.Account, u.SID, u.Type) -} - -// DebugInfo contains general debug info about the current process. -type DebugInfo struct { - OSVersion Version // OS version info. - Arch string // Architecture of the machine. - NumCPU int // Number of CPUs. - User User // User that this process is running as. - ProcessPrivs map[string]Privilege // Privileges held by the process. -} - -func (d DebugInfo) String() string { - bytes, _ := json.Marshal(d) - return string(bytes) -} - -// LookupPrivilegeName looks up a privilege name given a LUID value. -func LookupPrivilegeName(systemName string, luid int64) (string, error) { - buf := make([]uint16, 256) - bufSize := uint32(len(buf)) - err := _LookupPrivilegeName(systemName, &luid, &buf[0], &bufSize) - if err != nil { - return "", errors.Wrapf(err, "LookupPrivilegeName failed for luid=%v", luid) - } - - return syscall.UTF16ToString(buf), nil -} - -// mapPrivileges maps privilege names to LUID values. 
-func mapPrivileges(names []string) ([]int64, error) { - var privileges []int64 - privNameMutex.Lock() - defer privNameMutex.Unlock() - for _, name := range names { - p, ok := privNames[name] - if !ok { - err := _LookupPrivilegeValue("", name, &p) - if err != nil { - return nil, errors.Wrapf(err, "LookupPrivilegeValue failed on '%v'", name) - } - privNames[name] = p - } - privileges = append(privileges, p) - } - return privileges, nil -} - -// EnableTokenPrivileges enables the specified privileges in the given -// Token. The token must have TOKEN_ADJUST_PRIVILEGES access. If the token -// does not already contain the privilege it cannot be enabled. -func EnableTokenPrivileges(token syscall.Token, privileges ...string) error { - privValues, err := mapPrivileges(privileges) - if err != nil { - return err - } - - var b bytes.Buffer - binary.Write(&b, binary.LittleEndian, uint32(len(privValues))) - for _, p := range privValues { - binary.Write(&b, binary.LittleEndian, p) - binary.Write(&b, binary.LittleEndian, uint32(_SE_PRIVILEGE_ENABLED)) - } - - success, err := _AdjustTokenPrivileges(token, false, &b.Bytes()[0], uint32(b.Len()), nil, nil) - if !success { - return err - } - if err == ERROR_NOT_ALL_ASSIGNED { - return errors.Wrap(err, "error not all privileges were assigned") - } - - return nil -} - -// GetTokenPrivileges returns a list of privileges associated with a token. -// The provided token must have at a minimum TOKEN_QUERY access. This is a -// wrapper around the GetTokenInformation function. -// https://msdn.microsoft.com/en-us/library/windows/desktop/aa446671(v=vs.85).aspx -func GetTokenPrivileges(token syscall.Token) (map[string]Privilege, error) { - // Determine the required buffer size. - var size uint32 - syscall.GetTokenInformation(token, syscall.TokenPrivileges, nil, 0, &size) - - // This buffer will receive a TOKEN_PRIVILEGE structure. 
- b := bytes.NewBuffer(make([]byte, size)) - err := syscall.GetTokenInformation(token, syscall.TokenPrivileges, &b.Bytes()[0], uint32(b.Len()), &size) - if err != nil { - return nil, errors.Wrap(err, "GetTokenInformation failed") - } - - var privilegeCount uint32 - err = binary.Read(b, binary.LittleEndian, &privilegeCount) - if err != nil { - return nil, errors.Wrap(err, "failed to read PrivilegeCount") - } - - rtn := make(map[string]Privilege, privilegeCount) - for i := 0; i < int(privilegeCount); i++ { - var luid int64 - err = binary.Read(b, binary.LittleEndian, &luid) - if err != nil { - return nil, errors.Wrap(err, "failed to read LUID value") - } - - var attributes uint32 - err = binary.Read(b, binary.LittleEndian, &attributes) - if err != nil { - return nil, errors.Wrap(err, "failed to read attributes") - } - - name, err := LookupPrivilegeName("", luid) - if err != nil { - return nil, errors.Wrapf(err, "LookupPrivilegeName failed for LUID=%v", luid) - } - - rtn[name] = Privilege{ - LUID: luid, - Name: name, - EnabledByDefault: (attributes & _SE_PRIVILEGE_ENABLED_BY_DEFAULT) > 0, - Enabled: (attributes & _SE_PRIVILEGE_ENABLED) > 0, - Removed: (attributes & _SE_PRIVILEGE_REMOVED) > 0, - Used: (attributes & _SE_PRIVILEGE_USED_FOR_ACCESS) > 0, - } - } - - return rtn, nil -} - -// GetTokenUser returns the User associated with the given Token. -func GetTokenUser(token syscall.Token) (User, error) { - tokenUser, err := token.GetTokenUser() - if err != nil { - return User{}, errors.Wrap(err, "GetTokenUser failed") - } - - var user User - user.SID, err = tokenUser.User.Sid.String() - if err != nil { - return user, errors.Wrap(err, "ConvertSidToStringSid failed") - } - - user.Account, user.Domain, user.Type, err = tokenUser.User.Sid.LookupAccount("") - if err != nil { - return user, errors.Wrap(err, "LookupAccountSid failed") - } - - return user, nil -} - -// GetDebugInfo returns general debug info about the current process. 
-func GetDebugInfo() (*DebugInfo, error) { - h, err := windows.GetCurrentProcess() - if err != nil { - return nil, err - } - - var token syscall.Token - err = syscall.OpenProcessToken(syscall.Handle(h), syscall.TOKEN_QUERY, &token) - if err != nil { - return nil, err - } - - privs, err := GetTokenPrivileges(token) - if err != nil { - return nil, err - } - - user, err := GetTokenUser(token) - if err != nil { - return nil, err - } - - return &DebugInfo{ - User: user, - ProcessPrivs: privs, - OSVersion: GetWindowsVersion(), - Arch: runtime.GOARCH, - NumCPU: runtime.NumCPU(), - }, nil -} diff --git a/vendor/github.com/elastic/gosigar/sys/windows/syscall_windows.go b/vendor/github.com/elastic/gosigar/sys/windows/syscall_windows.go deleted file mode 100644 index 88df0fe..0000000 --- a/vendor/github.com/elastic/gosigar/sys/windows/syscall_windows.go +++ /dev/null @@ -1,385 +0,0 @@ -package windows - -import ( - "fmt" - "syscall" - "time" - "unsafe" - - "github.com/pkg/errors" -) - -var ( - sizeofUint32 = 4 - sizeofProcessEntry32 = uint32(unsafe.Sizeof(ProcessEntry32{})) - sizeofProcessMemoryCountersEx = uint32(unsafe.Sizeof(ProcessMemoryCountersEx{})) - sizeofMemoryStatusEx = uint32(unsafe.Sizeof(MemoryStatusEx{})) -) - -// Process-specific access rights. Others are declared in the syscall package. -// https://msdn.microsoft.com/en-us/library/windows/desktop/ms684880(v=vs.85).aspx -const ( - PROCESS_QUERY_LIMITED_INFORMATION uint32 = 0x1000 - PROCESS_VM_READ uint32 = 0x0010 -) - -// MAX_PATH is the maximum length for a path in Windows. -// https://msdn.microsoft.com/en-us/library/windows/desktop/aa365247(v=vs.85).aspx -const MAX_PATH = 260 - -// DriveType represents a type of drive (removable, fixed, CD-ROM, RAM disk, or -// network drive). -type DriveType uint32 - -// Drive types as returned by GetDriveType. 
-// https://msdn.microsoft.com/en-us/library/windows/desktop/aa364939(v=vs.85).aspx -const ( - DRIVE_UNKNOWN DriveType = iota - DRIVE_NO_ROOT_DIR - DRIVE_REMOVABLE - DRIVE_FIXED - DRIVE_REMOTE - DRIVE_CDROM - DRIVE_RAMDISK -) - -func (dt DriveType) String() string { - names := map[DriveType]string{ - DRIVE_UNKNOWN: "unknown", - DRIVE_NO_ROOT_DIR: "invalid", - DRIVE_REMOVABLE: "removable", - DRIVE_FIXED: "fixed", - DRIVE_REMOTE: "remote", - DRIVE_CDROM: "cdrom", - DRIVE_RAMDISK: "ramdisk", - } - - name, found := names[dt] - if !found { - return "unknown DriveType value" - } - return name -} - -// Flags that can be used with CreateToolhelp32Snapshot. -const ( - TH32CS_INHERIT uint32 = 0x80000000 // Indicates that the snapshot handle is to be inheritable. - TH32CS_SNAPHEAPLIST uint32 = 0x00000001 // Includes all heaps of the process specified in th32ProcessID in the snapshot. - TH32CS_SNAPMODULE uint32 = 0x00000008 // Includes all modules of the process specified in th32ProcessID in the snapshot. - TH32CS_SNAPMODULE32 uint32 = 0x00000010 // Includes all 32-bit modules of the process specified in th32ProcessID in the snapshot when called from a 64-bit process. - TH32CS_SNAPPROCESS uint32 = 0x00000002 // Includes all processes in the system in the snapshot. - TH32CS_SNAPTHREAD uint32 = 0x00000004 // Includes all threads in the system in the snapshot. -) - -// ProcessEntry32 is an equivalent representation of PROCESSENTRY32 in the -// Windows API. It contains a process's information. Do not modify or reorder. -// https://msdn.microsoft.com/en-us/library/windows/desktop/ms684839(v=vs.85).aspx -type ProcessEntry32 struct { - size uint32 - CntUsage uint32 - ProcessID uint32 - DefaultHeapID uintptr - ModuleID uint32 - CntThreads uint32 - ParentProcessID uint32 - PriorityClassBase int32 - Flags uint32 - exeFile [MAX_PATH]uint16 -} - -// ExeFile returns the name of the executable file for the process. It does -// not contain the full path. 
-func (p ProcessEntry32) ExeFile() string { - return syscall.UTF16ToString(p.exeFile[:]) -} - -func (p ProcessEntry32) String() string { - return fmt.Sprintf("{CntUsage:%v ProcessID:%v DefaultHeapID:%v ModuleID:%v "+ - "CntThreads:%v ParentProcessID:%v PriorityClassBase:%v Flags:%v ExeFile:%v", - p.CntUsage, p.ProcessID, p.DefaultHeapID, p.ModuleID, p.CntThreads, - p.ParentProcessID, p.PriorityClassBase, p.Flags, p.ExeFile()) -} - -// MemoryStatusEx is an equivalent representation of MEMORYSTATUSEX in the -// Windows API. It contains information about the current state of both physical -// and virtual memory, including extended memory. -// https://msdn.microsoft.com/en-us/library/windows/desktop/aa366770 -type MemoryStatusEx struct { - length uint32 - MemoryLoad uint32 - TotalPhys uint64 - AvailPhys uint64 - TotalPageFile uint64 - AvailPageFile uint64 - TotalVirtual uint64 - AvailVirtual uint64 - AvailExtendedVirtual uint64 -} - -// ProcessMemoryCountersEx is an equivalent representation of -// PROCESS_MEMORY_COUNTERS_EX in the Windows API. -// https://msdn.microsoft.com/en-us/library/windows/desktop/ms684874(v=vs.85).aspx -type ProcessMemoryCountersEx struct { - cb uint32 - PageFaultCount uint32 - PeakWorkingSetSize uintptr - WorkingSetSize uintptr - QuotaPeakPagedPoolUsage uintptr - QuotaPagedPoolUsage uintptr - QuotaPeakNonPagedPoolUsage uintptr - QuotaNonPagedPoolUsage uintptr - PagefileUsage uintptr - PeakPagefileUsage uintptr - PrivateUsage uintptr -} - -// GetLogicalDriveStrings returns a list of drives in the system. -// https://msdn.microsoft.com/en-us/library/windows/desktop/aa364975(v=vs.85).aspx -func GetLogicalDriveStrings() ([]string, error) { - // Determine the size of the buffer required to receive all drives. 
- bufferLength, err := _GetLogicalDriveStringsW(0, nil) - if err != nil { - return nil, errors.Wrap(err, "GetLogicalDriveStringsW failed to get buffer length") - } - if bufferLength < 0 { - return nil, errors.New("GetLogicalDriveStringsW returned an invalid buffer length") - } - - buffer := make([]uint16, bufferLength) - _, err = _GetLogicalDriveStringsW(uint32(len(buffer)), &buffer[0]) - if err != nil { - return nil, errors.Wrap(err, "GetLogicalDriveStringsW failed") - } - - // Split the uint16 slice at null-terminators. - var startIdx int - var drivesUTF16 [][]uint16 - for i, value := range buffer { - if value == 0 { - drivesUTF16 = append(drivesUTF16, buffer[startIdx:i]) - startIdx = i + 1 - } - } - - // Convert the utf16 slices to strings. - drives := make([]string, 0, len(drivesUTF16)) - for _, driveUTF16 := range drivesUTF16 { - if len(driveUTF16) > 0 { - drives = append(drives, syscall.UTF16ToString(driveUTF16)) - } - } - - return drives, nil -} - -// GlobalMemoryStatusEx retrieves information about the system's current usage -// of both physical and virtual memory. -// https://msdn.microsoft.com/en-us/library/windows/desktop/aa366589(v=vs.85).aspx -func GlobalMemoryStatusEx() (MemoryStatusEx, error) { - memoryStatusEx := MemoryStatusEx{length: sizeofMemoryStatusEx} - err := _GlobalMemoryStatusEx(&memoryStatusEx) - if err != nil { - return MemoryStatusEx{}, errors.Wrap(err, "GlobalMemoryStatusEx failed") - } - - return memoryStatusEx, nil -} - -// GetProcessMemoryInfo retrieves information about the memory usage of the -// specified process. 
-// https://msdn.microsoft.com/en-us/library/windows/desktop/ms683219(v=vs.85).aspx -func GetProcessMemoryInfo(handle syscall.Handle) (ProcessMemoryCountersEx, error) { - processMemoryCountersEx := ProcessMemoryCountersEx{cb: sizeofProcessMemoryCountersEx} - err := _GetProcessMemoryInfo(handle, &processMemoryCountersEx, processMemoryCountersEx.cb) - if err != nil { - return ProcessMemoryCountersEx{}, errors.Wrap(err, "GetProcessMemoryInfo failed") - } - - return processMemoryCountersEx, nil -} - -// GetProcessImageFileName Retrieves the name of the executable file for the -// specified process. -// https://msdn.microsoft.com/en-us/library/windows/desktop/ms683217(v=vs.85).aspx -func GetProcessImageFileName(handle syscall.Handle) (string, error) { - buffer := make([]uint16, MAX_PATH) - _, err := _GetProcessImageFileName(handle, &buffer[0], uint32(len(buffer))) - if err != nil { - return "", errors.Wrap(err, "GetProcessImageFileName failed") - } - - return syscall.UTF16ToString(buffer), nil -} - -// GetSystemTimes retrieves system timing information. On a multiprocessor -// system, the values returned are the sum of the designated times across all -// processors. The returned kernel time does not include the system idle time. -// https://msdn.microsoft.com/en-us/library/windows/desktop/ms724400(v=vs.85).aspx -func GetSystemTimes() (idle, kernel, user time.Duration, err error) { - var idleTime, kernelTime, userTime syscall.Filetime - err = _GetSystemTimes(&idleTime, &kernelTime, &userTime) - if err != nil { - return 0, 0, 0, errors.Wrap(err, "GetSystemTimes failed") - } - - idle = FiletimeToDuration(&idleTime) - kernel = FiletimeToDuration(&kernelTime) // Kernel time includes idle time so we subtract it out. - user = FiletimeToDuration(&userTime) - - return idle, kernel - idle, user, nil -} - -// FiletimeToDuration converts a Filetime to a time.Duration. Do not use this -// method to convert a Filetime to an actual clock time, for that use -// Filetime.Nanosecond(). 
-func FiletimeToDuration(ft *syscall.Filetime) time.Duration { - n := int64(ft.HighDateTime)<<32 + int64(ft.LowDateTime) // in 100-nanosecond intervals - return time.Duration(n * 100) -} - -// GetDriveType Determines whether a disk drive is a removable, fixed, CD-ROM, -// RAM disk, or network drive. A trailing backslash is required on the -// rootPathName. -// https://msdn.microsoft.com/en-us/library/windows/desktop/aa364939 -func GetDriveType(rootPathName string) (DriveType, error) { - rootPathNamePtr, err := syscall.UTF16PtrFromString(rootPathName) - if err != nil { - return DRIVE_UNKNOWN, errors.Wrapf(err, "UTF16PtrFromString failed for rootPathName=%v", rootPathName) - } - - dt, err := _GetDriveType(rootPathNamePtr) - if err != nil { - return DRIVE_UNKNOWN, errors.Wrapf(err, "GetDriveType failed for rootPathName=%v", rootPathName) - } - - return dt, nil -} - -// EnumProcesses retrieves the process identifier for each process object in the -// system. This function can return a max of 65536 PIDs. If there are more -// processes than that then this will not return them all. -// https://msdn.microsoft.com/en-us/library/windows/desktop/ms682629(v=vs.85).aspx -func EnumProcesses() ([]uint32, error) { - enumProcesses := func(size int) ([]uint32, error) { - var ( - pids = make([]uint32, size) - sizeBytes = len(pids) * sizeofUint32 - bytesWritten uint32 - ) - - err := _EnumProcesses(&pids[0], uint32(sizeBytes), &bytesWritten) - - pidsWritten := int(bytesWritten) / sizeofUint32 - if int(bytesWritten)%sizeofUint32 != 0 || pidsWritten > len(pids) { - return nil, errors.Errorf("EnumProcesses returned an invalid bytesWritten value of %v", bytesWritten) - } - pids = pids[:pidsWritten] - - return pids, err - } - - // Retry the EnumProcesses call with larger arrays if needed. 
- size := 2048 - var pids []uint32 - for tries := 0; tries < 5; tries++ { - var err error - pids, err = enumProcesses(size) - if err != nil { - return nil, errors.Wrap(err, "EnumProcesses failed") - } - - if len(pids) < size { - break - } - - // Increase the size the pids array and retry the enumProcesses call - // because the array wasn't large enough to hold all of the processes. - size *= 2 - } - - return pids, nil -} - -// GetDiskFreeSpaceEx retrieves information about the amount of space that is -// available on a disk volume, which is the total amount of space, the total -// amount of free space, and the total amount of free space available to the -// user that is associated with the calling thread. -// https://msdn.microsoft.com/en-us/library/windows/desktop/aa364937(v=vs.85).aspx -func GetDiskFreeSpaceEx(directoryName string) (freeBytesAvailable, totalNumberOfBytes, totalNumberOfFreeBytes uint64, err error) { - directoryNamePtr, err := syscall.UTF16PtrFromString(directoryName) - if err != nil { - return 0, 0, 0, errors.Wrapf(err, "UTF16PtrFromString failed for directoryName=%v", directoryName) - } - - err = _GetDiskFreeSpaceEx(directoryNamePtr, &freeBytesAvailable, &totalNumberOfBytes, &totalNumberOfFreeBytes) - if err != nil { - return 0, 0, 0, err - } - - return freeBytesAvailable, totalNumberOfBytes, totalNumberOfFreeBytes, nil -} - -// CreateToolhelp32Snapshot takes a snapshot of the specified processes, as well -// as the heaps, modules, and threads used by these processes. -// https://msdn.microsoft.com/en-us/library/windows/desktop/ms682489(v=vs.85).aspx -func CreateToolhelp32Snapshot(flags, pid uint32) (syscall.Handle, error) { - h, err := _CreateToolhelp32Snapshot(flags, pid) - if err != nil { - return syscall.InvalidHandle, err - } - if h == syscall.InvalidHandle { - return syscall.InvalidHandle, syscall.GetLastError() - } - - return h, nil -} - -// Process32First retrieves information about the first process encountered in a -// system snapshot. 
-// https://msdn.microsoft.com/en-us/library/windows/desktop/ms684834 -func Process32First(handle syscall.Handle) (ProcessEntry32, error) { - processEntry32 := ProcessEntry32{size: sizeofProcessEntry32} - err := _Process32First(handle, &processEntry32) - if err != nil { - return ProcessEntry32{}, errors.Wrap(err, "Process32First failed") - } - - return processEntry32, nil -} - -// Process32Next retrieves information about the next process recorded in a -// system snapshot. When there are no more processes to iterate then -// syscall.ERROR_NO_MORE_FILES is returned (use errors.Cause() to unwrap). -// https://msdn.microsoft.com/en-us/library/windows/desktop/ms684836 -func Process32Next(handle syscall.Handle) (ProcessEntry32, error) { - processEntry32 := ProcessEntry32{size: sizeofProcessEntry32} - err := _Process32Next(handle, &processEntry32) - if err != nil { - return ProcessEntry32{}, errors.Wrap(err, "Process32Next failed") - } - - return processEntry32, nil -} - -// Use "GOOS=windows go generate -v -x ." to generate the source. - -// Add -trace to enable debug prints around syscalls. 
-//go:generate go run $GOROOT/src/syscall/mksyscall_windows.go -output zsyscall_windows.go syscall_windows.go - -// Windows API calls -//sys _GlobalMemoryStatusEx(buffer *MemoryStatusEx) (err error) = kernel32.GlobalMemoryStatusEx -//sys _GetLogicalDriveStringsW(bufferLength uint32, buffer *uint16) (length uint32, err error) = kernel32.GetLogicalDriveStringsW -//sys _GetProcessMemoryInfo(handle syscall.Handle, psmemCounters *ProcessMemoryCountersEx, cb uint32) (err error) = psapi.GetProcessMemoryInfo -//sys _GetProcessImageFileName(handle syscall.Handle, outImageFileName *uint16, size uint32) (length uint32, err error) = psapi.GetProcessImageFileNameW -//sys _GetSystemTimes(idleTime *syscall.Filetime, kernelTime *syscall.Filetime, userTime *syscall.Filetime) (err error) = kernel32.GetSystemTimes -//sys _GetDriveType(rootPathName *uint16) (dt DriveType, err error) = kernel32.GetDriveTypeW -//sys _EnumProcesses(processIds *uint32, sizeBytes uint32, bytesReturned *uint32) (err error) = psapi.EnumProcesses -//sys _GetDiskFreeSpaceEx(directoryName *uint16, freeBytesAvailable *uint64, totalNumberOfBytes *uint64, totalNumberOfFreeBytes *uint64) (err error) = kernel32.GetDiskFreeSpaceExW -//sys _Process32First(handle syscall.Handle, processEntry32 *ProcessEntry32) (err error) = kernel32.Process32FirstW -//sys _Process32Next(handle syscall.Handle, processEntry32 *ProcessEntry32) (err error) = kernel32.Process32NextW -//sys _CreateToolhelp32Snapshot(flags uint32, processID uint32) (handle syscall.Handle, err error) = kernel32.CreateToolhelp32Snapshot -//sys _NtQuerySystemInformation(systemInformationClass uint32, systemInformation *byte, systemInformationLength uint32, returnLength *uint32) (ntstatus uint32, err error) = ntdll.NtQuerySystemInformation -//sys _NtQueryInformationProcess(processHandle syscall.Handle, processInformationClass uint32, processInformation *byte, processInformationLength uint32, returnLength *uint32) (ntstatus uint32, err error) = 
ntdll.NtQueryInformationProcess -//sys _LookupPrivilegeName(systemName string, luid *int64, buffer *uint16, size *uint32) (err error) = advapi32.LookupPrivilegeNameW -//sys _LookupPrivilegeValue(systemName string, name string, luid *int64) (err error) = advapi32.LookupPrivilegeValueW -//sys _AdjustTokenPrivileges(token syscall.Token, releaseAll bool, input *byte, outputSize uint32, output *byte, requiredSize *uint32) (success bool, err error) [true] = advapi32.AdjustTokenPrivileges diff --git a/vendor/github.com/elastic/gosigar/sys/windows/version.go b/vendor/github.com/elastic/gosigar/sys/windows/version.go deleted file mode 100644 index d0bca89..0000000 --- a/vendor/github.com/elastic/gosigar/sys/windows/version.go +++ /dev/null @@ -1,43 +0,0 @@ -// +build windows - -package windows - -import ( - "fmt" - "syscall" -) - -// Version identifies a Windows version by major, minor, and build number. -type Version struct { - Major int - Minor int - Build int -} - -// GetWindowsVersion returns the Windows version information. Applications not -// manifested for Windows 8.1 or Windows 10 will return the Windows 8 OS version -// value (6.2). -// -// For a table of version numbers see: -// https://msdn.microsoft.com/en-us/library/windows/desktop/ms724833(v=vs.85).aspx -func GetWindowsVersion() Version { - // https://msdn.microsoft.com/en-us/library/windows/desktop/ms724439(v=vs.85).aspx - ver, err := syscall.GetVersion() - if err != nil { - // GetVersion should never return an error. - panic(fmt.Errorf("GetVersion failed: %v", err)) - } - - return Version{ - Major: int(ver & 0xFF), - Minor: int(ver >> 8 & 0xFF), - Build: int(ver >> 16), - } -} - -// IsWindowsVistaOrGreater returns true if the Windows version is Vista or -// greater. -func (v Version) IsWindowsVistaOrGreater() bool { - // Vista is 6.0. 
- return v.Major >= 6 && v.Minor >= 0 -} diff --git a/vendor/github.com/elastic/gosigar/sys/windows/zsyscall_windows.go b/vendor/github.com/elastic/gosigar/sys/windows/zsyscall_windows.go deleted file mode 100644 index ff7518a..0000000 --- a/vendor/github.com/elastic/gosigar/sys/windows/zsyscall_windows.go +++ /dev/null @@ -1,262 +0,0 @@ -// MACHINE GENERATED BY 'go generate' COMMAND; DO NOT EDIT - -package windows - -import ( - "syscall" - "unsafe" -) - -var _ unsafe.Pointer - -var ( - modkernel32 = syscall.NewLazyDLL("kernel32.dll") - modpsapi = syscall.NewLazyDLL("psapi.dll") - modntdll = syscall.NewLazyDLL("ntdll.dll") - modadvapi32 = syscall.NewLazyDLL("advapi32.dll") - - procGlobalMemoryStatusEx = modkernel32.NewProc("GlobalMemoryStatusEx") - procGetLogicalDriveStringsW = modkernel32.NewProc("GetLogicalDriveStringsW") - procGetProcessMemoryInfo = modpsapi.NewProc("GetProcessMemoryInfo") - procGetProcessImageFileNameW = modpsapi.NewProc("GetProcessImageFileNameW") - procGetSystemTimes = modkernel32.NewProc("GetSystemTimes") - procGetDriveTypeW = modkernel32.NewProc("GetDriveTypeW") - procEnumProcesses = modpsapi.NewProc("EnumProcesses") - procGetDiskFreeSpaceExW = modkernel32.NewProc("GetDiskFreeSpaceExW") - procProcess32FirstW = modkernel32.NewProc("Process32FirstW") - procProcess32NextW = modkernel32.NewProc("Process32NextW") - procCreateToolhelp32Snapshot = modkernel32.NewProc("CreateToolhelp32Snapshot") - procNtQuerySystemInformation = modntdll.NewProc("NtQuerySystemInformation") - procNtQueryInformationProcess = modntdll.NewProc("NtQueryInformationProcess") - procLookupPrivilegeNameW = modadvapi32.NewProc("LookupPrivilegeNameW") - procLookupPrivilegeValueW = modadvapi32.NewProc("LookupPrivilegeValueW") - procAdjustTokenPrivileges = modadvapi32.NewProc("AdjustTokenPrivileges") -) - -func _GlobalMemoryStatusEx(buffer *MemoryStatusEx) (err error) { - r1, _, e1 := syscall.Syscall(procGlobalMemoryStatusEx.Addr(), 1, uintptr(unsafe.Pointer(buffer)), 0, 0) - if 
r1 == 0 { - if e1 != 0 { - err = error(e1) - } else { - err = syscall.EINVAL - } - } - return -} - -func _GetLogicalDriveStringsW(bufferLength uint32, buffer *uint16) (length uint32, err error) { - r0, _, e1 := syscall.Syscall(procGetLogicalDriveStringsW.Addr(), 2, uintptr(bufferLength), uintptr(unsafe.Pointer(buffer)), 0) - length = uint32(r0) - if length == 0 { - if e1 != 0 { - err = error(e1) - } else { - err = syscall.EINVAL - } - } - return -} - -func _GetProcessMemoryInfo(handle syscall.Handle, psmemCounters *ProcessMemoryCountersEx, cb uint32) (err error) { - r1, _, e1 := syscall.Syscall(procGetProcessMemoryInfo.Addr(), 3, uintptr(handle), uintptr(unsafe.Pointer(psmemCounters)), uintptr(cb)) - if r1 == 0 { - if e1 != 0 { - err = error(e1) - } else { - err = syscall.EINVAL - } - } - return -} - -func _GetProcessImageFileName(handle syscall.Handle, outImageFileName *uint16, size uint32) (length uint32, err error) { - r0, _, e1 := syscall.Syscall(procGetProcessImageFileNameW.Addr(), 3, uintptr(handle), uintptr(unsafe.Pointer(outImageFileName)), uintptr(size)) - length = uint32(r0) - if length == 0 { - if e1 != 0 { - err = error(e1) - } else { - err = syscall.EINVAL - } - } - return -} - -func _GetSystemTimes(idleTime *syscall.Filetime, kernelTime *syscall.Filetime, userTime *syscall.Filetime) (err error) { - r1, _, e1 := syscall.Syscall(procGetSystemTimes.Addr(), 3, uintptr(unsafe.Pointer(idleTime)), uintptr(unsafe.Pointer(kernelTime)), uintptr(unsafe.Pointer(userTime))) - if r1 == 0 { - if e1 != 0 { - err = error(e1) - } else { - err = syscall.EINVAL - } - } - return -} - -func _GetDriveType(rootPathName *uint16) (dt DriveType, err error) { - r0, _, e1 := syscall.Syscall(procGetDriveTypeW.Addr(), 1, uintptr(unsafe.Pointer(rootPathName)), 0, 0) - dt = DriveType(r0) - if dt == 0 { - if e1 != 0 { - err = error(e1) - } else { - err = syscall.EINVAL - } - } - return -} - -func _EnumProcesses(processIds *uint32, sizeBytes uint32, bytesReturned *uint32) (err error) { 
- r1, _, e1 := syscall.Syscall(procEnumProcesses.Addr(), 3, uintptr(unsafe.Pointer(processIds)), uintptr(sizeBytes), uintptr(unsafe.Pointer(bytesReturned))) - if r1 == 0 { - if e1 != 0 { - err = error(e1) - } else { - err = syscall.EINVAL - } - } - return -} - -func _GetDiskFreeSpaceEx(directoryName *uint16, freeBytesAvailable *uint64, totalNumberOfBytes *uint64, totalNumberOfFreeBytes *uint64) (err error) { - r1, _, e1 := syscall.Syscall6(procGetDiskFreeSpaceExW.Addr(), 4, uintptr(unsafe.Pointer(directoryName)), uintptr(unsafe.Pointer(freeBytesAvailable)), uintptr(unsafe.Pointer(totalNumberOfBytes)), uintptr(unsafe.Pointer(totalNumberOfFreeBytes)), 0, 0) - if r1 == 0 { - if e1 != 0 { - err = error(e1) - } else { - err = syscall.EINVAL - } - } - return -} - -func _Process32First(handle syscall.Handle, processEntry32 *ProcessEntry32) (err error) { - r1, _, e1 := syscall.Syscall(procProcess32FirstW.Addr(), 2, uintptr(handle), uintptr(unsafe.Pointer(processEntry32)), 0) - if r1 == 0 { - if e1 != 0 { - err = error(e1) - } else { - err = syscall.EINVAL - } - } - return -} - -func _Process32Next(handle syscall.Handle, processEntry32 *ProcessEntry32) (err error) { - r1, _, e1 := syscall.Syscall(procProcess32NextW.Addr(), 2, uintptr(handle), uintptr(unsafe.Pointer(processEntry32)), 0) - if r1 == 0 { - if e1 != 0 { - err = error(e1) - } else { - err = syscall.EINVAL - } - } - return -} - -func _CreateToolhelp32Snapshot(flags uint32, processID uint32) (handle syscall.Handle, err error) { - r0, _, e1 := syscall.Syscall(procCreateToolhelp32Snapshot.Addr(), 2, uintptr(flags), uintptr(processID), 0) - handle = syscall.Handle(r0) - if handle == 0 { - if e1 != 0 { - err = error(e1) - } else { - err = syscall.EINVAL - } - } - return -} - -func _NtQuerySystemInformation(systemInformationClass uint32, systemInformation *byte, systemInformationLength uint32, returnLength *uint32) (ntstatus uint32, err error) { - r0, _, e1 := syscall.Syscall6(procNtQuerySystemInformation.Addr(), 4, 
uintptr(systemInformationClass), uintptr(unsafe.Pointer(systemInformation)), uintptr(systemInformationLength), uintptr(unsafe.Pointer(returnLength)), 0, 0) - ntstatus = uint32(r0) - if ntstatus == 0 { - if e1 != 0 { - err = error(e1) - } else { - err = syscall.EINVAL - } - } - return -} - -func _NtQueryInformationProcess(processHandle syscall.Handle, processInformationClass uint32, processInformation *byte, processInformationLength uint32, returnLength *uint32) (ntstatus uint32, err error) { - r0, _, e1 := syscall.Syscall6(procNtQueryInformationProcess.Addr(), 5, uintptr(processHandle), uintptr(processInformationClass), uintptr(unsafe.Pointer(processInformation)), uintptr(processInformationLength), uintptr(unsafe.Pointer(returnLength)), 0) - ntstatus = uint32(r0) - if ntstatus == 0 { - if e1 != 0 { - err = error(e1) - } else { - err = syscall.EINVAL - } - } - return -} - -func _LookupPrivilegeName(systemName string, luid *int64, buffer *uint16, size *uint32) (err error) { - var _p0 *uint16 - _p0, err = syscall.UTF16PtrFromString(systemName) - if err != nil { - return - } - return __LookupPrivilegeName(_p0, luid, buffer, size) -} - -func __LookupPrivilegeName(systemName *uint16, luid *int64, buffer *uint16, size *uint32) (err error) { - r1, _, e1 := syscall.Syscall6(procLookupPrivilegeNameW.Addr(), 4, uintptr(unsafe.Pointer(systemName)), uintptr(unsafe.Pointer(luid)), uintptr(unsafe.Pointer(buffer)), uintptr(unsafe.Pointer(size)), 0, 0) - if r1 == 0 { - if e1 != 0 { - err = error(e1) - } else { - err = syscall.EINVAL - } - } - return -} - -func _LookupPrivilegeValue(systemName string, name string, luid *int64) (err error) { - var _p0 *uint16 - _p0, err = syscall.UTF16PtrFromString(systemName) - if err != nil { - return - } - var _p1 *uint16 - _p1, err = syscall.UTF16PtrFromString(name) - if err != nil { - return - } - return __LookupPrivilegeValue(_p0, _p1, luid) -} - -func __LookupPrivilegeValue(systemName *uint16, name *uint16, luid *int64) (err error) { - r1, _, 
e1 := syscall.Syscall(procLookupPrivilegeValueW.Addr(), 3, uintptr(unsafe.Pointer(systemName)), uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(luid))) - if r1 == 0 { - if e1 != 0 { - err = error(e1) - } else { - err = syscall.EINVAL - } - } - return -} - -func _AdjustTokenPrivileges(token syscall.Token, releaseAll bool, input *byte, outputSize uint32, output *byte, requiredSize *uint32) (success bool, err error) { - var _p0 uint32 - if releaseAll { - _p0 = 1 - } else { - _p0 = 0 - } - r0, _, e1 := syscall.Syscall6(procAdjustTokenPrivileges.Addr(), 6, uintptr(token), uintptr(_p0), uintptr(unsafe.Pointer(input)), uintptr(outputSize), uintptr(unsafe.Pointer(output)), uintptr(unsafe.Pointer(requiredSize))) - success = r0 != 0 - if true { - if e1 != 0 { - err = error(e1) - } else { - err = syscall.EINVAL - } - } - return -} diff --git a/vendor/github.com/ethereum/go-ethereum/.dockerignore b/vendor/github.com/ethereum/go-ethereum/.dockerignore deleted file mode 100644 index 0c013d1..0000000 --- a/vendor/github.com/ethereum/go-ethereum/.dockerignore +++ /dev/null @@ -1,5 +0,0 @@ -**/*_test.go - -build/_workspace -build/_bin -tests/testdata diff --git a/vendor/github.com/ethereum/go-ethereum/.gitattributes b/vendor/github.com/ethereum/go-ethereum/.gitattributes deleted file mode 100644 index 0269fab..0000000 --- a/vendor/github.com/ethereum/go-ethereum/.gitattributes +++ /dev/null @@ -1,3 +0,0 @@ -# Auto detect text files and perform LF normalization -* text=auto -*.sol linguist-language=Solidity diff --git a/vendor/github.com/ethereum/go-ethereum/.gitignore b/vendor/github.com/ethereum/go-ethereum/.gitignore deleted file mode 100644 index 1ee8b83..0000000 --- a/vendor/github.com/ethereum/go-ethereum/.gitignore +++ /dev/null @@ -1,49 +0,0 @@ -# See http://help.github.com/ignore-files/ for more about ignoring files. 
-# -# If you find yourself ignoring temporary files generated by your text editor -# or operating system, you probably want to add a global ignore instead: -# git config --global core.excludesfile ~/.gitignore_global - -/tmp -*/**/*un~ -*/**/*.test -*un~ -.DS_Store -*/**/.DS_Store -.ethtest -*/**/*tx_database* -*/**/*dapps* -build/_vendor/pkg - -#* -.#* -*# -*~ -.project -.settings - -# used by the Makefile -/build/_workspace/ -/build/cache/ -/build/bin/ -/geth*.zip - -# travis -profile.tmp -profile.cov - -# IdeaIDE -.idea - -# VS Code -.vscode - -# dashboard -/dashboard/assets/flow-typed -/dashboard/assets/node_modules -/dashboard/assets/stats.json -/dashboard/assets/bundle.js -/dashboard/assets/bundle.js.map -/dashboard/assets/package-lock.json - -**/yarn-error.log diff --git a/vendor/github.com/ethereum/go-ethereum/.gitmodules b/vendor/github.com/ethereum/go-ethereum/.gitmodules deleted file mode 100644 index 32bdb3b..0000000 --- a/vendor/github.com/ethereum/go-ethereum/.gitmodules +++ /dev/null @@ -1,3 +0,0 @@ -[submodule "tests"] - path = tests/testdata - url = https://github.com/ethereum/tests diff --git a/vendor/github.com/ethereum/go-ethereum/.golangci.yml b/vendor/github.com/ethereum/go-ethereum/.golangci.yml deleted file mode 100644 index 24d00da..0000000 --- a/vendor/github.com/ethereum/go-ethereum/.golangci.yml +++ /dev/null @@ -1,50 +0,0 @@ -# This file configures github.com/golangci/golangci-lint. - -run: - timeout: 2m - tests: true - # default is true. 
Enables skipping of directories: - # vendor$, third_party$, testdata$, examples$, Godeps$, builtin$ - skip-dirs-use-default: true - skip-files: - - core/genesis_alloc.go - -linters: - disable-all: true - enable: - - deadcode - - goconst - - goimports - - gosimple - - govet - - ineffassign - - misspell - # - staticcheck - - unconvert - # - unused - - varcheck - -linters-settings: - gofmt: - simplify: true - goconst: - min-len: 3 # minimum length of string constant - min-occurrences: 6 # minimum number of occurrences - -issues: - exclude-rules: - - path: crypto/blake2b/ - linters: - - deadcode - - path: crypto/bn256/cloudflare - linters: - - deadcode - - path: p2p/discv5/ - linters: - - deadcode - - path: core/vm/instructions_test.go - linters: - - goconst - - path: cmd/faucet/ - linters: - - deadcode diff --git a/vendor/github.com/ethereum/go-ethereum/.mailmap b/vendor/github.com/ethereum/go-ethereum/.mailmap deleted file mode 100644 index cc4b871..0000000 --- a/vendor/github.com/ethereum/go-ethereum/.mailmap +++ /dev/null @@ -1,123 +0,0 @@ -Jeffrey Wilcke -Jeffrey Wilcke -Jeffrey Wilcke -Jeffrey Wilcke - -Viktor Trón - -Joseph Goulden - -Nick Savers - -Maran Hidskes - -Taylor Gerring -Taylor Gerring - -Bas van Kervel -Bas van Kervel -Bas van Kervel -Bas van Kervel - -Sven Ehlert - -Vitalik Buterin - -Marian Oancea - -Christoph Jentzsch - -Heiko Hees - -Alex Leverington -Alex Leverington - -Zsolt Felföldi - -Gavin Wood - -Martin Becze -Martin Becze - -Dimitry Khokhlov - -Roman Mandeleil - -Alec Perseghin - -Alon Muroch - -Arkadiy Paronyan - -Jae Kwon - -Aaron Kumavis - -Nick Dodson - -Jason Carver -Jason Carver - -Joseph Chow -Joseph Chow ethers - -Enrique Fynn - -Vincent G - -RJ Catalano -RJ Catalano - -Nchinda Nchinda - -Aron Fischer - -Vlad Gluhovsky - -Ville Sundell - -Elliot Shepherd - -Yohann Léon - -Gregg Dourgarian - -Casey Detrio - -Jens Agerberg - -Nick Johnson - -Henning Diedrich -Henning Diedrich Drake Burroughs - -Felix Lange -Felix Lange - -Максим 
Чусовлянов - -Louis Holbrook -Louis Holbrook - -Thomas Bocek - -Victor Tran - -Justin Drake - -Frank Wang - -Gary Rong - -Guillaume Nicolas - -Sorin Neacsu -Sorin Neacsu - -Valentin Wüstholz -Valentin Wüstholz - -Armin Braun - -Ernesto del Toro -Ernesto del Toro diff --git a/vendor/github.com/ethereum/go-ethereum/.travis.yml b/vendor/github.com/ethereum/go-ethereum/.travis.yml deleted file mode 100644 index 288c579..0000000 --- a/vendor/github.com/ethereum/go-ethereum/.travis.yml +++ /dev/null @@ -1,241 +0,0 @@ -language: go -go_import_path: github.com/ethereum/go-ethereum -sudo: false -jobs: - include: - # This builder only tests code linters on latest version of Go - - stage: lint - os: linux - dist: xenial - go: 1.13.x - env: - - lint - git: - submodules: false # avoid cloning ethereum/tests - script: - - go run build/ci.go lint - - - stage: build - os: linux - dist: xenial - go: 1.11.x - env: - - GO111MODULE=on - script: - - go run build/ci.go install - - go run build/ci.go test -coverage $TEST_PACKAGES - - - stage: build - os: linux - dist: xenial - go: 1.12.x - env: - - GO111MODULE=on - script: - - go run build/ci.go install - - go run build/ci.go test -coverage $TEST_PACKAGES - - # These are the latest Go versions. 
- - stage: build - os: linux - arch: amd64 - dist: xenial - go: 1.13.x - script: - - go run build/ci.go install - - go run build/ci.go test -coverage $TEST_PACKAGES - - - stage: build - if: type = pull_request - os: linux - arch: arm64 - dist: xenial - go: 1.13.x - script: - - go run build/ci.go install - - go run build/ci.go test -coverage $TEST_PACKAGES - - - stage: build - os: osx - osx_image: xcode11.3 - go: 1.13.x - script: - - echo "Increase the maximum number of open file descriptors on macOS" - - NOFILE=20480 - - sudo sysctl -w kern.maxfiles=$NOFILE - - sudo sysctl -w kern.maxfilesperproc=$NOFILE - - sudo launchctl limit maxfiles $NOFILE $NOFILE - - sudo launchctl limit maxfiles - - ulimit -S -n $NOFILE - - ulimit -n - - unset -f cd # workaround for https://github.com/travis-ci/travis-ci/issues/8703 - - go run build/ci.go install - - go run build/ci.go test -coverage $TEST_PACKAGES - - # This builder does the Ubuntu PPA upload - - stage: build - if: type = push - os: linux - dist: xenial - go: 1.13.x - env: - - ubuntu-ppa - git: - submodules: false # avoid cloning ethereum/tests - addons: - apt: - packages: - - devscripts - - debhelper - - dput - - fakeroot - - python-bzrlib - - python-paramiko - script: - - echo '|1|7SiYPr9xl3uctzovOTj4gMwAC1M=|t6ReES75Bo/PxlOPJ6/GsGbTrM0= ssh-rsa AAAAB3NzaC1yc2EAAAABIwAAAQEA0aKz5UTUndYgIGG7dQBV+HaeuEZJ2xPHo2DS2iSKvUL4xNMSAY4UguNW+pX56nAQmZKIZZ8MaEvSj6zMEDiq6HFfn5JcTlM80UwlnyKe8B8p7Nk06PPQLrnmQt5fh0HmEcZx+JU9TZsfCHPnX7MNz4ELfZE6cFsclClrKim3BHUIGq//t93DllB+h4O9LHjEUsQ1Sr63irDLSutkLJD6RXchjROXkNirlcNVHH/jwLWR5RcYilNX7S5bIkK8NlWPjsn/8Ua5O7I9/YoE97PpO6i73DTGLh5H9JN/SITwCKBkgSDWUt61uPK3Y11Gty7o2lWsBjhBUm2Y38CBsoGmBw==' >> ~/.ssh/known_hosts - - go run build/ci.go debsrc -goversion 1.13.8 -upload ethereum/ethereum -sftp-user geth-ci -signer "Go Ethereum Linux Builder " - - # This builder does the Linux Azure uploads - - stage: build - if: type = push - os: linux - dist: xenial - sudo: required - go: 1.13.x - env: - - azure-linux 
- git: - submodules: false # avoid cloning ethereum/tests - addons: - apt: - packages: - - gcc-multilib - script: - # Build for the primary platforms that Trusty can manage - - go run build/ci.go install - - go run build/ci.go archive -type tar -signer LINUX_SIGNING_KEY -upload gethstore/builds - - go run build/ci.go install -arch 386 - - go run build/ci.go archive -arch 386 -type tar -signer LINUX_SIGNING_KEY -upload gethstore/builds - - # Switch over GCC to cross compilation (breaks 386, hence why do it here only) - - sudo -E apt-get -yq --no-install-suggests --no-install-recommends --force-yes install gcc-arm-linux-gnueabi libc6-dev-armel-cross gcc-arm-linux-gnueabihf libc6-dev-armhf-cross gcc-aarch64-linux-gnu libc6-dev-arm64-cross - - sudo ln -s /usr/include/asm-generic /usr/include/asm - - - GOARM=5 go run build/ci.go install -arch arm -cc arm-linux-gnueabi-gcc - - GOARM=5 go run build/ci.go archive -arch arm -type tar -signer LINUX_SIGNING_KEY -upload gethstore/builds - - GOARM=6 go run build/ci.go install -arch arm -cc arm-linux-gnueabi-gcc - - GOARM=6 go run build/ci.go archive -arch arm -type tar -signer LINUX_SIGNING_KEY -upload gethstore/builds - - GOARM=7 go run build/ci.go install -arch arm -cc arm-linux-gnueabihf-gcc - - GOARM=7 go run build/ci.go archive -arch arm -type tar -signer LINUX_SIGNING_KEY -upload gethstore/builds - - go run build/ci.go install -arch arm64 -cc aarch64-linux-gnu-gcc - - go run build/ci.go archive -arch arm64 -type tar -signer LINUX_SIGNING_KEY -upload gethstore/builds - - # This builder does the Linux Azure MIPS xgo uploads - - stage: build - if: type = push - os: linux - dist: xenial - services: - - docker - go: 1.13.x - env: - - azure-linux-mips - git: - submodules: false # avoid cloning ethereum/tests - script: - - go run build/ci.go xgo --alltools -- --targets=linux/mips --ldflags '-extldflags "-static"' -v - - for bin in build/bin/*-linux-mips; do mv -f "${bin}" "${bin/-linux-mips/}"; done - - go run build/ci.go 
archive -arch mips -type tar -signer LINUX_SIGNING_KEY -upload gethstore/builds - - - go run build/ci.go xgo --alltools -- --targets=linux/mipsle --ldflags '-extldflags "-static"' -v - - for bin in build/bin/*-linux-mipsle; do mv -f "${bin}" "${bin/-linux-mipsle/}"; done - - go run build/ci.go archive -arch mipsle -type tar -signer LINUX_SIGNING_KEY -upload gethstore/builds - - - go run build/ci.go xgo --alltools -- --targets=linux/mips64 --ldflags '-extldflags "-static"' -v - - for bin in build/bin/*-linux-mips64; do mv -f "${bin}" "${bin/-linux-mips64/}"; done - - go run build/ci.go archive -arch mips64 -type tar -signer LINUX_SIGNING_KEY -upload gethstore/builds - - - go run build/ci.go xgo --alltools -- --targets=linux/mips64le --ldflags '-extldflags "-static"' -v - - for bin in build/bin/*-linux-mips64le; do mv -f "${bin}" "${bin/-linux-mips64le/}"; done - - go run build/ci.go archive -arch mips64le -type tar -signer LINUX_SIGNING_KEY -upload gethstore/builds - - # This builder does the Android Maven and Azure uploads - - stage: build - if: type = push - os: linux - dist: xenial - addons: - apt: - packages: - - oracle-java8-installer - - oracle-java8-set-default - language: android - android: - components: - - platform-tools - - tools - - android-15 - - android-19 - - android-24 - env: - - azure-android - - maven-android - git: - submodules: false # avoid cloning ethereum/tests - before_install: - - curl https://dl.google.com/go/go1.13.8.linux-amd64.tar.gz | tar -xz - - export PATH=`pwd`/go/bin:$PATH - - export GOROOT=`pwd`/go - - export GOPATH=$HOME/go - script: - # Build the Android archive and upload it to Maven Central and Azure - - curl https://dl.google.com/android/repository/android-ndk-r19b-linux-x86_64.zip -o android-ndk-r19b.zip - - unzip -q android-ndk-r19b.zip && rm android-ndk-r19b.zip - - mv android-ndk-r19b $ANDROID_HOME/ndk-bundle - - - mkdir -p $GOPATH/src/github.com/ethereum - - ln -s `pwd` $GOPATH/src/github.com/ethereum/go-ethereum - - go 
run build/ci.go aar -signer ANDROID_SIGNING_KEY -deploy https://oss.sonatype.org -upload gethstore/builds - - # This builder does the OSX Azure, iOS CocoaPods and iOS Azure uploads - - stage: build - if: type = push - os: osx - go: 1.13.x - env: - - azure-osx - - azure-ios - - cocoapods-ios - git: - submodules: false # avoid cloning ethereum/tests - script: - - go run build/ci.go install - - go run build/ci.go archive -type tar -signer OSX_SIGNING_KEY -upload gethstore/builds - - # Build the iOS framework and upload it to CocoaPods and Azure - - gem uninstall cocoapods -a -x - - gem install cocoapods - - - mv ~/.cocoapods/repos/master ~/.cocoapods/repos/master.bak - - sed -i '.bak' 's/repo.join/!repo.join/g' $(dirname `gem which cocoapods`)/cocoapods/sources_manager.rb - - if [ "$TRAVIS_PULL_REQUEST" = "false" ]; then git clone --depth=1 https://github.com/CocoaPods/Specs.git ~/.cocoapods/repos/master && pod setup --verbose; fi - - - xctool -version - - xcrun simctl list - - # Workaround for https://github.com/golang/go/issues/23749 - - export CGO_CFLAGS_ALLOW='-fmodules|-fblocks|-fobjc-arc' - - go run build/ci.go xcode -signer IOS_SIGNING_KEY -deploy trunk -upload gethstore/builds - - # This builder does the Azure archive purges to avoid accumulating junk - - stage: build - if: type = cron - os: linux - dist: xenial - go: 1.13.x - env: - - azure-purge - git: - submodules: false # avoid cloning ethereum/tests - script: - - go run build/ci.go purge -store gethstore/builds -days 14 diff --git a/vendor/github.com/ethereum/go-ethereum/Dockerfile b/vendor/github.com/ethereum/go-ethereum/Dockerfile deleted file mode 100644 index 114e762..0000000 --- a/vendor/github.com/ethereum/go-ethereum/Dockerfile +++ /dev/null @@ -1,16 +0,0 @@ -# Build Geth in a stock Go builder container -FROM golang:1.13-alpine as builder - -RUN apk add --no-cache make gcc musl-dev linux-headers git - -ADD . 
/go-ethereum -RUN cd /go-ethereum && make geth - -# Pull Geth into a second stage deploy alpine container -FROM alpine:latest - -RUN apk add --no-cache ca-certificates -COPY --from=builder /go-ethereum/build/bin/geth /usr/local/bin/ - -EXPOSE 8545 8546 8547 30303 30303/udp -ENTRYPOINT ["geth"] diff --git a/vendor/github.com/ethereum/go-ethereum/Dockerfile.alltools b/vendor/github.com/ethereum/go-ethereum/Dockerfile.alltools deleted file mode 100644 index 2f661ba..0000000 --- a/vendor/github.com/ethereum/go-ethereum/Dockerfile.alltools +++ /dev/null @@ -1,15 +0,0 @@ -# Build Geth in a stock Go builder container -FROM golang:1.13-alpine as builder - -RUN apk add --no-cache make gcc musl-dev linux-headers git - -ADD . /go-ethereum -RUN cd /go-ethereum && make all - -# Pull all binaries into a second stage deploy alpine container -FROM alpine:latest - -RUN apk add --no-cache ca-certificates -COPY --from=builder /go-ethereum/build/bin/* /usr/local/bin/ - -EXPOSE 8545 8546 8547 30303 30303/udp diff --git a/vendor/github.com/ethereum/go-ethereum/Makefile b/vendor/github.com/ethereum/go-ethereum/Makefile deleted file mode 100644 index 67095f4..0000000 --- a/vendor/github.com/ethereum/go-ethereum/Makefile +++ /dev/null @@ -1,146 +0,0 @@ -# This Makefile is meant to be used by people that do not usually work -# with Go source code. If you know what GOPATH is then you probably -# don't need to bother with make. - -.PHONY: geth android ios geth-cross evm all test clean -.PHONY: geth-linux geth-linux-386 geth-linux-amd64 geth-linux-mips64 geth-linux-mips64le -.PHONY: geth-linux-arm geth-linux-arm-5 geth-linux-arm-6 geth-linux-arm-7 geth-linux-arm64 -.PHONY: geth-darwin geth-darwin-386 geth-darwin-amd64 -.PHONY: geth-windows geth-windows-386 geth-windows-amd64 - -GOBIN = ./build/bin -GO ?= latest -GORUN = env GO111MODULE=on go run - -geth: - $(GORUN) build/ci.go install ./cmd/geth - @echo "Done building." - @echo "Run \"$(GOBIN)/geth\" to launch geth." 
- -all: - $(GORUN) build/ci.go install - -android: - $(GORUN) build/ci.go aar --local - @echo "Done building." - @echo "Import \"$(GOBIN)/geth.aar\" to use the library." - -ios: - $(GORUN) build/ci.go xcode --local - @echo "Done building." - @echo "Import \"$(GOBIN)/Geth.framework\" to use the library." - -test: all - $(GORUN) build/ci.go test - -lint: ## Run linters. - $(GORUN) build/ci.go lint - -clean: - env GO111MODULE=on go clean -cache - rm -fr build/_workspace/pkg/ $(GOBIN)/* - -# The devtools target installs tools required for 'go generate'. -# You need to put $GOBIN (or $GOPATH/bin) in your PATH to use 'go generate'. - -devtools: - env GOBIN= go get -u golang.org/x/tools/cmd/stringer - env GOBIN= go get -u github.com/kevinburke/go-bindata/go-bindata - env GOBIN= go get -u github.com/fjl/gencodec - env GOBIN= go get -u github.com/golang/protobuf/protoc-gen-go - env GOBIN= go install ./cmd/abigen - @type "npm" 2> /dev/null || echo 'Please install node.js and npm' - @type "solc" 2> /dev/null || echo 'Please install solc' - @type "protoc" 2> /dev/null || echo 'Please install protoc' - -# Cross Compilation Targets (xgo) - -geth-cross: geth-linux geth-darwin geth-windows geth-android geth-ios - @echo "Full cross compilation done:" - @ls -ld $(GOBIN)/geth-* - -geth-linux: geth-linux-386 geth-linux-amd64 geth-linux-arm geth-linux-mips64 geth-linux-mips64le - @echo "Linux cross compilation done:" - @ls -ld $(GOBIN)/geth-linux-* - -geth-linux-386: - $(GORUN) build/ci.go xgo -- --go=$(GO) --targets=linux/386 -v ./cmd/geth - @echo "Linux 386 cross compilation done:" - @ls -ld $(GOBIN)/geth-linux-* | grep 386 - -geth-linux-amd64: - $(GORUN) build/ci.go xgo -- --go=$(GO) --targets=linux/amd64 -v ./cmd/geth - @echo "Linux amd64 cross compilation done:" - @ls -ld $(GOBIN)/geth-linux-* | grep amd64 - -geth-linux-arm: geth-linux-arm-5 geth-linux-arm-6 geth-linux-arm-7 geth-linux-arm64 - @echo "Linux ARM cross compilation done:" - @ls -ld $(GOBIN)/geth-linux-* | grep arm - 
-geth-linux-arm-5: - $(GORUN) build/ci.go xgo -- --go=$(GO) --targets=linux/arm-5 -v ./cmd/geth - @echo "Linux ARMv5 cross compilation done:" - @ls -ld $(GOBIN)/geth-linux-* | grep arm-5 - -geth-linux-arm-6: - $(GORUN) build/ci.go xgo -- --go=$(GO) --targets=linux/arm-6 -v ./cmd/geth - @echo "Linux ARMv6 cross compilation done:" - @ls -ld $(GOBIN)/geth-linux-* | grep arm-6 - -geth-linux-arm-7: - $(GORUN) build/ci.go xgo -- --go=$(GO) --targets=linux/arm-7 -v ./cmd/geth - @echo "Linux ARMv7 cross compilation done:" - @ls -ld $(GOBIN)/geth-linux-* | grep arm-7 - -geth-linux-arm64: - $(GORUN) build/ci.go xgo -- --go=$(GO) --targets=linux/arm64 -v ./cmd/geth - @echo "Linux ARM64 cross compilation done:" - @ls -ld $(GOBIN)/geth-linux-* | grep arm64 - -geth-linux-mips: - $(GORUN) build/ci.go xgo -- --go=$(GO) --targets=linux/mips --ldflags '-extldflags "-static"' -v ./cmd/geth - @echo "Linux MIPS cross compilation done:" - @ls -ld $(GOBIN)/geth-linux-* | grep mips - -geth-linux-mipsle: - $(GORUN) build/ci.go xgo -- --go=$(GO) --targets=linux/mipsle --ldflags '-extldflags "-static"' -v ./cmd/geth - @echo "Linux MIPSle cross compilation done:" - @ls -ld $(GOBIN)/geth-linux-* | grep mipsle - -geth-linux-mips64: - $(GORUN) build/ci.go xgo -- --go=$(GO) --targets=linux/mips64 --ldflags '-extldflags "-static"' -v ./cmd/geth - @echo "Linux MIPS64 cross compilation done:" - @ls -ld $(GOBIN)/geth-linux-* | grep mips64 - -geth-linux-mips64le: - $(GORUN) build/ci.go xgo -- --go=$(GO) --targets=linux/mips64le --ldflags '-extldflags "-static"' -v ./cmd/geth - @echo "Linux MIPS64le cross compilation done:" - @ls -ld $(GOBIN)/geth-linux-* | grep mips64le - -geth-darwin: geth-darwin-386 geth-darwin-amd64 - @echo "Darwin cross compilation done:" - @ls -ld $(GOBIN)/geth-darwin-* - -geth-darwin-386: - $(GORUN) build/ci.go xgo -- --go=$(GO) --targets=darwin/386 -v ./cmd/geth - @echo "Darwin 386 cross compilation done:" - @ls -ld $(GOBIN)/geth-darwin-* | grep 386 - -geth-darwin-amd64: - 
$(GORUN) build/ci.go xgo -- --go=$(GO) --targets=darwin/amd64 -v ./cmd/geth - @echo "Darwin amd64 cross compilation done:" - @ls -ld $(GOBIN)/geth-darwin-* | grep amd64 - -geth-windows: geth-windows-386 geth-windows-amd64 - @echo "Windows cross compilation done:" - @ls -ld $(GOBIN)/geth-windows-* - -geth-windows-386: - $(GORUN) build/ci.go xgo -- --go=$(GO) --targets=windows/386 -v ./cmd/geth - @echo "Windows 386 cross compilation done:" - @ls -ld $(GOBIN)/geth-windows-* | grep 386 - -geth-windows-amd64: - $(GORUN) build/ci.go xgo -- --go=$(GO) --targets=windows/amd64 -v ./cmd/geth - @echo "Windows amd64 cross compilation done:" - @ls -ld $(GOBIN)/geth-windows-* | grep amd64 diff --git a/vendor/github.com/ethereum/go-ethereum/README.md b/vendor/github.com/ethereum/go-ethereum/README.md deleted file mode 100644 index 72479b5..0000000 --- a/vendor/github.com/ethereum/go-ethereum/README.md +++ /dev/null @@ -1,348 +0,0 @@ -## Go Ethereum - -Official Golang implementation of the Ethereum protocol. - -[![API Reference]( -https://camo.githubusercontent.com/915b7be44ada53c290eb157634330494ebe3e30a/68747470733a2f2f676f646f632e6f72672f6769746875622e636f6d2f676f6c616e672f6764646f3f7374617475732e737667 -)](https://godoc.org/github.com/ethereum/go-ethereum) -[![Go Report Card](https://goreportcard.com/badge/github.com/ethereum/go-ethereum)](https://goreportcard.com/report/github.com/ethereum/go-ethereum) -[![Travis](https://travis-ci.org/ethereum/go-ethereum.svg?branch=master)](https://travis-ci.org/ethereum/go-ethereum) -[![Discord](https://img.shields.io/badge/discord-join%20chat-blue.svg)](https://discord.gg/nthXNEv) - -Automated builds are available for stable releases and the unstable master branch. Binary -archives are published at https://geth.ethereum.org/downloads/. - -## Building the source - -For prerequisites and detailed build instructions please read the [Installation Instructions](https://github.com/ethereum/go-ethereum/wiki/Building-Ethereum) on the wiki. 
- -Building `geth` requires both a Go (version 1.10 or later) and a C compiler. You can install -them using your favourite package manager. Once the dependencies are installed, run - -```shell -make geth -``` - -or, to build the full suite of utilities: - -```shell -make all -``` - -## Executables - -The go-ethereum project comes with several wrappers/executables found in the `cmd` -directory. - -| Command | Description | -| :-----------: | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -| **`geth`** | Our main Ethereum CLI client. It is the entry point into the Ethereum network (main-, test- or private net), capable of running as a full node (default), archive node (retaining all historical state) or a light node (retrieving data live). It can be used by other processes as a gateway into the Ethereum network via JSON RPC endpoints exposed on top of HTTP, WebSocket and/or IPC transports. `geth --help` and the [CLI Wiki page](https://github.com/ethereum/go-ethereum/wiki/Command-Line-Options) for command line options. | -| `abigen` | Source code generator to convert Ethereum contract definitions into easy to use, compile-time type-safe Go packages. It operates on plain [Ethereum contract ABIs](https://github.com/ethereum/wiki/wiki/Ethereum-Contract-ABI) with expanded functionality if the contract bytecode is also available. However, it also accepts Solidity source files, making development much more streamlined. 
Please see our [Native DApps](https://github.com/ethereum/go-ethereum/wiki/Native-DApps:-Go-bindings-to-Ethereum-contracts) wiki page for details. | -| `bootnode` | Stripped down version of our Ethereum client implementation that only takes part in the network node discovery protocol, but does not run any of the higher level application protocols. It can be used as a lightweight bootstrap node to aid in finding peers in private networks. | -| `evm` | Developer utility version of the EVM (Ethereum Virtual Machine) that is capable of running bytecode snippets within a configurable environment and execution mode. Its purpose is to allow isolated, fine-grained debugging of EVM opcodes (e.g. `evm --code 60ff60ff --debug run`). | -| `gethrpctest` | Developer utility tool to support our [ethereum/rpc-test](https://github.com/ethereum/rpc-tests) test suite which validates baseline conformity to the [Ethereum JSON RPC](https://github.com/ethereum/wiki/wiki/JSON-RPC) specs. Please see the [test suite's readme](https://github.com/ethereum/rpc-tests/blob/master/README.md) for details. | -| `rlpdump` | Developer utility tool to convert binary RLP ([Recursive Length Prefix](https://github.com/ethereum/wiki/wiki/RLP)) dumps (data encoding used by the Ethereum protocol both network as well as consensus wise) to user-friendlier hierarchical representation (e.g. `rlpdump --hex CE0183FFFFFFC4C304050583616263`). | -| `puppeth` | a CLI wizard that aids in creating a new Ethereum network. | - -## Running `geth` - -Going through all the possible command line flags is out of scope here (please consult our -[CLI Wiki page](https://github.com/ethereum/go-ethereum/wiki/Command-Line-Options)), -but we've enumerated a few common parameter combos to get you up to speed quickly -on how you can run your own `geth` instance. 
- -### Full node on the main Ethereum network - -By far the most common scenario is people wanting to simply interact with the Ethereum -network: create accounts; transfer funds; deploy and interact with contracts. For this -particular use-case the user doesn't care about years-old historical data, so we can -fast-sync quickly to the current state of the network. To do so: - -```shell -$ geth console -``` - -This command will: - * Start `geth` in fast sync mode (default, can be changed with the `--syncmode` flag), - causing it to download more data in exchange for avoiding processing the entire history - of the Ethereum network, which is very CPU intensive. - * Start up `geth`'s built-in interactive [JavaScript console](https://github.com/ethereum/go-ethereum/wiki/JavaScript-Console), - (via the trailing `console` subcommand) through which you can invoke all official [`web3` methods](https://github.com/ethereum/wiki/wiki/JavaScript-API) - as well as `geth`'s own [management APIs](https://github.com/ethereum/go-ethereum/wiki/Management-APIs). - This tool is optional and if you leave it out you can always attach to an already running - `geth` instance with `geth attach`. - -### A Full node on the Ethereum test network - -Transitioning towards developers, if you'd like to play around with creating Ethereum -contracts, you almost certainly would like to do that without any real money involved until -you get the hang of the entire system. In other words, instead of attaching to the main -network, you want to join the **test** network with your node, which is fully equivalent to -the main network, but with play-Ether only. - -```shell -$ geth --testnet console -``` - -The `console` subcommand has the exact same meaning as above and they are equally -useful on the testnet too. Please see above for their explanations if you've skipped here. 
- -Specifying the `--testnet` flag, however, will reconfigure your `geth` instance a bit: - - * Instead of using the default data directory (`~/.ethereum` on Linux for example), `geth` - will nest itself one level deeper into a `testnet` subfolder (`~/.ethereum/testnet` on - Linux). Note, on OSX and Linux this also means that attaching to a running testnet node - requires the use of a custom endpoint since `geth attach` will try to attach to a - production node endpoint by default. E.g. - `geth attach /testnet/geth.ipc`. Windows users are not affected by - this. - * Instead of connecting the main Ethereum network, the client will connect to the test - network, which uses different P2P bootnodes, different network IDs and genesis states. - -*Note: Although there are some internal protective measures to prevent transactions from -crossing over between the main network and test network, you should make sure to always -use separate accounts for play-money and real-money. Unless you manually move -accounts, `geth` will by default correctly separate the two networks and will not make any -accounts available between them.* - -### Full node on the Rinkeby test network - -The above test network is a cross-client one based on the ethash proof-of-work consensus -algorithm. As such, it has certain extra overhead and is more susceptible to reorganization -attacks due to the network's low difficulty/security. Go Ethereum also supports connecting -to a proof-of-authority based test network called [*Rinkeby*](https://www.rinkeby.io) -(operated by members of the community). This network is lighter, more secure, but is only -supported by go-ethereum. 
- -```shell -$ geth --rinkeby console -``` - -### Configuration - -As an alternative to passing the numerous flags to the `geth` binary, you can also pass a -configuration file via: - -```shell -$ geth --config /path/to/your_config.toml -``` - -To get an idea how the file should look like you can use the `dumpconfig` subcommand to -export your existing configuration: - -```shell -$ geth --your-favourite-flags dumpconfig -``` - -*Note: This works only with `geth` v1.6.0 and above.* - -#### Docker quick start - -One of the quickest ways to get Ethereum up and running on your machine is by using -Docker: - -```shell -docker run -d --name ethereum-node -v /Users/alice/ethereum:/root \ - -p 8545:8545 -p 30303:30303 \ - ethereum/client-go -``` - -This will start `geth` in fast-sync mode with a DB memory allowance of 1GB just as the -above command does. It will also create a persistent volume in your home directory for -saving your blockchain as well as map the default ports. There is also an `alpine` tag -available for a slim version of the image. - -Do not forget `--rpcaddr 0.0.0.0`, if you want to access RPC from other containers -and/or hosts. By default, `geth` binds to the local interface and RPC endpoints is not -accessible from the outside. - -### Programmatically interfacing `geth` nodes - -As a developer, sooner rather than later you'll want to start interacting with `geth` and the -Ethereum network via your own programs and not manually through the console. To aid -this, `geth` has built-in support for a JSON-RPC based APIs ([standard APIs](https://github.com/ethereum/wiki/wiki/JSON-RPC) -and [`geth` specific APIs](https://github.com/ethereum/go-ethereum/wiki/Management-APIs)). -These can be exposed via HTTP, WebSockets and IPC (UNIX sockets on UNIX based -platforms, and named pipes on Windows). 
- -The IPC interface is enabled by default and exposes all the APIs supported by `geth`, -whereas the HTTP and WS interfaces need to manually be enabled and only expose a -subset of APIs due to security reasons. These can be turned on/off and configured as -you'd expect. - -HTTP based JSON-RPC API options: - - * `--rpc` Enable the HTTP-RPC server - * `--rpcaddr` HTTP-RPC server listening interface (default: `localhost`) - * `--rpcport` HTTP-RPC server listening port (default: `8545`) - * `--rpcapi` API's offered over the HTTP-RPC interface (default: `eth,net,web3`) - * `--rpccorsdomain` Comma separated list of domains from which to accept cross origin requests (browser enforced) - * `--ws` Enable the WS-RPC server - * `--wsaddr` WS-RPC server listening interface (default: `localhost`) - * `--wsport` WS-RPC server listening port (default: `8546`) - * `--wsapi` API's offered over the WS-RPC interface (default: `eth,net,web3`) - * `--wsorigins` Origins from which to accept websockets requests - * `--ipcdisable` Disable the IPC-RPC server - * `--ipcapi` API's offered over the IPC-RPC interface (default: `admin,debug,eth,miner,net,personal,shh,txpool,web3`) - * `--ipcpath` Filename for IPC socket/pipe within the datadir (explicit paths escape it) - -You'll need to use your own programming environments' capabilities (libraries, tools, etc) to -connect via HTTP, WS or IPC to a `geth` node configured with the above flags and you'll -need to speak [JSON-RPC](https://www.jsonrpc.org/specification) on all transports. You -can reuse the same connection for multiple requests! - -**Note: Please understand the security implications of opening up an HTTP/WS based -transport before doing so! Hackers on the internet are actively trying to subvert -Ethereum nodes with exposed APIs! 
Further, all browser tabs can access locally -running web servers, so malicious web pages could try to subvert locally available -APIs!** - -### Operating a private network - -Maintaining your own private network is more involved as a lot of configurations taken for -granted in the official networks need to be manually set up. - -#### Defining the private genesis state - -First, you'll need to create the genesis state of your networks, which all nodes need to be -aware of and agree upon. This consists of a small JSON file (e.g. call it `genesis.json`): - -```json -{ - "config": { - "chainId": , - "homesteadBlock": 0, - "eip150Block": 0, - "eip155Block": 0, - "eip158Block": 0, - "byzantiumBlock": 0, - "constantinopleBlock": 0, - "petersburgBlock": 0 - }, - "alloc": {}, - "coinbase": "0x0000000000000000000000000000000000000000", - "difficulty": "0x20000", - "extraData": "", - "gasLimit": "0x2fefd8", - "nonce": "0x0000000000000042", - "mixhash": "0x0000000000000000000000000000000000000000000000000000000000000000", - "parentHash": "0x0000000000000000000000000000000000000000000000000000000000000000", - "timestamp": "0x00" -} -``` - -The above fields should be fine for most purposes, although we'd recommend changing -the `nonce` to some random value so you prevent unknown remote nodes from being able -to connect to you. If you'd like to pre-fund some accounts for easier testing, create -the accounts and populate the `alloc` field with their addresses. 
- -```json -"alloc": { - "0x0000000000000000000000000000000000000001": { - "balance": "111111111" - }, - "0x0000000000000000000000000000000000000002": { - "balance": "222222222" - } -} -``` - -With the genesis state defined in the above JSON file, you'll need to initialize **every** -`geth` node with it prior to starting it up to ensure all blockchain parameters are correctly -set: - -```shell -$ geth init path/to/genesis.json -``` - -#### Creating the rendezvous point - -With all nodes that you want to run initialized to the desired genesis state, you'll need to -start a bootstrap node that others can use to find each other in your network and/or over -the internet. The clean way is to configure and run a dedicated bootnode: - -```shell -$ bootnode --genkey=boot.key -$ bootnode --nodekey=boot.key -``` - -With the bootnode online, it will display an [`enode` URL](https://github.com/ethereum/wiki/wiki/enode-url-format) -that other nodes can use to connect to it and exchange peer information. Make sure to -replace the displayed IP address information (most probably `[::]`) with your externally -accessible IP to get the actual `enode` URL. - -*Note: You could also use a full-fledged `geth` node as a bootnode, but it's the less -recommended way.* - -#### Starting up your member nodes - -With the bootnode operational and externally reachable (you can try -`telnet ` to ensure it's indeed reachable), start every subsequent `geth` -node pointed to the bootnode for peer discovery via the `--bootnodes` flag. It will -probably also be desirable to keep the data directory of your private network separated, so -do also specify a custom `--datadir` flag. 
- -```shell -$ geth --datadir=path/to/custom/data/folder --bootnodes= -``` - -*Note: Since your network will be completely cut off from the main and test networks, you'll -also need to configure a miner to process transactions and create new blocks for you.* - -#### Running a private miner - -Mining on the public Ethereum network is a complex task as it's only feasible using GPUs, -requiring an OpenCL or CUDA enabled `ethminer` instance. For information on such a -setup, please consult the [EtherMining subreddit](https://www.reddit.com/r/EtherMining/) -and the [ethminer](https://github.com/ethereum-mining/ethminer) repository. - -In a private network setting, however a single CPU miner instance is more than enough for -practical purposes as it can produce a stable stream of blocks at the correct intervals -without needing heavy resources (consider running on a single thread, no need for multiple -ones either). To start a `geth` instance for mining, run it with all your usual flags, extended -by: - -```shell -$ geth --mine --miner.threads=1 --etherbase=0x0000000000000000000000000000000000000000 -``` - -Which will start mining blocks and transactions on a single CPU thread, crediting all -proceedings to the account specified by `--etherbase`. You can further tune the mining -by changing the default gas limit blocks converge to (`--targetgaslimit`) and the price -transactions are accepted at (`--gasprice`). - -## Contribution - -Thank you for considering to help out with the source code! We welcome contributions -from anyone on the internet, and are grateful for even the smallest of fixes! - -If you'd like to contribute to go-ethereum, please fork, fix, commit and send a pull request -for the maintainers to review and merge into the main code base. 
If you wish to submit -more complex changes though, please check up with the core devs first on [our gitter channel](https://gitter.im/ethereum/go-ethereum) -to ensure those changes are in line with the general philosophy of the project and/or get -some early feedback which can make both your efforts much lighter as well as our review -and merge procedures quick and simple. - -Please make sure your contributions adhere to our coding guidelines: - - * Code must adhere to the official Go [formatting](https://golang.org/doc/effective_go.html#formatting) - guidelines (i.e. uses [gofmt](https://golang.org/cmd/gofmt/)). - * Code must be documented adhering to the official Go [commentary](https://golang.org/doc/effective_go.html#commentary) - guidelines. - * Pull requests need to be based on and opened against the `master` branch. - * Commit messages should be prefixed with the package(s) they modify. - * E.g. "eth, rpc: make trace configs optional" - -Please see the [Developers' Guide](https://github.com/ethereum/go-ethereum/wiki/Developers'-Guide) -for more details on configuring your environment, managing project dependencies, and -testing procedures. - -## License - -The go-ethereum library (i.e. all code outside of the `cmd` directory) is licensed under the -[GNU Lesser General Public License v3.0](https://www.gnu.org/licenses/lgpl-3.0.en.html), -also included in our repository in the `COPYING.LESSER` file. - -The go-ethereum binaries (i.e. all code inside of the `cmd` directory) is licensed under the -[GNU General Public License v3.0](https://www.gnu.org/licenses/gpl-3.0.en.html), also -included in our repository in the `COPYING` file. diff --git a/vendor/github.com/ethereum/go-ethereum/SECURITY.md b/vendor/github.com/ethereum/go-ethereum/SECURITY.md deleted file mode 100644 index bc54ede..0000000 --- a/vendor/github.com/ethereum/go-ethereum/SECURITY.md +++ /dev/null @@ -1,120 +0,0 @@ -# Security Policy - -## Supported Versions - -Please see Releases. 
We recommend to use the most recent released version. - -## Audit reports - -Audit reports are published in the `docs` folder: https://github.com/ethereum/go-ethereum/tree/master/docs/audits - - -| Scope | Date | Report Link | -| ------- | ------- | ----------- | -| `geth` | 20170425 | [pdf](https://github.com/ethereum/go-ethereum/blob/master/docs/audits/2017-04-25_Geth-audit_Truesec.pdf) | -| `clef` | 20180914 | [pdf](https://github.com/ethereum/go-ethereum/blob/master/docs/audits/2018-09-14_Clef-audit_NCC.pdf) | - - - -## Reporting a Vulnerability - -**Please do not file a public ticket** mentioning the vulnerability. - -To find out how to disclose a vulnerability in Ethereum visit [https://bounty.ethereum.org](https://bounty.ethereum.org) or email bounty@ethereum.org. - -The following key may be used to communicate sensitive information to developers. - -Fingerprint: `AE96 ED96 9E47 9B00 84F3 E17F E88D 3334 FA5F 6A0A` - - -``` ------BEGIN PGP PUBLIC KEY BLOCK----- -Version: GnuPG v1 - -mQINBFgl3tgBEAC8A1tUBkD9YV+eLrOmtgy+/JS/H9RoZvkg3K1WZ8IYfj6iIRaY -neAk3Bp182GUPVz/zhKr2g0tMXIScDR3EnaDsY+Qg+JqQl8NOG+Cikr1nnkG2on9 -L8c8yiqry1ZTCmYMqCa2acTFqnyuXJ482aZNtB4QG2BpzfhW4k8YThpegk/EoRUi -m+y7buJDtoNf7YILlhDQXN8qlHB02DWOVUihph9tUIFsPK6BvTr9SIr/eG6j6k0b -fUo9pexOn7LS4SojoJmsm/5dp6AoKlac48cZU5zwR9AYcq/nvkrfmf2WkObg/xRd -EvKZzn05jRopmAIwmoC3CiLmqCHPmT5a29vEob/yPFE335k+ujjZCPOu7OwjzDk7 -M0zMSfnNfDq8bXh16nn+ueBxJ0NzgD1oC6c2PhM+XRQCXChoyI8vbfp4dGvCvYqv -QAE1bWjqnumZ/7vUPgZN6gDfiAzG2mUxC2SeFBhacgzDvtQls+uuvm+FnQOUgg2H -h8x2zgoZ7kqV29wjaUPFREuew7e+Th5BxielnzOfVycVXeSuvvIn6cd3g/s8mX1c -2kLSXJR7+KdWDrIrR5Az0kwAqFZt6B6QTlDrPswu3mxsm5TzMbny0PsbL/HBM+GZ -EZCjMXxB8bqV2eSaktjnSlUNX1VXxyOxXA+ZG2jwpr51egi57riVRXokrQARAQAB -tDlFdGhlcmV1bSBGb3VuZGF0aW9uIFNlY3VyaXR5IFRlYW0gPHNlY3VyaXR5QGV0 -aGVyZXVtLm9yZz6JAj4EEwECACgCGwMGCwkIBwMCBhUIAgkKCwQWAgMBAh4BAheA -BQJaCWH6BQkFo2BYAAoJEOiNMzT6X2oK+DEP/3H6dxkm0hvHZKoHLVuuxcu3EHYo -k5sd3MMWPrZSN8qzZnY7ayEDMxnarWOizc+2jfOxfJlzX/g8lR1/fsHdWPFPhPoV 
-Qk8ygrHn1H8U8+rpw/U03BqmqHpYCDzJ+CIis9UWROniqXw1nuqu/FtWOsdWxNKh -jUo6k/0EsaXsxRPzgJv7fEUcVcQ7as/C3x9sy3muc2gvgA4/BKoGPb1/U0GuA8lV -fDIDshAggmnSUAg+TuYSAAdoFQ1sKwFMPigcLJF2eyKuK3iUyixJrec/c4LSf3wA -cGghbeuqI8INP0Y2zvXDQN2cByxsFAuoZG+m0cyKGaDH2MVUvOKKYqn/03qvrf15 -AWAsW0l0yQwOTCo3FbsNzemClm5Bj/xH0E4XuwXwChcMCMOWJrFoxyvCEI+keoQc -c08/a8/MtS7vBAABXwOziSmm6CNqmzpWrh/fDrjlJlba9U3MxzvqU3IFlTdMratv -6V+SgX+L25lCzW4NxxUavoB8fAlvo8lxpHKo24FP+RcLQ8XqkU3RiUsgRjQRFOqQ -TaJcsp8mimmiYyf24mNu6b48pi+a5c/eQR9w59emeEUZqsJU+nqv8BWIIp7o4Agh -NYnKjkhPlY5e1fLVfAHIADZFynWwRPkPMJSrBiP5EtcOFxQGHGjRxU/KjXkvE0hV -xYb1PB8pWMTu/beeiQI+BBMBAgAoBQJYJd7YAhsDBQkB4TOABgsJCAcDAgYVCAIJ -CgsEFgIDAQIeAQIXgAAKCRDojTM0+l9qCplDD/9IZ2i+m1cnqQKtiyHbyFGx32oL -fzqPylX2bOG5DPsSTorSUdJMGVfT04oVxXc4S/2DVnNvi7RAbSiLapCWSplgtBOj -j1xlblOoXxT3m7s1XHGCX5tENxI9fVSSPVKJn+fQaWpPB2MhBA+1lUI6GJ+11T7K -J8LrP/fiw1/nOb7rW61HW44Gtyox23sA/d1+DsFVaF8hxJlNj5coPKr8xWzQ8pQl -juzdjHDukjevuw4rRmRq9vozvj9keEU9XJ5dldyEVXFmdDk7KT0p0Rla9nxYhzf/ -r/Bv8Bzy0HCWRb2D31BjXXGG05oVnYmNGxGFxYja4MwgrMmne3ilEVjfUJsapsqi -w41BAyQgIdfREulYN7ahsF5PrjVAqBd9IGtE8ULelF2SQxEBQBngEkP0ahP6tRAL -i7/CBjPKOyKijtqVny7qrGOnU2ygcA88/WDibexDhrjz0Gx8WmErU7rIWZiZ5u4Y -vJYVRo0+6rBCXRPeSJfiP5h1p17Anr2l42boAYslfcrzquB8MHtrNcyn650OLtHG -nbxgIdniKrpuzGN6Opw+O2id2JhD1/1p4SOemwAmthplr1MIyOHNP3q93rEj2J7h -5zPS/AJuKkMDFUpslPNLQjCOwPXtdzL7/kUZGBSyez1T3TaW1uY6l9XaJJRaSn+v -1zPgfp4GJ3lPs4AlAbQ0RXRoZXJldW0gRm91bmRhdGlvbiBCdWcgQm91bnR5IDxi -b3VudHlAZXRoZXJldW0ub3JnPokCPgQTAQIAKAIbAwYLCQgHAwIGFQgCCQoLBBYC -AwECHgECF4AFAloJYfoFCQWjYFgACgkQ6I0zNPpfagoENg/+LnSaVeMxiGVtcjWl -b7Xd73yrEy4uxiESS1AalW9mMf7oZzfI05f7QIQlaLAkNac74vZDJbPKjtb7tpMO -RFhRZMCveq6CPKU6pd1SI8IUVUKwpEe6AJP3lHdVP57dquieFE2HlYKm6uHbCGWU -0cjyTA+uu2KbgCHGmofsPY/xOcZLGEHTHqa5w60JJAQm+BSDKnw8wTyrxGvA3EK/ -ePSvOZMYa+iw6vYuZeBIMbdiXR/A2keBi3GuvqB8tDMj7P22TrH5mVDm3zNqGYD6 -amDPeiWp4cztY3aZyLcgYotqXPpDceZzDn+HopBPzAb/llCdE7bVswKRhphVMw4b -bhL0R/TQY7Sf6TK2LKSBrjv0DWOSijikE71SJcBnJvHU7EpKrQQ0lMGclm3ynyji 
-Nf0YTPXQt4I+fwTmOew2GFeK3UytNWbWI7oXX7Nm4bj9bhf3IJ0kmZb/Gs73+xII -e7Rz52Mby436tWyQIQiF9ITYNGvNf53TwBBZMn0pKPiTyr3Ur7FHEotkEOFNh1// -4zQY10XxuBdLrYGyZ4V8xHJM+oKre8Eg2R9qHXVbjvErHE+7CvgnV7YUip0criPr -BlKRvuoJaSliH2JFhSjWVrkPmFGrWN0BAx10yIqMnEplfKeHf4P9Elek3oInS8WP -G1zJG6s/t5+hQK0X37+TB+6rd3GJAj4EEwECACgFAlgl4TsCGwMFCQHhM4AGCwkI -BwMCBhUIAgkKCwQWAgMBAh4BAheAAAoJEOiNMzT6X2oKzf8P/iIKd77WHTbp4pMN -8h52HyZJtDJmjA1DPZrbGl1TesW/Z9uTd12txlgqZnbG2GfN9+LSP6EOPzR6v2xC -OVhR+RdWhZDJJuQCVS7lJIqQrZgmeTZG0TyQPZdLjVFBOrrhVwYX+HXbu429IzHr -URf5InyR1QgqOXyElDYS6e28HFqvaoA0DWTWDDqOLPVl+U5fuceIE2XXdv3AGLeP -Yf8J5MPobjPiZtBqI6S6iENY2Yn35qLX+axeC/iYSCHVtFuCCIdb/QYR1ZZV8Ps/ -aI9DwC7LU+YfPw7iqCIoqxSeA3o1PORkdSigEg3jtfRv5UqVo9a0oBb9jdoADsat -F/gW0E7mto3XGOiaR0eB9SSdsM3x7Bz4A0HIGNaxpZo1RWqlO91leP4c13Px7ISv -5OGXfLg+M8qb+qxbGd1HpitGi9s1y1aVfEj1kOtZ0tN8eu+Upg5WKwPNBDX3ar7J -9NCULgVSL+E79FG+zXw62gxiQrLfKzm4wU/9L5wVkwQnm29hLJ0tokrSBZFnc/1l -7OC+GM63tYicKkY4rqmoWUeYx7IwFH9mtDtvR1RxO85RbQhZizwpZpdpRkH0DqZu -ZJRmRa5r7rPqmfa7d+VIFhz2Xs8pJMLVqxTsLKcLglmjw7aOrYG0SWeH7YraXWGD -N3SlvSBiVwcK7QUKzLLvpadLwxfsuQINBFgl3tgBEACbgq6HTN5gEBi0lkD/MafI -nmNi+59U5gRGYqk46WlfRjhHudXjDpgD0lolGb4hYontkMaKRlCg2Rvgjvk3Zve0 -PKWjKw7gr8YBa9fMFY8BhAXI32OdyI9rFhxEZFfWAfwKVmT19BdeAQRFvcfd+8w8 -f1XVc+zddULMJFBTr+xKDlIRWwTkdLPQeWbjo0eHl/g4tuLiLrTxVbnj26bf+2+1 -DbM/w5VavzPrkviHqvKe/QP/gay4QDViWvFgLb90idfAHIdsPgflp0VDS5rVHFL6 -D73rSRdIRo3I8c8mYoNjSR4XDuvgOkAKW9LR3pvouFHHjp6Fr0GesRbrbb2EG66i -PsR99MQ7FqIL9VMHPm2mtR+XvbnKkH2rYyEqaMbSdk29jGapkAWle4sIhSKk749A -4tGkHl08KZ2N9o6GrfUehP/V2eJLaph2DioFL1HxRryrKy80QQKLMJRekxigq8gr -eW8xB4zuf9Mkuou+RHNmo8PebHjFstLigiD6/zP2e+4tUmrT0/JTGOShoGMl8Rt0 -VRxdPImKun+4LOXbfOxArOSkY6i35+gsgkkSy1gTJE0BY3S9auT6+YrglY/TWPQ9 -IJxWVOKlT+3WIp5wJu2bBKQ420VLqDYzkoWytel/bM1ACUtipMiIVeUs2uFiRjpz -A1Wy0QHKPTdSuGlJPRrfcQARAQABiQIlBBgBAgAPAhsMBQJaCWIIBQkFo2BYAAoJ -EOiNMzT6X2oKgSwQAKKs7BGF8TyZeIEO2EUK7R2bdQDCdSGZY06tqLFg3IHMGxDM -b/7FVoa2AEsFgv6xpoebxBB5zkhUk7lslgxvKiSLYjxfNjTBltfiFJ+eQnf+OTs8 
-KeR51lLa66rvIH2qUzkNDCCTF45H4wIDpV05AXhBjKYkrDCrtey1rQyFp5fxI+0I -Q1UKKXvzZK4GdxhxDbOUSd38MYy93nqcmclGSGK/gF8XiyuVjeifDCM6+T1NQTX0 -K9lneidcqtBDvlggJTLJtQPO33o5EHzXSiud+dKth1uUhZOFEaYRZoye1YE3yB0T -NOOE8fXlvu8iuIAMBSDL9ep6sEIaXYwoD60I2gHdWD0lkP0DOjGQpi4ouXM3Edsd -5MTi0MDRNTij431kn8T/D0LCgmoUmYYMBgbwFhXr67axPZlKjrqR0z3F/Elv0ZPP -cVg1tNznsALYQ9Ovl6b5M3cJ5GapbbvNWC7yEE1qScl9HiMxjt/H6aPastH63/7w -cN0TslW+zRBy05VNJvpWGStQXcngsSUeJtI1Gd992YNjUJq4/Lih6Z1TlwcFVap+ -cTcDptoUvXYGg/9mRNNPZwErSfIJ0Ibnx9wPVuRN6NiCLOt2mtKp2F1pM6AOQPpZ -85vEh6I8i6OaO0w/Z0UHBwvpY6jDUliaROsWUQsqz78Z34CVj4cy6vPW2EF4 -=r6KK ------END PGP PUBLIC KEY BLOCK----- -``` diff --git a/vendor/github.com/ethereum/go-ethereum/appveyor.yml b/vendor/github.com/ethereum/go-ethereum/appveyor.yml deleted file mode 100644 index 90a862a..0000000 --- a/vendor/github.com/ethereum/go-ethereum/appveyor.yml +++ /dev/null @@ -1,40 +0,0 @@ -os: Visual Studio 2015 - -# Clone directly into GOPATH. -clone_folder: C:\gopath\src\github.com\ethereum\go-ethereum -clone_depth: 5 -version: "{branch}.{build}" -environment: - global: - GOPATH: C:\gopath - CC: gcc.exe - matrix: - - GETH_ARCH: amd64 - MSYS2_ARCH: x86_64 - MSYS2_BITS: 64 - MSYSTEM: MINGW64 - PATH: C:\msys64\mingw64\bin\;C:\Program Files (x86)\NSIS\;%PATH% - - GETH_ARCH: 386 - MSYS2_ARCH: i686 - MSYS2_BITS: 32 - MSYSTEM: MINGW32 - PATH: C:\msys64\mingw32\bin\;C:\Program Files (x86)\NSIS\;%PATH% - -install: - - git submodule update --init - - rmdir C:\go /s /q - - appveyor DownloadFile https://dl.google.com/go/go1.13.8.windows-%GETH_ARCH%.zip - - 7z x go1.13.8.windows-%GETH_ARCH%.zip -y -oC:\ > NUL - - go version - - gcc --version - -build_script: - - go run build\ci.go install - -after_build: - - go run build\ci.go archive -type zip -signer WINDOWS_SIGNING_KEY -upload gethstore/builds - - go run build\ci.go nsis -signer WINDOWS_SIGNING_KEY -upload gethstore/builds - -test_script: - - set CGO_ENABLED=1 - - go run build\ci.go test -coverage diff --git 
a/vendor/github.com/ethereum/go-ethereum/circle.yml b/vendor/github.com/ethereum/go-ethereum/circle.yml deleted file mode 100644 index 39ff5d8..0000000 --- a/vendor/github.com/ethereum/go-ethereum/circle.yml +++ /dev/null @@ -1,32 +0,0 @@ -machine: - services: - - docker - -dependencies: - cache_directories: - - "~/.ethash" # Cache the ethash DAG generated by hive for consecutive builds - - "~/.docker" # Cache all docker images manually to avoid lengthy rebuilds - override: - # Restore all previously cached docker images - - mkdir -p ~/.docker - - for img in `ls ~/.docker`; do docker load -i ~/.docker/$img; done - - # Pull in and hive, restore cached ethash DAGs and do a dry run - - go get -u github.com/karalabe/hive - - (cd ~/.go_workspace/src/github.com/karalabe/hive && mkdir -p workspace/ethash/ ~/.ethash) - - (cd ~/.go_workspace/src/github.com/karalabe/hive && cp -r ~/.ethash/. workspace/ethash/) - - (cd ~/.go_workspace/src/github.com/karalabe/hive && hive --docker-noshell --client=NONE --test=. --sim=. --loglevel=6) - - # Cache all the docker images and the ethash DAGs - - for img in `docker images | grep -v "^" | tail -n +2 | awk '{print $1}'`; do docker save $img > ~/.docker/`echo $img | tr '/' ':'`.tar; done - - cp -r ~/.go_workspace/src/github.com/karalabe/hive/workspace/ethash/. ~/.ethash - -test: - override: - # Build Geth and move into a known folder - - make geth - - cp ./build/bin/geth $HOME/geth - - # Run hive and move all generated logs into the public artifacts folder - - (cd ~/.go_workspace/src/github.com/karalabe/hive && hive --docker-noshell --client=go-ethereum:local --override=$HOME/geth --test=. --sim=.) 
- - cp -r ~/.go_workspace/src/github.com/karalabe/hive/workspace/logs/* $CIRCLE_ARTIFACTS diff --git a/vendor/github.com/ethereum/go-ethereum/common/mclock/mclock.go b/vendor/github.com/ethereum/go-ethereum/common/mclock/mclock.go deleted file mode 100644 index 3aca257..0000000 --- a/vendor/github.com/ethereum/go-ethereum/common/mclock/mclock.go +++ /dev/null @@ -1,123 +0,0 @@ -// Copyright 2016 The go-ethereum Authors -// This file is part of the go-ethereum library. -// -// The go-ethereum library is free software: you can redistribute it and/or modify -// it under the terms of the GNU Lesser General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// The go-ethereum library is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Lesser General Public License for more details. -// -// You should have received a copy of the GNU Lesser General Public License -// along with the go-ethereum library. If not, see . - -// Package mclock is a wrapper for a monotonic clock source -package mclock - -import ( - "time" - - "github.com/aristanetworks/goarista/monotime" -) - -// AbsTime represents absolute monotonic time. -type AbsTime time.Duration - -// Now returns the current absolute monotonic time. -func Now() AbsTime { - return AbsTime(monotime.Now()) -} - -// Add returns t + d as absolute time. -func (t AbsTime) Add(d time.Duration) AbsTime { - return t + AbsTime(d) -} - -// Sub returns t - t2 as a duration. -func (t AbsTime) Sub(t2 AbsTime) time.Duration { - return time.Duration(t - t2) -} - -// The Clock interface makes it possible to replace the monotonic system clock with -// a simulated clock. 
-type Clock interface { - Now() AbsTime - Sleep(time.Duration) - NewTimer(time.Duration) ChanTimer - After(time.Duration) <-chan AbsTime - AfterFunc(d time.Duration, f func()) Timer -} - -// Timer is a cancellable event created by AfterFunc. -type Timer interface { - // Stop cancels the timer. It returns false if the timer has already - // expired or been stopped. - Stop() bool -} - -// ChanTimer is a cancellable event created by NewTimer. -type ChanTimer interface { - Timer - - // The channel returned by C receives a value when the timer expires. - C() <-chan AbsTime - // Reset reschedules the timer with a new timeout. - // It should be invoked only on stopped or expired timers with drained channels. - Reset(time.Duration) -} - -// System implements Clock using the system clock. -type System struct{} - -// Now returns the current monotonic time. -func (c System) Now() AbsTime { - return AbsTime(monotime.Now()) -} - -// Sleep blocks for the given duration. -func (c System) Sleep(d time.Duration) { - time.Sleep(d) -} - -// NewTimer creates a timer which can be rescheduled. -func (c System) NewTimer(d time.Duration) ChanTimer { - ch := make(chan AbsTime, 1) - t := time.AfterFunc(d, func() { - // This send is non-blocking because that's how time.Timer - // behaves. It doesn't matter in the happy case, but does - // when Reset is misused. - select { - case ch <- c.Now(): - default: - } - }) - return &systemTimer{t, ch} -} - -// After returns a channel which receives the current time after d has elapsed. -func (c System) After(d time.Duration) <-chan AbsTime { - ch := make(chan AbsTime, 1) - time.AfterFunc(d, func() { ch <- c.Now() }) - return ch -} - -// AfterFunc runs f on a new goroutine after the duration has elapsed. 
-func (c System) AfterFunc(d time.Duration, f func()) Timer { - return time.AfterFunc(d, f) -} - -type systemTimer struct { - *time.Timer - ch <-chan AbsTime -} - -func (st *systemTimer) Reset(d time.Duration) { - st.Timer.Reset(d) -} - -func (st *systemTimer) C() <-chan AbsTime { - return st.ch -} diff --git a/vendor/github.com/ethereum/go-ethereum/common/mclock/simclock.go b/vendor/github.com/ethereum/go-ethereum/common/mclock/simclock.go deleted file mode 100644 index 766ca0f..0000000 --- a/vendor/github.com/ethereum/go-ethereum/common/mclock/simclock.go +++ /dev/null @@ -1,209 +0,0 @@ -// Copyright 2018 The go-ethereum Authors -// This file is part of the go-ethereum library. -// -// The go-ethereum library is free software: you can redistribute it and/or modify -// it under the terms of the GNU Lesser General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// The go-ethereum library is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Lesser General Public License for more details. -// -// You should have received a copy of the GNU Lesser General Public License -// along with the go-ethereum library. If not, see . - -package mclock - -import ( - "container/heap" - "sync" - "time" -) - -// Simulated implements a virtual Clock for reproducible time-sensitive tests. It -// simulates a scheduler on a virtual timescale where actual processing takes zero time. -// -// The virtual clock doesn't advance on its own, call Run to advance it and execute timers. -// Since there is no way to influence the Go scheduler, testing timeout behaviour involving -// goroutines needs special care. A good way to test such timeouts is as follows: First -// perform the action that is supposed to time out. 
Ensure that the timer you want to test -// is created. Then run the clock until after the timeout. Finally observe the effect of -// the timeout using a channel or semaphore. -type Simulated struct { - now AbsTime - scheduled simTimerHeap - mu sync.RWMutex - cond *sync.Cond -} - -// simTimer implements ChanTimer on the virtual clock. -type simTimer struct { - at AbsTime - index int // position in s.scheduled - s *Simulated - do func() - ch <-chan AbsTime -} - -func (s *Simulated) init() { - if s.cond == nil { - s.cond = sync.NewCond(&s.mu) - } -} - -// Run moves the clock by the given duration, executing all timers before that duration. -func (s *Simulated) Run(d time.Duration) { - s.mu.Lock() - s.init() - - end := s.now + AbsTime(d) - var do []func() - for len(s.scheduled) > 0 && s.scheduled[0].at <= end { - ev := heap.Pop(&s.scheduled).(*simTimer) - do = append(do, ev.do) - } - s.now = end - s.mu.Unlock() - - for _, fn := range do { - fn() - } -} - -// ActiveTimers returns the number of timers that haven't fired. -func (s *Simulated) ActiveTimers() int { - s.mu.RLock() - defer s.mu.RUnlock() - - return len(s.scheduled) -} - -// WaitForTimers waits until the clock has at least n scheduled timers. -func (s *Simulated) WaitForTimers(n int) { - s.mu.Lock() - defer s.mu.Unlock() - s.init() - - for len(s.scheduled) < n { - s.cond.Wait() - } -} - -// Now returns the current virtual time. -func (s *Simulated) Now() AbsTime { - s.mu.RLock() - defer s.mu.RUnlock() - - return s.now -} - -// Sleep blocks until the clock has advanced by d. -func (s *Simulated) Sleep(d time.Duration) { - <-s.After(d) -} - -// NewTimer creates a timer which fires when the clock has advanced by d. 
-func (s *Simulated) NewTimer(d time.Duration) ChanTimer { - s.mu.Lock() - defer s.mu.Unlock() - - ch := make(chan AbsTime, 1) - var timer *simTimer - timer = s.schedule(d, func() { ch <- timer.at }) - timer.ch = ch - return timer -} - -// After returns a channel which receives the current time after the clock -// has advanced by d. -func (s *Simulated) After(d time.Duration) <-chan AbsTime { - return s.NewTimer(d).C() -} - -// AfterFunc runs fn after the clock has advanced by d. Unlike with the system -// clock, fn runs on the goroutine that calls Run. -func (s *Simulated) AfterFunc(d time.Duration, fn func()) Timer { - s.mu.Lock() - defer s.mu.Unlock() - - return s.schedule(d, fn) -} - -func (s *Simulated) schedule(d time.Duration, fn func()) *simTimer { - s.init() - - at := s.now + AbsTime(d) - ev := &simTimer{do: fn, at: at, s: s} - heap.Push(&s.scheduled, ev) - s.cond.Broadcast() - return ev -} - -func (ev *simTimer) Stop() bool { - ev.s.mu.Lock() - defer ev.s.mu.Unlock() - - if ev.index < 0 { - return false - } - heap.Remove(&ev.s.scheduled, ev.index) - ev.s.cond.Broadcast() - ev.index = -1 - return true -} - -func (ev *simTimer) Reset(d time.Duration) { - if ev.ch == nil { - panic("mclock: Reset() on timer created by AfterFunc") - } - - ev.s.mu.Lock() - defer ev.s.mu.Unlock() - ev.at = ev.s.now.Add(d) - if ev.index < 0 { - heap.Push(&ev.s.scheduled, ev) // already expired - } else { - heap.Fix(&ev.s.scheduled, ev.index) // hasn't fired yet, reschedule - } - ev.s.cond.Broadcast() -} - -func (ev *simTimer) C() <-chan AbsTime { - if ev.ch == nil { - panic("mclock: C() on timer created by AfterFunc") - } - return ev.ch -} - -type simTimerHeap []*simTimer - -func (h *simTimerHeap) Len() int { - return len(*h) -} - -func (h *simTimerHeap) Less(i, j int) bool { - return (*h)[i].at < (*h)[j].at -} - -func (h *simTimerHeap) Swap(i, j int) { - (*h)[i], (*h)[j] = (*h)[j], (*h)[i] - (*h)[i].index = i - (*h)[j].index = j -} - -func (h *simTimerHeap) Push(x interface{}) { 
- t := x.(*simTimer) - t.index = len(*h) - *h = append(*h, t) -} - -func (h *simTimerHeap) Pop() interface{} { - end := len(*h) - 1 - t := (*h)[end] - t.index = -1 - (*h)[end] = nil - *h = (*h)[:end] - return t -} diff --git a/vendor/github.com/ethereum/go-ethereum/common/prque/lazyqueue.go b/vendor/github.com/ethereum/go-ethereum/common/prque/lazyqueue.go deleted file mode 100644 index 92ddd77..0000000 --- a/vendor/github.com/ethereum/go-ethereum/common/prque/lazyqueue.go +++ /dev/null @@ -1,182 +0,0 @@ -// Copyright 2019 The go-ethereum Authors -// This file is part of the go-ethereum library. -// -// The go-ethereum library is free software: you can redistribute it and/or modify -// it under the terms of the GNU Lesser General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// The go-ethereum library is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Lesser General Public License for more details. -// -// You should have received a copy of the GNU Lesser General Public License -// along with the go-ethereum library. If not, see . - -package prque - -import ( - "container/heap" - "time" - - "github.com/ethereum/go-ethereum/common/mclock" -) - -// LazyQueue is a priority queue data structure where priorities can change over -// time and are only evaluated on demand. -// Two callbacks are required: -// - priority evaluates the actual priority of an item -// - maxPriority gives an upper estimate for the priority in any moment between -// now and the given absolute time -// If the upper estimate is exceeded then Update should be called for that item. -// A global Refresh function should also be called periodically. 
-type LazyQueue struct { - clock mclock.Clock - // Items are stored in one of two internal queues ordered by estimated max - // priority until the next and the next-after-next refresh. Update and Refresh - // always places items in queue[1]. - queue [2]*sstack - popQueue *sstack - period time.Duration - maxUntil mclock.AbsTime - indexOffset int - setIndex SetIndexCallback - priority PriorityCallback - maxPriority MaxPriorityCallback -} - -type ( - PriorityCallback func(data interface{}, now mclock.AbsTime) int64 // actual priority callback - MaxPriorityCallback func(data interface{}, until mclock.AbsTime) int64 // estimated maximum priority callback -) - -// NewLazyQueue creates a new lazy queue -func NewLazyQueue(setIndex SetIndexCallback, priority PriorityCallback, maxPriority MaxPriorityCallback, clock mclock.Clock, refreshPeriod time.Duration) *LazyQueue { - q := &LazyQueue{ - popQueue: newSstack(nil), - setIndex: setIndex, - priority: priority, - maxPriority: maxPriority, - clock: clock, - period: refreshPeriod} - q.Reset() - q.Refresh() - return q -} - -// Reset clears the contents of the queue -func (q *LazyQueue) Reset() { - q.queue[0] = newSstack(q.setIndex0) - q.queue[1] = newSstack(q.setIndex1) -} - -// Refresh should be called at least with the frequency specified by the refreshPeriod parameter -func (q *LazyQueue) Refresh() { - q.maxUntil = q.clock.Now() + mclock.AbsTime(q.period) - for q.queue[0].Len() != 0 { - q.Push(heap.Pop(q.queue[0]).(*item).value) - } - q.queue[0], q.queue[1] = q.queue[1], q.queue[0] - q.indexOffset = 1 - q.indexOffset - q.maxUntil += mclock.AbsTime(q.period) -} - -// Push adds an item to the queue -func (q *LazyQueue) Push(data interface{}) { - heap.Push(q.queue[1], &item{data, q.maxPriority(data, q.maxUntil)}) -} - -// Update updates the upper priority estimate for the item with the given queue index -func (q *LazyQueue) Update(index int) { - q.Push(q.Remove(index)) -} - -// Pop removes and returns the item with the greatest 
actual priority -func (q *LazyQueue) Pop() (interface{}, int64) { - var ( - resData interface{} - resPri int64 - ) - q.MultiPop(func(data interface{}, priority int64) bool { - resData = data - resPri = priority - return false - }) - return resData, resPri -} - -// peekIndex returns the index of the internal queue where the item with the -// highest estimated priority is or -1 if both are empty -func (q *LazyQueue) peekIndex() int { - if q.queue[0].Len() != 0 { - if q.queue[1].Len() != 0 && q.queue[1].blocks[0][0].priority > q.queue[0].blocks[0][0].priority { - return 1 - } - return 0 - } - if q.queue[1].Len() != 0 { - return 1 - } - return -1 -} - -// MultiPop pops multiple items from the queue and is more efficient than calling -// Pop multiple times. Popped items are passed to the callback. MultiPop returns -// when the callback returns false or there are no more items to pop. -func (q *LazyQueue) MultiPop(callback func(data interface{}, priority int64) bool) { - now := q.clock.Now() - nextIndex := q.peekIndex() - for nextIndex != -1 { - data := heap.Pop(q.queue[nextIndex]).(*item).value - heap.Push(q.popQueue, &item{data, q.priority(data, now)}) - nextIndex = q.peekIndex() - for q.popQueue.Len() != 0 && (nextIndex == -1 || q.queue[nextIndex].blocks[0][0].priority < q.popQueue.blocks[0][0].priority) { - i := heap.Pop(q.popQueue).(*item) - if !callback(i.value, i.priority) { - for q.popQueue.Len() != 0 { - q.Push(heap.Pop(q.popQueue).(*item).value) - } - return - } - } - } -} - -// PopItem pops the item from the queue only, dropping the associated priority value. -func (q *LazyQueue) PopItem() interface{} { - i, _ := q.Pop() - return i -} - -// Remove removes removes the item with the given index. -func (q *LazyQueue) Remove(index int) interface{} { - if index < 0 { - return nil - } - return heap.Remove(q.queue[index&1^q.indexOffset], index>>1).(*item).value -} - -// Empty checks whether the priority queue is empty. 
-func (q *LazyQueue) Empty() bool { - return q.queue[0].Len() == 0 && q.queue[1].Len() == 0 -} - -// Size returns the number of items in the priority queue. -func (q *LazyQueue) Size() int { - return q.queue[0].Len() + q.queue[1].Len() -} - -// setIndex0 translates internal queue item index to the virtual index space of LazyQueue -func (q *LazyQueue) setIndex0(data interface{}, index int) { - if index == -1 { - q.setIndex(data, -1) - } else { - q.setIndex(data, index+index) - } -} - -// setIndex1 translates internal queue item index to the virtual index space of LazyQueue -func (q *LazyQueue) setIndex1(data interface{}, index int) { - q.setIndex(data, index+index+1) -} diff --git a/vendor/github.com/ethereum/go-ethereum/common/prque/prque.go b/vendor/github.com/ethereum/go-ethereum/common/prque/prque.go deleted file mode 100644 index 3cc5a1a..0000000 --- a/vendor/github.com/ethereum/go-ethereum/common/prque/prque.go +++ /dev/null @@ -1,78 +0,0 @@ -// CookieJar - A contestant's algorithm toolbox -// Copyright (c) 2013 Peter Szilagyi. All rights reserved. -// -// CookieJar is dual licensed: use of this source code is governed by a BSD -// license that can be found in the LICENSE file. Alternatively, the CookieJar -// toolbox may be used in accordance with the terms and conditions contained -// in a signed written agreement between you and the author(s). - -// This is a duplicated and slightly modified version of "gopkg.in/karalabe/cookiejar.v2/collections/prque". - -// Package prque implements a priority queue data structure supporting arbitrary -// value types and int64 priorities. -// -// If you would like to use a min-priority queue, simply negate the priorities. -// -// Internally the queue is based on the standard heap package working on a -// sortable version of the block based stack. -package prque - -import ( - "container/heap" -) - -// Priority queue data structure. -type Prque struct { - cont *sstack -} - -// New creates a new priority queue. 
-func New(setIndex SetIndexCallback) *Prque { - return &Prque{newSstack(setIndex)} -} - -// Pushes a value with a given priority into the queue, expanding if necessary. -func (p *Prque) Push(data interface{}, priority int64) { - heap.Push(p.cont, &item{data, priority}) -} - -// Peek returns the value with the greates priority but does not pop it off. -func (p *Prque) Peek() (interface{}, int64) { - item := p.cont.blocks[0][0] - return item.value, item.priority -} - -// Pops the value with the greates priority off the stack and returns it. -// Currently no shrinking is done. -func (p *Prque) Pop() (interface{}, int64) { - item := heap.Pop(p.cont).(*item) - return item.value, item.priority -} - -// Pops only the item from the queue, dropping the associated priority value. -func (p *Prque) PopItem() interface{} { - return heap.Pop(p.cont).(*item).value -} - -// Remove removes the element with the given index. -func (p *Prque) Remove(i int) interface{} { - if i < 0 { - return nil - } - return heap.Remove(p.cont, i) -} - -// Checks whether the priority queue is empty. -func (p *Prque) Empty() bool { - return p.cont.Len() == 0 -} - -// Returns the number of element in the priority queue. -func (p *Prque) Size() int { - return p.cont.Len() -} - -// Clears the contents of the priority queue. -func (p *Prque) Reset() { - *p = *New(p.cont.setIndex) -} diff --git a/vendor/github.com/ethereum/go-ethereum/common/prque/sstack.go b/vendor/github.com/ethereum/go-ethereum/common/prque/sstack.go deleted file mode 100644 index 8518af5..0000000 --- a/vendor/github.com/ethereum/go-ethereum/common/prque/sstack.go +++ /dev/null @@ -1,114 +0,0 @@ -// CookieJar - A contestant's algorithm toolbox -// Copyright (c) 2013 Peter Szilagyi. All rights reserved. -// -// CookieJar is dual licensed: use of this source code is governed by a BSD -// license that can be found in the LICENSE file. 
Alternatively, the CookieJar -// toolbox may be used in accordance with the terms and conditions contained -// in a signed written agreement between you and the author(s). - -// This is a duplicated and slightly modified version of "gopkg.in/karalabe/cookiejar.v2/collections/prque". - -package prque - -// The size of a block of data -const blockSize = 4096 - -// A prioritized item in the sorted stack. -// -// Note: priorities can "wrap around" the int64 range, a comes before b if (a.priority - b.priority) > 0. -// The difference between the lowest and highest priorities in the queue at any point should be less than 2^63. -type item struct { - value interface{} - priority int64 -} - -// SetIndexCallback is called when the element is moved to a new index. -// Providing SetIndexCallback is optional, it is needed only if the application needs -// to delete elements other than the top one. -type SetIndexCallback func(data interface{}, index int) - -// Internal sortable stack data structure. Implements the Push and Pop ops for -// the stack (heap) functionality and the Len, Less and Swap methods for the -// sortability requirements of the heaps. -type sstack struct { - setIndex SetIndexCallback - size int - capacity int - offset int - - blocks [][]*item - active []*item -} - -// Creates a new, empty stack. -func newSstack(setIndex SetIndexCallback) *sstack { - result := new(sstack) - result.setIndex = setIndex - result.active = make([]*item, blockSize) - result.blocks = [][]*item{result.active} - result.capacity = blockSize - return result -} - -// Pushes a value onto the stack, expanding it if necessary. Required by -// heap.Interface. 
-func (s *sstack) Push(data interface{}) { - if s.size == s.capacity { - s.active = make([]*item, blockSize) - s.blocks = append(s.blocks, s.active) - s.capacity += blockSize - s.offset = 0 - } else if s.offset == blockSize { - s.active = s.blocks[s.size/blockSize] - s.offset = 0 - } - if s.setIndex != nil { - s.setIndex(data.(*item).value, s.size) - } - s.active[s.offset] = data.(*item) - s.offset++ - s.size++ -} - -// Pops a value off the stack and returns it. Currently no shrinking is done. -// Required by heap.Interface. -func (s *sstack) Pop() (res interface{}) { - s.size-- - s.offset-- - if s.offset < 0 { - s.offset = blockSize - 1 - s.active = s.blocks[s.size/blockSize] - } - res, s.active[s.offset] = s.active[s.offset], nil - if s.setIndex != nil { - s.setIndex(res.(*item).value, -1) - } - return -} - -// Returns the length of the stack. Required by sort.Interface. -func (s *sstack) Len() int { - return s.size -} - -// Compares the priority of two elements of the stack (higher is first). -// Required by sort.Interface. -func (s *sstack) Less(i, j int) bool { - return (s.blocks[i/blockSize][i%blockSize].priority - s.blocks[j/blockSize][j%blockSize].priority) > 0 -} - -// Swaps two elements in the stack. Required by sort.Interface. -func (s *sstack) Swap(i, j int) { - ib, io, jb, jo := i/blockSize, i%blockSize, j/blockSize, j%blockSize - a, b := s.blocks[jb][jo], s.blocks[ib][io] - if s.setIndex != nil { - s.setIndex(a.value, i) - s.setIndex(b.value, j) - } - s.blocks[ib][io], s.blocks[jb][jo] = a, b -} - -// Resets the stack, effectively clearing its contents. 
-func (s *sstack) Reset() { - *s = *newSstack(s.setIndex) -} diff --git a/vendor/github.com/ethereum/go-ethereum/core/types/block.go b/vendor/github.com/ethereum/go-ethereum/core/types/block.go deleted file mode 100644 index 741ff8e..0000000 --- a/vendor/github.com/ethereum/go-ethereum/core/types/block.go +++ /dev/null @@ -1,395 +0,0 @@ -// Copyright 2014 The go-ethereum Authors -// This file is part of the go-ethereum library. -// -// The go-ethereum library is free software: you can redistribute it and/or modify -// it under the terms of the GNU Lesser General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// The go-ethereum library is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Lesser General Public License for more details. -// -// You should have received a copy of the GNU Lesser General Public License -// along with the go-ethereum library. If not, see . - -// Package types contains data types related to Ethereum consensus. -package types - -import ( - "encoding/binary" - "fmt" - "io" - "math/big" - "reflect" - "sync/atomic" - "time" - - "github.com/ethereum/go-ethereum/common" - "github.com/ethereum/go-ethereum/common/hexutil" - "github.com/ethereum/go-ethereum/rlp" - "golang.org/x/crypto/sha3" -) - -var ( - EmptyRootHash = DeriveSha(Transactions{}) - EmptyUncleHash = rlpHash([]*Header(nil)) -) - -// A BlockNonce is a 64-bit hash which proves (combined with the -// mix-hash) that a sufficient amount of computation has been carried -// out on a block. -type BlockNonce [8]byte - -// EncodeNonce converts the given integer to a block nonce. -func EncodeNonce(i uint64) BlockNonce { - var n BlockNonce - binary.BigEndian.PutUint64(n[:], i) - return n -} - -// Uint64 returns the integer value of a block nonce. 
-func (n BlockNonce) Uint64() uint64 { - return binary.BigEndian.Uint64(n[:]) -} - -// MarshalText encodes n as a hex string with 0x prefix. -func (n BlockNonce) MarshalText() ([]byte, error) { - return hexutil.Bytes(n[:]).MarshalText() -} - -// UnmarshalText implements encoding.TextUnmarshaler. -func (n *BlockNonce) UnmarshalText(input []byte) error { - return hexutil.UnmarshalFixedText("BlockNonce", input, n[:]) -} - -//go:generate gencodec -type Header -field-override headerMarshaling -out gen_header_json.go - -// Header represents a block header in the Ethereum blockchain. -type Header struct { - ParentHash common.Hash `json:"parentHash" gencodec:"required"` - UncleHash common.Hash `json:"sha3Uncles" gencodec:"required"` - Coinbase common.Address `json:"miner" gencodec:"required"` - Root common.Hash `json:"stateRoot" gencodec:"required"` - TxHash common.Hash `json:"transactionsRoot" gencodec:"required"` - ReceiptHash common.Hash `json:"receiptsRoot" gencodec:"required"` - Bloom Bloom `json:"logsBloom" gencodec:"required"` - Difficulty *big.Int `json:"difficulty" gencodec:"required"` - Number *big.Int `json:"number" gencodec:"required"` - GasLimit uint64 `json:"gasLimit" gencodec:"required"` - GasUsed uint64 `json:"gasUsed" gencodec:"required"` - Time uint64 `json:"timestamp" gencodec:"required"` - Extra []byte `json:"extraData" gencodec:"required"` - MixDigest common.Hash `json:"mixHash"` - Nonce BlockNonce `json:"nonce"` -} - -// field type overrides for gencodec -type headerMarshaling struct { - Difficulty *hexutil.Big - Number *hexutil.Big - GasLimit hexutil.Uint64 - GasUsed hexutil.Uint64 - Time hexutil.Uint64 - Extra hexutil.Bytes - Hash common.Hash `json:"hash"` // adds call to Hash() in MarshalJSON -} - -// Hash returns the block hash of the header, which is simply the keccak256 hash of its -// RLP encoding. 
-func (h *Header) Hash() common.Hash { - return rlpHash(h) -} - -var headerSize = common.StorageSize(reflect.TypeOf(Header{}).Size()) - -// Size returns the approximate memory used by all internal contents. It is used -// to approximate and limit the memory consumption of various caches. -func (h *Header) Size() common.StorageSize { - return headerSize + common.StorageSize(len(h.Extra)+(h.Difficulty.BitLen()+h.Number.BitLen())/8) -} - -// SanityCheck checks a few basic things -- these checks are way beyond what -// any 'sane' production values should hold, and can mainly be used to prevent -// that the unbounded fields are stuffed with junk data to add processing -// overhead -func (h *Header) SanityCheck() error { - if h.Number != nil && !h.Number.IsUint64() { - return fmt.Errorf("too large block number: bitlen %d", h.Number.BitLen()) - } - if h.Difficulty != nil { - if diffLen := h.Difficulty.BitLen(); diffLen > 80 { - return fmt.Errorf("too large block difficulty: bitlen %d", diffLen) - } - } - if eLen := len(h.Extra); eLen > 100*1024 { - return fmt.Errorf("too large block extradata: size %d", eLen) - } - return nil -} - -func rlpHash(x interface{}) (h common.Hash) { - hw := sha3.NewLegacyKeccak256() - rlp.Encode(hw, x) - hw.Sum(h[:0]) - return h -} - -// Body is a simple (mutable, non-safe) data container for storing and moving -// a block's data contents (transactions and uncles) together. -type Body struct { - Transactions []*Transaction - Uncles []*Header -} - -// Block represents an entire block in the Ethereum blockchain. -type Block struct { - header *Header - uncles []*Header - transactions Transactions - - // caches - hash atomic.Value - size atomic.Value - - // Td is used by package core to store the total difficulty - // of the chain up to and including the block. - td *big.Int - - // These fields are used by package eth to track - // inter-peer block relay. 
- ReceivedAt time.Time - ReceivedFrom interface{} -} - -// DeprecatedTd is an old relic for extracting the TD of a block. It is in the -// code solely to facilitate upgrading the database from the old format to the -// new, after which it should be deleted. Do not use! -func (b *Block) DeprecatedTd() *big.Int { - return b.td -} - -// [deprecated by eth/63] -// StorageBlock defines the RLP encoding of a Block stored in the -// state database. The StorageBlock encoding contains fields that -// would otherwise need to be recomputed. -type StorageBlock Block - -// "external" block encoding. used for eth protocol, etc. -type extblock struct { - Header *Header - Txs []*Transaction - Uncles []*Header -} - -// [deprecated by eth/63] -// "storage" block encoding. used for database. -type storageblock struct { - Header *Header - Txs []*Transaction - Uncles []*Header - TD *big.Int -} - -// NewBlock creates a new block. The input data is copied, -// changes to header and to the field values will not affect the -// block. -// -// The values of TxHash, UncleHash, ReceiptHash and Bloom in header -// are ignored and set to values derived from the given txs, uncles -// and receipts. 
-func NewBlock(header *Header, txs []*Transaction, uncles []*Header, receipts []*Receipt) *Block { - b := &Block{header: CopyHeader(header), td: new(big.Int)} - - // TODO: panic if len(txs) != len(receipts) - if len(txs) == 0 { - b.header.TxHash = EmptyRootHash - } else { - b.header.TxHash = DeriveSha(Transactions(txs)) - b.transactions = make(Transactions, len(txs)) - copy(b.transactions, txs) - } - - if len(receipts) == 0 { - b.header.ReceiptHash = EmptyRootHash - } else { - b.header.ReceiptHash = DeriveSha(Receipts(receipts)) - b.header.Bloom = CreateBloom(receipts) - } - - if len(uncles) == 0 { - b.header.UncleHash = EmptyUncleHash - } else { - b.header.UncleHash = CalcUncleHash(uncles) - b.uncles = make([]*Header, len(uncles)) - for i := range uncles { - b.uncles[i] = CopyHeader(uncles[i]) - } - } - - return b -} - -// NewBlockWithHeader creates a block with the given header data. The -// header data is copied, changes to header and to the field values -// will not affect the block. -func NewBlockWithHeader(header *Header) *Block { - return &Block{header: CopyHeader(header)} -} - -// CopyHeader creates a deep copy of a block header to prevent side effects from -// modifying a header variable. -func CopyHeader(h *Header) *Header { - cpy := *h - if cpy.Difficulty = new(big.Int); h.Difficulty != nil { - cpy.Difficulty.Set(h.Difficulty) - } - if cpy.Number = new(big.Int); h.Number != nil { - cpy.Number.Set(h.Number) - } - if len(h.Extra) > 0 { - cpy.Extra = make([]byte, len(h.Extra)) - copy(cpy.Extra, h.Extra) - } - return &cpy -} - -// DecodeRLP decodes the Ethereum -func (b *Block) DecodeRLP(s *rlp.Stream) error { - var eb extblock - _, size, _ := s.Kind() - if err := s.Decode(&eb); err != nil { - return err - } - b.header, b.uncles, b.transactions = eb.Header, eb.Uncles, eb.Txs - b.size.Store(common.StorageSize(rlp.ListSize(size))) - return nil -} - -// EncodeRLP serializes b into the Ethereum RLP block format. 
-func (b *Block) EncodeRLP(w io.Writer) error { - return rlp.Encode(w, extblock{ - Header: b.header, - Txs: b.transactions, - Uncles: b.uncles, - }) -} - -// [deprecated by eth/63] -func (b *StorageBlock) DecodeRLP(s *rlp.Stream) error { - var sb storageblock - if err := s.Decode(&sb); err != nil { - return err - } - b.header, b.uncles, b.transactions, b.td = sb.Header, sb.Uncles, sb.Txs, sb.TD - return nil -} - -// TODO: copies - -func (b *Block) Uncles() []*Header { return b.uncles } -func (b *Block) Transactions() Transactions { return b.transactions } - -func (b *Block) Transaction(hash common.Hash) *Transaction { - for _, transaction := range b.transactions { - if transaction.Hash() == hash { - return transaction - } - } - return nil -} - -func (b *Block) Number() *big.Int { return new(big.Int).Set(b.header.Number) } -func (b *Block) GasLimit() uint64 { return b.header.GasLimit } -func (b *Block) GasUsed() uint64 { return b.header.GasUsed } -func (b *Block) Difficulty() *big.Int { return new(big.Int).Set(b.header.Difficulty) } -func (b *Block) Time() uint64 { return b.header.Time } - -func (b *Block) NumberU64() uint64 { return b.header.Number.Uint64() } -func (b *Block) MixDigest() common.Hash { return b.header.MixDigest } -func (b *Block) Nonce() uint64 { return binary.BigEndian.Uint64(b.header.Nonce[:]) } -func (b *Block) Bloom() Bloom { return b.header.Bloom } -func (b *Block) Coinbase() common.Address { return b.header.Coinbase } -func (b *Block) Root() common.Hash { return b.header.Root } -func (b *Block) ParentHash() common.Hash { return b.header.ParentHash } -func (b *Block) TxHash() common.Hash { return b.header.TxHash } -func (b *Block) ReceiptHash() common.Hash { return b.header.ReceiptHash } -func (b *Block) UncleHash() common.Hash { return b.header.UncleHash } -func (b *Block) Extra() []byte { return common.CopyBytes(b.header.Extra) } - -func (b *Block) Header() *Header { return CopyHeader(b.header) } - -// Body returns the non-header content of 
the block. -func (b *Block) Body() *Body { return &Body{b.transactions, b.uncles} } - -// Size returns the true RLP encoded storage size of the block, either by encoding -// and returning it, or returning a previsouly cached value. -func (b *Block) Size() common.StorageSize { - if size := b.size.Load(); size != nil { - return size.(common.StorageSize) - } - c := writeCounter(0) - rlp.Encode(&c, b) - b.size.Store(common.StorageSize(c)) - return common.StorageSize(c) -} - -// SanityCheck can be used to prevent that unbounded fields are -// stuffed with junk data to add processing overhead -func (b *Block) SanityCheck() error { - return b.header.SanityCheck() -} - -type writeCounter common.StorageSize - -func (c *writeCounter) Write(b []byte) (int, error) { - *c += writeCounter(len(b)) - return len(b), nil -} - -func CalcUncleHash(uncles []*Header) common.Hash { - if len(uncles) == 0 { - return EmptyUncleHash - } - return rlpHash(uncles) -} - -// WithSeal returns a new block with the data from b but the header replaced with -// the sealed one. -func (b *Block) WithSeal(header *Header) *Block { - cpy := *header - - return &Block{ - header: &cpy, - transactions: b.transactions, - uncles: b.uncles, - } -} - -// WithBody returns a new block with the given transaction and uncle contents. -func (b *Block) WithBody(transactions []*Transaction, uncles []*Header) *Block { - block := &Block{ - header: CopyHeader(b.header), - transactions: make([]*Transaction, len(transactions)), - uncles: make([]*Header, len(uncles)), - } - copy(block.transactions, transactions) - for i := range uncles { - block.uncles[i] = CopyHeader(uncles[i]) - } - return block -} - -// Hash returns the keccak256 hash of b's header. -// The hash is computed on the first call and cached thereafter. 
-func (b *Block) Hash() common.Hash { - if hash := b.hash.Load(); hash != nil { - return hash.(common.Hash) - } - v := b.header.Hash() - b.hash.Store(v) - return v -} - -type Blocks []*Block diff --git a/vendor/github.com/ethereum/go-ethereum/core/types/bloom9.go b/vendor/github.com/ethereum/go-ethereum/core/types/bloom9.go deleted file mode 100644 index d045c9e..0000000 --- a/vendor/github.com/ethereum/go-ethereum/core/types/bloom9.go +++ /dev/null @@ -1,136 +0,0 @@ -// Copyright 2014 The go-ethereum Authors -// This file is part of the go-ethereum library. -// -// The go-ethereum library is free software: you can redistribute it and/or modify -// it under the terms of the GNU Lesser General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// The go-ethereum library is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Lesser General Public License for more details. -// -// You should have received a copy of the GNU Lesser General Public License -// along with the go-ethereum library. If not, see . - -package types - -import ( - "fmt" - "math/big" - - "github.com/ethereum/go-ethereum/common/hexutil" - "github.com/ethereum/go-ethereum/crypto" -) - -type bytesBacked interface { - Bytes() []byte -} - -const ( - // BloomByteLength represents the number of bytes used in a header log bloom. - BloomByteLength = 256 - - // BloomBitLength represents the number of bits used in a header log bloom. - BloomBitLength = 8 * BloomByteLength -) - -// Bloom represents a 2048 bit bloom filter. -type Bloom [BloomByteLength]byte - -// BytesToBloom converts a byte slice to a bloom filter. -// It panics if b is not of suitable size. 
-func BytesToBloom(b []byte) Bloom { - var bloom Bloom - bloom.SetBytes(b) - return bloom -} - -// SetBytes sets the content of b to the given bytes. -// It panics if d is not of suitable size. -func (b *Bloom) SetBytes(d []byte) { - if len(b) < len(d) { - panic(fmt.Sprintf("bloom bytes too big %d %d", len(b), len(d))) - } - copy(b[BloomByteLength-len(d):], d) -} - -// Add adds d to the filter. Future calls of Test(d) will return true. -func (b *Bloom) Add(d *big.Int) { - bin := new(big.Int).SetBytes(b[:]) - bin.Or(bin, bloom9(d.Bytes())) - b.SetBytes(bin.Bytes()) -} - -// Big converts b to a big integer. -func (b Bloom) Big() *big.Int { - return new(big.Int).SetBytes(b[:]) -} - -func (b Bloom) Bytes() []byte { - return b[:] -} - -func (b Bloom) Test(test *big.Int) bool { - return BloomLookup(b, test) -} - -func (b Bloom) TestBytes(test []byte) bool { - return b.Test(new(big.Int).SetBytes(test)) - -} - -// MarshalText encodes b as a hex string with 0x prefix. -func (b Bloom) MarshalText() ([]byte, error) { - return hexutil.Bytes(b[:]).MarshalText() -} - -// UnmarshalText b as a hex string with 0x prefix. 
-func (b *Bloom) UnmarshalText(input []byte) error { - return hexutil.UnmarshalFixedText("Bloom", input, b[:]) -} - -func CreateBloom(receipts Receipts) Bloom { - bin := new(big.Int) - for _, receipt := range receipts { - bin.Or(bin, LogsBloom(receipt.Logs)) - } - - return BytesToBloom(bin.Bytes()) -} - -func LogsBloom(logs []*Log) *big.Int { - bin := new(big.Int) - for _, log := range logs { - bin.Or(bin, bloom9(log.Address.Bytes())) - for _, b := range log.Topics { - bin.Or(bin, bloom9(b[:])) - } - } - - return bin -} - -func bloom9(b []byte) *big.Int { - b = crypto.Keccak256(b) - - r := new(big.Int) - - for i := 0; i < 6; i += 2 { - t := big.NewInt(1) - b := (uint(b[i+1]) + (uint(b[i]) << 8)) & 2047 - r.Or(r, t.Lsh(t, b)) - } - - return r -} - -var Bloom9 = bloom9 - -func BloomLookup(bin Bloom, topic bytesBacked) bool { - bloom := bin.Big() - cmp := bloom9(topic.Bytes()) - - return bloom.And(bloom, cmp).Cmp(cmp) == 0 -} diff --git a/vendor/github.com/ethereum/go-ethereum/core/types/derive_sha.go b/vendor/github.com/ethereum/go-ethereum/core/types/derive_sha.go deleted file mode 100644 index 00c42c5..0000000 --- a/vendor/github.com/ethereum/go-ethereum/core/types/derive_sha.go +++ /dev/null @@ -1,41 +0,0 @@ -// Copyright 2014 The go-ethereum Authors -// This file is part of the go-ethereum library. -// -// The go-ethereum library is free software: you can redistribute it and/or modify -// it under the terms of the GNU Lesser General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// The go-ethereum library is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Lesser General Public License for more details. -// -// You should have received a copy of the GNU Lesser General Public License -// along with the go-ethereum library. 
If not, see . - -package types - -import ( - "bytes" - - "github.com/ethereum/go-ethereum/common" - "github.com/ethereum/go-ethereum/rlp" - "github.com/ethereum/go-ethereum/trie" -) - -type DerivableList interface { - Len() int - GetRlp(i int) []byte -} - -func DeriveSha(list DerivableList) common.Hash { - keybuf := new(bytes.Buffer) - trie := new(trie.Trie) - for i := 0; i < list.Len(); i++ { - keybuf.Reset() - rlp.Encode(keybuf, uint(i)) - trie.Update(keybuf.Bytes(), list.GetRlp(i)) - } - return trie.Hash() -} diff --git a/vendor/github.com/ethereum/go-ethereum/core/types/gen_header_json.go b/vendor/github.com/ethereum/go-ethereum/core/types/gen_header_json.go deleted file mode 100644 index 4212b8d..0000000 --- a/vendor/github.com/ethereum/go-ethereum/core/types/gen_header_json.go +++ /dev/null @@ -1,138 +0,0 @@ -// Code generated by github.com/fjl/gencodec. DO NOT EDIT. - -package types - -import ( - "encoding/json" - "errors" - "math/big" - - "github.com/ethereum/go-ethereum/common" - "github.com/ethereum/go-ethereum/common/hexutil" -) - -var _ = (*headerMarshaling)(nil) - -// MarshalJSON marshals as JSON. 
-func (h Header) MarshalJSON() ([]byte, error) { - type Header struct { - ParentHash common.Hash `json:"parentHash" gencodec:"required"` - UncleHash common.Hash `json:"sha3Uncles" gencodec:"required"` - Coinbase common.Address `json:"miner" gencodec:"required"` - Root common.Hash `json:"stateRoot" gencodec:"required"` - TxHash common.Hash `json:"transactionsRoot" gencodec:"required"` - ReceiptHash common.Hash `json:"receiptsRoot" gencodec:"required"` - Bloom Bloom `json:"logsBloom" gencodec:"required"` - Difficulty *hexutil.Big `json:"difficulty" gencodec:"required"` - Number *hexutil.Big `json:"number" gencodec:"required"` - GasLimit hexutil.Uint64 `json:"gasLimit" gencodec:"required"` - GasUsed hexutil.Uint64 `json:"gasUsed" gencodec:"required"` - Time hexutil.Uint64 `json:"timestamp" gencodec:"required"` - Extra hexutil.Bytes `json:"extraData" gencodec:"required"` - MixDigest common.Hash `json:"mixHash"` - Nonce BlockNonce `json:"nonce"` - Hash common.Hash `json:"hash"` - } - var enc Header - enc.ParentHash = h.ParentHash - enc.UncleHash = h.UncleHash - enc.Coinbase = h.Coinbase - enc.Root = h.Root - enc.TxHash = h.TxHash - enc.ReceiptHash = h.ReceiptHash - enc.Bloom = h.Bloom - enc.Difficulty = (*hexutil.Big)(h.Difficulty) - enc.Number = (*hexutil.Big)(h.Number) - enc.GasLimit = hexutil.Uint64(h.GasLimit) - enc.GasUsed = hexutil.Uint64(h.GasUsed) - enc.Time = hexutil.Uint64(h.Time) - enc.Extra = h.Extra - enc.MixDigest = h.MixDigest - enc.Nonce = h.Nonce - enc.Hash = h.Hash() - return json.Marshal(&enc) -} - -// UnmarshalJSON unmarshals from JSON. 
-func (h *Header) UnmarshalJSON(input []byte) error { - type Header struct { - ParentHash *common.Hash `json:"parentHash" gencodec:"required"` - UncleHash *common.Hash `json:"sha3Uncles" gencodec:"required"` - Coinbase *common.Address `json:"miner" gencodec:"required"` - Root *common.Hash `json:"stateRoot" gencodec:"required"` - TxHash *common.Hash `json:"transactionsRoot" gencodec:"required"` - ReceiptHash *common.Hash `json:"receiptsRoot" gencodec:"required"` - Bloom *Bloom `json:"logsBloom" gencodec:"required"` - Difficulty *hexutil.Big `json:"difficulty" gencodec:"required"` - Number *hexutil.Big `json:"number" gencodec:"required"` - GasLimit *hexutil.Uint64 `json:"gasLimit" gencodec:"required"` - GasUsed *hexutil.Uint64 `json:"gasUsed" gencodec:"required"` - Time *hexutil.Uint64 `json:"timestamp" gencodec:"required"` - Extra *hexutil.Bytes `json:"extraData" gencodec:"required"` - MixDigest *common.Hash `json:"mixHash"` - Nonce *BlockNonce `json:"nonce"` - } - var dec Header - if err := json.Unmarshal(input, &dec); err != nil { - return err - } - if dec.ParentHash == nil { - return errors.New("missing required field 'parentHash' for Header") - } - h.ParentHash = *dec.ParentHash - if dec.UncleHash == nil { - return errors.New("missing required field 'sha3Uncles' for Header") - } - h.UncleHash = *dec.UncleHash - if dec.Coinbase == nil { - return errors.New("missing required field 'miner' for Header") - } - h.Coinbase = *dec.Coinbase - if dec.Root == nil { - return errors.New("missing required field 'stateRoot' for Header") - } - h.Root = *dec.Root - if dec.TxHash == nil { - return errors.New("missing required field 'transactionsRoot' for Header") - } - h.TxHash = *dec.TxHash - if dec.ReceiptHash == nil { - return errors.New("missing required field 'receiptsRoot' for Header") - } - h.ReceiptHash = *dec.ReceiptHash - if dec.Bloom == nil { - return errors.New("missing required field 'logsBloom' for Header") - } - h.Bloom = *dec.Bloom - if dec.Difficulty == nil { - 
return errors.New("missing required field 'difficulty' for Header") - } - h.Difficulty = (*big.Int)(dec.Difficulty) - if dec.Number == nil { - return errors.New("missing required field 'number' for Header") - } - h.Number = (*big.Int)(dec.Number) - if dec.GasLimit == nil { - return errors.New("missing required field 'gasLimit' for Header") - } - h.GasLimit = uint64(*dec.GasLimit) - if dec.GasUsed == nil { - return errors.New("missing required field 'gasUsed' for Header") - } - h.GasUsed = uint64(*dec.GasUsed) - if dec.Time == nil { - return errors.New("missing required field 'timestamp' for Header") - } - h.Time = uint64(*dec.Time) - if dec.Extra == nil { - return errors.New("missing required field 'extraData' for Header") - } - h.Extra = *dec.Extra - if dec.MixDigest != nil { - h.MixDigest = *dec.MixDigest - } - if dec.Nonce != nil { - h.Nonce = *dec.Nonce - } - return nil -} diff --git a/vendor/github.com/ethereum/go-ethereum/core/types/gen_log_json.go b/vendor/github.com/ethereum/go-ethereum/core/types/gen_log_json.go deleted file mode 100644 index 6e94339..0000000 --- a/vendor/github.com/ethereum/go-ethereum/core/types/gen_log_json.go +++ /dev/null @@ -1,92 +0,0 @@ -// Code generated by github.com/fjl/gencodec. DO NOT EDIT. - -package types - -import ( - "encoding/json" - "errors" - - "github.com/ethereum/go-ethereum/common" - "github.com/ethereum/go-ethereum/common/hexutil" -) - -var _ = (*logMarshaling)(nil) - -// MarshalJSON marshals as JSON. 
-func (l Log) MarshalJSON() ([]byte, error) { - type Log struct { - Address common.Address `json:"address" gencodec:"required"` - Topics []common.Hash `json:"topics" gencodec:"required"` - Data hexutil.Bytes `json:"data" gencodec:"required"` - BlockNumber hexutil.Uint64 `json:"blockNumber"` - TxHash common.Hash `json:"transactionHash" gencodec:"required"` - TxIndex hexutil.Uint `json:"transactionIndex" gencodec:"required"` - BlockHash common.Hash `json:"blockHash"` - Index hexutil.Uint `json:"logIndex" gencodec:"required"` - Removed bool `json:"removed"` - } - var enc Log - enc.Address = l.Address - enc.Topics = l.Topics - enc.Data = l.Data - enc.BlockNumber = hexutil.Uint64(l.BlockNumber) - enc.TxHash = l.TxHash - enc.TxIndex = hexutil.Uint(l.TxIndex) - enc.BlockHash = l.BlockHash - enc.Index = hexutil.Uint(l.Index) - enc.Removed = l.Removed - return json.Marshal(&enc) -} - -// UnmarshalJSON unmarshals from JSON. -func (l *Log) UnmarshalJSON(input []byte) error { - type Log struct { - Address *common.Address `json:"address" gencodec:"required"` - Topics []common.Hash `json:"topics" gencodec:"required"` - Data *hexutil.Bytes `json:"data" gencodec:"required"` - BlockNumber *hexutil.Uint64 `json:"blockNumber"` - TxHash *common.Hash `json:"transactionHash" gencodec:"required"` - TxIndex *hexutil.Uint `json:"transactionIndex" gencodec:"required"` - BlockHash *common.Hash `json:"blockHash"` - Index *hexutil.Uint `json:"logIndex" gencodec:"required"` - Removed *bool `json:"removed"` - } - var dec Log - if err := json.Unmarshal(input, &dec); err != nil { - return err - } - if dec.Address == nil { - return errors.New("missing required field 'address' for Log") - } - l.Address = *dec.Address - if dec.Topics == nil { - return errors.New("missing required field 'topics' for Log") - } - l.Topics = dec.Topics - if dec.Data == nil { - return errors.New("missing required field 'data' for Log") - } - l.Data = *dec.Data - if dec.BlockNumber != nil { - l.BlockNumber = 
uint64(*dec.BlockNumber) - } - if dec.TxHash == nil { - return errors.New("missing required field 'transactionHash' for Log") - } - l.TxHash = *dec.TxHash - if dec.TxIndex == nil { - return errors.New("missing required field 'transactionIndex' for Log") - } - l.TxIndex = uint(*dec.TxIndex) - if dec.BlockHash != nil { - l.BlockHash = *dec.BlockHash - } - if dec.Index == nil { - return errors.New("missing required field 'logIndex' for Log") - } - l.Index = uint(*dec.Index) - if dec.Removed != nil { - l.Removed = *dec.Removed - } - return nil -} diff --git a/vendor/github.com/ethereum/go-ethereum/core/types/gen_receipt_json.go b/vendor/github.com/ethereum/go-ethereum/core/types/gen_receipt_json.go deleted file mode 100644 index 790ed65..0000000 --- a/vendor/github.com/ethereum/go-ethereum/core/types/gen_receipt_json.go +++ /dev/null @@ -1,104 +0,0 @@ -// Code generated by github.com/fjl/gencodec. DO NOT EDIT. - -package types - -import ( - "encoding/json" - "errors" - "math/big" - - "github.com/ethereum/go-ethereum/common" - "github.com/ethereum/go-ethereum/common/hexutil" -) - -var _ = (*receiptMarshaling)(nil) - -// MarshalJSON marshals as JSON. 
-func (r Receipt) MarshalJSON() ([]byte, error) { - type Receipt struct { - PostState hexutil.Bytes `json:"root"` - Status hexutil.Uint64 `json:"status"` - CumulativeGasUsed hexutil.Uint64 `json:"cumulativeGasUsed" gencodec:"required"` - Bloom Bloom `json:"logsBloom" gencodec:"required"` - Logs []*Log `json:"logs" gencodec:"required"` - TxHash common.Hash `json:"transactionHash" gencodec:"required"` - ContractAddress common.Address `json:"contractAddress"` - GasUsed hexutil.Uint64 `json:"gasUsed" gencodec:"required"` - BlockHash common.Hash `json:"blockHash,omitempty"` - BlockNumber *hexutil.Big `json:"blockNumber,omitempty"` - TransactionIndex hexutil.Uint `json:"transactionIndex"` - } - var enc Receipt - enc.PostState = r.PostState - enc.Status = hexutil.Uint64(r.Status) - enc.CumulativeGasUsed = hexutil.Uint64(r.CumulativeGasUsed) - enc.Bloom = r.Bloom - enc.Logs = r.Logs - enc.TxHash = r.TxHash - enc.ContractAddress = r.ContractAddress - enc.GasUsed = hexutil.Uint64(r.GasUsed) - enc.BlockHash = r.BlockHash - enc.BlockNumber = (*hexutil.Big)(r.BlockNumber) - enc.TransactionIndex = hexutil.Uint(r.TransactionIndex) - return json.Marshal(&enc) -} - -// UnmarshalJSON unmarshals from JSON. 
-func (r *Receipt) UnmarshalJSON(input []byte) error { - type Receipt struct { - PostState *hexutil.Bytes `json:"root"` - Status *hexutil.Uint64 `json:"status"` - CumulativeGasUsed *hexutil.Uint64 `json:"cumulativeGasUsed" gencodec:"required"` - Bloom *Bloom `json:"logsBloom" gencodec:"required"` - Logs []*Log `json:"logs" gencodec:"required"` - TxHash *common.Hash `json:"transactionHash" gencodec:"required"` - ContractAddress *common.Address `json:"contractAddress"` - GasUsed *hexutil.Uint64 `json:"gasUsed" gencodec:"required"` - BlockHash *common.Hash `json:"blockHash,omitempty"` - BlockNumber *hexutil.Big `json:"blockNumber,omitempty"` - TransactionIndex *hexutil.Uint `json:"transactionIndex"` - } - var dec Receipt - if err := json.Unmarshal(input, &dec); err != nil { - return err - } - if dec.PostState != nil { - r.PostState = *dec.PostState - } - if dec.Status != nil { - r.Status = uint64(*dec.Status) - } - if dec.CumulativeGasUsed == nil { - return errors.New("missing required field 'cumulativeGasUsed' for Receipt") - } - r.CumulativeGasUsed = uint64(*dec.CumulativeGasUsed) - if dec.Bloom == nil { - return errors.New("missing required field 'logsBloom' for Receipt") - } - r.Bloom = *dec.Bloom - if dec.Logs == nil { - return errors.New("missing required field 'logs' for Receipt") - } - r.Logs = dec.Logs - if dec.TxHash == nil { - return errors.New("missing required field 'transactionHash' for Receipt") - } - r.TxHash = *dec.TxHash - if dec.ContractAddress != nil { - r.ContractAddress = *dec.ContractAddress - } - if dec.GasUsed == nil { - return errors.New("missing required field 'gasUsed' for Receipt") - } - r.GasUsed = uint64(*dec.GasUsed) - if dec.BlockHash != nil { - r.BlockHash = *dec.BlockHash - } - if dec.BlockNumber != nil { - r.BlockNumber = (*big.Int)(dec.BlockNumber) - } - if dec.TransactionIndex != nil { - r.TransactionIndex = uint(*dec.TransactionIndex) - } - return nil -} diff --git 
a/vendor/github.com/ethereum/go-ethereum/core/types/gen_tx_json.go b/vendor/github.com/ethereum/go-ethereum/core/types/gen_tx_json.go deleted file mode 100644 index e676058..0000000 --- a/vendor/github.com/ethereum/go-ethereum/core/types/gen_tx_json.go +++ /dev/null @@ -1,101 +0,0 @@ -// Code generated by github.com/fjl/gencodec. DO NOT EDIT. - -package types - -import ( - "encoding/json" - "errors" - "math/big" - - "github.com/ethereum/go-ethereum/common" - "github.com/ethereum/go-ethereum/common/hexutil" -) - -var _ = (*txdataMarshaling)(nil) - -// MarshalJSON marshals as JSON. -func (t txdata) MarshalJSON() ([]byte, error) { - type txdata struct { - AccountNonce hexutil.Uint64 `json:"nonce" gencodec:"required"` - Price *hexutil.Big `json:"gasPrice" gencodec:"required"` - GasLimit hexutil.Uint64 `json:"gas" gencodec:"required"` - Recipient *common.Address `json:"to" rlp:"nil"` - Amount *hexutil.Big `json:"value" gencodec:"required"` - Payload hexutil.Bytes `json:"input" gencodec:"required"` - V *hexutil.Big `json:"v" gencodec:"required"` - R *hexutil.Big `json:"r" gencodec:"required"` - S *hexutil.Big `json:"s" gencodec:"required"` - Hash *common.Hash `json:"hash" rlp:"-"` - } - var enc txdata - enc.AccountNonce = hexutil.Uint64(t.AccountNonce) - enc.Price = (*hexutil.Big)(t.Price) - enc.GasLimit = hexutil.Uint64(t.GasLimit) - enc.Recipient = t.Recipient - enc.Amount = (*hexutil.Big)(t.Amount) - enc.Payload = t.Payload - enc.V = (*hexutil.Big)(t.V) - enc.R = (*hexutil.Big)(t.R) - enc.S = (*hexutil.Big)(t.S) - enc.Hash = t.Hash - return json.Marshal(&enc) -} - -// UnmarshalJSON unmarshals from JSON. 
-func (t *txdata) UnmarshalJSON(input []byte) error { - type txdata struct { - AccountNonce *hexutil.Uint64 `json:"nonce" gencodec:"required"` - Price *hexutil.Big `json:"gasPrice" gencodec:"required"` - GasLimit *hexutil.Uint64 `json:"gas" gencodec:"required"` - Recipient *common.Address `json:"to" rlp:"nil"` - Amount *hexutil.Big `json:"value" gencodec:"required"` - Payload *hexutil.Bytes `json:"input" gencodec:"required"` - V *hexutil.Big `json:"v" gencodec:"required"` - R *hexutil.Big `json:"r" gencodec:"required"` - S *hexutil.Big `json:"s" gencodec:"required"` - Hash *common.Hash `json:"hash" rlp:"-"` - } - var dec txdata - if err := json.Unmarshal(input, &dec); err != nil { - return err - } - if dec.AccountNonce == nil { - return errors.New("missing required field 'nonce' for txdata") - } - t.AccountNonce = uint64(*dec.AccountNonce) - if dec.Price == nil { - return errors.New("missing required field 'gasPrice' for txdata") - } - t.Price = (*big.Int)(dec.Price) - if dec.GasLimit == nil { - return errors.New("missing required field 'gas' for txdata") - } - t.GasLimit = uint64(*dec.GasLimit) - if dec.Recipient != nil { - t.Recipient = dec.Recipient - } - if dec.Amount == nil { - return errors.New("missing required field 'value' for txdata") - } - t.Amount = (*big.Int)(dec.Amount) - if dec.Payload == nil { - return errors.New("missing required field 'input' for txdata") - } - t.Payload = *dec.Payload - if dec.V == nil { - return errors.New("missing required field 'v' for txdata") - } - t.V = (*big.Int)(dec.V) - if dec.R == nil { - return errors.New("missing required field 'r' for txdata") - } - t.R = (*big.Int)(dec.R) - if dec.S == nil { - return errors.New("missing required field 's' for txdata") - } - t.S = (*big.Int)(dec.S) - if dec.Hash != nil { - t.Hash = dec.Hash - } - return nil -} diff --git a/vendor/github.com/ethereum/go-ethereum/core/types/log.go b/vendor/github.com/ethereum/go-ethereum/core/types/log.go deleted file mode 100644 index 006f62b..0000000 
--- a/vendor/github.com/ethereum/go-ethereum/core/types/log.go +++ /dev/null @@ -1,143 +0,0 @@ -// Copyright 2014 The go-ethereum Authors -// This file is part of the go-ethereum library. -// -// The go-ethereum library is free software: you can redistribute it and/or modify -// it under the terms of the GNU Lesser General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// The go-ethereum library is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Lesser General Public License for more details. -// -// You should have received a copy of the GNU Lesser General Public License -// along with the go-ethereum library. If not, see . - -package types - -import ( - "io" - - "github.com/ethereum/go-ethereum/common" - "github.com/ethereum/go-ethereum/common/hexutil" - "github.com/ethereum/go-ethereum/rlp" -) - -//go:generate gencodec -type Log -field-override logMarshaling -out gen_log_json.go - -// Log represents a contract log event. These events are generated by the LOG opcode and -// stored/indexed by the node. -type Log struct { - // Consensus fields: - // address of the contract that generated the event - Address common.Address `json:"address" gencodec:"required"` - // list of topics provided by the contract. - Topics []common.Hash `json:"topics" gencodec:"required"` - // supplied by the contract, usually ABI-encoded - Data []byte `json:"data" gencodec:"required"` - - // Derived fields. These fields are filled in by the node - // but not secured by consensus. 
- // block in which the transaction was included - BlockNumber uint64 `json:"blockNumber"` - // hash of the transaction - TxHash common.Hash `json:"transactionHash" gencodec:"required"` - // index of the transaction in the block - TxIndex uint `json:"transactionIndex" gencodec:"required"` - // hash of the block in which the transaction was included - BlockHash common.Hash `json:"blockHash"` - // index of the log in the block - Index uint `json:"logIndex" gencodec:"required"` - - // The Removed field is true if this log was reverted due to a chain reorganisation. - // You must pay attention to this field if you receive logs through a filter query. - Removed bool `json:"removed"` -} - -type logMarshaling struct { - Data hexutil.Bytes - BlockNumber hexutil.Uint64 - TxIndex hexutil.Uint - Index hexutil.Uint -} - -type rlpLog struct { - Address common.Address - Topics []common.Hash - Data []byte -} - -// rlpStorageLog is the storage encoding of a log. -type rlpStorageLog rlpLog - -// legacyRlpStorageLog is the previous storage encoding of a log including some redundant fields. -type legacyRlpStorageLog struct { - Address common.Address - Topics []common.Hash - Data []byte - BlockNumber uint64 - TxHash common.Hash - TxIndex uint - BlockHash common.Hash - Index uint -} - -// EncodeRLP implements rlp.Encoder. -func (l *Log) EncodeRLP(w io.Writer) error { - return rlp.Encode(w, rlpLog{Address: l.Address, Topics: l.Topics, Data: l.Data}) -} - -// DecodeRLP implements rlp.Decoder. -func (l *Log) DecodeRLP(s *rlp.Stream) error { - var dec rlpLog - err := s.Decode(&dec) - if err == nil { - l.Address, l.Topics, l.Data = dec.Address, dec.Topics, dec.Data - } - return err -} - -// LogForStorage is a wrapper around a Log that flattens and parses the entire content of -// a log including non-consensus fields. -type LogForStorage Log - -// EncodeRLP implements rlp.Encoder. 
-func (l *LogForStorage) EncodeRLP(w io.Writer) error { - return rlp.Encode(w, rlpStorageLog{ - Address: l.Address, - Topics: l.Topics, - Data: l.Data, - }) -} - -// DecodeRLP implements rlp.Decoder. -// -// Note some redundant fields(e.g. block number, tx hash etc) will be assembled later. -func (l *LogForStorage) DecodeRLP(s *rlp.Stream) error { - blob, err := s.Raw() - if err != nil { - return err - } - var dec rlpStorageLog - err = rlp.DecodeBytes(blob, &dec) - if err == nil { - *l = LogForStorage{ - Address: dec.Address, - Topics: dec.Topics, - Data: dec.Data, - } - } else { - // Try to decode log with previous definition. - var dec legacyRlpStorageLog - err = rlp.DecodeBytes(blob, &dec) - if err == nil { - *l = LogForStorage{ - Address: dec.Address, - Topics: dec.Topics, - Data: dec.Data, - } - } - } - return err -} diff --git a/vendor/github.com/ethereum/go-ethereum/core/types/receipt.go b/vendor/github.com/ethereum/go-ethereum/core/types/receipt.go deleted file mode 100644 index a96c752..0000000 --- a/vendor/github.com/ethereum/go-ethereum/core/types/receipt.go +++ /dev/null @@ -1,336 +0,0 @@ -// Copyright 2014 The go-ethereum Authors -// This file is part of the go-ethereum library. -// -// The go-ethereum library is free software: you can redistribute it and/or modify -// it under the terms of the GNU Lesser General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// The go-ethereum library is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Lesser General Public License for more details. -// -// You should have received a copy of the GNU Lesser General Public License -// along with the go-ethereum library. If not, see . 
- -package types - -import ( - "bytes" - "errors" - "fmt" - "io" - "math/big" - "unsafe" - - "github.com/ethereum/go-ethereum/common" - "github.com/ethereum/go-ethereum/common/hexutil" - "github.com/ethereum/go-ethereum/crypto" - "github.com/ethereum/go-ethereum/params" - "github.com/ethereum/go-ethereum/rlp" -) - -//go:generate gencodec -type Receipt -field-override receiptMarshaling -out gen_receipt_json.go - -var ( - receiptStatusFailedRLP = []byte{} - receiptStatusSuccessfulRLP = []byte{0x01} -) - -const ( - // ReceiptStatusFailed is the status code of a transaction if execution failed. - ReceiptStatusFailed = uint64(0) - - // ReceiptStatusSuccessful is the status code of a transaction if execution succeeded. - ReceiptStatusSuccessful = uint64(1) -) - -// Receipt represents the results of a transaction. -type Receipt struct { - // Consensus fields: These fields are defined by the Yellow Paper - PostState []byte `json:"root"` - Status uint64 `json:"status"` - CumulativeGasUsed uint64 `json:"cumulativeGasUsed" gencodec:"required"` - Bloom Bloom `json:"logsBloom" gencodec:"required"` - Logs []*Log `json:"logs" gencodec:"required"` - - // Implementation fields: These fields are added by geth when processing a transaction. - // They are stored in the chain database. - TxHash common.Hash `json:"transactionHash" gencodec:"required"` - ContractAddress common.Address `json:"contractAddress"` - GasUsed uint64 `json:"gasUsed" gencodec:"required"` - - // Inclusion information: These fields provide information about the inclusion of the - // transaction corresponding to this receipt. 
- BlockHash common.Hash `json:"blockHash,omitempty"` - BlockNumber *big.Int `json:"blockNumber,omitempty"` - TransactionIndex uint `json:"transactionIndex"` -} - -type receiptMarshaling struct { - PostState hexutil.Bytes - Status hexutil.Uint64 - CumulativeGasUsed hexutil.Uint64 - GasUsed hexutil.Uint64 - BlockNumber *hexutil.Big - TransactionIndex hexutil.Uint -} - -// receiptRLP is the consensus encoding of a receipt. -type receiptRLP struct { - PostStateOrStatus []byte - CumulativeGasUsed uint64 - Bloom Bloom - Logs []*Log -} - -// storedReceiptRLP is the storage encoding of a receipt. -type storedReceiptRLP struct { - PostStateOrStatus []byte - CumulativeGasUsed uint64 - Logs []*LogForStorage -} - -// v4StoredReceiptRLP is the storage encoding of a receipt used in database version 4. -type v4StoredReceiptRLP struct { - PostStateOrStatus []byte - CumulativeGasUsed uint64 - TxHash common.Hash - ContractAddress common.Address - Logs []*LogForStorage - GasUsed uint64 -} - -// v3StoredReceiptRLP is the original storage encoding of a receipt including some unnecessary fields. -type v3StoredReceiptRLP struct { - PostStateOrStatus []byte - CumulativeGasUsed uint64 - Bloom Bloom - TxHash common.Hash - ContractAddress common.Address - Logs []*LogForStorage - GasUsed uint64 -} - -// NewReceipt creates a barebone transaction receipt, copying the init fields. -func NewReceipt(root []byte, failed bool, cumulativeGasUsed uint64) *Receipt { - r := &Receipt{PostState: common.CopyBytes(root), CumulativeGasUsed: cumulativeGasUsed} - if failed { - r.Status = ReceiptStatusFailed - } else { - r.Status = ReceiptStatusSuccessful - } - return r -} - -// EncodeRLP implements rlp.Encoder, and flattens the consensus fields of a receipt -// into an RLP stream. If no post state is present, byzantium fork is assumed. 
-func (r *Receipt) EncodeRLP(w io.Writer) error { - return rlp.Encode(w, &receiptRLP{r.statusEncoding(), r.CumulativeGasUsed, r.Bloom, r.Logs}) -} - -// DecodeRLP implements rlp.Decoder, and loads the consensus fields of a receipt -// from an RLP stream. -func (r *Receipt) DecodeRLP(s *rlp.Stream) error { - var dec receiptRLP - if err := s.Decode(&dec); err != nil { - return err - } - if err := r.setStatus(dec.PostStateOrStatus); err != nil { - return err - } - r.CumulativeGasUsed, r.Bloom, r.Logs = dec.CumulativeGasUsed, dec.Bloom, dec.Logs - return nil -} - -func (r *Receipt) setStatus(postStateOrStatus []byte) error { - switch { - case bytes.Equal(postStateOrStatus, receiptStatusSuccessfulRLP): - r.Status = ReceiptStatusSuccessful - case bytes.Equal(postStateOrStatus, receiptStatusFailedRLP): - r.Status = ReceiptStatusFailed - case len(postStateOrStatus) == len(common.Hash{}): - r.PostState = postStateOrStatus - default: - return fmt.Errorf("invalid receipt status %x", postStateOrStatus) - } - return nil -} - -func (r *Receipt) statusEncoding() []byte { - if len(r.PostState) == 0 { - if r.Status == ReceiptStatusFailed { - return receiptStatusFailedRLP - } - return receiptStatusSuccessfulRLP - } - return r.PostState -} - -// Size returns the approximate memory used by all internal contents. It is used -// to approximate and limit the memory consumption of various caches. -func (r *Receipt) Size() common.StorageSize { - size := common.StorageSize(unsafe.Sizeof(*r)) + common.StorageSize(len(r.PostState)) - - size += common.StorageSize(len(r.Logs)) * common.StorageSize(unsafe.Sizeof(Log{})) - for _, log := range r.Logs { - size += common.StorageSize(len(log.Topics)*common.HashLength + len(log.Data)) - } - return size -} - -// ReceiptForStorage is a wrapper around a Receipt that flattens and parses the -// entire content of a receipt, as opposed to only the consensus fields originally. 
-type ReceiptForStorage Receipt - -// EncodeRLP implements rlp.Encoder, and flattens all content fields of a receipt -// into an RLP stream. -func (r *ReceiptForStorage) EncodeRLP(w io.Writer) error { - enc := &storedReceiptRLP{ - PostStateOrStatus: (*Receipt)(r).statusEncoding(), - CumulativeGasUsed: r.CumulativeGasUsed, - Logs: make([]*LogForStorage, len(r.Logs)), - } - for i, log := range r.Logs { - enc.Logs[i] = (*LogForStorage)(log) - } - return rlp.Encode(w, enc) -} - -// DecodeRLP implements rlp.Decoder, and loads both consensus and implementation -// fields of a receipt from an RLP stream. -func (r *ReceiptForStorage) DecodeRLP(s *rlp.Stream) error { - // Retrieve the entire receipt blob as we need to try multiple decoders - blob, err := s.Raw() - if err != nil { - return err - } - // Try decoding from the newest format for future proofness, then the older one - // for old nodes that just upgraded. V4 was an intermediate unreleased format so - // we do need to decode it, but it's not common (try last). 
- if err := decodeStoredReceiptRLP(r, blob); err == nil { - return nil - } - if err := decodeV3StoredReceiptRLP(r, blob); err == nil { - return nil - } - return decodeV4StoredReceiptRLP(r, blob) -} - -func decodeStoredReceiptRLP(r *ReceiptForStorage, blob []byte) error { - var stored storedReceiptRLP - if err := rlp.DecodeBytes(blob, &stored); err != nil { - return err - } - if err := (*Receipt)(r).setStatus(stored.PostStateOrStatus); err != nil { - return err - } - r.CumulativeGasUsed = stored.CumulativeGasUsed - r.Logs = make([]*Log, len(stored.Logs)) - for i, log := range stored.Logs { - r.Logs[i] = (*Log)(log) - } - r.Bloom = CreateBloom(Receipts{(*Receipt)(r)}) - - return nil -} - -func decodeV4StoredReceiptRLP(r *ReceiptForStorage, blob []byte) error { - var stored v4StoredReceiptRLP - if err := rlp.DecodeBytes(blob, &stored); err != nil { - return err - } - if err := (*Receipt)(r).setStatus(stored.PostStateOrStatus); err != nil { - return err - } - r.CumulativeGasUsed = stored.CumulativeGasUsed - r.TxHash = stored.TxHash - r.ContractAddress = stored.ContractAddress - r.GasUsed = stored.GasUsed - r.Logs = make([]*Log, len(stored.Logs)) - for i, log := range stored.Logs { - r.Logs[i] = (*Log)(log) - } - r.Bloom = CreateBloom(Receipts{(*Receipt)(r)}) - - return nil -} - -func decodeV3StoredReceiptRLP(r *ReceiptForStorage, blob []byte) error { - var stored v3StoredReceiptRLP - if err := rlp.DecodeBytes(blob, &stored); err != nil { - return err - } - if err := (*Receipt)(r).setStatus(stored.PostStateOrStatus); err != nil { - return err - } - r.CumulativeGasUsed = stored.CumulativeGasUsed - r.Bloom = stored.Bloom - r.TxHash = stored.TxHash - r.ContractAddress = stored.ContractAddress - r.GasUsed = stored.GasUsed - r.Logs = make([]*Log, len(stored.Logs)) - for i, log := range stored.Logs { - r.Logs[i] = (*Log)(log) - } - return nil -} - -// Receipts is a wrapper around a Receipt array to implement DerivableList. 
-type Receipts []*Receipt - -// Len returns the number of receipts in this list. -func (r Receipts) Len() int { return len(r) } - -// GetRlp returns the RLP encoding of one receipt from the list. -func (r Receipts) GetRlp(i int) []byte { - bytes, err := rlp.EncodeToBytes(r[i]) - if err != nil { - panic(err) - } - return bytes -} - -// DeriveFields fills the receipts with their computed fields based on consensus -// data and contextual infos like containing block and transactions. -func (r Receipts) DeriveFields(config *params.ChainConfig, hash common.Hash, number uint64, txs Transactions) error { - signer := MakeSigner(config, new(big.Int).SetUint64(number)) - - logIndex := uint(0) - if len(txs) != len(r) { - return errors.New("transaction and receipt count mismatch") - } - for i := 0; i < len(r); i++ { - // The transaction hash can be retrieved from the transaction itself - r[i].TxHash = txs[i].Hash() - - // block location fields - r[i].BlockHash = hash - r[i].BlockNumber = new(big.Int).SetUint64(number) - r[i].TransactionIndex = uint(i) - - // The contract address can be derived from the transaction itself - if txs[i].To() == nil { - // Deriving the signer is expensive, only do if it's actually needed - from, _ := Sender(signer, txs[i]) - r[i].ContractAddress = crypto.CreateAddress(from, txs[i].Nonce()) - } - // The used gas can be calculated based on previous r - if i == 0 { - r[i].GasUsed = r[i].CumulativeGasUsed - } else { - r[i].GasUsed = r[i].CumulativeGasUsed - r[i-1].CumulativeGasUsed - } - // The derived log fields can simply be set from the block and transaction - for j := 0; j < len(r[i].Logs); j++ { - r[i].Logs[j].BlockNumber = number - r[i].Logs[j].BlockHash = hash - r[i].Logs[j].TxHash = r[i].TxHash - r[i].Logs[j].TxIndex = uint(i) - r[i].Logs[j].Index = logIndex - logIndex++ - } - } - return nil -} diff --git a/vendor/github.com/ethereum/go-ethereum/core/types/transaction.go b/vendor/github.com/ethereum/go-ethereum/core/types/transaction.go deleted 
file mode 100644 index 3eb8df0..0000000 --- a/vendor/github.com/ethereum/go-ethereum/core/types/transaction.go +++ /dev/null @@ -1,419 +0,0 @@ -// Copyright 2014 The go-ethereum Authors -// This file is part of the go-ethereum library. -// -// The go-ethereum library is free software: you can redistribute it and/or modify -// it under the terms of the GNU Lesser General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// The go-ethereum library is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Lesser General Public License for more details. -// -// You should have received a copy of the GNU Lesser General Public License -// along with the go-ethereum library. If not, see . - -package types - -import ( - "container/heap" - "errors" - "io" - "math/big" - "sync/atomic" - - "github.com/ethereum/go-ethereum/common" - "github.com/ethereum/go-ethereum/common/hexutil" - "github.com/ethereum/go-ethereum/crypto" - "github.com/ethereum/go-ethereum/rlp" -) - -//go:generate gencodec -type txdata -field-override txdataMarshaling -out gen_tx_json.go - -var ( - ErrInvalidSig = errors.New("invalid transaction v, r, s values") -) - -type Transaction struct { - data txdata - // caches - hash atomic.Value - size atomic.Value - from atomic.Value -} - -type txdata struct { - AccountNonce uint64 `json:"nonce" gencodec:"required"` - Price *big.Int `json:"gasPrice" gencodec:"required"` - GasLimit uint64 `json:"gas" gencodec:"required"` - Recipient *common.Address `json:"to" rlp:"nil"` // nil means contract creation - Amount *big.Int `json:"value" gencodec:"required"` - Payload []byte `json:"input" gencodec:"required"` - - // Signature values - V *big.Int `json:"v" gencodec:"required"` - R *big.Int `json:"r" gencodec:"required"` - S *big.Int `json:"s" 
gencodec:"required"` - - // This is only used when marshaling to JSON. - Hash *common.Hash `json:"hash" rlp:"-"` -} - -type txdataMarshaling struct { - AccountNonce hexutil.Uint64 - Price *hexutil.Big - GasLimit hexutil.Uint64 - Amount *hexutil.Big - Payload hexutil.Bytes - V *hexutil.Big - R *hexutil.Big - S *hexutil.Big -} - -func NewTransaction(nonce uint64, to common.Address, amount *big.Int, gasLimit uint64, gasPrice *big.Int, data []byte) *Transaction { - return newTransaction(nonce, &to, amount, gasLimit, gasPrice, data) -} - -func NewContractCreation(nonce uint64, amount *big.Int, gasLimit uint64, gasPrice *big.Int, data []byte) *Transaction { - return newTransaction(nonce, nil, amount, gasLimit, gasPrice, data) -} - -func newTransaction(nonce uint64, to *common.Address, amount *big.Int, gasLimit uint64, gasPrice *big.Int, data []byte) *Transaction { - if len(data) > 0 { - data = common.CopyBytes(data) - } - d := txdata{ - AccountNonce: nonce, - Recipient: to, - Payload: data, - Amount: new(big.Int), - GasLimit: gasLimit, - Price: new(big.Int), - V: new(big.Int), - R: new(big.Int), - S: new(big.Int), - } - if amount != nil { - d.Amount.Set(amount) - } - if gasPrice != nil { - d.Price.Set(gasPrice) - } - - return &Transaction{data: d} -} - -// ChainId returns which chain id this transaction was signed for (if at all) -func (tx *Transaction) ChainId() *big.Int { - return deriveChainId(tx.data.V) -} - -// Protected returns whether the transaction is protected from replay protection. 
-func (tx *Transaction) Protected() bool { - return isProtectedV(tx.data.V) -} - -func isProtectedV(V *big.Int) bool { - if V.BitLen() <= 8 { - v := V.Uint64() - return v != 27 && v != 28 - } - // anything not 27 or 28 is considered protected - return true -} - -// EncodeRLP implements rlp.Encoder -func (tx *Transaction) EncodeRLP(w io.Writer) error { - return rlp.Encode(w, &tx.data) -} - -// DecodeRLP implements rlp.Decoder -func (tx *Transaction) DecodeRLP(s *rlp.Stream) error { - _, size, _ := s.Kind() - err := s.Decode(&tx.data) - if err == nil { - tx.size.Store(common.StorageSize(rlp.ListSize(size))) - } - - return err -} - -// MarshalJSON encodes the web3 RPC transaction format. -func (tx *Transaction) MarshalJSON() ([]byte, error) { - hash := tx.Hash() - data := tx.data - data.Hash = &hash - return data.MarshalJSON() -} - -// UnmarshalJSON decodes the web3 RPC transaction format. -func (tx *Transaction) UnmarshalJSON(input []byte) error { - var dec txdata - if err := dec.UnmarshalJSON(input); err != nil { - return err - } - - withSignature := dec.V.Sign() != 0 || dec.R.Sign() != 0 || dec.S.Sign() != 0 - if withSignature { - var V byte - if isProtectedV(dec.V) { - chainID := deriveChainId(dec.V).Uint64() - V = byte(dec.V.Uint64() - 35 - 2*chainID) - } else { - V = byte(dec.V.Uint64() - 27) - } - if !crypto.ValidateSignatureValues(V, dec.R, dec.S, false) { - return ErrInvalidSig - } - } - - *tx = Transaction{data: dec} - return nil -} - -func (tx *Transaction) Data() []byte { return common.CopyBytes(tx.data.Payload) } -func (tx *Transaction) Gas() uint64 { return tx.data.GasLimit } -func (tx *Transaction) GasPrice() *big.Int { return new(big.Int).Set(tx.data.Price) } -func (tx *Transaction) Value() *big.Int { return new(big.Int).Set(tx.data.Amount) } -func (tx *Transaction) Nonce() uint64 { return tx.data.AccountNonce } -func (tx *Transaction) CheckNonce() bool { return true } - -// To returns the recipient address of the transaction. 
-// It returns nil if the transaction is a contract creation. -func (tx *Transaction) To() *common.Address { - if tx.data.Recipient == nil { - return nil - } - to := *tx.data.Recipient - return &to -} - -// Hash hashes the RLP encoding of tx. -// It uniquely identifies the transaction. -func (tx *Transaction) Hash() common.Hash { - if hash := tx.hash.Load(); hash != nil { - return hash.(common.Hash) - } - v := rlpHash(tx) - tx.hash.Store(v) - return v -} - -// Size returns the true RLP encoded storage size of the transaction, either by -// encoding and returning it, or returning a previsouly cached value. -func (tx *Transaction) Size() common.StorageSize { - if size := tx.size.Load(); size != nil { - return size.(common.StorageSize) - } - c := writeCounter(0) - rlp.Encode(&c, &tx.data) - tx.size.Store(common.StorageSize(c)) - return common.StorageSize(c) -} - -// AsMessage returns the transaction as a core.Message. -// -// AsMessage requires a signer to derive the sender. -// -// XXX Rename message to something less arbitrary? -func (tx *Transaction) AsMessage(s Signer) (Message, error) { - msg := Message{ - nonce: tx.data.AccountNonce, - gasLimit: tx.data.GasLimit, - gasPrice: new(big.Int).Set(tx.data.Price), - to: tx.data.Recipient, - amount: tx.data.Amount, - data: tx.data.Payload, - checkNonce: true, - } - - var err error - msg.from, err = Sender(s, tx) - return msg, err -} - -// WithSignature returns a new transaction with the given signature. -// This signature needs to be in the [R || S || V] format where V is 0 or 1. -func (tx *Transaction) WithSignature(signer Signer, sig []byte) (*Transaction, error) { - r, s, v, err := signer.SignatureValues(tx, sig) - if err != nil { - return nil, err - } - cpy := &Transaction{data: tx.data} - cpy.data.R, cpy.data.S, cpy.data.V = r, s, v - return cpy, nil -} - -// Cost returns amount + gasprice * gaslimit. 
-func (tx *Transaction) Cost() *big.Int { - total := new(big.Int).Mul(tx.data.Price, new(big.Int).SetUint64(tx.data.GasLimit)) - total.Add(total, tx.data.Amount) - return total -} - -// RawSignatureValues returns the V, R, S signature values of the transaction. -// The return values should not be modified by the caller. -func (tx *Transaction) RawSignatureValues() (v, r, s *big.Int) { - return tx.data.V, tx.data.R, tx.data.S -} - -// Transactions is a Transaction slice type for basic sorting. -type Transactions []*Transaction - -// Len returns the length of s. -func (s Transactions) Len() int { return len(s) } - -// Swap swaps the i'th and the j'th element in s. -func (s Transactions) Swap(i, j int) { s[i], s[j] = s[j], s[i] } - -// GetRlp implements Rlpable and returns the i'th element of s in rlp. -func (s Transactions) GetRlp(i int) []byte { - enc, _ := rlp.EncodeToBytes(s[i]) - return enc -} - -// TxDifference returns a new set which is the difference between a and b. -func TxDifference(a, b Transactions) Transactions { - keep := make(Transactions, 0, len(a)) - - remove := make(map[common.Hash]struct{}) - for _, tx := range b { - remove[tx.Hash()] = struct{}{} - } - - for _, tx := range a { - if _, ok := remove[tx.Hash()]; !ok { - keep = append(keep, tx) - } - } - - return keep -} - -// TxByNonce implements the sort interface to allow sorting a list of transactions -// by their nonces. This is usually only useful for sorting transactions from a -// single account, otherwise a nonce comparison doesn't make much sense. -type TxByNonce Transactions - -func (s TxByNonce) Len() int { return len(s) } -func (s TxByNonce) Less(i, j int) bool { return s[i].data.AccountNonce < s[j].data.AccountNonce } -func (s TxByNonce) Swap(i, j int) { s[i], s[j] = s[j], s[i] } - -// TxByPrice implements both the sort and the heap interface, making it useful -// for all at once sorting as well as individually adding and removing elements. 
-type TxByPrice Transactions - -func (s TxByPrice) Len() int { return len(s) } -func (s TxByPrice) Less(i, j int) bool { return s[i].data.Price.Cmp(s[j].data.Price) > 0 } -func (s TxByPrice) Swap(i, j int) { s[i], s[j] = s[j], s[i] } - -func (s *TxByPrice) Push(x interface{}) { - *s = append(*s, x.(*Transaction)) -} - -func (s *TxByPrice) Pop() interface{} { - old := *s - n := len(old) - x := old[n-1] - *s = old[0 : n-1] - return x -} - -// TransactionsByPriceAndNonce represents a set of transactions that can return -// transactions in a profit-maximizing sorted order, while supporting removing -// entire batches of transactions for non-executable accounts. -type TransactionsByPriceAndNonce struct { - txs map[common.Address]Transactions // Per account nonce-sorted list of transactions - heads TxByPrice // Next transaction for each unique account (price heap) - signer Signer // Signer for the set of transactions -} - -// NewTransactionsByPriceAndNonce creates a transaction set that can retrieve -// price sorted transactions in a nonce-honouring way. -// -// Note, the input map is reowned so the caller should not interact any more with -// if after providing it to the constructor. -func NewTransactionsByPriceAndNonce(signer Signer, txs map[common.Address]Transactions) *TransactionsByPriceAndNonce { - // Initialize a price based heap with the head transactions - heads := make(TxByPrice, 0, len(txs)) - for from, accTxs := range txs { - heads = append(heads, accTxs[0]) - // Ensure the sender address is from the signer - acc, _ := Sender(signer, accTxs[0]) - txs[acc] = accTxs[1:] - if from != acc { - delete(txs, from) - } - } - heap.Init(&heads) - - // Assemble and return the transaction set - return &TransactionsByPriceAndNonce{ - txs: txs, - heads: heads, - signer: signer, - } -} - -// Peek returns the next transaction by price. 
-func (t *TransactionsByPriceAndNonce) Peek() *Transaction { - if len(t.heads) == 0 { - return nil - } - return t.heads[0] -} - -// Shift replaces the current best head with the next one from the same account. -func (t *TransactionsByPriceAndNonce) Shift() { - acc, _ := Sender(t.signer, t.heads[0]) - if txs, ok := t.txs[acc]; ok && len(txs) > 0 { - t.heads[0], t.txs[acc] = txs[0], txs[1:] - heap.Fix(&t.heads, 0) - } else { - heap.Pop(&t.heads) - } -} - -// Pop removes the best transaction, *not* replacing it with the next one from -// the same account. This should be used when a transaction cannot be executed -// and hence all subsequent ones should be discarded from the same account. -func (t *TransactionsByPriceAndNonce) Pop() { - heap.Pop(&t.heads) -} - -// Message is a fully derived transaction and implements core.Message -// -// NOTE: In a future PR this will be removed. -type Message struct { - to *common.Address - from common.Address - nonce uint64 - amount *big.Int - gasLimit uint64 - gasPrice *big.Int - data []byte - checkNonce bool -} - -func NewMessage(from common.Address, to *common.Address, nonce uint64, amount *big.Int, gasLimit uint64, gasPrice *big.Int, data []byte, checkNonce bool) Message { - return Message{ - from: from, - to: to, - nonce: nonce, - amount: amount, - gasLimit: gasLimit, - gasPrice: gasPrice, - data: data, - checkNonce: checkNonce, - } -} - -func (m Message) From() common.Address { return m.from } -func (m Message) To() *common.Address { return m.to } -func (m Message) GasPrice() *big.Int { return m.gasPrice } -func (m Message) Value() *big.Int { return m.amount } -func (m Message) Gas() uint64 { return m.gasLimit } -func (m Message) Nonce() uint64 { return m.nonce } -func (m Message) Data() []byte { return m.data } -func (m Message) CheckNonce() bool { return m.checkNonce } diff --git a/vendor/github.com/ethereum/go-ethereum/core/types/transaction_signing.go 
b/vendor/github.com/ethereum/go-ethereum/core/types/transaction_signing.go deleted file mode 100644 index 842fedb..0000000 --- a/vendor/github.com/ethereum/go-ethereum/core/types/transaction_signing.go +++ /dev/null @@ -1,260 +0,0 @@ -// Copyright 2016 The go-ethereum Authors -// This file is part of the go-ethereum library. -// -// The go-ethereum library is free software: you can redistribute it and/or modify -// it under the terms of the GNU Lesser General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// The go-ethereum library is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Lesser General Public License for more details. -// -// You should have received a copy of the GNU Lesser General Public License -// along with the go-ethereum library. If not, see . - -package types - -import ( - "crypto/ecdsa" - "errors" - "fmt" - "math/big" - - "github.com/ethereum/go-ethereum/common" - "github.com/ethereum/go-ethereum/crypto" - "github.com/ethereum/go-ethereum/params" -) - -var ( - ErrInvalidChainId = errors.New("invalid chain id for signer") -) - -// sigCache is used to cache the derived sender and contains -// the signer used to derive it. -type sigCache struct { - signer Signer - from common.Address -} - -// MakeSigner returns a Signer based on the given chain config and block number. 
-func MakeSigner(config *params.ChainConfig, blockNumber *big.Int) Signer { - var signer Signer - switch { - case config.IsEIP155(blockNumber): - signer = NewEIP155Signer(config.ChainID) - case config.IsHomestead(blockNumber): - signer = HomesteadSigner{} - default: - signer = FrontierSigner{} - } - return signer -} - -// SignTx signs the transaction using the given signer and private key -func SignTx(tx *Transaction, s Signer, prv *ecdsa.PrivateKey) (*Transaction, error) { - h := s.Hash(tx) - sig, err := crypto.Sign(h[:], prv) - if err != nil { - return nil, err - } - return tx.WithSignature(s, sig) -} - -// Sender returns the address derived from the signature (V, R, S) using secp256k1 -// elliptic curve and an error if it failed deriving or upon an incorrect -// signature. -// -// Sender may cache the address, allowing it to be used regardless of -// signing method. The cache is invalidated if the cached signer does -// not match the signer used in the current call. -func Sender(signer Signer, tx *Transaction) (common.Address, error) { - if sc := tx.from.Load(); sc != nil { - sigCache := sc.(sigCache) - // If the signer used to derive from in a previous - // call is not the same as used current, invalidate - // the cache. - if sigCache.signer.Equal(signer) { - return sigCache.from, nil - } - } - - addr, err := signer.Sender(tx) - if err != nil { - return common.Address{}, err - } - tx.from.Store(sigCache{signer: signer, from: addr}) - return addr, nil -} - -// Signer encapsulates transaction signature handling. Note that this interface is not a -// stable API and may change at any time to accommodate new protocol rules. -type Signer interface { - // Sender returns the sender address of the transaction. - Sender(tx *Transaction) (common.Address, error) - // SignatureValues returns the raw R, S, V values corresponding to the - // given signature. - SignatureValues(tx *Transaction, sig []byte) (r, s, v *big.Int, err error) - // Hash returns the hash to be signed. 
- Hash(tx *Transaction) common.Hash - // Equal returns true if the given signer is the same as the receiver. - Equal(Signer) bool -} - -// EIP155Transaction implements Signer using the EIP155 rules. -type EIP155Signer struct { - chainId, chainIdMul *big.Int -} - -func NewEIP155Signer(chainId *big.Int) EIP155Signer { - if chainId == nil { - chainId = new(big.Int) - } - return EIP155Signer{ - chainId: chainId, - chainIdMul: new(big.Int).Mul(chainId, big.NewInt(2)), - } -} - -func (s EIP155Signer) Equal(s2 Signer) bool { - eip155, ok := s2.(EIP155Signer) - return ok && eip155.chainId.Cmp(s.chainId) == 0 -} - -var big8 = big.NewInt(8) - -func (s EIP155Signer) Sender(tx *Transaction) (common.Address, error) { - if !tx.Protected() { - return HomesteadSigner{}.Sender(tx) - } - if tx.ChainId().Cmp(s.chainId) != 0 { - return common.Address{}, ErrInvalidChainId - } - V := new(big.Int).Sub(tx.data.V, s.chainIdMul) - V.Sub(V, big8) - return recoverPlain(s.Hash(tx), tx.data.R, tx.data.S, V, true) -} - -// SignatureValues returns signature values. This signature -// needs to be in the [R || S || V] format where V is 0 or 1. -func (s EIP155Signer) SignatureValues(tx *Transaction, sig []byte) (R, S, V *big.Int, err error) { - R, S, V, err = HomesteadSigner{}.SignatureValues(tx, sig) - if err != nil { - return nil, nil, nil, err - } - if s.chainId.Sign() != 0 { - V = big.NewInt(int64(sig[64] + 35)) - V.Add(V, s.chainIdMul) - } - return R, S, V, nil -} - -// Hash returns the hash to be signed by the sender. -// It does not uniquely identify the transaction. -func (s EIP155Signer) Hash(tx *Transaction) common.Hash { - return rlpHash([]interface{}{ - tx.data.AccountNonce, - tx.data.Price, - tx.data.GasLimit, - tx.data.Recipient, - tx.data.Amount, - tx.data.Payload, - s.chainId, uint(0), uint(0), - }) -} - -// HomesteadTransaction implements TransactionInterface using the -// homestead rules. 
-type HomesteadSigner struct{ FrontierSigner } - -func (s HomesteadSigner) Equal(s2 Signer) bool { - _, ok := s2.(HomesteadSigner) - return ok -} - -// SignatureValues returns signature values. This signature -// needs to be in the [R || S || V] format where V is 0 or 1. -func (hs HomesteadSigner) SignatureValues(tx *Transaction, sig []byte) (r, s, v *big.Int, err error) { - return hs.FrontierSigner.SignatureValues(tx, sig) -} - -func (hs HomesteadSigner) Sender(tx *Transaction) (common.Address, error) { - return recoverPlain(hs.Hash(tx), tx.data.R, tx.data.S, tx.data.V, true) -} - -type FrontierSigner struct{} - -func (s FrontierSigner) Equal(s2 Signer) bool { - _, ok := s2.(FrontierSigner) - return ok -} - -// SignatureValues returns signature values. This signature -// needs to be in the [R || S || V] format where V is 0 or 1. -func (fs FrontierSigner) SignatureValues(tx *Transaction, sig []byte) (r, s, v *big.Int, err error) { - if len(sig) != crypto.SignatureLength { - panic(fmt.Sprintf("wrong size for signature: got %d, want %d", len(sig), crypto.SignatureLength)) - } - r = new(big.Int).SetBytes(sig[:32]) - s = new(big.Int).SetBytes(sig[32:64]) - v = new(big.Int).SetBytes([]byte{sig[64] + 27}) - return r, s, v, nil -} - -// Hash returns the hash to be signed by the sender. -// It does not uniquely identify the transaction. 
-func (fs FrontierSigner) Hash(tx *Transaction) common.Hash { - return rlpHash([]interface{}{ - tx.data.AccountNonce, - tx.data.Price, - tx.data.GasLimit, - tx.data.Recipient, - tx.data.Amount, - tx.data.Payload, - }) -} - -func (fs FrontierSigner) Sender(tx *Transaction) (common.Address, error) { - return recoverPlain(fs.Hash(tx), tx.data.R, tx.data.S, tx.data.V, false) -} - -func recoverPlain(sighash common.Hash, R, S, Vb *big.Int, homestead bool) (common.Address, error) { - if Vb.BitLen() > 8 { - return common.Address{}, ErrInvalidSig - } - V := byte(Vb.Uint64() - 27) - if !crypto.ValidateSignatureValues(V, R, S, homestead) { - return common.Address{}, ErrInvalidSig - } - // encode the signature in uncompressed format - r, s := R.Bytes(), S.Bytes() - sig := make([]byte, crypto.SignatureLength) - copy(sig[32-len(r):32], r) - copy(sig[64-len(s):64], s) - sig[64] = V - // recover the public key from the signature - pub, err := crypto.Ecrecover(sighash[:], sig) - if err != nil { - return common.Address{}, err - } - if len(pub) == 0 || pub[0] != 4 { - return common.Address{}, errors.New("invalid public key") - } - var addr common.Address - copy(addr[:], crypto.Keccak256(pub[1:])[12:]) - return addr, nil -} - -// deriveChainId derives the chain id from the given v parameter -func deriveChainId(v *big.Int) *big.Int { - if v.BitLen() <= 64 { - v := v.Uint64() - if v == 27 || v == 28 { - return new(big.Int) - } - return new(big.Int).SetUint64((v - 35) / 2) - } - v = new(big.Int).Sub(v, big.NewInt(35)) - return v.Div(v, big.NewInt(2)) -} diff --git a/vendor/github.com/ethereum/go-ethereum/ethdb/batch.go b/vendor/github.com/ethereum/go-ethereum/ethdb/batch.go deleted file mode 100644 index e261415..0000000 --- a/vendor/github.com/ethereum/go-ethereum/ethdb/batch.go +++ /dev/null @@ -1,46 +0,0 @@ -// Copyright 2018 The go-ethereum Authors -// This file is part of the go-ethereum library. 
-// -// The go-ethereum library is free software: you can redistribute it and/or modify -// it under the terms of the GNU Lesser General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// The go-ethereum library is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Lesser General Public License for more details. -// -// You should have received a copy of the GNU Lesser General Public License -// along with the go-ethereum library. If not, see . - -package ethdb - -// IdealBatchSize defines the size of the data batches should ideally add in one -// write. -const IdealBatchSize = 100 * 1024 - -// Batch is a write-only database that commits changes to its host database -// when Write is called. A batch cannot be used concurrently. -type Batch interface { - KeyValueWriter - - // ValueSize retrieves the amount of data queued up for writing. - ValueSize() int - - // Write flushes any accumulated data to disk. - Write() error - - // Reset resets the batch for reuse. - Reset() - - // Replay replays the batch contents. - Replay(w KeyValueWriter) error -} - -// Batcher wraps the NewBatch method of a backing data store. -type Batcher interface { - // NewBatch creates a write-only database that buffers changes to its host db - // until a final write is called. - NewBatch() Batch -} diff --git a/vendor/github.com/ethereum/go-ethereum/ethdb/database.go b/vendor/github.com/ethereum/go-ethereum/ethdb/database.go deleted file mode 100644 index 0dc1462..0000000 --- a/vendor/github.com/ethereum/go-ethereum/ethdb/database.go +++ /dev/null @@ -1,131 +0,0 @@ -// Copyright 2014 The go-ethereum Authors -// This file is part of the go-ethereum library. 
-// -// The go-ethereum library is free software: you can redistribute it and/or modify -// it under the terms of the GNU Lesser General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// The go-ethereum library is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Lesser General Public License for more details. -// -// You should have received a copy of the GNU Lesser General Public License -// along with the go-ethereum library. If not, see . - -// Package ethdb defines the interfaces for an Ethereum data store. -package ethdb - -import "io" - -// KeyValueReader wraps the Has and Get method of a backing data store. -type KeyValueReader interface { - // Has retrieves if a key is present in the key-value data store. - Has(key []byte) (bool, error) - - // Get retrieves the given key if it's present in the key-value data store. - Get(key []byte) ([]byte, error) -} - -// KeyValueWriter wraps the Put method of a backing data store. -type KeyValueWriter interface { - // Put inserts the given value into the key-value data store. - Put(key []byte, value []byte) error - - // Delete removes the key from the key-value data store. - Delete(key []byte) error -} - -// Stater wraps the Stat method of a backing data store. -type Stater interface { - // Stat returns a particular internal stat of the database. - Stat(property string) (string, error) -} - -// Compacter wraps the Compact method of a backing data store. -type Compacter interface { - // Compact flattens the underlying data store for the given key range. In essence, - // deleted and overwritten versions are discarded, and the data is rearranged to - // reduce the cost of operations needed to access them. 
- // - // A nil start is treated as a key before all keys in the data store; a nil limit - // is treated as a key after all keys in the data store. If both is nil then it - // will compact entire data store. - Compact(start []byte, limit []byte) error -} - -// KeyValueStore contains all the methods required to allow handling different -// key-value data stores backing the high level database. -type KeyValueStore interface { - KeyValueReader - KeyValueWriter - Batcher - Iteratee - Stater - Compacter - io.Closer -} - -// AncientReader contains the methods required to read from immutable ancient data. -type AncientReader interface { - // HasAncient returns an indicator whether the specified data exists in the - // ancient store. - HasAncient(kind string, number uint64) (bool, error) - - // Ancient retrieves an ancient binary blob from the append-only immutable files. - Ancient(kind string, number uint64) ([]byte, error) - - // Ancients returns the ancient item numbers in the ancient store. - Ancients() (uint64, error) - - // AncientSize returns the ancient size of the specified category. - AncientSize(kind string) (uint64, error) -} - -// AncientWriter contains the methods required to write to immutable ancient data. -type AncientWriter interface { - // AppendAncient injects all binary blobs belong to block at the end of the - // append-only immutable table files. - AppendAncient(number uint64, hash, header, body, receipt, td []byte) error - - // TruncateAncients discards all but the first n ancient data from the ancient store. - TruncateAncients(n uint64) error - - // Sync flushes all in-memory ancient store data to disk. - Sync() error -} - -// Reader contains the methods required to read data from both key-value as well as -// immutable ancient data. -type Reader interface { - KeyValueReader - AncientReader -} - -// Writer contains the methods required to write data to both key-value as well as -// immutable ancient data. 
-type Writer interface { - KeyValueWriter - AncientWriter -} - -// AncientStore contains all the methods required to allow handling different -// ancient data stores backing immutable chain data store. -type AncientStore interface { - AncientReader - AncientWriter - io.Closer -} - -// Database contains all the methods required by the high level database to not -// only access the key-value data store but also the chain freezer. -type Database interface { - Reader - Writer - Batcher - Iteratee - Stater - Compacter - io.Closer -} diff --git a/vendor/github.com/ethereum/go-ethereum/ethdb/iterator.go b/vendor/github.com/ethereum/go-ethereum/ethdb/iterator.go deleted file mode 100644 index 419e9bd..0000000 --- a/vendor/github.com/ethereum/go-ethereum/ethdb/iterator.go +++ /dev/null @@ -1,66 +0,0 @@ -// Copyright 2018 The go-ethereum Authors -// This file is part of the go-ethereum library. -// -// The go-ethereum library is free software: you can redistribute it and/or modify -// it under the terms of the GNU Lesser General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// The go-ethereum library is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Lesser General Public License for more details. -// -// You should have received a copy of the GNU Lesser General Public License -// along with the go-ethereum library. If not, see . - -package ethdb - -// Iterator iterates over a database's key/value pairs in ascending key order. -// -// When it encounters an error any seek will return false and will yield no key/ -// value pairs. The error can be queried by calling the Error method. Calling -// Release is still necessary. -// -// An iterator must be released after use, but it is not necessary to read an -// iterator until exhaustion. 
An iterator is not safe for concurrent use, but it -// is safe to use multiple iterators concurrently. -type Iterator interface { - // Next moves the iterator to the next key/value pair. It returns whether the - // iterator is exhausted. - Next() bool - - // Error returns any accumulated error. Exhausting all the key/value pairs - // is not considered to be an error. - Error() error - - // Key returns the key of the current key/value pair, or nil if done. The caller - // should not modify the contents of the returned slice, and its contents may - // change on the next call to Next. - Key() []byte - - // Value returns the value of the current key/value pair, or nil if done. The - // caller should not modify the contents of the returned slice, and its contents - // may change on the next call to Next. - Value() []byte - - // Release releases associated resources. Release should always succeed and can - // be called multiple times without causing error. - Release() -} - -// Iteratee wraps the NewIterator methods of a backing data store. -type Iteratee interface { - // NewIterator creates a binary-alphabetical iterator over the entire keyspace - // contained within the key-value database. - NewIterator() Iterator - - // NewIteratorWithStart creates a binary-alphabetical iterator over a subset of - // database content starting at a particular initial key (or after, if it does - // not exist). - NewIteratorWithStart(start []byte) Iterator - - // NewIteratorWithPrefix creates a binary-alphabetical iterator over a subset - // of database content with a particular key prefix. 
- NewIteratorWithPrefix(prefix []byte) Iterator -} diff --git a/vendor/github.com/ethereum/go-ethereum/fuzzbuzz.yaml b/vendor/github.com/ethereum/go-ethereum/fuzzbuzz.yaml deleted file mode 100644 index 2a4f0c2..0000000 --- a/vendor/github.com/ethereum/go-ethereum/fuzzbuzz.yaml +++ /dev/null @@ -1,44 +0,0 @@ -# bmt keystore rlp trie whisperv6 - -base: ubuntu:16.04 -targets: - - name: rlp - language: go - version: "1.13" - corpus: ./fuzzers/rlp/corpus - harness: - function: Fuzz - package: github.com/ethereum/go-ethereum/tests/fuzzers/rlp - checkout: github.com/ethereum/go-ethereum/ - - name: keystore - language: go - version: "1.13" - corpus: ./fuzzers/keystore/corpus - harness: - function: Fuzz - package: github.com/ethereum/go-ethereum/tests/fuzzers/keystore - checkout: github.com/ethereum/go-ethereum/ - - name: trie - language: go - version: "1.13" - corpus: ./fuzzers/trie/corpus - harness: - function: Fuzz - package: github.com/ethereum/go-ethereum/tests/fuzzers/trie - checkout: github.com/ethereum/go-ethereum/ - - name: txfetcher - language: go - version: "1.13" - corpus: ./fuzzers/txfetcher/corpus - harness: - function: Fuzz - package: github.com/ethereum/go-ethereum/tests/fuzzers/txfetcher - checkout: github.com/ethereum/go-ethereum/ - - name: whisperv6 - language: go - version: "1.13" - corpus: ./fuzzers/whisperv6/corpus - harness: - function: Fuzz - package: github.com/ethereum/go-ethereum/tests/fuzzers/whisperv6 - checkout: github.com/ethereum/go-ethereum/ diff --git a/vendor/github.com/ethereum/go-ethereum/go.mod b/vendor/github.com/ethereum/go-ethereum/go.mod deleted file mode 100644 index 791d80c..0000000 --- a/vendor/github.com/ethereum/go-ethereum/go.mod +++ /dev/null @@ -1,71 +0,0 @@ -module github.com/ethereum/go-ethereum - -go 1.13 - -require ( - github.com/Azure/azure-pipeline-go v0.2.2 // indirect - github.com/Azure/azure-storage-blob-go v0.7.0 - github.com/Azure/go-autorest/autorest/adal v0.8.0 // indirect - github.com/StackExchange/wmi 
v0.0.0-20180116203802-5d049714c4a6 // indirect - github.com/VictoriaMetrics/fastcache v1.5.3 - github.com/aristanetworks/goarista v0.0.0-20170210015632-ea17b1a17847 - github.com/aws/aws-sdk-go v1.25.48 - github.com/btcsuite/btcd v0.0.0-20171128150713-2e60448ffcc6 - github.com/cespare/cp v0.1.0 - github.com/cespare/xxhash/v2 v2.1.1 // indirect - github.com/cloudflare/cloudflare-go v0.10.2-0.20190916151808-a80f83b9add9 - github.com/davecgh/go-spew v1.1.1 - github.com/deckarep/golang-set v0.0.0-20180603214616-504e848d77ea - github.com/dlclark/regexp2 v1.2.0 // indirect - github.com/docker/docker v1.4.2-0.20180625184442-8e610b2b55bf - github.com/dop251/goja v0.0.0-20200219165308-d1232e640a87 - github.com/edsrzf/mmap-go v0.0.0-20160512033002-935e0e8a636c - github.com/elastic/gosigar v0.8.1-0.20180330100440-37f05ff46ffa - github.com/fatih/color v1.3.0 - github.com/fjl/memsize v0.0.0-20180418122429-ca190fb6ffbc - github.com/gballet/go-libpcsclite v0.0.0-20190607065134-2772fd86a8ff - github.com/go-ole/go-ole v1.2.1 // indirect - github.com/go-sourcemap/sourcemap v2.1.2+incompatible // indirect - github.com/go-stack/stack v1.8.0 - github.com/golang/protobuf v1.3.2-0.20190517061210-b285ee9cfc6c - github.com/golang/snappy v0.0.1 - github.com/google/go-cmp v0.3.1 // indirect - github.com/gorilla/websocket v1.4.1-0.20190629185528-ae1634f6a989 - github.com/graph-gophers/graphql-go v0.0.0-20191115155744-f33e81362277 - github.com/hashicorp/golang-lru v0.0.0-20160813221303-0a025b7e63ad - github.com/huin/goupnp v0.0.0-20161224104101-679507af18f3 - github.com/influxdata/influxdb v1.2.3-0.20180221223340-01288bdb0883 - github.com/jackpal/go-nat-pmp v1.0.2-0.20160603034137-1fa385a6f458 - github.com/julienschmidt/httprouter v1.1.1-0.20170430222011-975b5c4c7c21 - github.com/karalabe/usb v0.0.0-20190919080040-51dc0efba356 - github.com/kr/pretty v0.1.0 // indirect - github.com/kylelemons/godebug v1.1.0 // indirect - github.com/mattn/go-colorable v0.1.0 - github.com/mattn/go-isatty 
v0.0.5-0.20180830101745-3fb116b82035 - github.com/naoina/go-stringutil v0.1.0 // indirect - github.com/naoina/toml v0.1.2-0.20170918210437-9fafd6967416 - github.com/olekukonko/tablewriter v0.0.2-0.20190409134802-7e037d187b0c - github.com/pborman/uuid v0.0.0-20170112150404-1b00554d8222 - github.com/peterh/liner v1.1.1-0.20190123174540-a2c9a5303de7 - github.com/prometheus/tsdb v0.6.2-0.20190402121629-4f204dcbc150 - github.com/rjeczalik/notify v0.9.1 - github.com/rs/cors v0.0.0-20160617231935-a62a804a8a00 - github.com/rs/xhandler v0.0.0-20160618193221-ed27b6fd6521 // indirect - github.com/status-im/keycard-go v0.0.0-20190316090335-8537d3370df4 - github.com/steakknife/bloomfilter v0.0.0-20180922174646-6819c0d2a570 - github.com/steakknife/hamming v0.0.0-20180906055917-c99c65617cd3 // indirect - github.com/stretchr/testify v1.4.0 - github.com/syndtr/goleveldb v1.0.1-0.20190923125748-758128399b1d - github.com/tyler-smith/go-bip39 v1.0.1-0.20181017060643-dbb3b84ba2ef - github.com/wsddn/go-ecdh v0.0.0-20161211032359-48726bab9208 - golang.org/x/crypto v0.0.0-20200311171314-f7b00557c8c4 - golang.org/x/net v0.0.0-20200301022130-244492dfa37a // indirect - golang.org/x/sync v0.0.0-20181108010431-42b317875d0f - golang.org/x/sys v0.0.0-20200302150141-5c8b2ff67527 - golang.org/x/text v0.3.2 - golang.org/x/time v0.0.0-20190308202827-9d24e82272b4 - gopkg.in/natefinch/npipe.v2 v2.0.0-20160621034901-c1b8fa8bdcce - gopkg.in/olebedev/go-duktape.v3 v3.0.0-20190213234257-ec84240a7772 - gopkg.in/urfave/cli.v1 v1.20.0 - gotest.tools v2.2.0+incompatible // indirect -) diff --git a/vendor/github.com/ethereum/go-ethereum/go.sum b/vendor/github.com/ethereum/go-ethereum/go.sum deleted file mode 100644 index 0000392..0000000 --- a/vendor/github.com/ethereum/go-ethereum/go.sum +++ /dev/null @@ -1,236 +0,0 @@ -github.com/Azure/azure-pipeline-go v0.2.1/go.mod h1:UGSo8XybXnIGZ3epmeBw7Jdz+HiUVpqIlpz/HKHylF4= -github.com/Azure/azure-pipeline-go v0.2.2 h1:6oiIS9yaG6XCCzhgAgKFfIWyo4LLCiDhZot6ltoThhY= 
-github.com/Azure/azure-pipeline-go v0.2.2/go.mod h1:4rQ/NZncSvGqNkkOsNpOU1tgoNuIlp9AfUH5G1tvCHc= -github.com/Azure/azure-storage-blob-go v0.7.0 h1:MuueVOYkufCxJw5YZzF842DY2MBsp+hLuh2apKY0mck= -github.com/Azure/azure-storage-blob-go v0.7.0/go.mod h1:f9YQKtsG1nMisotuTPpO0tjNuEjKRYAcJU8/ydDI++4= -github.com/Azure/go-autorest/autorest v0.9.0 h1:MRvx8gncNaXJqOoLmhNjUAKh33JJF8LyxPhomEtOsjs= -github.com/Azure/go-autorest/autorest v0.9.0/go.mod h1:xyHB1BMZT0cuDHU7I0+g046+BFDTQ8rEZB0s4Yfa6bI= -github.com/Azure/go-autorest/autorest/adal v0.5.0/go.mod h1:8Z9fGy2MpX0PvDjB1pEgQTmVqjGhiHBW7RJJEciWzS0= -github.com/Azure/go-autorest/autorest/adal v0.8.0 h1:CxTzQrySOxDnKpLjFJeZAS5Qrv/qFPkgLjx5bOAi//I= -github.com/Azure/go-autorest/autorest/adal v0.8.0/go.mod h1:Z6vX6WXXuyieHAXwMj0S6HY6e6wcHn37qQMBQlvY3lc= -github.com/Azure/go-autorest/autorest/date v0.1.0/go.mod h1:plvfp3oPSKwf2DNjlBjWF/7vwR+cUD/ELuzDCXwHUVA= -github.com/Azure/go-autorest/autorest/date v0.2.0 h1:yW+Zlqf26583pE43KhfnhFcdmSWlm5Ew6bxipnr/tbM= -github.com/Azure/go-autorest/autorest/date v0.2.0/go.mod h1:vcORJHLJEh643/Ioh9+vPmf1Ij9AEBM5FuBIXLmIy0g= -github.com/Azure/go-autorest/autorest/mocks v0.1.0/go.mod h1:OTyCOPRA2IgIlWxVYxBee2F5Gr4kF2zd2J5cFRaIDN0= -github.com/Azure/go-autorest/autorest/mocks v0.2.0/go.mod h1:OTyCOPRA2IgIlWxVYxBee2F5Gr4kF2zd2J5cFRaIDN0= -github.com/Azure/go-autorest/autorest/mocks v0.3.0 h1:qJumjCaCudz+OcqE9/XtEPfvtOjOmKaui4EOpFI6zZc= -github.com/Azure/go-autorest/autorest/mocks v0.3.0/go.mod h1:a8FDP3DYzQ4RYfVAxAN3SVSiiO77gL2j2ronKKP0syM= -github.com/Azure/go-autorest/logger v0.1.0 h1:ruG4BSDXONFRrZZJ2GUXDiUyVpayPmb1GnWeHDdaNKY= -github.com/Azure/go-autorest/logger v0.1.0/go.mod h1:oExouG+K6PryycPJfVSxi/koC6LSNgds39diKLz7Vrc= -github.com/Azure/go-autorest/tracing v0.5.0 h1:TRn4WjSnkcSy5AEG3pnbtFSwNtwzjr4VYyQflFE619k= -github.com/Azure/go-autorest/tracing v0.5.0/go.mod h1:r/s2XiOKccPW3HrqB+W0TQzfbtp2fGCgRFtBroKn4Dk= -github.com/BurntSushi/toml v0.3.1/go.mod 
h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= -github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU= -github.com/OneOfOne/xxhash v1.2.5 h1:zl/OfRA6nftbBK9qTohYBJ5xvw6C/oNKizR7cZGl3cI= -github.com/OneOfOne/xxhash v1.2.5/go.mod h1:eZbhyaAYD41SGSSsnmcpxVoRiQ/MPUTjUdIIOT9Um7Q= -github.com/StackExchange/wmi v0.0.0-20180116203802-5d049714c4a6 h1:fLjPD/aNc3UIOA6tDi6QXUemppXK3P9BI7mr2hd6gx8= -github.com/StackExchange/wmi v0.0.0-20180116203802-5d049714c4a6/go.mod h1:3eOhrUMpNV+6aFIbp5/iudMxNCF27Vw2OZgy4xEx0Fg= -github.com/VictoriaMetrics/fastcache v1.5.3 h1:2odJnXLbFZcoV9KYtQ+7TH1UOq3dn3AssMgieaezkR4= -github.com/VictoriaMetrics/fastcache v1.5.3/go.mod h1:+jv9Ckb+za/P1ZRg/sulP5Ni1v49daAVERr0H3CuscE= -github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= -github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= -github.com/allegro/bigcache v1.2.1-0.20190218064605-e24eb225f156 h1:eMwmnE/GDgah4HI848JfFxHt+iPb26b4zyfspmqY0/8= -github.com/allegro/bigcache v1.2.1-0.20190218064605-e24eb225f156/go.mod h1:Cb/ax3seSYIx7SuZdm2G2xzfwmv3TPSk2ucNfQESPXM= -github.com/aristanetworks/goarista v0.0.0-20170210015632-ea17b1a17847 h1:rtI0fD4oG/8eVokGVPYJEW1F88p1ZNgXiEIs9thEE4A= -github.com/aristanetworks/goarista v0.0.0-20170210015632-ea17b1a17847/go.mod h1:D/tb0zPVXnP7fmsLZjtdUhSsumbK/ij54UXjjVgMGxQ= -github.com/aws/aws-sdk-go v1.25.48 h1:J82DYDGZHOKHdhx6hD24Tm30c2C3GchYGfN0mf9iKUk= -github.com/aws/aws-sdk-go v1.25.48/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= -github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= -github.com/btcsuite/btcd v0.0.0-20171128150713-2e60448ffcc6 h1:Eey/GGQ/E5Xp1P2Lyx1qj007hLZfbi0+CoVeJruGCtI= -github.com/btcsuite/btcd v0.0.0-20171128150713-2e60448ffcc6/go.mod h1:Dmm/EzmjnCiweXmzRIAiUWCInVmPgjkzgv5k4tVyXiQ= -github.com/cespare/cp 
v0.1.0 h1:SE+dxFebS7Iik5LK0tsi1k9ZCxEaFX4AjQmoyA+1dJk= -github.com/cespare/cp v0.1.0/go.mod h1:SOGHArjBr4JWaSDEVpWpo/hNg6RoKrls6Oh40hiwW+s= -github.com/cespare/xxhash v1.1.0 h1:a6HrQnmkObjyL+Gs60czilIUGqrzKutQD6XZog3p+ko= -github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= -github.com/cespare/xxhash/v2 v2.0.1-0.20190104013014-3767db7a7e18 h1:pl4eWIqvFe/Kg3zkn7NxevNzILnZYWDCG7qbA1CJik0= -github.com/cespare/xxhash/v2 v2.0.1-0.20190104013014-3767db7a7e18/go.mod h1:HD5P3vAIAh+Y2GAxg0PrPN1P8WkepXGpjbUPDHJqqKM= -github.com/cespare/xxhash/v2 v2.1.1 h1:6MnRN8NT7+YBpUIWxHtefFZOKTAPgGjpQSxqLNn0+qY= -github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= -github.com/cloudflare/cloudflare-go v0.10.2-0.20190916151808-a80f83b9add9 h1:J82+/8rub3qSy0HxEnoYD8cs+HDlHWYrqYXe2Vqxluk= -github.com/cloudflare/cloudflare-go v0.10.2-0.20190916151808-a80f83b9add9/go.mod h1:1MxXX1Ux4x6mqPmjkUgTP1CdXIBXKX7T+Jk9Gxrmx+U= -github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= -github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= -github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/deckarep/golang-set v0.0.0-20180603214616-504e848d77ea h1:j4317fAZh7X6GqbFowYdYdI0L9bwxL07jyPZIdepyZ0= -github.com/deckarep/golang-set v0.0.0-20180603214616-504e848d77ea/go.mod h1:93vsz/8Wt4joVM7c2AVqh+YRMiUSc14yDtF28KmMOgQ= -github.com/dgrijalva/jwt-go v3.2.0+incompatible h1:7qlOGliEKZXTDg6OTjfoBKDXWrumCAMpl/TFQ4/5kLM= -github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= -github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no= -github.com/dlclark/regexp2 v1.2.0 
h1:8sAhBGEM0dRWogWqWyQeIJnxjWO6oIjl8FKqREDsGfk= -github.com/dlclark/regexp2 v1.2.0/go.mod h1:2pZnwuY/m+8K6iRw6wQdMtk+rH5tNGR1i55kozfMjCc= -github.com/docker/docker v1.4.2-0.20180625184442-8e610b2b55bf h1:sh8rkQZavChcmakYiSlqu2425CHyFXLZZnvm7PDpU8M= -github.com/docker/docker v1.4.2-0.20180625184442-8e610b2b55bf/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= -github.com/dop251/goja v0.0.0-20200219165308-d1232e640a87 h1:OMbqMXf9OAXzH1dDH82mQMrddBE8LIIwDtxeK4wE1/A= -github.com/dop251/goja v0.0.0-20200219165308-d1232e640a87/go.mod h1:Mw6PkjjMXWbTj+nnj4s3QPXq1jaT0s5pC0iFD4+BOAA= -github.com/edsrzf/mmap-go v0.0.0-20160512033002-935e0e8a636c h1:JHHhtb9XWJrGNMcrVP6vyzO4dusgi/HnceHTgxSejUM= -github.com/edsrzf/mmap-go v0.0.0-20160512033002-935e0e8a636c/go.mod h1:YO35OhQPt3KJa3ryjFM5Bs14WD66h8eGKpfaBNrHW5M= -github.com/elastic/gosigar v0.8.1-0.20180330100440-37f05ff46ffa h1:XKAhUk/dtp+CV0VO6mhG2V7jA9vbcGcnYF/Ay9NjZrY= -github.com/elastic/gosigar v0.8.1-0.20180330100440-37f05ff46ffa/go.mod h1:cdorVVzy1fhmEqmtgqkoE3bYtCfSCkVyjTyCIo22xvs= -github.com/fatih/color v1.3.0 h1:YehCCcyeQ6Km0D6+IapqPinWBK6y+0eB5umvZXK9WPs= -github.com/fatih/color v1.3.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= -github.com/fjl/memsize v0.0.0-20180418122429-ca190fb6ffbc h1:jtW8jbpkO4YirRSyepBOH8E+2HEw6/hKkBvFPwhUN8c= -github.com/fjl/memsize v0.0.0-20180418122429-ca190fb6ffbc/go.mod h1:VvhXpOYNQvB+uIk2RvXzuaQtkQJzzIx6lSBe1xv7hi0= -github.com/fsnotify/fsnotify v1.4.7 h1:IXs+QLmnXW2CcXuY+8Mzv/fWEsPGWxqefPtCP5CnV9I= -github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= -github.com/gballet/go-libpcsclite v0.0.0-20190607065134-2772fd86a8ff h1:tY80oXqGNY4FhTFhk+o9oFHGINQ/+vhlm8HFzi6znCI= -github.com/gballet/go-libpcsclite v0.0.0-20190607065134-2772fd86a8ff/go.mod h1:x7DCsMOv1taUwEWCzT4cmDeAkigA5/QCwUodaVOe8Ww= -github.com/go-kit/kit v0.8.0 h1:Wz+5lgoB0kkuqLEc6NVmwRknTKP6dTGbSqvhZtBI/j0= -github.com/go-kit/kit v0.8.0/go.mod 
h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= -github.com/go-logfmt/logfmt v0.3.0 h1:8HUsc87TaSWLKwrnumgC8/YconD2fJQsRJAsWaPg2ic= -github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE= -github.com/go-ole/go-ole v1.2.1 h1:2lOsA72HgjxAuMlKpFiCbHTvu44PIVkZ5hqm3RSdI/E= -github.com/go-ole/go-ole v1.2.1/go.mod h1:7FAglXiTm7HKlQRDeOQ6ZNUHidzCWXuZWq/1dTyBNF8= -github.com/go-sourcemap/sourcemap v2.1.2+incompatible h1:0b/xya7BKGhXuqFESKM4oIiRo9WOt2ebz7KxfreD6ug= -github.com/go-sourcemap/sourcemap v2.1.2+incompatible/go.mod h1:F8jJfvm2KbVjc5NqelyYJmf/v5J0dwNLS2mL4sNA1Jg= -github.com/go-stack/stack v1.8.0 h1:5SgMzNM5HxrEjV0ww2lTmX6E2Izsfxas4+YHWRs3Lsk= -github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= -github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= -github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= -github.com/golang/protobuf v1.3.2-0.20190517061210-b285ee9cfc6c h1:zqAKixg3cTcIasAMJV+EcfVbWwLpOZ7LeoWJvcuD/5Q= -github.com/golang/protobuf v1.3.2-0.20190517061210-b285ee9cfc6c/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= -github.com/golang/snappy v0.0.1 h1:Qgr9rKW7uDUkrbSmQeiDsGa8SjGyCOGtuasMWwvp2P4= -github.com/golang/snappy v0.0.1/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= -github.com/google/go-cmp v0.3.1 h1:Xye71clBPdm5HgqGwUkwhbynsUJZhDbS20FvLhQ2izg= -github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= -github.com/gorilla/websocket v1.4.1-0.20190629185528-ae1634f6a989 h1:giknQ4mEuDFmmHSrGcbargOuLHQGtywqo4mheITex54= -github.com/gorilla/websocket v1.4.1-0.20190629185528-ae1634f6a989/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ= -github.com/graph-gophers/graphql-go v0.0.0-20191115155744-f33e81362277 h1:E0whKxgp2ojts0FDgUA8dl62bmH0LxKanMoBr6MDTDM= -github.com/graph-gophers/graphql-go v0.0.0-20191115155744-f33e81362277/go.mod 
h1:9CQHMSxwO4MprSdzoIEobiHpoLtHm77vfxsvsIN5Vuc= -github.com/hashicorp/golang-lru v0.0.0-20160813221303-0a025b7e63ad h1:eMxs9EL0PvIGS9TTtxg4R+JxuPGav82J8rA+GFnY7po= -github.com/hashicorp/golang-lru v0.0.0-20160813221303-0a025b7e63ad/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= -github.com/hpcloud/tail v1.0.0 h1:nfCOvKYfkgYP8hkirhJocXT2+zOD8yUNjXaWfTlyFKI= -github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= -github.com/huin/goupnp v0.0.0-20161224104101-679507af18f3 h1:DqD8eigqlUm0+znmx7zhL0xvTW3+e1jCekJMfBUADWI= -github.com/huin/goupnp v0.0.0-20161224104101-679507af18f3/go.mod h1:MZ2ZmwcBpvOoJ22IJsc7va19ZwoheaBk43rKg12SKag= -github.com/influxdata/influxdb v1.2.3-0.20180221223340-01288bdb0883 h1:FSeK4fZCo8u40n2JMnyAsd6x7+SbvoOMHvQOU/n10P4= -github.com/influxdata/influxdb v1.2.3-0.20180221223340-01288bdb0883/go.mod h1:qZna6X/4elxqT3yI9iZYdZrWWdeFOOprn86kgg4+IzY= -github.com/jackpal/go-nat-pmp v1.0.2-0.20160603034137-1fa385a6f458 h1:6OvNmYgJyexcZ3pYbTI9jWx5tHo1Dee/tWbLMfPe2TA= -github.com/jackpal/go-nat-pmp v1.0.2-0.20160603034137-1fa385a6f458/go.mod h1:QPH045xvCAeXUZOxsnwmrtiCoxIr9eob+4orBN1SBKc= -github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af h1:pmfjZENx5imkbgOkpRUYLnmbU7UEFbjtDA2hxJ1ichM= -github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k= -github.com/julienschmidt/httprouter v1.1.1-0.20170430222011-975b5c4c7c21 h1:F/iKcka0K2LgnKy/fgSBf235AETtm1n1TvBzqu40LE0= -github.com/julienschmidt/httprouter v1.1.1-0.20170430222011-975b5c4c7c21/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w= -github.com/karalabe/usb v0.0.0-20190919080040-51dc0efba356 h1:I/yrLt2WilKxlQKCM52clh5rGzTKpVctGT1lH4Dc8Jw= -github.com/karalabe/usb v0.0.0-20190919080040-51dc0efba356/go.mod h1:Od972xHfMJowv7NGVDiWVxk2zxnWgjLlJzE+F4F7AGU= -github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515 h1:T+h1c/A9Gawja4Y9mFVWj2vyii2bbUNDw3kt9VxK2EY= -github.com/kr/logfmt 
v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= -github.com/kr/pretty v0.1.0 h1:L/CwN0zerZDmRFUapSPitk6f+Q3+0za1rQkzVuMiMFI= -github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= -github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= -github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE= -github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= -github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc= -github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw= -github.com/mattn/go-colorable v0.1.0 h1:v2XXALHHh6zHfYTJ+cSkwtyffnaOyR1MXaA91mTrb8o= -github.com/mattn/go-colorable v0.1.0/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU= -github.com/mattn/go-ieproxy v0.0.0-20190610004146-91bb50d98149/go.mod h1:31jz6HNzdxOmlERGGEc4v/dMssOfmp2p5bT/okiKFFc= -github.com/mattn/go-ieproxy v0.0.0-20190702010315-6dee0af9227d h1:oNAwILwmgWKFpuU+dXvI6dl9jG2mAWAZLX3r9s0PPiw= -github.com/mattn/go-ieproxy v0.0.0-20190702010315-6dee0af9227d/go.mod h1:31jz6HNzdxOmlERGGEc4v/dMssOfmp2p5bT/okiKFFc= -github.com/mattn/go-isatty v0.0.5-0.20180830101745-3fb116b82035 h1:USWjF42jDCSEeikX/G1g40ZWnsPXN5WkZ4jMHZWyBK4= -github.com/mattn/go-isatty v0.0.5-0.20180830101745-3fb116b82035/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= -github.com/mattn/go-runewidth v0.0.3/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU= -github.com/mattn/go-runewidth v0.0.4 h1:2BvfKmzob6Bmd4YsL0zygOqfdFnK7GR4QL06Do4/p7Y= -github.com/mattn/go-runewidth v0.0.4/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU= -github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= -github.com/naoina/go-stringutil v0.1.0 h1:rCUeRUHjBjGTSHl0VC00jUPLz8/F9dDzYI70Hzifhks= -github.com/naoina/go-stringutil v0.1.0/go.mod h1:XJ2SJL9jCtBh+P9q5btrd/Ylo8XwT/h1USek5+NqSA0= 
-github.com/naoina/toml v0.1.2-0.20170918210437-9fafd6967416 h1:shk/vn9oCoOTmwcouEdwIeOtOGA/ELRUw/GwvxwfT+0= -github.com/naoina/toml v0.1.2-0.20170918210437-9fafd6967416/go.mod h1:NBIhNtsFMo3G2szEBne+bO4gS192HuIYRqfvOWb4i1E= -github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U= -github.com/olekukonko/tablewriter v0.0.1/go.mod h1:vsDQFd/mU46D+Z4whnwzcISnGGzXWMclvtLoiIKAKIo= -github.com/olekukonko/tablewriter v0.0.2-0.20190409134802-7e037d187b0c h1:1RHs3tNxjXGHeul8z2t6H2N2TlAqpKe5yryJztRx4Jk= -github.com/olekukonko/tablewriter v0.0.2-0.20190409134802-7e037d187b0c/go.mod h1:vsDQFd/mU46D+Z4whnwzcISnGGzXWMclvtLoiIKAKIo= -github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= -github.com/onsi/ginkgo v1.7.0 h1:WSHQ+IS43OoUrWtD1/bbclrwK8TTH5hzp+umCiuxHgs= -github.com/onsi/ginkgo v1.7.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= -github.com/onsi/gomega v1.4.3 h1:RE1xgDvH7imwFD45h+u2SgIfERHlS2yNG4DObb5BSKU= -github.com/onsi/gomega v1.4.3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= -github.com/opentracing/opentracing-go v1.1.0 h1:pWlfV3Bxv7k65HYwkikxat0+s3pV4bsqf19k25Ur8rU= -github.com/opentracing/opentracing-go v1.1.0/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o= -github.com/pborman/uuid v0.0.0-20170112150404-1b00554d8222 h1:goeTyGkArOZIVOMA0dQbyuPWGNQJZGPwPu/QS9GlpnA= -github.com/pborman/uuid v0.0.0-20170112150404-1b00554d8222/go.mod h1:VyrYX9gd7irzKovcSS6BIIEwPRkP2Wm2m9ufcdFSJ34= -github.com/peterh/liner v1.1.1-0.20190123174540-a2c9a5303de7 h1:oYW+YCJ1pachXTQmzR3rNLYGGz4g/UgFcjb28p/viDM= -github.com/peterh/liner v1.1.1-0.20190123174540-a2c9a5303de7/go.mod h1:CRroGNssyjTd/qIG2FyxByd2S8JEAZXBl4qUrZf8GS0= -github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= -github.com/pkg/errors v0.8.1 h1:iURUrRGxPUNPdy5/HRSm+Yj6okJ6UtLINN0Q9M4+h3I= -github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= -github.com/pmezard/go-difflib 
v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= -github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= -github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= -github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= -github.com/prometheus/common v0.0.0-20181113130724-41aa239b4cce/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro= -github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= -github.com/prometheus/tsdb v0.6.2-0.20190402121629-4f204dcbc150 h1:ZeU+auZj1iNzN8iVhff6M38Mfu73FQiJve/GEXYJBjE= -github.com/prometheus/tsdb v0.6.2-0.20190402121629-4f204dcbc150/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40TwIPHuXU= -github.com/rjeczalik/notify v0.9.1 h1:CLCKso/QK1snAlnhNR/CNvNiFU2saUtjV0bx3EwNeCE= -github.com/rjeczalik/notify v0.9.1/go.mod h1:rKwnCoCGeuQnwBtTSPL9Dad03Vh2n40ePRrjvIXnJho= -github.com/rs/cors v0.0.0-20160617231935-a62a804a8a00 h1:8DPul/X0IT/1TNMIxoKLwdemEOBBHDC/K4EB16Cw5WE= -github.com/rs/cors v0.0.0-20160617231935-a62a804a8a00/go.mod h1:gFx+x8UowdsKA9AchylcLynDq+nNFfI8FkUZdN/jGCU= -github.com/rs/xhandler v0.0.0-20160618193221-ed27b6fd6521 h1:3hxavr+IHMsQBrYUPQM5v0CgENFktkkbg1sfpgM3h20= -github.com/rs/xhandler v0.0.0-20160618193221-ed27b6fd6521/go.mod h1:RvLn4FgxWubrpZHtQLnOf6EwhN2hEMusxZOhcW9H3UQ= -github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= -github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc= -github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= -github.com/spaolacci/murmur3 v1.0.1-0.20190317074736-539464a789e9 h1:5Cp3cVwpQP4aCQ6jx6dNLP3IarbYiuStmIzYu+BjQwY= -github.com/spaolacci/murmur3 v1.0.1-0.20190317074736-539464a789e9/go.mod 
h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= -github.com/status-im/keycard-go v0.0.0-20190316090335-8537d3370df4 h1:Gb2Tyox57NRNuZ2d3rmvB3pcmbu7O1RS3m8WRx7ilrg= -github.com/status-im/keycard-go v0.0.0-20190316090335-8537d3370df4/go.mod h1:RZLeN1LMWmRsyYjvAu+I6Dm9QmlDaIIt+Y+4Kd7Tp+Q= -github.com/steakknife/bloomfilter v0.0.0-20180922174646-6819c0d2a570 h1:gIlAHnH1vJb5vwEjIp5kBj/eu99p/bl0Ay2goiPe5xE= -github.com/steakknife/bloomfilter v0.0.0-20180922174646-6819c0d2a570/go.mod h1:8OR4w3TdeIHIh1g6EMY5p0gVNOovcWC+1vpc7naMuAw= -github.com/steakknife/hamming v0.0.0-20180906055917-c99c65617cd3 h1:njlZPzLwU639dk2kqnCPPv+wNjq7Xb6EfUxe/oX0/NM= -github.com/steakknife/hamming v0.0.0-20180906055917-c99c65617cd3/go.mod h1:hpGUWaI9xL8pRQCTXQgocU38Qw1g0Us7n5PxxTwTCYU= -github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= -github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= -github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= -github.com/stretchr/testify v1.4.0 h1:2E4SXV/wtOkTonXsotYi4li6zVWxYlZuYNCXe9XRJyk= -github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= -github.com/syndtr/goleveldb v1.0.1-0.20190923125748-758128399b1d h1:gZZadD8H+fF+n9CmNhYL1Y0dJB+kLOmKd7FbPJLeGHs= -github.com/syndtr/goleveldb v1.0.1-0.20190923125748-758128399b1d/go.mod h1:9OrXJhf154huy1nPWmuSrkgjPUtUNhA+Zmy+6AESzuA= -github.com/tyler-smith/go-bip39 v1.0.1-0.20181017060643-dbb3b84ba2ef h1:wHSqTBrZW24CsNJDfeh9Ex6Pm0Rcpc7qrgKBiL44vF4= -github.com/tyler-smith/go-bip39 v1.0.1-0.20181017060643-dbb3b84ba2ef/go.mod h1:sJ5fKU0s6JVwZjjcUEX2zFOnvq0ASQ2K9Zr6cf67kNs= -github.com/urfave/cli v1.22.1/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0= -github.com/wsddn/go-ecdh v0.0.0-20161211032359-48726bab9208 h1:1cngl9mPEoITZG8s8cVcUy5CeIBYhEESkOB7m6Gmkrk= -github.com/wsddn/go-ecdh v0.0.0-20161211032359-48726bab9208/go.mod h1:IotVbo4F+mw0EzQ08zFqg7pK3FebNXpaMsRy2RT+Ees= 
-golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2 h1:VklqNMn3ovrHsnt90PveolxSbWFaJdECFbxSq0Mqo2M= -golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= -golang.org/x/crypto v0.0.0-20200311171314-f7b00557c8c4 h1:QmwruyY+bKbDDL0BaglrbZABEali68eoMFhTZpCjYVA= -golang.org/x/crypto v0.0.0-20200311171314-f7b00557c8c4/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20200301022130-244492dfa37a h1:GuSPYbZzB5/dcLNCwLQLsg3obCJtX9IJhpXkvY7kzk0= -golang.org/x/net v0.0.0-20200301022130-244492dfa37a/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20181108010431-42b317875d0f h1:Bl/8QSvNqXvPGPGXa2z5xUTmV7VDcZyvRZ+QQXkXTZQ= -golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20181107165924-66b7b1311ac8/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200302150141-5c8b2ff67527 h1:uYVVQ9WP/Ds2ROhcaGPeIdVq0RIXVLwsHlnvJ+cT1So= -golang.org/x/sys v0.0.0-20200302150141-5c8b2ff67527/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= -golang.org/x/text v0.3.2 h1:tW2bmiBqwgJj/UpqtC8EpXEZVYOwU0yG4iWbprSVAcs= -golang.org/x/text 
v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= -golang.org/x/time v0.0.0-20190308202827-9d24e82272b4 h1:SvFZT6jyqRaOeXpc5h/JSfZenJ2O330aBsf7JfSUXmQ= -golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= -gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 h1:qIbj1fsPNlZgppZ+VLlY7N33q108Sa+fhmuc+sWQYwY= -gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/fsnotify.v1 v1.4.7 h1:xOHLXZwVvI9hhs+cLKq5+I5onOuwQLhQwiu63xxlHs4= -gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= -gopkg.in/natefinch/npipe.v2 v2.0.0-20160621034901-c1b8fa8bdcce h1:+JknDZhAj8YMt7GC73Ei8pv4MzjDUNPHgQWJdtMAaDU= -gopkg.in/natefinch/npipe.v2 v2.0.0-20160621034901-c1b8fa8bdcce/go.mod h1:5AcXVHNjg+BDxry382+8OKon8SEWiKktQR07RKPsv1c= -gopkg.in/olebedev/go-duktape.v3 v3.0.0-20190213234257-ec84240a7772 h1:hhsSf/5z74Ck/DJYc+R8zpq8KGm7uJvpdLRQED/IedA= -gopkg.in/olebedev/go-duktape.v3 v3.0.0-20190213234257-ec84240a7772/go.mod h1:uAJfkITjFhyEEuUfm7bsmCZRbW5WRq8s9EY8HZ6hCns= -gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ= -gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= -gopkg.in/urfave/cli.v1 v1.20.0 h1:NdAVW6RYxDif9DhDHaAortIu956m2c0v+09AZBPTbE0= -gopkg.in/urfave/cli.v1 v1.20.0/go.mod h1:vuBzUtMdQeixQj8LVd+/98pzhxNGQoyuPBlsXHOQNO0= -gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.2.2 h1:ZCJp+EgiOT7lHqUV2J862kp8Qj64Jo6az82+3Td9dZw= -gopkg.in/yaml.v2 v2.2.2/go.mod 
h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gotest.tools v2.2.0+incompatible h1:VsBPFP1AI068pPrMxtb/S8Zkgf9xEmTLJjfM+P5UIEo= -gotest.tools v2.2.0+incompatible/go.mod h1:DsYFclhRJ6vuDpmuTbkuFWG+y2sxOXAzmJt81HFBacw= diff --git a/vendor/github.com/ethereum/go-ethereum/interfaces.go b/vendor/github.com/ethereum/go-ethereum/interfaces.go deleted file mode 100644 index 1ff31f9..0000000 --- a/vendor/github.com/ethereum/go-ethereum/interfaces.go +++ /dev/null @@ -1,211 +0,0 @@ -// Copyright 2016 The go-ethereum Authors -// This file is part of the go-ethereum library. -// -// The go-ethereum library is free software: you can redistribute it and/or modify -// it under the terms of the GNU Lesser General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// The go-ethereum library is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Lesser General Public License for more details. -// -// You should have received a copy of the GNU Lesser General Public License -// along with the go-ethereum library. If not, see . - -// Package ethereum defines interfaces for interacting with Ethereum. -package ethereum - -import ( - "context" - "errors" - "math/big" - - "github.com/ethereum/go-ethereum/common" - "github.com/ethereum/go-ethereum/core/types" -) - -// NotFound is returned by API methods if the requested item does not exist. -var NotFound = errors.New("not found") - -// TODO: move subscription to package event - -// Subscription represents an event subscription where events are -// delivered on a data channel. -type Subscription interface { - // Unsubscribe cancels the sending of events to the data channel - // and closes the error channel. - Unsubscribe() - // Err returns the subscription error channel. 
The error channel receives - // a value if there is an issue with the subscription (e.g. the network connection - // delivering the events has been closed). Only one value will ever be sent. - // The error channel is closed by Unsubscribe. - Err() <-chan error -} - -// ChainReader provides access to the blockchain. The methods in this interface access raw -// data from either the canonical chain (when requesting by block number) or any -// blockchain fork that was previously downloaded and processed by the node. The block -// number argument can be nil to select the latest canonical block. Reading block headers -// should be preferred over full blocks whenever possible. -// -// The returned error is NotFound if the requested item does not exist. -type ChainReader interface { - BlockByHash(ctx context.Context, hash common.Hash) (*types.Block, error) - BlockByNumber(ctx context.Context, number *big.Int) (*types.Block, error) - HeaderByHash(ctx context.Context, hash common.Hash) (*types.Header, error) - HeaderByNumber(ctx context.Context, number *big.Int) (*types.Header, error) - TransactionCount(ctx context.Context, blockHash common.Hash) (uint, error) - TransactionInBlock(ctx context.Context, blockHash common.Hash, index uint) (*types.Transaction, error) - - // This method subscribes to notifications about changes of the head block of - // the canonical chain. - SubscribeNewHead(ctx context.Context, ch chan<- *types.Header) (Subscription, error) -} - -// TransactionReader provides access to past transactions and their receipts. -// Implementations may impose arbitrary restrictions on the transactions and receipts that -// can be retrieved. Historic transactions may not be available. -// -// Avoid relying on this interface if possible. Contract logs (through the LogFilterer -// interface) are more reliable and usually safer in the presence of chain -// reorganisations. -// -// The returned error is NotFound if the requested item does not exist. 
-type TransactionReader interface { - // TransactionByHash checks the pool of pending transactions in addition to the - // blockchain. The isPending return value indicates whether the transaction has been - // mined yet. Note that the transaction may not be part of the canonical chain even if - // it's not pending. - TransactionByHash(ctx context.Context, txHash common.Hash) (tx *types.Transaction, isPending bool, err error) - // TransactionReceipt returns the receipt of a mined transaction. Note that the - // transaction may not be included in the current canonical chain even if a receipt - // exists. - TransactionReceipt(ctx context.Context, txHash common.Hash) (*types.Receipt, error) -} - -// ChainStateReader wraps access to the state trie of the canonical blockchain. Note that -// implementations of the interface may be unable to return state values for old blocks. -// In many cases, using CallContract can be preferable to reading raw contract storage. -type ChainStateReader interface { - BalanceAt(ctx context.Context, account common.Address, blockNumber *big.Int) (*big.Int, error) - StorageAt(ctx context.Context, account common.Address, key common.Hash, blockNumber *big.Int) ([]byte, error) - CodeAt(ctx context.Context, account common.Address, blockNumber *big.Int) ([]byte, error) - NonceAt(ctx context.Context, account common.Address, blockNumber *big.Int) (uint64, error) -} - -// SyncProgress gives progress indications when the node is synchronising with -// the Ethereum network. -type SyncProgress struct { - StartingBlock uint64 // Block number where sync began - CurrentBlock uint64 // Current block number where sync is at - HighestBlock uint64 // Highest alleged block number in the chain - PulledStates uint64 // Number of state trie entries already downloaded - KnownStates uint64 // Total number of state trie entries known about -} - -// ChainSyncReader wraps access to the node's current sync status. If there's no -// sync currently running, it returns nil. 
-type ChainSyncReader interface { - SyncProgress(ctx context.Context) (*SyncProgress, error) -} - -// CallMsg contains parameters for contract calls. -type CallMsg struct { - From common.Address // the sender of the 'transaction' - To *common.Address // the destination contract (nil for contract creation) - Gas uint64 // if 0, the call executes with near-infinite gas - GasPrice *big.Int // wei <-> gas exchange ratio - Value *big.Int // amount of wei sent along with the call - Data []byte // input data, usually an ABI-encoded contract method invocation -} - -// A ContractCaller provides contract calls, essentially transactions that are executed by -// the EVM but not mined into the blockchain. ContractCall is a low-level method to -// execute such calls. For applications which are structured around specific contracts, -// the abigen tool provides a nicer, properly typed way to perform calls. -type ContractCaller interface { - CallContract(ctx context.Context, call CallMsg, blockNumber *big.Int) ([]byte, error) -} - -// FilterQuery contains options for contract log filtering. -type FilterQuery struct { - BlockHash *common.Hash // used by eth_getLogs, return logs only from block with this hash - FromBlock *big.Int // beginning of the queried range, nil means genesis block - ToBlock *big.Int // end of the range, nil means latest block - Addresses []common.Address // restricts matches to events created by specific contracts - - // The Topic list restricts matches to particular event topics. Each event has a list - // of topics. Topics matches a prefix of that list. An empty element slice matches any - // topic. Non-empty elements represent an alternative that matches any of the - // contained topics. 
- // - // Examples: - // {} or nil matches any topic list - // {{A}} matches topic A in first position - // {{}, {B}} matches any topic in first position AND B in second position - // {{A}, {B}} matches topic A in first position AND B in second position - // {{A, B}, {C, D}} matches topic (A OR B) in first position AND (C OR D) in second position - Topics [][]common.Hash -} - -// LogFilterer provides access to contract log events using a one-off query or continuous -// event subscription. -// -// Logs received through a streaming query subscription may have Removed set to true, -// indicating that the log was reverted due to a chain reorganisation. -type LogFilterer interface { - FilterLogs(ctx context.Context, q FilterQuery) ([]types.Log, error) - SubscribeFilterLogs(ctx context.Context, q FilterQuery, ch chan<- types.Log) (Subscription, error) -} - -// TransactionSender wraps transaction sending. The SendTransaction method injects a -// signed transaction into the pending transaction pool for execution. If the transaction -// was a contract creation, the TransactionReceipt method can be used to retrieve the -// contract address after the transaction has been mined. -// -// The transaction must be signed and have a valid nonce to be included. Consumers of the -// API can use package accounts to maintain local private keys and need can retrieve the -// next available nonce using PendingNonceAt. -type TransactionSender interface { - SendTransaction(ctx context.Context, tx *types.Transaction) error -} - -// GasPricer wraps the gas price oracle, which monitors the blockchain to determine the -// optimal gas price given current fee market conditions. -type GasPricer interface { - SuggestGasPrice(ctx context.Context) (*big.Int, error) -} - -// A PendingStateReader provides access to the pending state, which is the result of all -// known executable transactions which have not yet been included in the blockchain. 
It is -// commonly used to display the result of ’unconfirmed’ actions (e.g. wallet value -// transfers) initiated by the user. The PendingNonceAt operation is a good way to -// retrieve the next available transaction nonce for a specific account. -type PendingStateReader interface { - PendingBalanceAt(ctx context.Context, account common.Address) (*big.Int, error) - PendingStorageAt(ctx context.Context, account common.Address, key common.Hash) ([]byte, error) - PendingCodeAt(ctx context.Context, account common.Address) ([]byte, error) - PendingNonceAt(ctx context.Context, account common.Address) (uint64, error) - PendingTransactionCount(ctx context.Context) (uint, error) -} - -// PendingContractCaller can be used to perform calls against the pending state. -type PendingContractCaller interface { - PendingCallContract(ctx context.Context, call CallMsg) ([]byte, error) -} - -// GasEstimator wraps EstimateGas, which tries to estimate the gas needed to execute a -// specific transaction based on the pending state. There is no guarantee that this is the -// true gas limit requirement as other transactions may be added or removed by miners, but -// it should provide a basis for setting a reasonable default. -type GasEstimator interface { - EstimateGas(ctx context.Context, call CallMsg) (uint64, error) -} - -// A PendingStateEventer provides access to real time notifications about changes to the -// pending state. 
-type PendingStateEventer interface { - SubscribePendingTransactions(ctx context.Context, ch chan<- *types.Transaction) (Subscription, error) -} diff --git a/vendor/github.com/ethereum/go-ethereum/log/CONTRIBUTORS b/vendor/github.com/ethereum/go-ethereum/log/CONTRIBUTORS deleted file mode 100644 index a086671..0000000 --- a/vendor/github.com/ethereum/go-ethereum/log/CONTRIBUTORS +++ /dev/null @@ -1,11 +0,0 @@ -Contributors to log15: - -- Aaron L -- Alan Shreve -- Chris Hines -- Ciaran Downey -- Dmitry Chestnykh -- Evan Shaw -- Péter Szilágyi -- Trevor Gattis -- Vincent Vanackere diff --git a/vendor/github.com/ethereum/go-ethereum/log/LICENSE b/vendor/github.com/ethereum/go-ethereum/log/LICENSE deleted file mode 100644 index 5f0d1fb..0000000 --- a/vendor/github.com/ethereum/go-ethereum/log/LICENSE +++ /dev/null @@ -1,13 +0,0 @@ -Copyright 2014 Alan Shreve - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
diff --git a/vendor/github.com/ethereum/go-ethereum/log/README.md b/vendor/github.com/ethereum/go-ethereum/log/README.md deleted file mode 100644 index 4742680..0000000 --- a/vendor/github.com/ethereum/go-ethereum/log/README.md +++ /dev/null @@ -1,77 +0,0 @@ -![obligatory xkcd](https://imgs.xkcd.com/comics/standards.png) - -# log15 [![godoc reference](https://godoc.org/github.com/inconshreveable/log15?status.png)](https://godoc.org/github.com/inconshreveable/log15) [![Build Status](https://travis-ci.org/inconshreveable/log15.svg?branch=master)](https://travis-ci.org/inconshreveable/log15) - -Package log15 provides an opinionated, simple toolkit for best-practice logging in Go (golang) that is both human and machine readable. It is modeled after the Go standard library's [`io`](https://golang.org/pkg/io/) and [`net/http`](https://golang.org/pkg/net/http/) packages and is an alternative to the standard library's [`log`](https://golang.org/pkg/log/) package. - -## Features -- A simple, easy-to-understand API -- Promotes structured logging by encouraging use of key/value pairs -- Child loggers which inherit and add their own private context -- Lazy evaluation of expensive operations -- Simple Handler interface allowing for construction of flexible, custom logging configurations with a tiny API. -- Color terminal support -- Built-in support for logging to files, streams, syslog, and the network -- Support for forking records to multiple handlers, buffering records for output, failing over from failed handler writes, + more - -## Versioning -The API of the master branch of log15 should always be considered unstable. If you want to rely on a stable API, -you must vendor the library. 
- -## Importing - -```go -import log "github.com/inconshreveable/log15" -``` - -## Examples - -```go -// all loggers can have key/value context -srvlog := log.New("module", "app/server") - -// all log messages can have key/value context -srvlog.Warn("abnormal conn rate", "rate", curRate, "low", lowRate, "high", highRate) - -// child loggers with inherited context -connlog := srvlog.New("raddr", c.RemoteAddr()) -connlog.Info("connection open") - -// lazy evaluation -connlog.Debug("ping remote", "latency", log.Lazy{pingRemote}) - -// flexible configuration -srvlog.SetHandler(log.MultiHandler( - log.StreamHandler(os.Stderr, log.LogfmtFormat()), - log.LvlFilterHandler( - log.LvlError, - log.Must.FileHandler("errors.json", log.JSONFormat())))) -``` - -Will result in output that looks like this: - -``` -WARN[06-17|21:58:10] abnormal conn rate module=app/server rate=0.500 low=0.100 high=0.800 -INFO[06-17|21:58:10] connection open module=app/server raddr=10.0.0.1 -``` - -## Breaking API Changes -The following commits broke API stability. This reference is intended to help you understand the consequences of updating to a newer version -of log15. - -- 57a084d014d4150152b19e4e531399a7145d1540 - Added a `Get()` method to the `Logger` interface to retrieve the current handler -- 93404652ee366648fa622b64d1e2b67d75a3094a - `Record` field `Call` changed to `stack.Call` with switch to `github.com/go-stack/stack` -- a5e7613673c73281f58e15a87d2cf0cf111e8152 - Restored `syslog.Priority` argument to the `SyslogXxx` handler constructors - -## FAQ - -### The varargs style is brittle and error prone! Can I have type safety please? -Yes. 
Use `log.Ctx`: - -```go -srvlog := log.New(log.Ctx{"module": "app/server"}) -srvlog.Warn("abnormal conn rate", log.Ctx{"rate": curRate, "low": lowRate, "high": highRate}) -``` - -## License -Apache diff --git a/vendor/github.com/ethereum/go-ethereum/log/README_ETHEREUM.md b/vendor/github.com/ethereum/go-ethereum/log/README_ETHEREUM.md deleted file mode 100644 index f6c42cc..0000000 --- a/vendor/github.com/ethereum/go-ethereum/log/README_ETHEREUM.md +++ /dev/null @@ -1,5 +0,0 @@ -This package is a fork of https://github.com/inconshreveable/log15, with some -minor modifications required by the go-ethereum codebase: - - * Support for log level `trace` - * Modified behavior to exit on `critical` failure diff --git a/vendor/github.com/ethereum/go-ethereum/log/doc.go b/vendor/github.com/ethereum/go-ethereum/log/doc.go deleted file mode 100644 index bff2f49..0000000 --- a/vendor/github.com/ethereum/go-ethereum/log/doc.go +++ /dev/null @@ -1,333 +0,0 @@ -/* -Package log15 provides an opinionated, simple toolkit for best-practice logging that is -both human and machine readable. It is modeled after the standard library's io and net/http -packages. - -This package enforces you to only log key/value pairs. Keys must be strings. Values may be -any type that you like. The default output format is logfmt, but you may also choose to use -JSON instead if that suits you. 
Here's how you log: - - log.Info("page accessed", "path", r.URL.Path, "user_id", user.id) - -This will output a line that looks like: - - lvl=info t=2014-05-02T16:07:23-0700 msg="page accessed" path=/org/71/profile user_id=9 - -Getting Started - -To get started, you'll want to import the library: - - import log "github.com/inconshreveable/log15" - - -Now you're ready to start logging: - - func main() { - log.Info("Program starting", "args", os.Args()) - } - - -Convention - -Because recording a human-meaningful message is common and good practice, the first argument to every -logging method is the value to the *implicit* key 'msg'. - -Additionally, the level you choose for a message will be automatically added with the key 'lvl', and so -will the current timestamp with key 't'. - -You may supply any additional context as a set of key/value pairs to the logging function. log15 allows -you to favor terseness, ordering, and speed over safety. This is a reasonable tradeoff for -logging functions. You don't need to explicitly state keys/values, log15 understands that they alternate -in the variadic argument list: - - log.Warn("size out of bounds", "low", lowBound, "high", highBound, "val", val) - -If you really do favor your type-safety, you may choose to pass a log.Ctx instead: - - log.Warn("size out of bounds", log.Ctx{"low": lowBound, "high": highBound, "val": val}) - - -Context loggers - -Frequently, you want to add context to a logger so that you can track actions associated with it. An http -request is a good example. 
You can easily create new loggers that have context that is automatically included -with each log line: - - requestlogger := log.New("path", r.URL.Path) - - // later - requestlogger.Debug("db txn commit", "duration", txnTimer.Finish()) - -This will output a log line that includes the path context that is attached to the logger: - - lvl=dbug t=2014-05-02T16:07:23-0700 path=/repo/12/add_hook msg="db txn commit" duration=0.12 - - -Handlers - -The Handler interface defines where log lines are printed to and how they are formated. Handler is a -single interface that is inspired by net/http's handler interface: - - type Handler interface { - Log(r *Record) error - } - - -Handlers can filter records, format them, or dispatch to multiple other Handlers. -This package implements a number of Handlers for common logging patterns that are -easily composed to create flexible, custom logging structures. - -Here's an example handler that prints logfmt output to Stdout: - - handler := log.StreamHandler(os.Stdout, log.LogfmtFormat()) - -Here's an example handler that defers to two other handlers. One handler only prints records -from the rpc package in logfmt to standard out. The other prints records at Error level -or above in JSON formatted output to the file /var/log/service.json - - handler := log.MultiHandler( - log.LvlFilterHandler(log.LvlError, log.Must.FileHandler("/var/log/service.json", log.JSONFormat())), - log.MatchFilterHandler("pkg", "app/rpc" log.StdoutHandler()) - ) - -Logging File Names and Line Numbers - -This package implements three Handlers that add debugging information to the -context, CallerFileHandler, CallerFuncHandler and CallerStackHandler. Here's -an example that adds the source file and line number of each logging call to -the context. - - h := log.CallerFileHandler(log.StdoutHandler) - log.Root().SetHandler(h) - ... 
- log.Error("open file", "err", err) - -This will output a line that looks like: - - lvl=eror t=2014-05-02T16:07:23-0700 msg="open file" err="file not found" caller=data.go:42 - -Here's an example that logs the call stack rather than just the call site. - - h := log.CallerStackHandler("%+v", log.StdoutHandler) - log.Root().SetHandler(h) - ... - log.Error("open file", "err", err) - -This will output a line that looks like: - - lvl=eror t=2014-05-02T16:07:23-0700 msg="open file" err="file not found" stack="[pkg/data.go:42 pkg/cmd/main.go]" - -The "%+v" format instructs the handler to include the path of the source file -relative to the compile time GOPATH. The github.com/go-stack/stack package -documents the full list of formatting verbs and modifiers available. - -Custom Handlers - -The Handler interface is so simple that it's also trivial to write your own. Let's create an -example handler which tries to write to one handler, but if that fails it falls back to -writing to another handler and includes the error that it encountered when trying to write -to the primary. This might be useful when trying to log over a network socket, but if that -fails you want to log those records to a file on disk. - - type BackupHandler struct { - Primary Handler - Secondary Handler - } - - func (h *BackupHandler) Log (r *Record) error { - err := h.Primary.Log(r) - if err != nil { - r.Ctx = append(ctx, "primary_err", err) - return h.Secondary.Log(r) - } - return nil - } - -This pattern is so useful that a generic version that handles an arbitrary number of Handlers -is included as part of this library called FailoverHandler. - -Logging Expensive Operations - -Sometimes, you want to log values that are extremely expensive to compute, but you don't want to pay -the price of computing them if you haven't turned up your logging level to a high level of detail. 
- -This package provides a simple type to annotate a logging operation that you want to be evaluated -lazily, just when it is about to be logged, so that it would not be evaluated if an upstream Handler -filters it out. Just wrap any function which takes no arguments with the log.Lazy type. For example: - - func factorRSAKey() (factors []int) { - // return the factors of a very large number - } - - log.Debug("factors", log.Lazy{factorRSAKey}) - -If this message is not logged for any reason (like logging at the Error level), then -factorRSAKey is never evaluated. - -Dynamic context values - -The same log.Lazy mechanism can be used to attach context to a logger which you want to be -evaluated when the message is logged, but not when the logger is created. For example, let's imagine -a game where you have Player objects: - - type Player struct { - name string - alive bool - log.Logger - } - -You always want to log a player's name and whether they're alive or dead, so when you create the player -object, you might do: - - p := &Player{name: name, alive: true} - p.Logger = log.New("name", p.name, "alive", p.alive) - -Only now, even after a player has died, the logger will still report they are alive because the logging -context is evaluated when the logger was created. By using the Lazy wrapper, we can defer the evaluation -of whether the player is alive or not to each log message, so that the log records will reflect the player's -current state no matter when the log message is written: - - p := &Player{name: name, alive: true} - isAlive := func() bool { return p.alive } - player.Logger = log.New("name", p.name, "alive", log.Lazy{isAlive}) - -Terminal Format - -If log15 detects that stdout is a terminal, it will configure the default -handler for it (which is log.StdoutHandler) to use TerminalFormat. This format -logs records nicely for your terminal, including color-coded output based -on log level. 
- -Error Handling - -Becasuse log15 allows you to step around the type system, there are a few ways you can specify -invalid arguments to the logging functions. You could, for example, wrap something that is not -a zero-argument function with log.Lazy or pass a context key that is not a string. Since logging libraries -are typically the mechanism by which errors are reported, it would be onerous for the logging functions -to return errors. Instead, log15 handles errors by making these guarantees to you: - -- Any log record containing an error will still be printed with the error explained to you as part of the log record. - -- Any log record containing an error will include the context key LOG15_ERROR, enabling you to easily -(and if you like, automatically) detect if any of your logging calls are passing bad values. - -Understanding this, you might wonder why the Handler interface can return an error value in its Log method. Handlers -are encouraged to return errors only if they fail to write their log records out to an external source like if the -syslog daemon is not responding. This allows the construction of useful handlers which cope with those failures -like the FailoverHandler. - -Library Use - -log15 is intended to be useful for library authors as a way to provide configurable logging to -users of their library. Best practice for use in a library is to always disable all output for your logger -by default and to provide a public Logger instance that consumers of your library can configure. 
Like so: - - package yourlib - - import "github.com/inconshreveable/log15" - - var Log = log.New() - - func init() { - Log.SetHandler(log.DiscardHandler()) - } - -Users of your library may then enable it if they like: - - import "github.com/inconshreveable/log15" - import "example.com/yourlib" - - func main() { - handler := // custom handler setup - yourlib.Log.SetHandler(handler) - } - -Best practices attaching logger context - -The ability to attach context to a logger is a powerful one. Where should you do it and why? -I favor embedding a Logger directly into any persistent object in my application and adding -unique, tracing context keys to it. For instance, imagine I am writing a web browser: - - type Tab struct { - url string - render *RenderingContext - // ... - - Logger - } - - func NewTab(url string) *Tab { - return &Tab { - // ... - url: url, - - Logger: log.New("url", url), - } - } - -When a new tab is created, I assign a logger to it with the url of -the tab as context so it can easily be traced through the logs. -Now, whenever we perform any operation with the tab, we'll log with its -embedded logger and it will include the tab title automatically: - - tab.Debug("moved position", "idx", tab.idx) - -There's only one problem. What if the tab url changes? We could -use log.Lazy to make sure the current url is always written, but that -would mean that we couldn't trace a tab's full lifetime through our -logs after the user navigate to a new URL. - -Instead, think about what values to attach to your loggers the -same way you think about what to use as a key in a SQL database schema. -If it's possible to use a natural key that is unique for the lifetime of the -object, do so. But otherwise, log15's ext package has a handy RandId -function to let you generate what you might call "surrogate keys" -They're just random hex identifiers to use for tracing. 
Back to our -Tab example, we would prefer to set up our Logger like so: - - import logext "github.com/inconshreveable/log15/ext" - - t := &Tab { - // ... - url: url, - } - - t.Logger = log.New("id", logext.RandId(8), "url", log.Lazy{t.getUrl}) - return t - -Now we'll have a unique traceable identifier even across loading new urls, but -we'll still be able to see the tab's current url in the log messages. - -Must - -For all Handler functions which can return an error, there is a version of that -function which will return no error but panics on failure. They are all available -on the Must object. For example: - - log.Must.FileHandler("/path", log.JSONFormat) - log.Must.NetHandler("tcp", ":1234", log.JSONFormat) - -Inspiration and Credit - -All of the following excellent projects inspired the design of this library: - -code.google.com/p/log4go - -github.com/op/go-logging - -github.com/technoweenie/grohl - -github.com/Sirupsen/logrus - -github.com/kr/logfmt - -github.com/spacemonkeygo/spacelog - -golang's stdlib, notably io and net/http - -The Name - -https://xkcd.com/927/ - -*/ -package log diff --git a/vendor/github.com/ethereum/go-ethereum/log/format.go b/vendor/github.com/ethereum/go-ethereum/log/format.go deleted file mode 100644 index a1b5dac..0000000 --- a/vendor/github.com/ethereum/go-ethereum/log/format.go +++ /dev/null @@ -1,406 +0,0 @@ -package log - -import ( - "bytes" - "encoding/json" - "fmt" - "reflect" - "strconv" - "strings" - "sync" - "sync/atomic" - "time" - "unicode/utf8" -) - -const ( - timeFormat = "2006-01-02T15:04:05-0700" - termTimeFormat = "01-02|15:04:05.000" - floatFormat = 'f' - termMsgJust = 40 - termCtxMaxPadding = 40 -) - -// locationTrims are trimmed for display to avoid unwieldy log lines. -var locationTrims = []string{ - "github.com/ethereum/go-ethereum/", -} - -// PrintOrigins sets or unsets log location (file:line) printing for terminal -// format output. 
-func PrintOrigins(print bool) { - if print { - atomic.StoreUint32(&locationEnabled, 1) - } else { - atomic.StoreUint32(&locationEnabled, 0) - } -} - -// locationEnabled is an atomic flag controlling whether the terminal formatter -// should append the log locations too when printing entries. -var locationEnabled uint32 - -// locationLength is the maxmimum path length encountered, which all logs are -// padded to to aid in alignment. -var locationLength uint32 - -// fieldPadding is a global map with maximum field value lengths seen until now -// to allow padding log contexts in a bit smarter way. -var fieldPadding = make(map[string]int) - -// fieldPaddingLock is a global mutex protecting the field padding map. -var fieldPaddingLock sync.RWMutex - -type Format interface { - Format(r *Record) []byte -} - -// FormatFunc returns a new Format object which uses -// the given function to perform record formatting. -func FormatFunc(f func(*Record) []byte) Format { - return formatFunc(f) -} - -type formatFunc func(*Record) []byte - -func (f formatFunc) Format(r *Record) []byte { - return f(r) -} - -// TerminalStringer is an analogous interface to the stdlib stringer, allowing -// own types to have custom shortened serialization formats when printed to the -// screen. -type TerminalStringer interface { - TerminalString() string -} - -// TerminalFormat formats log records optimized for human readability on -// a terminal with color-coded level output and terser human friendly timestamp. -// This format should only be used for interactive programs or while developing. -// -// [LEVEL] [TIME] MESAGE key=value key=value ... 
-// -// Example: -// -// [DBUG] [May 16 20:58:45] remove route ns=haproxy addr=127.0.0.1:50002 -// -func TerminalFormat(usecolor bool) Format { - return FormatFunc(func(r *Record) []byte { - var color = 0 - if usecolor { - switch r.Lvl { - case LvlCrit: - color = 35 - case LvlError: - color = 31 - case LvlWarn: - color = 33 - case LvlInfo: - color = 32 - case LvlDebug: - color = 36 - case LvlTrace: - color = 34 - } - } - - b := &bytes.Buffer{} - lvl := r.Lvl.AlignedString() - if atomic.LoadUint32(&locationEnabled) != 0 { - // Log origin printing was requested, format the location path and line number - location := fmt.Sprintf("%+v", r.Call) - for _, prefix := range locationTrims { - location = strings.TrimPrefix(location, prefix) - } - // Maintain the maximum location length for fancyer alignment - align := int(atomic.LoadUint32(&locationLength)) - if align < len(location) { - align = len(location) - atomic.StoreUint32(&locationLength, uint32(align)) - } - padding := strings.Repeat(" ", align-len(location)) - - // Assemble and print the log heading - if color > 0 { - fmt.Fprintf(b, "\x1b[%dm%s\x1b[0m[%s|%s]%s %s ", color, lvl, r.Time.Format(termTimeFormat), location, padding, r.Msg) - } else { - fmt.Fprintf(b, "%s[%s|%s]%s %s ", lvl, r.Time.Format(termTimeFormat), location, padding, r.Msg) - } - } else { - if color > 0 { - fmt.Fprintf(b, "\x1b[%dm%s\x1b[0m[%s] %s ", color, lvl, r.Time.Format(termTimeFormat), r.Msg) - } else { - fmt.Fprintf(b, "%s[%s] %s ", lvl, r.Time.Format(termTimeFormat), r.Msg) - } - } - // try to justify the log output for short messages - length := utf8.RuneCountInString(r.Msg) - if len(r.Ctx) > 0 && length < termMsgJust { - b.Write(bytes.Repeat([]byte{' '}, termMsgJust-length)) - } - // print the keys logfmt style - logfmt(b, r.Ctx, color, true) - return b.Bytes() - }) -} - -// LogfmtFormat prints records in logfmt format, an easy machine-parseable but human-readable -// format for key/value pairs. 
-// -// For more details see: http://godoc.org/github.com/kr/logfmt -// -func LogfmtFormat() Format { - return FormatFunc(func(r *Record) []byte { - common := []interface{}{r.KeyNames.Time, r.Time, r.KeyNames.Lvl, r.Lvl, r.KeyNames.Msg, r.Msg} - buf := &bytes.Buffer{} - logfmt(buf, append(common, r.Ctx...), 0, false) - return buf.Bytes() - }) -} - -func logfmt(buf *bytes.Buffer, ctx []interface{}, color int, term bool) { - for i := 0; i < len(ctx); i += 2 { - if i != 0 { - buf.WriteByte(' ') - } - - k, ok := ctx[i].(string) - v := formatLogfmtValue(ctx[i+1], term) - if !ok { - k, v = errorKey, formatLogfmtValue(k, term) - } - - // XXX: we should probably check that all of your key bytes aren't invalid - fieldPaddingLock.RLock() - padding := fieldPadding[k] - fieldPaddingLock.RUnlock() - - length := utf8.RuneCountInString(v) - if padding < length && length <= termCtxMaxPadding { - padding = length - - fieldPaddingLock.Lock() - fieldPadding[k] = padding - fieldPaddingLock.Unlock() - } - if color > 0 { - fmt.Fprintf(buf, "\x1b[%dm%s\x1b[0m=", color, k) - } else { - buf.WriteString(k) - buf.WriteByte('=') - } - buf.WriteString(v) - if i < len(ctx)-2 && padding > length { - buf.Write(bytes.Repeat([]byte{' '}, padding-length)) - } - } - buf.WriteByte('\n') -} - -// JSONFormat formats log records as JSON objects separated by newlines. -// It is the equivalent of JSONFormatEx(false, true). -func JSONFormat() Format { - return JSONFormatEx(false, true) -} - -// JSONFormatOrderedEx formats log records as JSON arrays. If pretty is true, -// records will be pretty-printed. If lineSeparated is true, records -// will be logged with a new line between each record. 
-func JSONFormatOrderedEx(pretty, lineSeparated bool) Format { - jsonMarshal := json.Marshal - if pretty { - jsonMarshal = func(v interface{}) ([]byte, error) { - return json.MarshalIndent(v, "", " ") - } - } - return FormatFunc(func(r *Record) []byte { - props := make(map[string]interface{}) - - props[r.KeyNames.Time] = r.Time - props[r.KeyNames.Lvl] = r.Lvl.String() - props[r.KeyNames.Msg] = r.Msg - - ctx := make([]string, len(r.Ctx)) - for i := 0; i < len(r.Ctx); i += 2 { - k, ok := r.Ctx[i].(string) - if !ok { - props[errorKey] = fmt.Sprintf("%+v is not a string key,", r.Ctx[i]) - } - ctx[i] = k - ctx[i+1] = formatLogfmtValue(r.Ctx[i+1], true) - } - props[r.KeyNames.Ctx] = ctx - - b, err := jsonMarshal(props) - if err != nil { - b, _ = jsonMarshal(map[string]string{ - errorKey: err.Error(), - }) - return b - } - if lineSeparated { - b = append(b, '\n') - } - return b - }) -} - -// JSONFormatEx formats log records as JSON objects. If pretty is true, -// records will be pretty-printed. If lineSeparated is true, records -// will be logged with a new line between each record. 
-func JSONFormatEx(pretty, lineSeparated bool) Format { - jsonMarshal := json.Marshal - if pretty { - jsonMarshal = func(v interface{}) ([]byte, error) { - return json.MarshalIndent(v, "", " ") - } - } - - return FormatFunc(func(r *Record) []byte { - props := make(map[string]interface{}) - - props[r.KeyNames.Time] = r.Time - props[r.KeyNames.Lvl] = r.Lvl.String() - props[r.KeyNames.Msg] = r.Msg - - for i := 0; i < len(r.Ctx); i += 2 { - k, ok := r.Ctx[i].(string) - if !ok { - props[errorKey] = fmt.Sprintf("%+v is not a string key", r.Ctx[i]) - } - props[k] = formatJSONValue(r.Ctx[i+1]) - } - - b, err := jsonMarshal(props) - if err != nil { - b, _ = jsonMarshal(map[string]string{ - errorKey: err.Error(), - }) - return b - } - - if lineSeparated { - b = append(b, '\n') - } - - return b - }) -} - -func formatShared(value interface{}) (result interface{}) { - defer func() { - if err := recover(); err != nil { - if v := reflect.ValueOf(value); v.Kind() == reflect.Ptr && v.IsNil() { - result = "nil" - } else { - panic(err) - } - } - }() - - switch v := value.(type) { - case time.Time: - return v.Format(timeFormat) - - case error: - return v.Error() - - case fmt.Stringer: - return v.String() - - default: - return v - } -} - -func formatJSONValue(value interface{}) interface{} { - value = formatShared(value) - switch value.(type) { - case int, int8, int16, int32, int64, float32, float64, uint, uint8, uint16, uint32, uint64, string: - return value - default: - return fmt.Sprintf("%+v", value) - } -} - -// formatValue formats a value for serialization -func formatLogfmtValue(value interface{}, term bool) string { - if value == nil { - return "nil" - } - - if t, ok := value.(time.Time); ok { - // Performance optimization: No need for escaping since the provided - // timeFormat doesn't have any escape characters, and escaping is - // expensive. 
- return t.Format(timeFormat) - } - if term { - if s, ok := value.(TerminalStringer); ok { - // Custom terminal stringer provided, use that - return escapeString(s.TerminalString()) - } - } - value = formatShared(value) - switch v := value.(type) { - case bool: - return strconv.FormatBool(v) - case float32: - return strconv.FormatFloat(float64(v), floatFormat, 3, 64) - case float64: - return strconv.FormatFloat(v, floatFormat, 3, 64) - case int, int8, int16, int32, int64, uint, uint8, uint16, uint32, uint64: - return fmt.Sprintf("%d", value) - case string: - return escapeString(v) - default: - return escapeString(fmt.Sprintf("%+v", value)) - } -} - -var stringBufPool = sync.Pool{ - New: func() interface{} { return new(bytes.Buffer) }, -} - -func escapeString(s string) string { - needsQuotes := false - needsEscape := false - for _, r := range s { - if r <= ' ' || r == '=' || r == '"' { - needsQuotes = true - } - if r == '\\' || r == '"' || r == '\n' || r == '\r' || r == '\t' { - needsEscape = true - } - } - if !needsEscape && !needsQuotes { - return s - } - e := stringBufPool.Get().(*bytes.Buffer) - e.WriteByte('"') - for _, r := range s { - switch r { - case '\\', '"': - e.WriteByte('\\') - e.WriteByte(byte(r)) - case '\n': - e.WriteString("\\n") - case '\r': - e.WriteString("\\r") - case '\t': - e.WriteString("\\t") - default: - e.WriteRune(r) - } - } - e.WriteByte('"') - var ret string - if needsQuotes { - ret = e.String() - } else { - ret = string(e.Bytes()[1 : e.Len()-1]) - } - e.Reset() - stringBufPool.Put(e) - return ret -} diff --git a/vendor/github.com/ethereum/go-ethereum/log/handler.go b/vendor/github.com/ethereum/go-ethereum/log/handler.go deleted file mode 100644 index 3c99114..0000000 --- a/vendor/github.com/ethereum/go-ethereum/log/handler.go +++ /dev/null @@ -1,359 +0,0 @@ -package log - -import ( - "fmt" - "io" - "net" - "os" - "reflect" - "sync" - - "github.com/go-stack/stack" -) - -// Handler defines where and how log records are written. 
-// A Logger prints its log records by writing to a Handler. -// Handlers are composable, providing you great flexibility in combining -// them to achieve the logging structure that suits your applications. -type Handler interface { - Log(r *Record) error -} - -// FuncHandler returns a Handler that logs records with the given -// function. -func FuncHandler(fn func(r *Record) error) Handler { - return funcHandler(fn) -} - -type funcHandler func(r *Record) error - -func (h funcHandler) Log(r *Record) error { - return h(r) -} - -// StreamHandler writes log records to an io.Writer -// with the given format. StreamHandler can be used -// to easily begin writing log records to other -// outputs. -// -// StreamHandler wraps itself with LazyHandler and SyncHandler -// to evaluate Lazy objects and perform safe concurrent writes. -func StreamHandler(wr io.Writer, fmtr Format) Handler { - h := FuncHandler(func(r *Record) error { - _, err := wr.Write(fmtr.Format(r)) - return err - }) - return LazyHandler(SyncHandler(h)) -} - -// SyncHandler can be wrapped around a handler to guarantee that -// only a single Log operation can proceed at a time. It's necessary -// for thread-safe concurrent writes. -func SyncHandler(h Handler) Handler { - var mu sync.Mutex - return FuncHandler(func(r *Record) error { - defer mu.Unlock() - mu.Lock() - return h.Log(r) - }) -} - -// FileHandler returns a handler which writes log records to the give file -// using the given format. If the path -// already exists, FileHandler will append to the given file. If it does not, -// FileHandler will create the file with mode 0644. -func FileHandler(path string, fmtr Format) (Handler, error) { - f, err := os.OpenFile(path, os.O_CREATE|os.O_APPEND|os.O_WRONLY, 0644) - if err != nil { - return nil, err - } - return closingHandler{f, StreamHandler(f, fmtr)}, nil -} - -// NetHandler opens a socket to the given address and writes records -// over the connection. 
-func NetHandler(network, addr string, fmtr Format) (Handler, error) { - conn, err := net.Dial(network, addr) - if err != nil { - return nil, err - } - - return closingHandler{conn, StreamHandler(conn, fmtr)}, nil -} - -// XXX: closingHandler is essentially unused at the moment -// it's meant for a future time when the Handler interface supports -// a possible Close() operation -type closingHandler struct { - io.WriteCloser - Handler -} - -func (h *closingHandler) Close() error { - return h.WriteCloser.Close() -} - -// CallerFileHandler returns a Handler that adds the line number and file of -// the calling function to the context with key "caller". -func CallerFileHandler(h Handler) Handler { - return FuncHandler(func(r *Record) error { - r.Ctx = append(r.Ctx, "caller", fmt.Sprint(r.Call)) - return h.Log(r) - }) -} - -// CallerFuncHandler returns a Handler that adds the calling function name to -// the context with key "fn". -func CallerFuncHandler(h Handler) Handler { - return FuncHandler(func(r *Record) error { - r.Ctx = append(r.Ctx, "fn", formatCall("%+n", r.Call)) - return h.Log(r) - }) -} - -// This function is here to please go vet on Go < 1.8. -func formatCall(format string, c stack.Call) string { - return fmt.Sprintf(format, c) -} - -// CallerStackHandler returns a Handler that adds a stack trace to the context -// with key "stack". The stack trace is formated as a space separated list of -// call sites inside matching []'s. The most recent call site is listed first. -// Each call site is formatted according to format. See the documentation of -// package github.com/go-stack/stack for the list of supported formats. 
-func CallerStackHandler(format string, h Handler) Handler { - return FuncHandler(func(r *Record) error { - s := stack.Trace().TrimBelow(r.Call).TrimRuntime() - if len(s) > 0 { - r.Ctx = append(r.Ctx, "stack", fmt.Sprintf(format, s)) - } - return h.Log(r) - }) -} - -// FilterHandler returns a Handler that only writes records to the -// wrapped Handler if the given function evaluates true. For example, -// to only log records where the 'err' key is not nil: -// -// logger.SetHandler(FilterHandler(func(r *Record) bool { -// for i := 0; i < len(r.Ctx); i += 2 { -// if r.Ctx[i] == "err" { -// return r.Ctx[i+1] != nil -// } -// } -// return false -// }, h)) -// -func FilterHandler(fn func(r *Record) bool, h Handler) Handler { - return FuncHandler(func(r *Record) error { - if fn(r) { - return h.Log(r) - } - return nil - }) -} - -// MatchFilterHandler returns a Handler that only writes records -// to the wrapped Handler if the given key in the logged -// context matches the value. For example, to only log records -// from your ui package: -// -// log.MatchFilterHandler("pkg", "app/ui", log.StdoutHandler) -// -func MatchFilterHandler(key string, value interface{}, h Handler) Handler { - return FilterHandler(func(r *Record) (pass bool) { - switch key { - case r.KeyNames.Lvl: - return r.Lvl == value - case r.KeyNames.Time: - return r.Time == value - case r.KeyNames.Msg: - return r.Msg == value - } - - for i := 0; i < len(r.Ctx); i += 2 { - if r.Ctx[i] == key { - return r.Ctx[i+1] == value - } - } - return false - }, h) -} - -// LvlFilterHandler returns a Handler that only writes -// records which are less than the given verbosity -// level to the wrapped Handler. 
For example, to only -// log Error/Crit records: -// -// log.LvlFilterHandler(log.LvlError, log.StdoutHandler) -// -func LvlFilterHandler(maxLvl Lvl, h Handler) Handler { - return FilterHandler(func(r *Record) (pass bool) { - return r.Lvl <= maxLvl - }, h) -} - -// MultiHandler dispatches any write to each of its handlers. -// This is useful for writing different types of log information -// to different locations. For example, to log to a file and -// standard error: -// -// log.MultiHandler( -// log.Must.FileHandler("/var/log/app.log", log.LogfmtFormat()), -// log.StderrHandler) -// -func MultiHandler(hs ...Handler) Handler { - return FuncHandler(func(r *Record) error { - for _, h := range hs { - // what to do about failures? - h.Log(r) - } - return nil - }) -} - -// FailoverHandler writes all log records to the first handler -// specified, but will failover and write to the second handler if -// the first handler has failed, and so on for all handlers specified. -// For example you might want to log to a network socket, but failover -// to writing to a file if the network fails, and then to -// standard out if the file write fails: -// -// log.FailoverHandler( -// log.Must.NetHandler("tcp", ":9090", log.JSONFormat()), -// log.Must.FileHandler("/var/log/app.log", log.LogfmtFormat()), -// log.StdoutHandler) -// -// All writes that do not go to the first handler will add context with keys of -// the form "failover_err_{idx}" which explain the error encountered while -// trying to write to the handlers before them in the list. -func FailoverHandler(hs ...Handler) Handler { - return FuncHandler(func(r *Record) error { - var err error - for i, h := range hs { - err = h.Log(r) - if err == nil { - return nil - } - r.Ctx = append(r.Ctx, fmt.Sprintf("failover_err_%d", i), err) - } - - return err - }) -} - -// ChannelHandler writes all records to the given channel. -// It blocks if the channel is full. 
Useful for async processing -// of log messages, it's used by BufferedHandler. -func ChannelHandler(recs chan<- *Record) Handler { - return FuncHandler(func(r *Record) error { - recs <- r - return nil - }) -} - -// BufferedHandler writes all records to a buffered -// channel of the given size which flushes into the wrapped -// handler whenever it is available for writing. Since these -// writes happen asynchronously, all writes to a BufferedHandler -// never return an error and any errors from the wrapped handler are ignored. -func BufferedHandler(bufSize int, h Handler) Handler { - recs := make(chan *Record, bufSize) - go func() { - for m := range recs { - _ = h.Log(m) - } - }() - return ChannelHandler(recs) -} - -// LazyHandler writes all values to the wrapped handler after evaluating -// any lazy functions in the record's context. It is already wrapped -// around StreamHandler and SyslogHandler in this library, you'll only need -// it if you write your own Handler. -func LazyHandler(h Handler) Handler { - return FuncHandler(func(r *Record) error { - // go through the values (odd indices) and reassign - // the values of any lazy fn to the result of its execution - hadErr := false - for i := 1; i < len(r.Ctx); i += 2 { - lz, ok := r.Ctx[i].(Lazy) - if ok { - v, err := evaluateLazy(lz) - if err != nil { - hadErr = true - r.Ctx[i] = err - } else { - if cs, ok := v.(stack.CallStack); ok { - v = cs.TrimBelow(r.Call).TrimRuntime() - } - r.Ctx[i] = v - } - } - } - - if hadErr { - r.Ctx = append(r.Ctx, errorKey, "bad lazy") - } - - return h.Log(r) - }) -} - -func evaluateLazy(lz Lazy) (interface{}, error) { - t := reflect.TypeOf(lz.Fn) - - if t.Kind() != reflect.Func { - return nil, fmt.Errorf("INVALID_LAZY, not func: %+v", lz.Fn) - } - - if t.NumIn() > 0 { - return nil, fmt.Errorf("INVALID_LAZY, func takes args: %+v", lz.Fn) - } - - if t.NumOut() == 0 { - return nil, fmt.Errorf("INVALID_LAZY, no func return val: %+v", lz.Fn) - } - - value := reflect.ValueOf(lz.Fn) - 
results := value.Call([]reflect.Value{}) - if len(results) == 1 { - return results[0].Interface(), nil - } - values := make([]interface{}, len(results)) - for i, v := range results { - values[i] = v.Interface() - } - return values, nil -} - -// DiscardHandler reports success for all writes but does nothing. -// It is useful for dynamically disabling logging at runtime via -// a Logger's SetHandler method. -func DiscardHandler() Handler { - return FuncHandler(func(r *Record) error { - return nil - }) -} - -// Must provides the following Handler creation functions -// which instead of returning an error parameter only return a Handler -// and panic on failure: FileHandler, NetHandler, SyslogHandler, SyslogNetHandler -var Must muster - -func must(h Handler, err error) Handler { - if err != nil { - panic(err) - } - return h -} - -type muster struct{} - -func (m muster) FileHandler(path string, fmtr Format) Handler { - return must(FileHandler(path, fmtr)) -} - -func (m muster) NetHandler(network, addr string, fmtr Format) Handler { - return must(NetHandler(network, addr, fmtr)) -} diff --git a/vendor/github.com/ethereum/go-ethereum/log/handler_glog.go b/vendor/github.com/ethereum/go-ethereum/log/handler_glog.go deleted file mode 100644 index 9b1d4ef..0000000 --- a/vendor/github.com/ethereum/go-ethereum/log/handler_glog.go +++ /dev/null @@ -1,232 +0,0 @@ -// Copyright 2017 The go-ethereum Authors -// This file is part of the go-ethereum library. -// -// The go-ethereum library is free software: you can redistribute it and/or modify -// it under the terms of the GNU Lesser General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// The go-ethereum library is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the -// GNU Lesser General Public License for more details. -// -// You should have received a copy of the GNU Lesser General Public License -// along with the go-ethereum library. If not, see . - -package log - -import ( - "errors" - "fmt" - "regexp" - "runtime" - "strconv" - "strings" - "sync" - "sync/atomic" -) - -// errVmoduleSyntax is returned when a user vmodule pattern is invalid. -var errVmoduleSyntax = errors.New("expect comma-separated list of filename=N") - -// errTraceSyntax is returned when a user backtrace pattern is invalid. -var errTraceSyntax = errors.New("expect file.go:234") - -// GlogHandler is a log handler that mimics the filtering features of Google's -// glog logger: setting global log levels; overriding with callsite pattern -// matches; and requesting backtraces at certain positions. -type GlogHandler struct { - origin Handler // The origin handler this wraps - - level uint32 // Current log level, atomically accessible - override uint32 // Flag whether overrides are used, atomically accessible - backtrace uint32 // Flag whether backtrace location is set - - patterns []pattern // Current list of patterns to override with - siteCache map[uintptr]Lvl // Cache of callsite pattern evaluations - location string // file:line location where to do a stackdump at - lock sync.RWMutex // Lock protecting the override pattern list -} - -// NewGlogHandler creates a new log handler with filtering functionality similar -// to Google's glog logger. The returned handler implements Handler. -func NewGlogHandler(h Handler) *GlogHandler { - return &GlogHandler{ - origin: h, - } -} - -// SetHandler updates the handler to write records to the specified sub-handler. -func (h *GlogHandler) SetHandler(nh Handler) { - h.origin = nh -} - -// pattern contains a filter for the Vmodule option, holding a verbosity level -// and a file pattern to match. -type pattern struct { - pattern *regexp.Regexp - level Lvl -} - -// Verbosity sets the glog verbosity ceiling. 
The verbosity of individual packages -// and source files can be raised using Vmodule. -func (h *GlogHandler) Verbosity(level Lvl) { - atomic.StoreUint32(&h.level, uint32(level)) -} - -// Vmodule sets the glog verbosity pattern. -// -// The syntax of the argument is a comma-separated list of pattern=N, where the -// pattern is a literal file name or "glob" pattern matching and N is a V level. -// -// For instance: -// -// pattern="gopher.go=3" -// sets the V level to 3 in all Go files named "gopher.go" -// -// pattern="foo=3" -// sets V to 3 in all files of any packages whose import path ends in "foo" -// -// pattern="foo/*=3" -// sets V to 3 in all files of any packages whose import path contains "foo" -func (h *GlogHandler) Vmodule(ruleset string) error { - var filter []pattern - for _, rule := range strings.Split(ruleset, ",") { - // Empty strings such as from a trailing comma can be ignored - if len(rule) == 0 { - continue - } - // Ensure we have a pattern = level filter rule - parts := strings.Split(rule, "=") - if len(parts) != 2 { - return errVmoduleSyntax - } - parts[0] = strings.TrimSpace(parts[0]) - parts[1] = strings.TrimSpace(parts[1]) - if len(parts[0]) == 0 || len(parts[1]) == 0 { - return errVmoduleSyntax - } - // Parse the level and if correct, assemble the filter rule - level, err := strconv.Atoi(parts[1]) - if err != nil { - return errVmoduleSyntax - } - if level <= 0 { - continue // Ignore. It's harmless but no point in paying the overhead. - } - // Compile the rule pattern into a regular expression - matcher := ".*" - for _, comp := range strings.Split(parts[0], "/") { - if comp == "*" { - matcher += "(/.*)?" 
- } else if comp != "" { - matcher += "/" + regexp.QuoteMeta(comp) - } - } - if !strings.HasSuffix(parts[0], ".go") { - matcher += "/[^/]+\\.go" - } - matcher = matcher + "$" - - re, _ := regexp.Compile(matcher) - filter = append(filter, pattern{re, Lvl(level)}) - } - // Swap out the vmodule pattern for the new filter system - h.lock.Lock() - defer h.lock.Unlock() - - h.patterns = filter - h.siteCache = make(map[uintptr]Lvl) - atomic.StoreUint32(&h.override, uint32(len(filter))) - - return nil -} - -// BacktraceAt sets the glog backtrace location. When set to a file and line -// number holding a logging statement, a stack trace will be written to the Info -// log whenever execution hits that statement. -// -// Unlike with Vmodule, the ".go" must be present. -func (h *GlogHandler) BacktraceAt(location string) error { - // Ensure the backtrace location contains two non-empty elements - parts := strings.Split(location, ":") - if len(parts) != 2 { - return errTraceSyntax - } - parts[0] = strings.TrimSpace(parts[0]) - parts[1] = strings.TrimSpace(parts[1]) - if len(parts[0]) == 0 || len(parts[1]) == 0 { - return errTraceSyntax - } - // Ensure the .go prefix is present and the line is valid - if !strings.HasSuffix(parts[0], ".go") { - return errTraceSyntax - } - if _, err := strconv.Atoi(parts[1]); err != nil { - return errTraceSyntax - } - // All seems valid - h.lock.Lock() - defer h.lock.Unlock() - - h.location = location - atomic.StoreUint32(&h.backtrace, uint32(len(location))) - - return nil -} - -// Log implements Handler.Log, filtering a log record through the global, local -// and backtrace filters, finally emitting it if either allow it through. -func (h *GlogHandler) Log(r *Record) error { - // If backtracing is requested, check whether this is the callsite - if atomic.LoadUint32(&h.backtrace) > 0 { - // Everything below here is slow. 
Although we could cache the call sites the - // same way as for vmodule, backtracing is so rare it's not worth the extra - // complexity. - h.lock.RLock() - match := h.location == r.Call.String() - h.lock.RUnlock() - - if match { - // Callsite matched, raise the log level to info and gather the stacks - r.Lvl = LvlInfo - - buf := make([]byte, 1024*1024) - buf = buf[:runtime.Stack(buf, true)] - r.Msg += "\n\n" + string(buf) - } - } - // If the global log level allows, fast track logging - if atomic.LoadUint32(&h.level) >= uint32(r.Lvl) { - return h.origin.Log(r) - } - // If no local overrides are present, fast track skipping - if atomic.LoadUint32(&h.override) == 0 { - return nil - } - // Check callsite cache for previously calculated log levels - h.lock.RLock() - lvl, ok := h.siteCache[r.Call.Frame().PC] - h.lock.RUnlock() - - // If we didn't cache the callsite yet, calculate it - if !ok { - h.lock.Lock() - for _, rule := range h.patterns { - if rule.pattern.MatchString(fmt.Sprintf("%+s", r.Call)) { - h.siteCache[r.Call.Frame().PC], lvl, ok = rule.level, rule.level, true - break - } - } - // If no rule matched, remember to drop log the next time - if !ok { - h.siteCache[r.Call.Frame().PC] = 0 - } - h.lock.Unlock() - } - if lvl >= r.Lvl { - return h.origin.Log(r) - } - return nil -} diff --git a/vendor/github.com/ethereum/go-ethereum/log/handler_go13.go b/vendor/github.com/ethereum/go-ethereum/log/handler_go13.go deleted file mode 100644 index 0843ed0..0000000 --- a/vendor/github.com/ethereum/go-ethereum/log/handler_go13.go +++ /dev/null @@ -1,26 +0,0 @@ -// +build !go1.4 - -package log - -import ( - "sync/atomic" - "unsafe" -) - -// swapHandler wraps another handler that may be swapped out -// dynamically at runtime in a thread-safe fashion. 
-type swapHandler struct { - handler unsafe.Pointer -} - -func (h *swapHandler) Log(r *Record) error { - return h.Get().Log(r) -} - -func (h *swapHandler) Get() Handler { - return *(*Handler)(atomic.LoadPointer(&h.handler)) -} - -func (h *swapHandler) Swap(newHandler Handler) { - atomic.StorePointer(&h.handler, unsafe.Pointer(&newHandler)) -} diff --git a/vendor/github.com/ethereum/go-ethereum/log/handler_go14.go b/vendor/github.com/ethereum/go-ethereum/log/handler_go14.go deleted file mode 100644 index 05dedbf..0000000 --- a/vendor/github.com/ethereum/go-ethereum/log/handler_go14.go +++ /dev/null @@ -1,23 +0,0 @@ -// +build go1.4 - -package log - -import "sync/atomic" - -// swapHandler wraps another handler that may be swapped out -// dynamically at runtime in a thread-safe fashion. -type swapHandler struct { - handler atomic.Value -} - -func (h *swapHandler) Log(r *Record) error { - return (*h.handler.Load().(*Handler)).Log(r) -} - -func (h *swapHandler) Swap(newHandler Handler) { - h.handler.Store(&newHandler) -} - -func (h *swapHandler) Get() Handler { - return *h.handler.Load().(*Handler) -} diff --git a/vendor/github.com/ethereum/go-ethereum/log/logger.go b/vendor/github.com/ethereum/go-ethereum/log/logger.go deleted file mode 100644 index 276d696..0000000 --- a/vendor/github.com/ethereum/go-ethereum/log/logger.go +++ /dev/null @@ -1,245 +0,0 @@ -package log - -import ( - "fmt" - "os" - "time" - - "github.com/go-stack/stack" -) - -const timeKey = "t" -const lvlKey = "lvl" -const msgKey = "msg" -const ctxKey = "ctx" -const errorKey = "LOG15_ERROR" -const skipLevel = 2 - -type Lvl int - -const ( - LvlCrit Lvl = iota - LvlError - LvlWarn - LvlInfo - LvlDebug - LvlTrace -) - -// AlignedString returns a 5-character string containing the name of a Lvl. 
-func (l Lvl) AlignedString() string { - switch l { - case LvlTrace: - return "TRACE" - case LvlDebug: - return "DEBUG" - case LvlInfo: - return "INFO " - case LvlWarn: - return "WARN " - case LvlError: - return "ERROR" - case LvlCrit: - return "CRIT " - default: - panic("bad level") - } -} - -// Strings returns the name of a Lvl. -func (l Lvl) String() string { - switch l { - case LvlTrace: - return "trce" - case LvlDebug: - return "dbug" - case LvlInfo: - return "info" - case LvlWarn: - return "warn" - case LvlError: - return "eror" - case LvlCrit: - return "crit" - default: - panic("bad level") - } -} - -// LvlFromString returns the appropriate Lvl from a string name. -// Useful for parsing command line args and configuration files. -func LvlFromString(lvlString string) (Lvl, error) { - switch lvlString { - case "trace", "trce": - return LvlTrace, nil - case "debug", "dbug": - return LvlDebug, nil - case "info": - return LvlInfo, nil - case "warn": - return LvlWarn, nil - case "error", "eror": - return LvlError, nil - case "crit": - return LvlCrit, nil - default: - return LvlDebug, fmt.Errorf("unknown level: %v", lvlString) - } -} - -// A Record is what a Logger asks its handler to write -type Record struct { - Time time.Time - Lvl Lvl - Msg string - Ctx []interface{} - Call stack.Call - KeyNames RecordKeyNames -} - -// RecordKeyNames gets stored in a Record when the write function is executed. -type RecordKeyNames struct { - Time string - Msg string - Lvl string - Ctx string -} - -// A Logger writes key/value pairs to a Handler -type Logger interface { - // New returns a new Logger that has this logger's context plus the given context - New(ctx ...interface{}) Logger - - // GetHandler gets the handler associated with the logger. - GetHandler() Handler - - // SetHandler updates the logger to write records to the specified handler. 
- SetHandler(h Handler) - - // Log a message at the given level with context key/value pairs - Trace(msg string, ctx ...interface{}) - Debug(msg string, ctx ...interface{}) - Info(msg string, ctx ...interface{}) - Warn(msg string, ctx ...interface{}) - Error(msg string, ctx ...interface{}) - Crit(msg string, ctx ...interface{}) -} - -type logger struct { - ctx []interface{} - h *swapHandler -} - -func (l *logger) write(msg string, lvl Lvl, ctx []interface{}, skip int) { - l.h.Log(&Record{ - Time: time.Now(), - Lvl: lvl, - Msg: msg, - Ctx: newContext(l.ctx, ctx), - Call: stack.Caller(skip), - KeyNames: RecordKeyNames{ - Time: timeKey, - Msg: msgKey, - Lvl: lvlKey, - Ctx: ctxKey, - }, - }) -} - -func (l *logger) New(ctx ...interface{}) Logger { - child := &logger{newContext(l.ctx, ctx), new(swapHandler)} - child.SetHandler(l.h) - return child -} - -func newContext(prefix []interface{}, suffix []interface{}) []interface{} { - normalizedSuffix := normalize(suffix) - newCtx := make([]interface{}, len(prefix)+len(normalizedSuffix)) - n := copy(newCtx, prefix) - copy(newCtx[n:], normalizedSuffix) - return newCtx -} - -func (l *logger) Trace(msg string, ctx ...interface{}) { - l.write(msg, LvlTrace, ctx, skipLevel) -} - -func (l *logger) Debug(msg string, ctx ...interface{}) { - l.write(msg, LvlDebug, ctx, skipLevel) -} - -func (l *logger) Info(msg string, ctx ...interface{}) { - l.write(msg, LvlInfo, ctx, skipLevel) -} - -func (l *logger) Warn(msg string, ctx ...interface{}) { - l.write(msg, LvlWarn, ctx, skipLevel) -} - -func (l *logger) Error(msg string, ctx ...interface{}) { - l.write(msg, LvlError, ctx, skipLevel) -} - -func (l *logger) Crit(msg string, ctx ...interface{}) { - l.write(msg, LvlCrit, ctx, skipLevel) - os.Exit(1) -} - -func (l *logger) GetHandler() Handler { - return l.h.Get() -} - -func (l *logger) SetHandler(h Handler) { - l.h.Swap(h) -} - -func normalize(ctx []interface{}) []interface{} { - // if the caller passed a Ctx object, then expand it - if 
len(ctx) == 1 { - if ctxMap, ok := ctx[0].(Ctx); ok { - ctx = ctxMap.toArray() - } - } - - // ctx needs to be even because it's a series of key/value pairs - // no one wants to check for errors on logging functions, - // so instead of erroring on bad input, we'll just make sure - // that things are the right length and users can fix bugs - // when they see the output looks wrong - if len(ctx)%2 != 0 { - ctx = append(ctx, nil, errorKey, "Normalized odd number of arguments by adding nil") - } - - return ctx -} - -// Lazy allows you to defer calculation of a logged value that is expensive -// to compute until it is certain that it must be evaluated with the given filters. -// -// Lazy may also be used in conjunction with a Logger's New() function -// to generate a child logger which always reports the current value of changing -// state. -// -// You may wrap any function which takes no arguments to Lazy. It may return any -// number of values of any type. -type Lazy struct { - Fn interface{} -} - -// Ctx is a map of key/value pairs to pass as context to a log function -// Use this only if you really need greater safety around the arguments you pass -// to the logging functions. -type Ctx map[string]interface{} - -func (c Ctx) toArray() []interface{} { - arr := make([]interface{}, len(c)*2) - - i := 0 - for k, v := range c { - arr[i] = k - arr[i+1] = v - i += 2 - } - - return arr -} diff --git a/vendor/github.com/ethereum/go-ethereum/log/root.go b/vendor/github.com/ethereum/go-ethereum/log/root.go deleted file mode 100644 index 9fb4c5a..0000000 --- a/vendor/github.com/ethereum/go-ethereum/log/root.go +++ /dev/null @@ -1,70 +0,0 @@ -package log - -import ( - "os" -) - -var ( - root = &logger{[]interface{}{}, new(swapHandler)} - StdoutHandler = StreamHandler(os.Stdout, LogfmtFormat()) - StderrHandler = StreamHandler(os.Stderr, LogfmtFormat()) -) - -func init() { - root.SetHandler(DiscardHandler()) -} - -// New returns a new logger with the given context. 
-// New is a convenient alias for Root().New -func New(ctx ...interface{}) Logger { - return root.New(ctx...) -} - -// Root returns the root logger -func Root() Logger { - return root -} - -// The following functions bypass the exported logger methods (logger.Debug, -// etc.) to keep the call depth the same for all paths to logger.write so -// runtime.Caller(2) always refers to the call site in client code. - -// Trace is a convenient alias for Root().Trace -func Trace(msg string, ctx ...interface{}) { - root.write(msg, LvlTrace, ctx, skipLevel) -} - -// Debug is a convenient alias for Root().Debug -func Debug(msg string, ctx ...interface{}) { - root.write(msg, LvlDebug, ctx, skipLevel) -} - -// Info is a convenient alias for Root().Info -func Info(msg string, ctx ...interface{}) { - root.write(msg, LvlInfo, ctx, skipLevel) -} - -// Warn is a convenient alias for Root().Warn -func Warn(msg string, ctx ...interface{}) { - root.write(msg, LvlWarn, ctx, skipLevel) -} - -// Error is a convenient alias for Root().Error -func Error(msg string, ctx ...interface{}) { - root.write(msg, LvlError, ctx, skipLevel) -} - -// Crit is a convenient alias for Root().Crit -func Crit(msg string, ctx ...interface{}) { - root.write(msg, LvlCrit, ctx, skipLevel) - os.Exit(1) -} - -// Output is a convenient alias for write, allowing for the modification of -// the calldepth (number of stack frames to skip). -// calldepth influences the reported line number of the log message. -// A calldepth of zero reports the immediate caller of Output. -// Non-zero calldepth skips as many stack frames. 
-func Output(msg string, lvl Lvl, calldepth int, ctx ...interface{}) { - root.write(msg, lvl, ctx, calldepth+skipLevel) -} diff --git a/vendor/github.com/ethereum/go-ethereum/log/syslog.go b/vendor/github.com/ethereum/go-ethereum/log/syslog.go deleted file mode 100644 index 71a17b3..0000000 --- a/vendor/github.com/ethereum/go-ethereum/log/syslog.go +++ /dev/null @@ -1,57 +0,0 @@ -// +build !windows,!plan9 - -package log - -import ( - "log/syslog" - "strings" -) - -// SyslogHandler opens a connection to the system syslog daemon by calling -// syslog.New and writes all records to it. -func SyslogHandler(priority syslog.Priority, tag string, fmtr Format) (Handler, error) { - wr, err := syslog.New(priority, tag) - return sharedSyslog(fmtr, wr, err) -} - -// SyslogNetHandler opens a connection to a log daemon over the network and writes -// all log records to it. -func SyslogNetHandler(net, addr string, priority syslog.Priority, tag string, fmtr Format) (Handler, error) { - wr, err := syslog.Dial(net, addr, priority, tag) - return sharedSyslog(fmtr, wr, err) -} - -func sharedSyslog(fmtr Format, sysWr *syslog.Writer, err error) (Handler, error) { - if err != nil { - return nil, err - } - h := FuncHandler(func(r *Record) error { - var syslogFn = sysWr.Info - switch r.Lvl { - case LvlCrit: - syslogFn = sysWr.Crit - case LvlError: - syslogFn = sysWr.Err - case LvlWarn: - syslogFn = sysWr.Warning - case LvlInfo: - syslogFn = sysWr.Info - case LvlDebug: - syslogFn = sysWr.Debug - case LvlTrace: - syslogFn = func(m string) error { return nil } // There's no syslog level for trace - } - - s := strings.TrimSpace(string(fmtr.Format(r))) - return syslogFn(s) - }) - return LazyHandler(&closingHandler{sysWr, h}), nil -} - -func (m muster) SyslogHandler(priority syslog.Priority, tag string, fmtr Format) Handler { - return must(SyslogHandler(priority, tag, fmtr)) -} - -func (m muster) SyslogNetHandler(net, addr string, priority syslog.Priority, tag string, fmtr Format) Handler { - 
return must(SyslogNetHandler(net, addr, priority, tag, fmtr)) -} diff --git a/vendor/github.com/ethereum/go-ethereum/metrics/FORK.md b/vendor/github.com/ethereum/go-ethereum/metrics/FORK.md deleted file mode 100644 index b19985b..0000000 --- a/vendor/github.com/ethereum/go-ethereum/metrics/FORK.md +++ /dev/null @@ -1 +0,0 @@ -This repo has been forked from https://github.com/rcrowley/go-metrics at commit e181e09 diff --git a/vendor/github.com/ethereum/go-ethereum/metrics/LICENSE b/vendor/github.com/ethereum/go-ethereum/metrics/LICENSE deleted file mode 100644 index 363fa9e..0000000 --- a/vendor/github.com/ethereum/go-ethereum/metrics/LICENSE +++ /dev/null @@ -1,29 +0,0 @@ -Copyright 2012 Richard Crowley. All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - 1. Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. - - 2. Redistributions in binary form must reproduce the above - copyright notice, this list of conditions and the following - disclaimer in the documentation and/or other materials provided - with the distribution. - -THIS SOFTWARE IS PROVIDED BY RICHARD CROWLEY ``AS IS'' AND ANY EXPRESS -OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED -WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -DISCLAIMED. IN NO EVENT SHALL RICHARD CROWLEY OR CONTRIBUTORS BE LIABLE -FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR -CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF -SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS -INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN -CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) -ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF -THE POSSIBILITY OF SUCH DAMAGE. 
- -The views and conclusions contained in the software and documentation -are those of the authors and should not be interpreted as representing -official policies, either expressed or implied, of Richard Crowley. diff --git a/vendor/github.com/ethereum/go-ethereum/metrics/README.md b/vendor/github.com/ethereum/go-ethereum/metrics/README.md deleted file mode 100644 index e2d7945..0000000 --- a/vendor/github.com/ethereum/go-ethereum/metrics/README.md +++ /dev/null @@ -1,166 +0,0 @@ -go-metrics -========== - -![travis build status](https://travis-ci.org/rcrowley/go-metrics.svg?branch=master) - -Go port of Coda Hale's Metrics library: . - -Documentation: . - -Usage ------ - -Create and update metrics: - -```go -c := metrics.NewCounter() -metrics.Register("foo", c) -c.Inc(47) - -g := metrics.NewGauge() -metrics.Register("bar", g) -g.Update(47) - -r := NewRegistry() -g := metrics.NewRegisteredFunctionalGauge("cache-evictions", r, func() int64 { return cache.getEvictionsCount() }) - -s := metrics.NewExpDecaySample(1028, 0.015) // or metrics.NewUniformSample(1028) -h := metrics.NewHistogram(s) -metrics.Register("baz", h) -h.Update(47) - -m := metrics.NewMeter() -metrics.Register("quux", m) -m.Mark(47) - -t := metrics.NewTimer() -metrics.Register("bang", t) -t.Time(func() {}) -t.Update(47) -``` - -Register() is not threadsafe. 
For threadsafe metric registration use -GetOrRegister: - -```go -t := metrics.GetOrRegisterTimer("account.create.latency", nil) -t.Time(func() {}) -t.Update(47) -``` - -**NOTE:** Be sure to unregister short-lived meters and timers otherwise they will -leak memory: - -```go -// Will call Stop() on the Meter to allow for garbage collection -metrics.Unregister("quux") -// Or similarly for a Timer that embeds a Meter -metrics.Unregister("bang") -``` - -Periodically log every metric in human-readable form to standard error: - -```go -go metrics.Log(metrics.DefaultRegistry, 5 * time.Second, log.New(os.Stderr, "metrics: ", log.Lmicroseconds)) -``` - -Periodically log every metric in slightly-more-parseable form to syslog: - -```go -w, _ := syslog.Dial("unixgram", "/dev/log", syslog.LOG_INFO, "metrics") -go metrics.Syslog(metrics.DefaultRegistry, 60e9, w) -``` - -Periodically emit every metric to Graphite using the [Graphite client](https://github.com/cyberdelia/go-metrics-graphite): - -```go - -import "github.com/cyberdelia/go-metrics-graphite" - -addr, _ := net.ResolveTCPAddr("tcp", "127.0.0.1:2003") -go graphite.Graphite(metrics.DefaultRegistry, 10e9, "metrics", addr) -``` - -Periodically emit every metric into InfluxDB: - -**NOTE:** this has been pulled out of the library due to constant fluctuations -in the InfluxDB API. In fact, all client libraries are on their way out. see -issues [#121](https://github.com/rcrowley/go-metrics/issues/121) and -[#124](https://github.com/rcrowley/go-metrics/issues/124) for progress and details. 
- -```go -import "github.com/vrischmann/go-metrics-influxdb" - -go influxdb.InfluxDB(metrics.DefaultRegistry, - 10e9, - "127.0.0.1:8086", - "database-name", - "username", - "password" -) -``` - -Periodically upload every metric to Librato using the [Librato client](https://github.com/mihasya/go-metrics-librato): - -**Note**: the client included with this repository under the `librato` package -has been deprecated and moved to the repository linked above. - -```go -import "github.com/mihasya/go-metrics-librato" - -go librato.Librato(metrics.DefaultRegistry, - 10e9, // interval - "example@example.com", // account owner email address - "token", // Librato API token - "hostname", // source - []float64{0.95}, // percentiles to send - time.Millisecond, // time unit -) -``` - -Periodically emit every metric to StatHat: - -```go -import "github.com/rcrowley/go-metrics/stathat" - -go stathat.Stathat(metrics.DefaultRegistry, 10e9, "example@example.com") -``` - -Maintain all metrics along with expvars at `/debug/metrics`: - -This uses the same mechanism as [the official expvar](https://golang.org/pkg/expvar/) -but exposed under `/debug/metrics`, which shows a json representation of all your usual expvars -as well as all your go-metrics. 
- - -```go -import "github.com/rcrowley/go-metrics/exp" - -exp.Exp(metrics.DefaultRegistry) -``` - -Installation ------------- - -```sh -go get github.com/rcrowley/go-metrics -``` - -StatHat support additionally requires their Go client: - -```sh -go get github.com/stathat/go -``` - -Publishing Metrics ------------------- - -Clients are available for the following destinations: - -* Librato - https://github.com/mihasya/go-metrics-librato -* Graphite - https://github.com/cyberdelia/go-metrics-graphite -* InfluxDB - https://github.com/vrischmann/go-metrics-influxdb -* Ganglia - https://github.com/appscode/metlia -* Prometheus - https://github.com/deathowl/go-metrics-prometheus -* DataDog - https://github.com/syntaqx/go-metrics-datadog -* SignalFX - https://github.com/pascallouisperez/go-metrics-signalfx diff --git a/vendor/github.com/ethereum/go-ethereum/metrics/counter.go b/vendor/github.com/ethereum/go-ethereum/metrics/counter.go deleted file mode 100644 index 2f78c90..0000000 --- a/vendor/github.com/ethereum/go-ethereum/metrics/counter.go +++ /dev/null @@ -1,144 +0,0 @@ -package metrics - -import ( - "sync/atomic" -) - -// Counters hold an int64 value that can be incremented and decremented. -type Counter interface { - Clear() - Count() int64 - Dec(int64) - Inc(int64) - Snapshot() Counter -} - -// GetOrRegisterCounter returns an existing Counter or constructs and registers -// a new StandardCounter. -func GetOrRegisterCounter(name string, r Registry) Counter { - if nil == r { - r = DefaultRegistry - } - return r.GetOrRegister(name, NewCounter).(Counter) -} - -// GetOrRegisterCounterForced returns an existing Counter or constructs and registers a -// new Counter no matter the global switch is enabled or not. -// Be sure to unregister the counter from the registry once it is of no use to -// allow for garbage collection. 
-func GetOrRegisterCounterForced(name string, r Registry) Counter { - if nil == r { - r = DefaultRegistry - } - return r.GetOrRegister(name, NewCounterForced).(Counter) -} - -// NewCounter constructs a new StandardCounter. -func NewCounter() Counter { - if !Enabled { - return NilCounter{} - } - return &StandardCounter{0} -} - -// NewCounterForced constructs a new StandardCounter and returns it no matter if -// the global switch is enabled or not. -func NewCounterForced() Counter { - return &StandardCounter{0} -} - -// NewRegisteredCounter constructs and registers a new StandardCounter. -func NewRegisteredCounter(name string, r Registry) Counter { - c := NewCounter() - if nil == r { - r = DefaultRegistry - } - r.Register(name, c) - return c -} - -// NewRegisteredCounterForced constructs and registers a new StandardCounter -// and launches a goroutine no matter the global switch is enabled or not. -// Be sure to unregister the counter from the registry once it is of no use to -// allow for garbage collection. -func NewRegisteredCounterForced(name string, r Registry) Counter { - c := NewCounterForced() - if nil == r { - r = DefaultRegistry - } - r.Register(name, c) - return c -} - -// CounterSnapshot is a read-only copy of another Counter. -type CounterSnapshot int64 - -// Clear panics. -func (CounterSnapshot) Clear() { - panic("Clear called on a CounterSnapshot") -} - -// Count returns the count at the time the snapshot was taken. -func (c CounterSnapshot) Count() int64 { return int64(c) } - -// Dec panics. -func (CounterSnapshot) Dec(int64) { - panic("Dec called on a CounterSnapshot") -} - -// Inc panics. -func (CounterSnapshot) Inc(int64) { - panic("Inc called on a CounterSnapshot") -} - -// Snapshot returns the snapshot. -func (c CounterSnapshot) Snapshot() Counter { return c } - -// NilCounter is a no-op Counter. -type NilCounter struct{} - -// Clear is a no-op. -func (NilCounter) Clear() {} - -// Count is a no-op. 
-func (NilCounter) Count() int64 { return 0 } - -// Dec is a no-op. -func (NilCounter) Dec(i int64) {} - -// Inc is a no-op. -func (NilCounter) Inc(i int64) {} - -// Snapshot is a no-op. -func (NilCounter) Snapshot() Counter { return NilCounter{} } - -// StandardCounter is the standard implementation of a Counter and uses the -// sync/atomic package to manage a single int64 value. -type StandardCounter struct { - count int64 -} - -// Clear sets the counter to zero. -func (c *StandardCounter) Clear() { - atomic.StoreInt64(&c.count, 0) -} - -// Count returns the current count. -func (c *StandardCounter) Count() int64 { - return atomic.LoadInt64(&c.count) -} - -// Dec decrements the counter by the given amount. -func (c *StandardCounter) Dec(i int64) { - atomic.AddInt64(&c.count, -i) -} - -// Inc increments the counter by the given amount. -func (c *StandardCounter) Inc(i int64) { - atomic.AddInt64(&c.count, i) -} - -// Snapshot returns a read-only copy of the counter. -func (c *StandardCounter) Snapshot() Counter { - return CounterSnapshot(c.Count()) -} diff --git a/vendor/github.com/ethereum/go-ethereum/metrics/cpu.go b/vendor/github.com/ethereum/go-ethereum/metrics/cpu.go deleted file mode 100644 index 3278d81..0000000 --- a/vendor/github.com/ethereum/go-ethereum/metrics/cpu.go +++ /dev/null @@ -1,36 +0,0 @@ -// Copyright 2018 The go-ethereum Authors -// This file is part of the go-ethereum library. -// -// The go-ethereum library is free software: you can redistribute it and/or modify -// it under the terms of the GNU Lesser General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// The go-ethereum library is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Lesser General Public License for more details. 
-// -// You should have received a copy of the GNU Lesser General Public License -// along with the go-ethereum library. If not, see . - -package metrics - -import "github.com/elastic/gosigar" - -// CPUStats is the system and process CPU stats. -type CPUStats struct { - GlobalTime int64 // Time spent by the CPU working on all processes - GlobalWait int64 // Time spent by waiting on disk for all processes - LocalTime int64 // Time spent by the CPU working on this process -} - -// ReadCPUStats retrieves the current CPU stats. -func ReadCPUStats(stats *CPUStats) { - global := gosigar.Cpu{} - global.Get() - - stats.GlobalTime = int64(global.User + global.Nice + global.Sys) - stats.GlobalWait = int64(global.Wait) - stats.LocalTime = getProcessCPUTime() -} diff --git a/vendor/github.com/ethereum/go-ethereum/metrics/cpu_syscall.go b/vendor/github.com/ethereum/go-ethereum/metrics/cpu_syscall.go deleted file mode 100644 index e245453..0000000 --- a/vendor/github.com/ethereum/go-ethereum/metrics/cpu_syscall.go +++ /dev/null @@ -1,35 +0,0 @@ -// Copyright 2018 The go-ethereum Authors -// This file is part of the go-ethereum library. -// -// The go-ethereum library is free software: you can redistribute it and/or modify -// it under the terms of the GNU Lesser General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// The go-ethereum library is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Lesser General Public License for more details. -// -// You should have received a copy of the GNU Lesser General Public License -// along with the go-ethereum library. If not, see . 
- -// +build !windows - -package metrics - -import ( - "syscall" - - "github.com/ethereum/go-ethereum/log" -) - -// getProcessCPUTime retrieves the process' CPU time since program startup. -func getProcessCPUTime() int64 { - var usage syscall.Rusage - if err := syscall.Getrusage(syscall.RUSAGE_SELF, &usage); err != nil { - log.Warn("Failed to retrieve CPU time", "err", err) - return 0 - } - return int64(usage.Utime.Sec+usage.Stime.Sec)*100 + int64(usage.Utime.Usec+usage.Stime.Usec)/10000 //nolint:unconvert -} diff --git a/vendor/github.com/ethereum/go-ethereum/metrics/cpu_windows.go b/vendor/github.com/ethereum/go-ethereum/metrics/cpu_windows.go deleted file mode 100644 index fb29a52..0000000 --- a/vendor/github.com/ethereum/go-ethereum/metrics/cpu_windows.go +++ /dev/null @@ -1,23 +0,0 @@ -// Copyright 2018 The go-ethereum Authors -// This file is part of the go-ethereum library. -// -// The go-ethereum library is free software: you can redistribute it and/or modify -// it under the terms of the GNU Lesser General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// The go-ethereum library is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Lesser General Public License for more details. -// -// You should have received a copy of the GNU Lesser General Public License -// along with the go-ethereum library. If not, see . - -package metrics - -// getProcessCPUTime returns 0 on Windows as there is no system call to resolve -// the actual process' CPU time. 
-func getProcessCPUTime() int64 { - return 0 -} diff --git a/vendor/github.com/ethereum/go-ethereum/metrics/debug.go b/vendor/github.com/ethereum/go-ethereum/metrics/debug.go deleted file mode 100644 index de4a273..0000000 --- a/vendor/github.com/ethereum/go-ethereum/metrics/debug.go +++ /dev/null @@ -1,76 +0,0 @@ -package metrics - -import ( - "runtime/debug" - "time" -) - -var ( - debugMetrics struct { - GCStats struct { - LastGC Gauge - NumGC Gauge - Pause Histogram - //PauseQuantiles Histogram - PauseTotal Gauge - } - ReadGCStats Timer - } - gcStats debug.GCStats -) - -// Capture new values for the Go garbage collector statistics exported in -// debug.GCStats. This is designed to be called as a goroutine. -func CaptureDebugGCStats(r Registry, d time.Duration) { - for range time.Tick(d) { - CaptureDebugGCStatsOnce(r) - } -} - -// Capture new values for the Go garbage collector statistics exported in -// debug.GCStats. This is designed to be called in a background goroutine. -// Giving a registry which has not been given to RegisterDebugGCStats will -// panic. -// -// Be careful (but much less so) with this because debug.ReadGCStats calls -// the C function runtime·lock(runtime·mheap) which, while not a stop-the-world -// operation, isn't something you want to be doing all the time. -func CaptureDebugGCStatsOnce(r Registry) { - lastGC := gcStats.LastGC - t := time.Now() - debug.ReadGCStats(&gcStats) - debugMetrics.ReadGCStats.UpdateSince(t) - - debugMetrics.GCStats.LastGC.Update(gcStats.LastGC.UnixNano()) - debugMetrics.GCStats.NumGC.Update(gcStats.NumGC) - if lastGC != gcStats.LastGC && 0 < len(gcStats.Pause) { - debugMetrics.GCStats.Pause.Update(int64(gcStats.Pause[0])) - } - //debugMetrics.GCStats.PauseQuantiles.Update(gcStats.PauseQuantiles) - debugMetrics.GCStats.PauseTotal.Update(int64(gcStats.PauseTotal)) -} - -// Register metrics for the Go garbage collector statistics exported in -// debug.GCStats. 
The metrics are named by their fully-qualified Go symbols, -// i.e. debug.GCStats.PauseTotal. -func RegisterDebugGCStats(r Registry) { - debugMetrics.GCStats.LastGC = NewGauge() - debugMetrics.GCStats.NumGC = NewGauge() - debugMetrics.GCStats.Pause = NewHistogram(NewExpDecaySample(1028, 0.015)) - //debugMetrics.GCStats.PauseQuantiles = NewHistogram(NewExpDecaySample(1028, 0.015)) - debugMetrics.GCStats.PauseTotal = NewGauge() - debugMetrics.ReadGCStats = NewTimer() - - r.Register("debug.GCStats.LastGC", debugMetrics.GCStats.LastGC) - r.Register("debug.GCStats.NumGC", debugMetrics.GCStats.NumGC) - r.Register("debug.GCStats.Pause", debugMetrics.GCStats.Pause) - //r.Register("debug.GCStats.PauseQuantiles", debugMetrics.GCStats.PauseQuantiles) - r.Register("debug.GCStats.PauseTotal", debugMetrics.GCStats.PauseTotal) - r.Register("debug.ReadGCStats", debugMetrics.ReadGCStats) -} - -// Allocate an initial slice for gcStats.Pause to avoid allocations during -// normal operation. -func init() { - gcStats.Pause = make([]time.Duration, 11) -} diff --git a/vendor/github.com/ethereum/go-ethereum/metrics/disk.go b/vendor/github.com/ethereum/go-ethereum/metrics/disk.go deleted file mode 100644 index 25142d2..0000000 --- a/vendor/github.com/ethereum/go-ethereum/metrics/disk.go +++ /dev/null @@ -1,25 +0,0 @@ -// Copyright 2015 The go-ethereum Authors -// This file is part of the go-ethereum library. -// -// The go-ethereum library is free software: you can redistribute it and/or modify -// it under the terms of the GNU Lesser General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// The go-ethereum library is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Lesser General Public License for more details. 
-// -// You should have received a copy of the GNU Lesser General Public License -// along with the go-ethereum library. If not, see . - -package metrics - -// DiskStats is the per process disk io stats. -type DiskStats struct { - ReadCount int64 // Number of read operations executed - ReadBytes int64 // Total number of bytes read - WriteCount int64 // Number of write operations executed - WriteBytes int64 // Total number of byte written -} diff --git a/vendor/github.com/ethereum/go-ethereum/metrics/disk_linux.go b/vendor/github.com/ethereum/go-ethereum/metrics/disk_linux.go deleted file mode 100644 index 8d610cd..0000000 --- a/vendor/github.com/ethereum/go-ethereum/metrics/disk_linux.go +++ /dev/null @@ -1,72 +0,0 @@ -// Copyright 2015 The go-ethereum Authors -// This file is part of the go-ethereum library. -// -// The go-ethereum library is free software: you can redistribute it and/or modify -// it under the terms of the GNU Lesser General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// The go-ethereum library is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Lesser General Public License for more details. -// -// You should have received a copy of the GNU Lesser General Public License -// along with the go-ethereum library. If not, see . - -// Contains the Linux implementation of process disk IO counter retrieval. - -package metrics - -import ( - "bufio" - "fmt" - "io" - "os" - "strconv" - "strings" -) - -// ReadDiskStats retrieves the disk IO stats belonging to the current process. 
-func ReadDiskStats(stats *DiskStats) error { - // Open the process disk IO counter file - inf, err := os.Open(fmt.Sprintf("/proc/%d/io", os.Getpid())) - if err != nil { - return err - } - defer inf.Close() - in := bufio.NewReader(inf) - - // Iterate over the IO counter, and extract what we need - for { - // Read the next line and split to key and value - line, err := in.ReadString('\n') - if err != nil { - if err == io.EOF { - return nil - } - return err - } - parts := strings.Split(line, ":") - if len(parts) != 2 { - continue - } - key := strings.TrimSpace(parts[0]) - value, err := strconv.ParseInt(strings.TrimSpace(parts[1]), 10, 64) - if err != nil { - return err - } - - // Update the counter based on the key - switch key { - case "syscr": - stats.ReadCount = value - case "syscw": - stats.WriteCount = value - case "rchar": - stats.ReadBytes = value - case "wchar": - stats.WriteBytes = value - } - } -} diff --git a/vendor/github.com/ethereum/go-ethereum/metrics/disk_nop.go b/vendor/github.com/ethereum/go-ethereum/metrics/disk_nop.go deleted file mode 100644 index 4319f8b..0000000 --- a/vendor/github.com/ethereum/go-ethereum/metrics/disk_nop.go +++ /dev/null @@ -1,26 +0,0 @@ -// Copyright 2015 The go-ethereum Authors -// This file is part of the go-ethereum library. -// -// The go-ethereum library is free software: you can redistribute it and/or modify -// it under the terms of the GNU Lesser General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// The go-ethereum library is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Lesser General Public License for more details. -// -// You should have received a copy of the GNU Lesser General Public License -// along with the go-ethereum library. If not, see . 
- -// +build !linux - -package metrics - -import "errors" - -// ReadDiskStats retrieves the disk IO stats belonging to the current process. -func ReadDiskStats(stats *DiskStats) error { - return errors.New("Not implemented") -} diff --git a/vendor/github.com/ethereum/go-ethereum/metrics/doc.go b/vendor/github.com/ethereum/go-ethereum/metrics/doc.go deleted file mode 100644 index 13f429c..0000000 --- a/vendor/github.com/ethereum/go-ethereum/metrics/doc.go +++ /dev/null @@ -1,4 +0,0 @@ -package metrics - -const epsilon = 0.0000000000000001 -const epsilonPercentile = .00000000001 diff --git a/vendor/github.com/ethereum/go-ethereum/metrics/ewma.go b/vendor/github.com/ethereum/go-ethereum/metrics/ewma.go deleted file mode 100644 index 57c949e..0000000 --- a/vendor/github.com/ethereum/go-ethereum/metrics/ewma.go +++ /dev/null @@ -1,115 +0,0 @@ -package metrics - -import ( - "math" - "sync" - "sync/atomic" -) - -// EWMAs continuously calculate an exponentially-weighted moving average -// based on an outside source of clock ticks. -type EWMA interface { - Rate() float64 - Snapshot() EWMA - Tick() - Update(int64) -} - -// NewEWMA constructs a new EWMA with the given alpha. -func NewEWMA(alpha float64) EWMA { - return &StandardEWMA{alpha: alpha} -} - -// NewEWMA1 constructs a new EWMA for a one-minute moving average. -func NewEWMA1() EWMA { - return NewEWMA(1 - math.Exp(-5.0/60.0/1)) -} - -// NewEWMA5 constructs a new EWMA for a five-minute moving average. -func NewEWMA5() EWMA { - return NewEWMA(1 - math.Exp(-5.0/60.0/5)) -} - -// NewEWMA15 constructs a new EWMA for a fifteen-minute moving average. -func NewEWMA15() EWMA { - return NewEWMA(1 - math.Exp(-5.0/60.0/15)) -} - -// EWMASnapshot is a read-only copy of another EWMA. -type EWMASnapshot float64 - -// Rate returns the rate of events per second at the time the snapshot was -// taken. -func (a EWMASnapshot) Rate() float64 { return float64(a) } - -// Snapshot returns the snapshot. 
-func (a EWMASnapshot) Snapshot() EWMA { return a } - -// Tick panics. -func (EWMASnapshot) Tick() { - panic("Tick called on an EWMASnapshot") -} - -// Update panics. -func (EWMASnapshot) Update(int64) { - panic("Update called on an EWMASnapshot") -} - -// NilEWMA is a no-op EWMA. -type NilEWMA struct{} - -// Rate is a no-op. -func (NilEWMA) Rate() float64 { return 0.0 } - -// Snapshot is a no-op. -func (NilEWMA) Snapshot() EWMA { return NilEWMA{} } - -// Tick is a no-op. -func (NilEWMA) Tick() {} - -// Update is a no-op. -func (NilEWMA) Update(n int64) {} - -// StandardEWMA is the standard implementation of an EWMA and tracks the number -// of uncounted events and processes them on each tick. It uses the -// sync/atomic package to manage uncounted events. -type StandardEWMA struct { - uncounted int64 // /!\ this should be the first member to ensure 64-bit alignment - alpha float64 - rate float64 - init bool - mutex sync.Mutex -} - -// Rate returns the moving average rate of events per second. -func (a *StandardEWMA) Rate() float64 { - a.mutex.Lock() - defer a.mutex.Unlock() - return a.rate * float64(1e9) -} - -// Snapshot returns a read-only copy of the EWMA. -func (a *StandardEWMA) Snapshot() EWMA { - return EWMASnapshot(a.Rate()) -} - -// Tick ticks the clock to update the moving average. It assumes it is called -// every five seconds. -func (a *StandardEWMA) Tick() { - count := atomic.LoadInt64(&a.uncounted) - atomic.AddInt64(&a.uncounted, -count) - instantRate := float64(count) / float64(5e9) - a.mutex.Lock() - defer a.mutex.Unlock() - if a.init { - a.rate += a.alpha * (instantRate - a.rate) - } else { - a.init = true - a.rate = instantRate - } -} - -// Update adds n uncounted events. 
-func (a *StandardEWMA) Update(n int64) { - atomic.AddInt64(&a.uncounted, n) -} diff --git a/vendor/github.com/ethereum/go-ethereum/metrics/gauge.go b/vendor/github.com/ethereum/go-ethereum/metrics/gauge.go deleted file mode 100644 index b6b2758..0000000 --- a/vendor/github.com/ethereum/go-ethereum/metrics/gauge.go +++ /dev/null @@ -1,158 +0,0 @@ -package metrics - -import "sync/atomic" - -// Gauges hold an int64 value that can be set arbitrarily. -type Gauge interface { - Snapshot() Gauge - Update(int64) - Dec(int64) - Inc(int64) - Value() int64 -} - -// GetOrRegisterGauge returns an existing Gauge or constructs and registers a -// new StandardGauge. -func GetOrRegisterGauge(name string, r Registry) Gauge { - if nil == r { - r = DefaultRegistry - } - return r.GetOrRegister(name, NewGauge).(Gauge) -} - -// NewGauge constructs a new StandardGauge. -func NewGauge() Gauge { - if !Enabled { - return NilGauge{} - } - return &StandardGauge{0} -} - -// NewRegisteredGauge constructs and registers a new StandardGauge. -func NewRegisteredGauge(name string, r Registry) Gauge { - c := NewGauge() - if nil == r { - r = DefaultRegistry - } - r.Register(name, c) - return c -} - -// NewFunctionalGauge constructs a new FunctionalGauge. -func NewFunctionalGauge(f func() int64) Gauge { - if !Enabled { - return NilGauge{} - } - return &FunctionalGauge{value: f} -} - -// NewRegisteredFunctionalGauge constructs and registers a new StandardGauge. -func NewRegisteredFunctionalGauge(name string, r Registry, f func() int64) Gauge { - c := NewFunctionalGauge(f) - if nil == r { - r = DefaultRegistry - } - r.Register(name, c) - return c -} - -// GaugeSnapshot is a read-only copy of another Gauge. -type GaugeSnapshot int64 - -// Snapshot returns the snapshot. -func (g GaugeSnapshot) Snapshot() Gauge { return g } - -// Update panics. -func (GaugeSnapshot) Update(int64) { - panic("Update called on a GaugeSnapshot") -} - -// Dec panics. 
-func (GaugeSnapshot) Dec(int64) { - panic("Dec called on a GaugeSnapshot") -} - -// Inc panics. -func (GaugeSnapshot) Inc(int64) { - panic("Inc called on a GaugeSnapshot") -} - -// Value returns the value at the time the snapshot was taken. -func (g GaugeSnapshot) Value() int64 { return int64(g) } - -// NilGauge is a no-op Gauge. -type NilGauge struct{} - -// Snapshot is a no-op. -func (NilGauge) Snapshot() Gauge { return NilGauge{} } - -// Update is a no-op. -func (NilGauge) Update(v int64) {} - -// Dec is a no-op. -func (NilGauge) Dec(i int64) {} - -// Inc is a no-op. -func (NilGauge) Inc(i int64) {} - -// Value is a no-op. -func (NilGauge) Value() int64 { return 0 } - -// StandardGauge is the standard implementation of a Gauge and uses the -// sync/atomic package to manage a single int64 value. -type StandardGauge struct { - value int64 -} - -// Snapshot returns a read-only copy of the gauge. -func (g *StandardGauge) Snapshot() Gauge { - return GaugeSnapshot(g.Value()) -} - -// Update updates the gauge's value. -func (g *StandardGauge) Update(v int64) { - atomic.StoreInt64(&g.value, v) -} - -// Value returns the gauge's current value. -func (g *StandardGauge) Value() int64 { - return atomic.LoadInt64(&g.value) -} - -// Dec decrements the gauge's current value by the given amount. -func (g *StandardGauge) Dec(i int64) { - atomic.AddInt64(&g.value, -i) -} - -// Inc increments the gauge's current value by the given amount. -func (g *StandardGauge) Inc(i int64) { - atomic.AddInt64(&g.value, i) -} - -// FunctionalGauge returns value from given function -type FunctionalGauge struct { - value func() int64 -} - -// Value returns the gauge's current value. -func (g FunctionalGauge) Value() int64 { - return g.value() -} - -// Snapshot returns the snapshot. -func (g FunctionalGauge) Snapshot() Gauge { return GaugeSnapshot(g.Value()) } - -// Update panics. -func (FunctionalGauge) Update(int64) { - panic("Update called on a FunctionalGauge") -} - -// Dec panics. 
-func (FunctionalGauge) Dec(int64) { - panic("Dec called on a FunctionalGauge") -} - -// Inc panics. -func (FunctionalGauge) Inc(int64) { - panic("Inc called on a FunctionalGauge") -} diff --git a/vendor/github.com/ethereum/go-ethereum/metrics/gauge_float64.go b/vendor/github.com/ethereum/go-ethereum/metrics/gauge_float64.go deleted file mode 100644 index 66819c9..0000000 --- a/vendor/github.com/ethereum/go-ethereum/metrics/gauge_float64.go +++ /dev/null @@ -1,127 +0,0 @@ -package metrics - -import "sync" - -// GaugeFloat64s hold a float64 value that can be set arbitrarily. -type GaugeFloat64 interface { - Snapshot() GaugeFloat64 - Update(float64) - Value() float64 -} - -// GetOrRegisterGaugeFloat64 returns an existing GaugeFloat64 or constructs and registers a -// new StandardGaugeFloat64. -func GetOrRegisterGaugeFloat64(name string, r Registry) GaugeFloat64 { - if nil == r { - r = DefaultRegistry - } - return r.GetOrRegister(name, NewGaugeFloat64()).(GaugeFloat64) -} - -// NewGaugeFloat64 constructs a new StandardGaugeFloat64. -func NewGaugeFloat64() GaugeFloat64 { - if !Enabled { - return NilGaugeFloat64{} - } - return &StandardGaugeFloat64{ - value: 0.0, - } -} - -// NewRegisteredGaugeFloat64 constructs and registers a new StandardGaugeFloat64. -func NewRegisteredGaugeFloat64(name string, r Registry) GaugeFloat64 { - c := NewGaugeFloat64() - if nil == r { - r = DefaultRegistry - } - r.Register(name, c) - return c -} - -// NewFunctionalGauge constructs a new FunctionalGauge. -func NewFunctionalGaugeFloat64(f func() float64) GaugeFloat64 { - if !Enabled { - return NilGaugeFloat64{} - } - return &FunctionalGaugeFloat64{value: f} -} - -// NewRegisteredFunctionalGauge constructs and registers a new StandardGauge. 
-func NewRegisteredFunctionalGaugeFloat64(name string, r Registry, f func() float64) GaugeFloat64 { - c := NewFunctionalGaugeFloat64(f) - if nil == r { - r = DefaultRegistry - } - r.Register(name, c) - return c -} - -// GaugeFloat64Snapshot is a read-only copy of another GaugeFloat64. -type GaugeFloat64Snapshot float64 - -// Snapshot returns the snapshot. -func (g GaugeFloat64Snapshot) Snapshot() GaugeFloat64 { return g } - -// Update panics. -func (GaugeFloat64Snapshot) Update(float64) { - panic("Update called on a GaugeFloat64Snapshot") -} - -// Value returns the value at the time the snapshot was taken. -func (g GaugeFloat64Snapshot) Value() float64 { return float64(g) } - -// NilGauge is a no-op Gauge. -type NilGaugeFloat64 struct{} - -// Snapshot is a no-op. -func (NilGaugeFloat64) Snapshot() GaugeFloat64 { return NilGaugeFloat64{} } - -// Update is a no-op. -func (NilGaugeFloat64) Update(v float64) {} - -// Value is a no-op. -func (NilGaugeFloat64) Value() float64 { return 0.0 } - -// StandardGaugeFloat64 is the standard implementation of a GaugeFloat64 and uses -// sync.Mutex to manage a single float64 value. -type StandardGaugeFloat64 struct { - mutex sync.Mutex - value float64 -} - -// Snapshot returns a read-only copy of the gauge. -func (g *StandardGaugeFloat64) Snapshot() GaugeFloat64 { - return GaugeFloat64Snapshot(g.Value()) -} - -// Update updates the gauge's value. -func (g *StandardGaugeFloat64) Update(v float64) { - g.mutex.Lock() - defer g.mutex.Unlock() - g.value = v -} - -// Value returns the gauge's current value. -func (g *StandardGaugeFloat64) Value() float64 { - g.mutex.Lock() - defer g.mutex.Unlock() - return g.value -} - -// FunctionalGaugeFloat64 returns value from given function -type FunctionalGaugeFloat64 struct { - value func() float64 -} - -// Value returns the gauge's current value. -func (g FunctionalGaugeFloat64) Value() float64 { - return g.value() -} - -// Snapshot returns the snapshot. 
-func (g FunctionalGaugeFloat64) Snapshot() GaugeFloat64 { return GaugeFloat64Snapshot(g.Value()) } - -// Update panics. -func (FunctionalGaugeFloat64) Update(float64) { - panic("Update called on a FunctionalGaugeFloat64") -} diff --git a/vendor/github.com/ethereum/go-ethereum/metrics/graphite.go b/vendor/github.com/ethereum/go-ethereum/metrics/graphite.go deleted file mode 100644 index 142eec8..0000000 --- a/vendor/github.com/ethereum/go-ethereum/metrics/graphite.go +++ /dev/null @@ -1,113 +0,0 @@ -package metrics - -import ( - "bufio" - "fmt" - "log" - "net" - "strconv" - "strings" - "time" -) - -// GraphiteConfig provides a container with configuration parameters for -// the Graphite exporter -type GraphiteConfig struct { - Addr *net.TCPAddr // Network address to connect to - Registry Registry // Registry to be exported - FlushInterval time.Duration // Flush interval - DurationUnit time.Duration // Time conversion unit for durations - Prefix string // Prefix to be prepended to metric names - Percentiles []float64 // Percentiles to export from timers and histograms -} - -// Graphite is a blocking exporter function which reports metrics in r -// to a graphite server located at addr, flushing them every d duration -// and prepending metric names with prefix. -func Graphite(r Registry, d time.Duration, prefix string, addr *net.TCPAddr) { - GraphiteWithConfig(GraphiteConfig{ - Addr: addr, - Registry: r, - FlushInterval: d, - DurationUnit: time.Nanosecond, - Prefix: prefix, - Percentiles: []float64{0.5, 0.75, 0.95, 0.99, 0.999}, - }) -} - -// GraphiteWithConfig is a blocking exporter function just like Graphite, -// but it takes a GraphiteConfig instead. -func GraphiteWithConfig(c GraphiteConfig) { - log.Printf("WARNING: This go-metrics client has been DEPRECATED! 
It has been moved to https://github.com/cyberdelia/go-metrics-graphite and will be removed from rcrowley/go-metrics on August 12th 2015") - for range time.Tick(c.FlushInterval) { - if err := graphite(&c); nil != err { - log.Println(err) - } - } -} - -// GraphiteOnce performs a single submission to Graphite, returning a -// non-nil error on failed connections. This can be used in a loop -// similar to GraphiteWithConfig for custom error handling. -func GraphiteOnce(c GraphiteConfig) error { - log.Printf("WARNING: This go-metrics client has been DEPRECATED! It has been moved to https://github.com/cyberdelia/go-metrics-graphite and will be removed from rcrowley/go-metrics on August 12th 2015") - return graphite(&c) -} - -func graphite(c *GraphiteConfig) error { - now := time.Now().Unix() - du := float64(c.DurationUnit) - conn, err := net.DialTCP("tcp", nil, c.Addr) - if nil != err { - return err - } - defer conn.Close() - w := bufio.NewWriter(conn) - c.Registry.Each(func(name string, i interface{}) { - switch metric := i.(type) { - case Counter: - fmt.Fprintf(w, "%s.%s.count %d %d\n", c.Prefix, name, metric.Count(), now) - case Gauge: - fmt.Fprintf(w, "%s.%s.value %d %d\n", c.Prefix, name, metric.Value(), now) - case GaugeFloat64: - fmt.Fprintf(w, "%s.%s.value %f %d\n", c.Prefix, name, metric.Value(), now) - case Histogram: - h := metric.Snapshot() - ps := h.Percentiles(c.Percentiles) - fmt.Fprintf(w, "%s.%s.count %d %d\n", c.Prefix, name, h.Count(), now) - fmt.Fprintf(w, "%s.%s.min %d %d\n", c.Prefix, name, h.Min(), now) - fmt.Fprintf(w, "%s.%s.max %d %d\n", c.Prefix, name, h.Max(), now) - fmt.Fprintf(w, "%s.%s.mean %.2f %d\n", c.Prefix, name, h.Mean(), now) - fmt.Fprintf(w, "%s.%s.std-dev %.2f %d\n", c.Prefix, name, h.StdDev(), now) - for psIdx, psKey := range c.Percentiles { - key := strings.Replace(strconv.FormatFloat(psKey*100.0, 'f', -1, 64), ".", "", 1) - fmt.Fprintf(w, "%s.%s.%s-percentile %.2f %d\n", c.Prefix, name, key, ps[psIdx], now) - } - case Meter: - m 
:= metric.Snapshot() - fmt.Fprintf(w, "%s.%s.count %d %d\n", c.Prefix, name, m.Count(), now) - fmt.Fprintf(w, "%s.%s.one-minute %.2f %d\n", c.Prefix, name, m.Rate1(), now) - fmt.Fprintf(w, "%s.%s.five-minute %.2f %d\n", c.Prefix, name, m.Rate5(), now) - fmt.Fprintf(w, "%s.%s.fifteen-minute %.2f %d\n", c.Prefix, name, m.Rate15(), now) - fmt.Fprintf(w, "%s.%s.mean %.2f %d\n", c.Prefix, name, m.RateMean(), now) - case Timer: - t := metric.Snapshot() - ps := t.Percentiles(c.Percentiles) - fmt.Fprintf(w, "%s.%s.count %d %d\n", c.Prefix, name, t.Count(), now) - fmt.Fprintf(w, "%s.%s.min %d %d\n", c.Prefix, name, t.Min()/int64(du), now) - fmt.Fprintf(w, "%s.%s.max %d %d\n", c.Prefix, name, t.Max()/int64(du), now) - fmt.Fprintf(w, "%s.%s.mean %.2f %d\n", c.Prefix, name, t.Mean()/du, now) - fmt.Fprintf(w, "%s.%s.std-dev %.2f %d\n", c.Prefix, name, t.StdDev()/du, now) - for psIdx, psKey := range c.Percentiles { - key := strings.Replace(strconv.FormatFloat(psKey*100.0, 'f', -1, 64), ".", "", 1) - fmt.Fprintf(w, "%s.%s.%s-percentile %.2f %d\n", c.Prefix, name, key, ps[psIdx], now) - } - fmt.Fprintf(w, "%s.%s.one-minute %.2f %d\n", c.Prefix, name, t.Rate1(), now) - fmt.Fprintf(w, "%s.%s.five-minute %.2f %d\n", c.Prefix, name, t.Rate5(), now) - fmt.Fprintf(w, "%s.%s.fifteen-minute %.2f %d\n", c.Prefix, name, t.Rate15(), now) - fmt.Fprintf(w, "%s.%s.mean-rate %.2f %d\n", c.Prefix, name, t.RateMean(), now) - } - w.Flush() - }) - return nil -} diff --git a/vendor/github.com/ethereum/go-ethereum/metrics/healthcheck.go b/vendor/github.com/ethereum/go-ethereum/metrics/healthcheck.go deleted file mode 100644 index f1ae31e..0000000 --- a/vendor/github.com/ethereum/go-ethereum/metrics/healthcheck.go +++ /dev/null @@ -1,61 +0,0 @@ -package metrics - -// Healthchecks hold an error value describing an arbitrary up/down status. 
-type Healthcheck interface { - Check() - Error() error - Healthy() - Unhealthy(error) -} - -// NewHealthcheck constructs a new Healthcheck which will use the given -// function to update its status. -func NewHealthcheck(f func(Healthcheck)) Healthcheck { - if !Enabled { - return NilHealthcheck{} - } - return &StandardHealthcheck{nil, f} -} - -// NilHealthcheck is a no-op. -type NilHealthcheck struct{} - -// Check is a no-op. -func (NilHealthcheck) Check() {} - -// Error is a no-op. -func (NilHealthcheck) Error() error { return nil } - -// Healthy is a no-op. -func (NilHealthcheck) Healthy() {} - -// Unhealthy is a no-op. -func (NilHealthcheck) Unhealthy(error) {} - -// StandardHealthcheck is the standard implementation of a Healthcheck and -// stores the status and a function to call to update the status. -type StandardHealthcheck struct { - err error - f func(Healthcheck) -} - -// Check runs the healthcheck function to update the healthcheck's status. -func (h *StandardHealthcheck) Check() { - h.f(h) -} - -// Error returns the healthcheck's status, which will be nil if it is healthy. -func (h *StandardHealthcheck) Error() error { - return h.err -} - -// Healthy marks the healthcheck as healthy. -func (h *StandardHealthcheck) Healthy() { - h.err = nil -} - -// Unhealthy marks the healthcheck as unhealthy. The error is stored and -// may be retrieved by the Error method. -func (h *StandardHealthcheck) Unhealthy(err error) { - h.err = err -} diff --git a/vendor/github.com/ethereum/go-ethereum/metrics/histogram.go b/vendor/github.com/ethereum/go-ethereum/metrics/histogram.go deleted file mode 100644 index 46f3bbd..0000000 --- a/vendor/github.com/ethereum/go-ethereum/metrics/histogram.go +++ /dev/null @@ -1,202 +0,0 @@ -package metrics - -// Histograms calculate distribution statistics from a series of int64 values. 
-type Histogram interface { - Clear() - Count() int64 - Max() int64 - Mean() float64 - Min() int64 - Percentile(float64) float64 - Percentiles([]float64) []float64 - Sample() Sample - Snapshot() Histogram - StdDev() float64 - Sum() int64 - Update(int64) - Variance() float64 -} - -// GetOrRegisterHistogram returns an existing Histogram or constructs and -// registers a new StandardHistogram. -func GetOrRegisterHistogram(name string, r Registry, s Sample) Histogram { - if nil == r { - r = DefaultRegistry - } - return r.GetOrRegister(name, func() Histogram { return NewHistogram(s) }).(Histogram) -} - -// NewHistogram constructs a new StandardHistogram from a Sample. -func NewHistogram(s Sample) Histogram { - if !Enabled { - return NilHistogram{} - } - return &StandardHistogram{sample: s} -} - -// NewRegisteredHistogram constructs and registers a new StandardHistogram from -// a Sample. -func NewRegisteredHistogram(name string, r Registry, s Sample) Histogram { - c := NewHistogram(s) - if nil == r { - r = DefaultRegistry - } - r.Register(name, c) - return c -} - -// HistogramSnapshot is a read-only copy of another Histogram. -type HistogramSnapshot struct { - sample *SampleSnapshot -} - -// Clear panics. -func (*HistogramSnapshot) Clear() { - panic("Clear called on a HistogramSnapshot") -} - -// Count returns the number of samples recorded at the time the snapshot was -// taken. -func (h *HistogramSnapshot) Count() int64 { return h.sample.Count() } - -// Max returns the maximum value in the sample at the time the snapshot was -// taken. -func (h *HistogramSnapshot) Max() int64 { return h.sample.Max() } - -// Mean returns the mean of the values in the sample at the time the snapshot -// was taken. -func (h *HistogramSnapshot) Mean() float64 { return h.sample.Mean() } - -// Min returns the minimum value in the sample at the time the snapshot was -// taken. 
-func (h *HistogramSnapshot) Min() int64 { return h.sample.Min() } - -// Percentile returns an arbitrary percentile of values in the sample at the -// time the snapshot was taken. -func (h *HistogramSnapshot) Percentile(p float64) float64 { - return h.sample.Percentile(p) -} - -// Percentiles returns a slice of arbitrary percentiles of values in the sample -// at the time the snapshot was taken. -func (h *HistogramSnapshot) Percentiles(ps []float64) []float64 { - return h.sample.Percentiles(ps) -} - -// Sample returns the Sample underlying the histogram. -func (h *HistogramSnapshot) Sample() Sample { return h.sample } - -// Snapshot returns the snapshot. -func (h *HistogramSnapshot) Snapshot() Histogram { return h } - -// StdDev returns the standard deviation of the values in the sample at the -// time the snapshot was taken. -func (h *HistogramSnapshot) StdDev() float64 { return h.sample.StdDev() } - -// Sum returns the sum in the sample at the time the snapshot was taken. -func (h *HistogramSnapshot) Sum() int64 { return h.sample.Sum() } - -// Update panics. -func (*HistogramSnapshot) Update(int64) { - panic("Update called on a HistogramSnapshot") -} - -// Variance returns the variance of inputs at the time the snapshot was taken. -func (h *HistogramSnapshot) Variance() float64 { return h.sample.Variance() } - -// NilHistogram is a no-op Histogram. -type NilHistogram struct{} - -// Clear is a no-op. -func (NilHistogram) Clear() {} - -// Count is a no-op. -func (NilHistogram) Count() int64 { return 0 } - -// Max is a no-op. -func (NilHistogram) Max() int64 { return 0 } - -// Mean is a no-op. -func (NilHistogram) Mean() float64 { return 0.0 } - -// Min is a no-op. -func (NilHistogram) Min() int64 { return 0 } - -// Percentile is a no-op. -func (NilHistogram) Percentile(p float64) float64 { return 0.0 } - -// Percentiles is a no-op. -func (NilHistogram) Percentiles(ps []float64) []float64 { - return make([]float64, len(ps)) -} - -// Sample is a no-op. 
-func (NilHistogram) Sample() Sample { return NilSample{} } - -// Snapshot is a no-op. -func (NilHistogram) Snapshot() Histogram { return NilHistogram{} } - -// StdDev is a no-op. -func (NilHistogram) StdDev() float64 { return 0.0 } - -// Sum is a no-op. -func (NilHistogram) Sum() int64 { return 0 } - -// Update is a no-op. -func (NilHistogram) Update(v int64) {} - -// Variance is a no-op. -func (NilHistogram) Variance() float64 { return 0.0 } - -// StandardHistogram is the standard implementation of a Histogram and uses a -// Sample to bound its memory use. -type StandardHistogram struct { - sample Sample -} - -// Clear clears the histogram and its sample. -func (h *StandardHistogram) Clear() { h.sample.Clear() } - -// Count returns the number of samples recorded since the histogram was last -// cleared. -func (h *StandardHistogram) Count() int64 { return h.sample.Count() } - -// Max returns the maximum value in the sample. -func (h *StandardHistogram) Max() int64 { return h.sample.Max() } - -// Mean returns the mean of the values in the sample. -func (h *StandardHistogram) Mean() float64 { return h.sample.Mean() } - -// Min returns the minimum value in the sample. -func (h *StandardHistogram) Min() int64 { return h.sample.Min() } - -// Percentile returns an arbitrary percentile of the values in the sample. -func (h *StandardHistogram) Percentile(p float64) float64 { - return h.sample.Percentile(p) -} - -// Percentiles returns a slice of arbitrary percentiles of the values in the -// sample. -func (h *StandardHistogram) Percentiles(ps []float64) []float64 { - return h.sample.Percentiles(ps) -} - -// Sample returns the Sample underlying the histogram. -func (h *StandardHistogram) Sample() Sample { return h.sample } - -// Snapshot returns a read-only copy of the histogram. 
-func (h *StandardHistogram) Snapshot() Histogram { - return &HistogramSnapshot{sample: h.sample.Snapshot().(*SampleSnapshot)} -} - -// StdDev returns the standard deviation of the values in the sample. -func (h *StandardHistogram) StdDev() float64 { return h.sample.StdDev() } - -// Sum returns the sum in the sample. -func (h *StandardHistogram) Sum() int64 { return h.sample.Sum() } - -// Update samples a new value. -func (h *StandardHistogram) Update(v int64) { h.sample.Update(v) } - -// Variance returns the variance of the values in the sample. -func (h *StandardHistogram) Variance() float64 { return h.sample.Variance() } diff --git a/vendor/github.com/ethereum/go-ethereum/metrics/json.go b/vendor/github.com/ethereum/go-ethereum/metrics/json.go deleted file mode 100644 index 2087d82..0000000 --- a/vendor/github.com/ethereum/go-ethereum/metrics/json.go +++ /dev/null @@ -1,31 +0,0 @@ -package metrics - -import ( - "encoding/json" - "io" - "time" -) - -// MarshalJSON returns a byte slice containing a JSON representation of all -// the metrics in the Registry. -func (r *StandardRegistry) MarshalJSON() ([]byte, error) { - return json.Marshal(r.GetAll()) -} - -// WriteJSON writes metrics from the given registry periodically to the -// specified io.Writer as JSON. -func WriteJSON(r Registry, d time.Duration, w io.Writer) { - for range time.Tick(d) { - WriteJSONOnce(r, w) - } -} - -// WriteJSONOnce writes metrics from the given registry to the specified -// io.Writer as JSON. 
-func WriteJSONOnce(r Registry, w io.Writer) { - json.NewEncoder(w).Encode(r) -} - -func (p *PrefixedRegistry) MarshalJSON() ([]byte, error) { - return json.Marshal(p.GetAll()) -} diff --git a/vendor/github.com/ethereum/go-ethereum/metrics/log.go b/vendor/github.com/ethereum/go-ethereum/metrics/log.go deleted file mode 100644 index 0c8ea7c..0000000 --- a/vendor/github.com/ethereum/go-ethereum/metrics/log.go +++ /dev/null @@ -1,80 +0,0 @@ -package metrics - -import ( - "time" -) - -type Logger interface { - Printf(format string, v ...interface{}) -} - -func Log(r Registry, freq time.Duration, l Logger) { - LogScaled(r, freq, time.Nanosecond, l) -} - -// Output each metric in the given registry periodically using the given -// logger. Print timings in `scale` units (eg time.Millisecond) rather than nanos. -func LogScaled(r Registry, freq time.Duration, scale time.Duration, l Logger) { - du := float64(scale) - duSuffix := scale.String()[1:] - - for range time.Tick(freq) { - r.Each(func(name string, i interface{}) { - switch metric := i.(type) { - case Counter: - l.Printf("counter %s\n", name) - l.Printf(" count: %9d\n", metric.Count()) - case Gauge: - l.Printf("gauge %s\n", name) - l.Printf(" value: %9d\n", metric.Value()) - case GaugeFloat64: - l.Printf("gauge %s\n", name) - l.Printf(" value: %f\n", metric.Value()) - case Healthcheck: - metric.Check() - l.Printf("healthcheck %s\n", name) - l.Printf(" error: %v\n", metric.Error()) - case Histogram: - h := metric.Snapshot() - ps := h.Percentiles([]float64{0.5, 0.75, 0.95, 0.99, 0.999}) - l.Printf("histogram %s\n", name) - l.Printf(" count: %9d\n", h.Count()) - l.Printf(" min: %9d\n", h.Min()) - l.Printf(" max: %9d\n", h.Max()) - l.Printf(" mean: %12.2f\n", h.Mean()) - l.Printf(" stddev: %12.2f\n", h.StdDev()) - l.Printf(" median: %12.2f\n", ps[0]) - l.Printf(" 75%%: %12.2f\n", ps[1]) - l.Printf(" 95%%: %12.2f\n", ps[2]) - l.Printf(" 99%%: %12.2f\n", ps[3]) - l.Printf(" 99.9%%: %12.2f\n", ps[4]) - case Meter: - m := 
metric.Snapshot() - l.Printf("meter %s\n", name) - l.Printf(" count: %9d\n", m.Count()) - l.Printf(" 1-min rate: %12.2f\n", m.Rate1()) - l.Printf(" 5-min rate: %12.2f\n", m.Rate5()) - l.Printf(" 15-min rate: %12.2f\n", m.Rate15()) - l.Printf(" mean rate: %12.2f\n", m.RateMean()) - case Timer: - t := metric.Snapshot() - ps := t.Percentiles([]float64{0.5, 0.75, 0.95, 0.99, 0.999}) - l.Printf("timer %s\n", name) - l.Printf(" count: %9d\n", t.Count()) - l.Printf(" min: %12.2f%s\n", float64(t.Min())/du, duSuffix) - l.Printf(" max: %12.2f%s\n", float64(t.Max())/du, duSuffix) - l.Printf(" mean: %12.2f%s\n", t.Mean()/du, duSuffix) - l.Printf(" stddev: %12.2f%s\n", t.StdDev()/du, duSuffix) - l.Printf(" median: %12.2f%s\n", ps[0]/du, duSuffix) - l.Printf(" 75%%: %12.2f%s\n", ps[1]/du, duSuffix) - l.Printf(" 95%%: %12.2f%s\n", ps[2]/du, duSuffix) - l.Printf(" 99%%: %12.2f%s\n", ps[3]/du, duSuffix) - l.Printf(" 99.9%%: %12.2f%s\n", ps[4]/du, duSuffix) - l.Printf(" 1-min rate: %12.2f\n", t.Rate1()) - l.Printf(" 5-min rate: %12.2f\n", t.Rate5()) - l.Printf(" 15-min rate: %12.2f\n", t.Rate15()) - l.Printf(" mean rate: %12.2f\n", t.RateMean()) - } - }) - } -} diff --git a/vendor/github.com/ethereum/go-ethereum/metrics/memory.md b/vendor/github.com/ethereum/go-ethereum/metrics/memory.md deleted file mode 100644 index 47454f5..0000000 --- a/vendor/github.com/ethereum/go-ethereum/metrics/memory.md +++ /dev/null @@ -1,285 +0,0 @@ -Memory usage -============ - -(Highly unscientific.) 
- -Command used to gather static memory usage: - -```sh -grep ^Vm "/proc/$(ps fax | grep [m]etrics-bench | awk '{print $1}')/status" -``` - -Program used to gather baseline memory usage: - -```go -package main - -import "time" - -func main() { - time.Sleep(600e9) -} -``` - -Baseline --------- - -``` -VmPeak: 42604 kB -VmSize: 42604 kB -VmLck: 0 kB -VmHWM: 1120 kB -VmRSS: 1120 kB -VmData: 35460 kB -VmStk: 136 kB -VmExe: 1020 kB -VmLib: 1848 kB -VmPTE: 36 kB -VmSwap: 0 kB -``` - -Program used to gather metric memory usage (with other metrics being similar): - -```go -package main - -import ( - "fmt" - "metrics" - "time" -) - -func main() { - fmt.Sprintf("foo") - metrics.NewRegistry() - time.Sleep(600e9) -} -``` - -1000 counters registered ------------------------- - -``` -VmPeak: 44016 kB -VmSize: 44016 kB -VmLck: 0 kB -VmHWM: 1928 kB -VmRSS: 1928 kB -VmData: 36868 kB -VmStk: 136 kB -VmExe: 1024 kB -VmLib: 1848 kB -VmPTE: 40 kB -VmSwap: 0 kB -``` - -**1.412 kB virtual, TODO 0.808 kB resident per counter.** - -100000 counters registered --------------------------- - -``` -VmPeak: 55024 kB -VmSize: 55024 kB -VmLck: 0 kB -VmHWM: 12440 kB -VmRSS: 12440 kB -VmData: 47876 kB -VmStk: 136 kB -VmExe: 1024 kB -VmLib: 1848 kB -VmPTE: 64 kB -VmSwap: 0 kB -``` - -**0.1242 kB virtual, 0.1132 kB resident per counter.** - -1000 gauges registered ----------------------- - -``` -VmPeak: 44012 kB -VmSize: 44012 kB -VmLck: 0 kB -VmHWM: 1928 kB -VmRSS: 1928 kB -VmData: 36868 kB -VmStk: 136 kB -VmExe: 1020 kB -VmLib: 1848 kB -VmPTE: 40 kB -VmSwap: 0 kB -``` - -**1.408 kB virtual, 0.808 kB resident per counter.** - -100000 gauges registered ------------------------- - -``` -VmPeak: 55020 kB -VmSize: 55020 kB -VmLck: 0 kB -VmHWM: 12432 kB -VmRSS: 12432 kB -VmData: 47876 kB -VmStk: 136 kB -VmExe: 1020 kB -VmLib: 1848 kB -VmPTE: 60 kB -VmSwap: 0 kB -``` - -**0.12416 kB virtual, 0.11312 resident per gauge.** - -1000 histograms with a uniform sample size of 1028 
--------------------------------------------------- - -``` -VmPeak: 72272 kB -VmSize: 72272 kB -VmLck: 0 kB -VmHWM: 16204 kB -VmRSS: 16204 kB -VmData: 65100 kB -VmStk: 136 kB -VmExe: 1048 kB -VmLib: 1848 kB -VmPTE: 80 kB -VmSwap: 0 kB -``` - -**29.668 kB virtual, TODO 15.084 resident per histogram.** - -10000 histograms with a uniform sample size of 1028 ---------------------------------------------------- - -``` -VmPeak: 256912 kB -VmSize: 256912 kB -VmLck: 0 kB -VmHWM: 146204 kB -VmRSS: 146204 kB -VmData: 249740 kB -VmStk: 136 kB -VmExe: 1048 kB -VmLib: 1848 kB -VmPTE: 448 kB -VmSwap: 0 kB -``` - -**21.4308 kB virtual, 14.5084 kB resident per histogram.** - -50000 histograms with a uniform sample size of 1028 ---------------------------------------------------- - -``` -VmPeak: 908112 kB -VmSize: 908112 kB -VmLck: 0 kB -VmHWM: 645832 kB -VmRSS: 645588 kB -VmData: 900940 kB -VmStk: 136 kB -VmExe: 1048 kB -VmLib: 1848 kB -VmPTE: 1716 kB -VmSwap: 1544 kB -``` - -**17.31016 kB virtual, 12.88936 kB resident per histogram.** - -1000 histograms with an exponentially-decaying sample size of 1028 and alpha of 0.015 -------------------------------------------------------------------------------------- - -``` -VmPeak: 62480 kB -VmSize: 62480 kB -VmLck: 0 kB -VmHWM: 11572 kB -VmRSS: 11572 kB -VmData: 55308 kB -VmStk: 136 kB -VmExe: 1048 kB -VmLib: 1848 kB -VmPTE: 64 kB -VmSwap: 0 kB -``` - -**19.876 kB virtual, 10.452 kB resident per histogram.** - -10000 histograms with an exponentially-decaying sample size of 1028 and alpha of 0.015 --------------------------------------------------------------------------------------- - -``` -VmPeak: 153296 kB -VmSize: 153296 kB -VmLck: 0 kB -VmHWM: 101176 kB -VmRSS: 101176 kB -VmData: 146124 kB -VmStk: 136 kB -VmExe: 1048 kB -VmLib: 1848 kB -VmPTE: 240 kB -VmSwap: 0 kB -``` - -**11.0692 kB virtual, 10.0056 kB resident per histogram.** - -50000 histograms with an exponentially-decaying sample size of 1028 and alpha of 0.015 
--------------------------------------------------------------------------------------- - -``` -VmPeak: 557264 kB -VmSize: 557264 kB -VmLck: 0 kB -VmHWM: 501056 kB -VmRSS: 501056 kB -VmData: 550092 kB -VmStk: 136 kB -VmExe: 1048 kB -VmLib: 1848 kB -VmPTE: 1032 kB -VmSwap: 0 kB -``` - -**10.2932 kB virtual, 9.99872 kB resident per histogram.** - -1000 meters ------------ - -``` -VmPeak: 74504 kB -VmSize: 74504 kB -VmLck: 0 kB -VmHWM: 24124 kB -VmRSS: 24124 kB -VmData: 67340 kB -VmStk: 136 kB -VmExe: 1040 kB -VmLib: 1848 kB -VmPTE: 92 kB -VmSwap: 0 kB -``` - -**31.9 kB virtual, 23.004 kB resident per meter.** - -10000 meters ------------- - -``` -VmPeak: 278920 kB -VmSize: 278920 kB -VmLck: 0 kB -VmHWM: 227300 kB -VmRSS: 227300 kB -VmData: 271756 kB -VmStk: 136 kB -VmExe: 1040 kB -VmLib: 1848 kB -VmPTE: 488 kB -VmSwap: 0 kB -``` - -**23.6316 kB virtual, 22.618 kB resident per meter.** diff --git a/vendor/github.com/ethereum/go-ethereum/metrics/meter.go b/vendor/github.com/ethereum/go-ethereum/metrics/meter.go deleted file mode 100644 index 58d170f..0000000 --- a/vendor/github.com/ethereum/go-ethereum/metrics/meter.go +++ /dev/null @@ -1,300 +0,0 @@ -package metrics - -import ( - "sync" - "time" -) - -// Meters count events to produce exponentially-weighted moving average rates -// at one-, five-, and fifteen-minutes and a mean rate. -type Meter interface { - Count() int64 - Mark(int64) - Rate1() float64 - Rate5() float64 - Rate15() float64 - RateMean() float64 - Snapshot() Meter - Stop() -} - -// GetOrRegisterMeter returns an existing Meter or constructs and registers a -// new StandardMeter. -// Be sure to unregister the meter from the registry once it is of no use to -// allow for garbage collection. 
-func GetOrRegisterMeter(name string, r Registry) Meter { - if nil == r { - r = DefaultRegistry - } - return r.GetOrRegister(name, NewMeter).(Meter) -} - -// GetOrRegisterMeterForced returns an existing Meter or constructs and registers a -// new StandardMeter no matter the global switch is enabled or not. -// Be sure to unregister the meter from the registry once it is of no use to -// allow for garbage collection. -func GetOrRegisterMeterForced(name string, r Registry) Meter { - if nil == r { - r = DefaultRegistry - } - return r.GetOrRegister(name, NewMeterForced).(Meter) -} - -// NewMeter constructs a new StandardMeter and launches a goroutine. -// Be sure to call Stop() once the meter is of no use to allow for garbage collection. -func NewMeter() Meter { - if !Enabled { - return NilMeter{} - } - m := newStandardMeter() - arbiter.Lock() - defer arbiter.Unlock() - arbiter.meters[m] = struct{}{} - if !arbiter.started { - arbiter.started = true - go arbiter.tick() - } - return m -} - -// NewMeterForced constructs a new StandardMeter and launches a goroutine no matter -// the global switch is enabled or not. -// Be sure to call Stop() once the meter is of no use to allow for garbage collection. -func NewMeterForced() Meter { - m := newStandardMeter() - arbiter.Lock() - defer arbiter.Unlock() - arbiter.meters[m] = struct{}{} - if !arbiter.started { - arbiter.started = true - go arbiter.tick() - } - return m -} - -// NewRegisteredMeter constructs and registers a new StandardMeter -// and launches a goroutine. -// Be sure to unregister the meter from the registry once it is of no use to -// allow for garbage collection. -func NewRegisteredMeter(name string, r Registry) Meter { - c := NewMeter() - if nil == r { - r = DefaultRegistry - } - r.Register(name, c) - return c -} - -// NewRegisteredMeterForced constructs and registers a new StandardMeter -// and launches a goroutine no matter the global switch is enabled or not. 
-// Be sure to unregister the meter from the registry once it is of no use to -// allow for garbage collection. -func NewRegisteredMeterForced(name string, r Registry) Meter { - c := NewMeterForced() - if nil == r { - r = DefaultRegistry - } - r.Register(name, c) - return c -} - -// MeterSnapshot is a read-only copy of another Meter. -type MeterSnapshot struct { - count int64 - rate1, rate5, rate15, rateMean float64 -} - -// Count returns the count of events at the time the snapshot was taken. -func (m *MeterSnapshot) Count() int64 { return m.count } - -// Mark panics. -func (*MeterSnapshot) Mark(n int64) { - panic("Mark called on a MeterSnapshot") -} - -// Rate1 returns the one-minute moving average rate of events per second at the -// time the snapshot was taken. -func (m *MeterSnapshot) Rate1() float64 { return m.rate1 } - -// Rate5 returns the five-minute moving average rate of events per second at -// the time the snapshot was taken. -func (m *MeterSnapshot) Rate5() float64 { return m.rate5 } - -// Rate15 returns the fifteen-minute moving average rate of events per second -// at the time the snapshot was taken. -func (m *MeterSnapshot) Rate15() float64 { return m.rate15 } - -// RateMean returns the meter's mean rate of events per second at the time the -// snapshot was taken. -func (m *MeterSnapshot) RateMean() float64 { return m.rateMean } - -// Snapshot returns the snapshot. -func (m *MeterSnapshot) Snapshot() Meter { return m } - -// Stop is a no-op. -func (m *MeterSnapshot) Stop() {} - -// NilMeter is a no-op Meter. -type NilMeter struct{} - -// Count is a no-op. -func (NilMeter) Count() int64 { return 0 } - -// Mark is a no-op. -func (NilMeter) Mark(n int64) {} - -// Rate1 is a no-op. -func (NilMeter) Rate1() float64 { return 0.0 } - -// Rate5 is a no-op. -func (NilMeter) Rate5() float64 { return 0.0 } - -// Rate15is a no-op. -func (NilMeter) Rate15() float64 { return 0.0 } - -// RateMean is a no-op. 
-func (NilMeter) RateMean() float64 { return 0.0 } - -// Snapshot is a no-op. -func (NilMeter) Snapshot() Meter { return NilMeter{} } - -// Stop is a no-op. -func (NilMeter) Stop() {} - -// StandardMeter is the standard implementation of a Meter. -type StandardMeter struct { - lock sync.RWMutex - snapshot *MeterSnapshot - a1, a5, a15 EWMA - startTime time.Time - stopped bool -} - -func newStandardMeter() *StandardMeter { - return &StandardMeter{ - snapshot: &MeterSnapshot{}, - a1: NewEWMA1(), - a5: NewEWMA5(), - a15: NewEWMA15(), - startTime: time.Now(), - } -} - -// Stop stops the meter, Mark() will be a no-op if you use it after being stopped. -func (m *StandardMeter) Stop() { - m.lock.Lock() - stopped := m.stopped - m.stopped = true - m.lock.Unlock() - if !stopped { - arbiter.Lock() - delete(arbiter.meters, m) - arbiter.Unlock() - } -} - -// Count returns the number of events recorded. -func (m *StandardMeter) Count() int64 { - m.lock.RLock() - count := m.snapshot.count - m.lock.RUnlock() - return count -} - -// Mark records the occurrence of n events. -func (m *StandardMeter) Mark(n int64) { - m.lock.Lock() - defer m.lock.Unlock() - if m.stopped { - return - } - m.snapshot.count += n - m.a1.Update(n) - m.a5.Update(n) - m.a15.Update(n) - m.updateSnapshot() -} - -// Rate1 returns the one-minute moving average rate of events per second. -func (m *StandardMeter) Rate1() float64 { - m.lock.RLock() - rate1 := m.snapshot.rate1 - m.lock.RUnlock() - return rate1 -} - -// Rate5 returns the five-minute moving average rate of events per second. -func (m *StandardMeter) Rate5() float64 { - m.lock.RLock() - rate5 := m.snapshot.rate5 - m.lock.RUnlock() - return rate5 -} - -// Rate15 returns the fifteen-minute moving average rate of events per second. -func (m *StandardMeter) Rate15() float64 { - m.lock.RLock() - rate15 := m.snapshot.rate15 - m.lock.RUnlock() - return rate15 -} - -// RateMean returns the meter's mean rate of events per second. 
-func (m *StandardMeter) RateMean() float64 { - m.lock.RLock() - rateMean := m.snapshot.rateMean - m.lock.RUnlock() - return rateMean -} - -// Snapshot returns a read-only copy of the meter. -func (m *StandardMeter) Snapshot() Meter { - m.lock.RLock() - snapshot := *m.snapshot - m.lock.RUnlock() - return &snapshot -} - -func (m *StandardMeter) updateSnapshot() { - // should run with write lock held on m.lock - snapshot := m.snapshot - snapshot.rate1 = m.a1.Rate() - snapshot.rate5 = m.a5.Rate() - snapshot.rate15 = m.a15.Rate() - snapshot.rateMean = float64(snapshot.count) / time.Since(m.startTime).Seconds() -} - -func (m *StandardMeter) tick() { - m.lock.Lock() - defer m.lock.Unlock() - m.a1.Tick() - m.a5.Tick() - m.a15.Tick() - m.updateSnapshot() -} - -// meterArbiter ticks meters every 5s from a single goroutine. -// meters are references in a set for future stopping. -type meterArbiter struct { - sync.RWMutex - started bool - meters map[*StandardMeter]struct{} - ticker *time.Ticker -} - -var arbiter = meterArbiter{ticker: time.NewTicker(5e9), meters: make(map[*StandardMeter]struct{})} - -// Ticks meters on the scheduled interval -func (ma *meterArbiter) tick() { - for range ma.ticker.C { - ma.tickMeters() - } -} - -func (ma *meterArbiter) tickMeters() { - ma.RLock() - defer ma.RUnlock() - for meter := range ma.meters { - meter.tick() - } -} diff --git a/vendor/github.com/ethereum/go-ethereum/metrics/metrics.go b/vendor/github.com/ethereum/go-ethereum/metrics/metrics.go deleted file mode 100644 index 747d647..0000000 --- a/vendor/github.com/ethereum/go-ethereum/metrics/metrics.go +++ /dev/null @@ -1,126 +0,0 @@ -// Go port of Coda Hale's Metrics library -// -// -// -// Coda Hale's original work: -package metrics - -import ( - "os" - "runtime" - "strings" - "time" - - "github.com/ethereum/go-ethereum/log" -) - -// Enabled is checked by the constructor functions for all of the -// standard metrics. If it is true, the metric returned is a stub. 
-// -// This global kill-switch helps quantify the observer effect and makes -// for less cluttered pprof profiles. -var Enabled = false - -// EnabledExpensive is a soft-flag meant for external packages to check if costly -// metrics gathering is allowed or not. The goal is to separate standard metrics -// for health monitoring and debug metrics that might impact runtime performance. -var EnabledExpensive = false - -// enablerFlags is the CLI flag names to use to enable metrics collections. -var enablerFlags = []string{"metrics"} - -// expensiveEnablerFlags is the CLI flag names to use to enable metrics collections. -var expensiveEnablerFlags = []string{"metrics.expensive"} - -// Init enables or disables the metrics system. Since we need this to run before -// any other code gets to create meters and timers, we'll actually do an ugly hack -// and peek into the command line args for the metrics flag. -func init() { - for _, arg := range os.Args { - flag := strings.TrimLeft(arg, "-") - - for _, enabler := range enablerFlags { - if !Enabled && flag == enabler { - log.Info("Enabling metrics collection") - Enabled = true - } - } - for _, enabler := range expensiveEnablerFlags { - if !EnabledExpensive && flag == enabler { - log.Info("Enabling expensive metrics collection") - EnabledExpensive = true - } - } - } -} - -// CollectProcessMetrics periodically collects various metrics about the running -// process. 
-func CollectProcessMetrics(refresh time.Duration) { - // Short circuit if the metrics system is disabled - if !Enabled { - return - } - refreshFreq := int64(refresh / time.Second) - - // Create the various data collectors - cpuStats := make([]*CPUStats, 2) - memstats := make([]*runtime.MemStats, 2) - diskstats := make([]*DiskStats, 2) - for i := 0; i < len(memstats); i++ { - cpuStats[i] = new(CPUStats) - memstats[i] = new(runtime.MemStats) - diskstats[i] = new(DiskStats) - } - // Define the various metrics to collect - var ( - cpuSysLoad = GetOrRegisterGauge("system/cpu/sysload", DefaultRegistry) - cpuSysWait = GetOrRegisterGauge("system/cpu/syswait", DefaultRegistry) - cpuProcLoad = GetOrRegisterGauge("system/cpu/procload", DefaultRegistry) - cpuThreads = GetOrRegisterGauge("system/cpu/threads", DefaultRegistry) - cpuGoroutines = GetOrRegisterGauge("system/cpu/goroutines", DefaultRegistry) - - memPauses = GetOrRegisterMeter("system/memory/pauses", DefaultRegistry) - memAllocs = GetOrRegisterMeter("system/memory/allocs", DefaultRegistry) - memFrees = GetOrRegisterMeter("system/memory/frees", DefaultRegistry) - memHeld = GetOrRegisterGauge("system/memory/held", DefaultRegistry) - memUsed = GetOrRegisterGauge("system/memory/used", DefaultRegistry) - - diskReads = GetOrRegisterMeter("system/disk/readcount", DefaultRegistry) - diskReadBytes = GetOrRegisterMeter("system/disk/readdata", DefaultRegistry) - diskReadBytesCounter = GetOrRegisterCounter("system/disk/readbytes", DefaultRegistry) - diskWrites = GetOrRegisterMeter("system/disk/writecount", DefaultRegistry) - diskWriteBytes = GetOrRegisterMeter("system/disk/writedata", DefaultRegistry) - diskWriteBytesCounter = GetOrRegisterCounter("system/disk/writebytes", DefaultRegistry) - ) - // Iterate loading the different stats and updating the meters - for i := 1; ; i++ { - location1 := i % 2 - location2 := (i - 1) % 2 - - ReadCPUStats(cpuStats[location1]) - cpuSysLoad.Update((cpuStats[location1].GlobalTime - 
cpuStats[location2].GlobalTime) / refreshFreq) - cpuSysWait.Update((cpuStats[location1].GlobalWait - cpuStats[location2].GlobalWait) / refreshFreq) - cpuProcLoad.Update((cpuStats[location1].LocalTime - cpuStats[location2].LocalTime) / refreshFreq) - cpuThreads.Update(int64(threadCreateProfile.Count())) - cpuGoroutines.Update(int64(runtime.NumGoroutine())) - - runtime.ReadMemStats(memstats[location1]) - memPauses.Mark(int64(memstats[location1].PauseTotalNs - memstats[location2].PauseTotalNs)) - memAllocs.Mark(int64(memstats[location1].Mallocs - memstats[location2].Mallocs)) - memFrees.Mark(int64(memstats[location1].Frees - memstats[location2].Frees)) - memHeld.Update(int64(memstats[location1].HeapSys - memstats[location1].HeapReleased)) - memUsed.Update(int64(memstats[location1].Alloc)) - - if ReadDiskStats(diskstats[location1]) == nil { - diskReads.Mark(diskstats[location1].ReadCount - diskstats[location2].ReadCount) - diskReadBytes.Mark(diskstats[location1].ReadBytes - diskstats[location2].ReadBytes) - diskWrites.Mark(diskstats[location1].WriteCount - diskstats[location2].WriteCount) - diskWriteBytes.Mark(diskstats[location1].WriteBytes - diskstats[location2].WriteBytes) - - diskReadBytesCounter.Inc(diskstats[location1].ReadBytes - diskstats[location2].ReadBytes) - diskWriteBytesCounter.Inc(diskstats[location1].WriteBytes - diskstats[location2].WriteBytes) - } - time.Sleep(refresh) - } -} diff --git a/vendor/github.com/ethereum/go-ethereum/metrics/opentsdb.go b/vendor/github.com/ethereum/go-ethereum/metrics/opentsdb.go deleted file mode 100644 index 3fde554..0000000 --- a/vendor/github.com/ethereum/go-ethereum/metrics/opentsdb.go +++ /dev/null @@ -1,119 +0,0 @@ -package metrics - -import ( - "bufio" - "fmt" - "log" - "net" - "os" - "strings" - "time" -) - -var shortHostName = "" - -// OpenTSDBConfig provides a container with configuration parameters for -// the OpenTSDB exporter -type OpenTSDBConfig struct { - Addr *net.TCPAddr // Network address to connect to - 
Registry Registry // Registry to be exported - FlushInterval time.Duration // Flush interval - DurationUnit time.Duration // Time conversion unit for durations - Prefix string // Prefix to be prepended to metric names -} - -// OpenTSDB is a blocking exporter function which reports metrics in r -// to a TSDB server located at addr, flushing them every d duration -// and prepending metric names with prefix. -func OpenTSDB(r Registry, d time.Duration, prefix string, addr *net.TCPAddr) { - OpenTSDBWithConfig(OpenTSDBConfig{ - Addr: addr, - Registry: r, - FlushInterval: d, - DurationUnit: time.Nanosecond, - Prefix: prefix, - }) -} - -// OpenTSDBWithConfig is a blocking exporter function just like OpenTSDB, -// but it takes a OpenTSDBConfig instead. -func OpenTSDBWithConfig(c OpenTSDBConfig) { - for range time.Tick(c.FlushInterval) { - if err := openTSDB(&c); nil != err { - log.Println(err) - } - } -} - -func getShortHostname() string { - if shortHostName == "" { - host, _ := os.Hostname() - if index := strings.Index(host, "."); index > 0 { - shortHostName = host[:index] - } else { - shortHostName = host - } - } - return shortHostName -} - -func openTSDB(c *OpenTSDBConfig) error { - shortHostname := getShortHostname() - now := time.Now().Unix() - du := float64(c.DurationUnit) - conn, err := net.DialTCP("tcp", nil, c.Addr) - if nil != err { - return err - } - defer conn.Close() - w := bufio.NewWriter(conn) - c.Registry.Each(func(name string, i interface{}) { - switch metric := i.(type) { - case Counter: - fmt.Fprintf(w, "put %s.%s.count %d %d host=%s\n", c.Prefix, name, now, metric.Count(), shortHostname) - case Gauge: - fmt.Fprintf(w, "put %s.%s.value %d %d host=%s\n", c.Prefix, name, now, metric.Value(), shortHostname) - case GaugeFloat64: - fmt.Fprintf(w, "put %s.%s.value %d %f host=%s\n", c.Prefix, name, now, metric.Value(), shortHostname) - case Histogram: - h := metric.Snapshot() - ps := h.Percentiles([]float64{0.5, 0.75, 0.95, 0.99, 0.999}) - fmt.Fprintf(w, "put 
%s.%s.count %d %d host=%s\n", c.Prefix, name, now, h.Count(), shortHostname) - fmt.Fprintf(w, "put %s.%s.min %d %d host=%s\n", c.Prefix, name, now, h.Min(), shortHostname) - fmt.Fprintf(w, "put %s.%s.max %d %d host=%s\n", c.Prefix, name, now, h.Max(), shortHostname) - fmt.Fprintf(w, "put %s.%s.mean %d %.2f host=%s\n", c.Prefix, name, now, h.Mean(), shortHostname) - fmt.Fprintf(w, "put %s.%s.std-dev %d %.2f host=%s\n", c.Prefix, name, now, h.StdDev(), shortHostname) - fmt.Fprintf(w, "put %s.%s.50-percentile %d %.2f host=%s\n", c.Prefix, name, now, ps[0], shortHostname) - fmt.Fprintf(w, "put %s.%s.75-percentile %d %.2f host=%s\n", c.Prefix, name, now, ps[1], shortHostname) - fmt.Fprintf(w, "put %s.%s.95-percentile %d %.2f host=%s\n", c.Prefix, name, now, ps[2], shortHostname) - fmt.Fprintf(w, "put %s.%s.99-percentile %d %.2f host=%s\n", c.Prefix, name, now, ps[3], shortHostname) - fmt.Fprintf(w, "put %s.%s.999-percentile %d %.2f host=%s\n", c.Prefix, name, now, ps[4], shortHostname) - case Meter: - m := metric.Snapshot() - fmt.Fprintf(w, "put %s.%s.count %d %d host=%s\n", c.Prefix, name, now, m.Count(), shortHostname) - fmt.Fprintf(w, "put %s.%s.one-minute %d %.2f host=%s\n", c.Prefix, name, now, m.Rate1(), shortHostname) - fmt.Fprintf(w, "put %s.%s.five-minute %d %.2f host=%s\n", c.Prefix, name, now, m.Rate5(), shortHostname) - fmt.Fprintf(w, "put %s.%s.fifteen-minute %d %.2f host=%s\n", c.Prefix, name, now, m.Rate15(), shortHostname) - fmt.Fprintf(w, "put %s.%s.mean %d %.2f host=%s\n", c.Prefix, name, now, m.RateMean(), shortHostname) - case Timer: - t := metric.Snapshot() - ps := t.Percentiles([]float64{0.5, 0.75, 0.95, 0.99, 0.999}) - fmt.Fprintf(w, "put %s.%s.count %d %d host=%s\n", c.Prefix, name, now, t.Count(), shortHostname) - fmt.Fprintf(w, "put %s.%s.min %d %d host=%s\n", c.Prefix, name, now, t.Min()/int64(du), shortHostname) - fmt.Fprintf(w, "put %s.%s.max %d %d host=%s\n", c.Prefix, name, now, t.Max()/int64(du), shortHostname) - fmt.Fprintf(w, "put 
%s.%s.mean %d %.2f host=%s\n", c.Prefix, name, now, t.Mean()/du, shortHostname) - fmt.Fprintf(w, "put %s.%s.std-dev %d %.2f host=%s\n", c.Prefix, name, now, t.StdDev()/du, shortHostname) - fmt.Fprintf(w, "put %s.%s.50-percentile %d %.2f host=%s\n", c.Prefix, name, now, ps[0]/du, shortHostname) - fmt.Fprintf(w, "put %s.%s.75-percentile %d %.2f host=%s\n", c.Prefix, name, now, ps[1]/du, shortHostname) - fmt.Fprintf(w, "put %s.%s.95-percentile %d %.2f host=%s\n", c.Prefix, name, now, ps[2]/du, shortHostname) - fmt.Fprintf(w, "put %s.%s.99-percentile %d %.2f host=%s\n", c.Prefix, name, now, ps[3]/du, shortHostname) - fmt.Fprintf(w, "put %s.%s.999-percentile %d %.2f host=%s\n", c.Prefix, name, now, ps[4]/du, shortHostname) - fmt.Fprintf(w, "put %s.%s.one-minute %d %.2f host=%s\n", c.Prefix, name, now, t.Rate1(), shortHostname) - fmt.Fprintf(w, "put %s.%s.five-minute %d %.2f host=%s\n", c.Prefix, name, now, t.Rate5(), shortHostname) - fmt.Fprintf(w, "put %s.%s.fifteen-minute %d %.2f host=%s\n", c.Prefix, name, now, t.Rate15(), shortHostname) - fmt.Fprintf(w, "put %s.%s.mean-rate %d %.2f host=%s\n", c.Prefix, name, now, t.RateMean(), shortHostname) - } - w.Flush() - }) - return nil -} diff --git a/vendor/github.com/ethereum/go-ethereum/metrics/registry.go b/vendor/github.com/ethereum/go-ethereum/metrics/registry.go deleted file mode 100644 index c5435ad..0000000 --- a/vendor/github.com/ethereum/go-ethereum/metrics/registry.go +++ /dev/null @@ -1,358 +0,0 @@ -package metrics - -import ( - "fmt" - "reflect" - "strings" - "sync" -) - -// DuplicateMetric is the error returned by Registry.Register when a metric -// already exists. If you mean to Register that metric you must first -// Unregister the existing metric. 
-type DuplicateMetric string - -func (err DuplicateMetric) Error() string { - return fmt.Sprintf("duplicate metric: %s", string(err)) -} - -// A Registry holds references to a set of metrics by name and can iterate -// over them, calling callback functions provided by the user. -// -// This is an interface so as to encourage other structs to implement -// the Registry API as appropriate. -type Registry interface { - - // Call the given function for each registered metric. - Each(func(string, interface{})) - - // Get the metric by the given name or nil if none is registered. - Get(string) interface{} - - // GetAll metrics in the Registry. - GetAll() map[string]map[string]interface{} - - // Gets an existing metric or registers the given one. - // The interface can be the metric to register if not found in registry, - // or a function returning the metric for lazy instantiation. - GetOrRegister(string, interface{}) interface{} - - // Register the given metric under the given name. - Register(string, interface{}) error - - // Run all registered healthchecks. - RunHealthchecks() - - // Unregister the metric with the given name. - Unregister(string) - - // Unregister all metrics. (Mostly for testing.) - UnregisterAll() -} - -// The standard implementation of a Registry is a mutex-protected map -// of names to metrics. -type StandardRegistry struct { - metrics map[string]interface{} - mutex sync.Mutex -} - -// Create a new registry. -func NewRegistry() Registry { - return &StandardRegistry{metrics: make(map[string]interface{})} -} - -// Call the given function for each registered metric. -func (r *StandardRegistry) Each(f func(string, interface{})) { - for name, i := range r.registered() { - f(name, i) - } -} - -// Get the metric by the given name or nil if none is registered. -func (r *StandardRegistry) Get(name string) interface{} { - r.mutex.Lock() - defer r.mutex.Unlock() - return r.metrics[name] -} - -// Gets an existing metric or creates and registers a new one. 
Threadsafe -// alternative to calling Get and Register on failure. -// The interface can be the metric to register if not found in registry, -// or a function returning the metric for lazy instantiation. -func (r *StandardRegistry) GetOrRegister(name string, i interface{}) interface{} { - r.mutex.Lock() - defer r.mutex.Unlock() - if metric, ok := r.metrics[name]; ok { - return metric - } - if v := reflect.ValueOf(i); v.Kind() == reflect.Func { - i = v.Call(nil)[0].Interface() - } - r.register(name, i) - return i -} - -// Register the given metric under the given name. Returns a DuplicateMetric -// if a metric by the given name is already registered. -func (r *StandardRegistry) Register(name string, i interface{}) error { - r.mutex.Lock() - defer r.mutex.Unlock() - return r.register(name, i) -} - -// Run all registered healthchecks. -func (r *StandardRegistry) RunHealthchecks() { - r.mutex.Lock() - defer r.mutex.Unlock() - for _, i := range r.metrics { - if h, ok := i.(Healthcheck); ok { - h.Check() - } - } -} - -// GetAll metrics in the Registry -func (r *StandardRegistry) GetAll() map[string]map[string]interface{} { - data := make(map[string]map[string]interface{}) - r.Each(func(name string, i interface{}) { - values := make(map[string]interface{}) - switch metric := i.(type) { - case Counter: - values["count"] = metric.Count() - case Gauge: - values["value"] = metric.Value() - case GaugeFloat64: - values["value"] = metric.Value() - case Healthcheck: - values["error"] = nil - metric.Check() - if err := metric.Error(); nil != err { - values["error"] = metric.Error().Error() - } - case Histogram: - h := metric.Snapshot() - ps := h.Percentiles([]float64{0.5, 0.75, 0.95, 0.99, 0.999}) - values["count"] = h.Count() - values["min"] = h.Min() - values["max"] = h.Max() - values["mean"] = h.Mean() - values["stddev"] = h.StdDev() - values["median"] = ps[0] - values["75%"] = ps[1] - values["95%"] = ps[2] - values["99%"] = ps[3] - values["99.9%"] = ps[4] - case Meter: - m := 
metric.Snapshot() - values["count"] = m.Count() - values["1m.rate"] = m.Rate1() - values["5m.rate"] = m.Rate5() - values["15m.rate"] = m.Rate15() - values["mean.rate"] = m.RateMean() - case Timer: - t := metric.Snapshot() - ps := t.Percentiles([]float64{0.5, 0.75, 0.95, 0.99, 0.999}) - values["count"] = t.Count() - values["min"] = t.Min() - values["max"] = t.Max() - values["mean"] = t.Mean() - values["stddev"] = t.StdDev() - values["median"] = ps[0] - values["75%"] = ps[1] - values["95%"] = ps[2] - values["99%"] = ps[3] - values["99.9%"] = ps[4] - values["1m.rate"] = t.Rate1() - values["5m.rate"] = t.Rate5() - values["15m.rate"] = t.Rate15() - values["mean.rate"] = t.RateMean() - } - data[name] = values - }) - return data -} - -// Unregister the metric with the given name. -func (r *StandardRegistry) Unregister(name string) { - r.mutex.Lock() - defer r.mutex.Unlock() - r.stop(name) - delete(r.metrics, name) -} - -// Unregister all metrics. (Mostly for testing.) -func (r *StandardRegistry) UnregisterAll() { - r.mutex.Lock() - defer r.mutex.Unlock() - for name := range r.metrics { - r.stop(name) - delete(r.metrics, name) - } -} - -func (r *StandardRegistry) register(name string, i interface{}) error { - if _, ok := r.metrics[name]; ok { - return DuplicateMetric(name) - } - switch i.(type) { - case Counter, Gauge, GaugeFloat64, Healthcheck, Histogram, Meter, Timer, ResettingTimer: - r.metrics[name] = i - } - return nil -} - -func (r *StandardRegistry) registered() map[string]interface{} { - r.mutex.Lock() - defer r.mutex.Unlock() - metrics := make(map[string]interface{}, len(r.metrics)) - for name, i := range r.metrics { - metrics[name] = i - } - return metrics -} - -func (r *StandardRegistry) stop(name string) { - if i, ok := r.metrics[name]; ok { - if s, ok := i.(Stoppable); ok { - s.Stop() - } - } -} - -// Stoppable defines the metrics which has to be stopped. 
-type Stoppable interface { - Stop() -} - -type PrefixedRegistry struct { - underlying Registry - prefix string -} - -func NewPrefixedRegistry(prefix string) Registry { - return &PrefixedRegistry{ - underlying: NewRegistry(), - prefix: prefix, - } -} - -func NewPrefixedChildRegistry(parent Registry, prefix string) Registry { - return &PrefixedRegistry{ - underlying: parent, - prefix: prefix, - } -} - -// Call the given function for each registered metric. -func (r *PrefixedRegistry) Each(fn func(string, interface{})) { - wrappedFn := func(prefix string) func(string, interface{}) { - return func(name string, iface interface{}) { - if strings.HasPrefix(name, prefix) { - fn(name, iface) - } else { - return - } - } - } - - baseRegistry, prefix := findPrefix(r, "") - baseRegistry.Each(wrappedFn(prefix)) -} - -func findPrefix(registry Registry, prefix string) (Registry, string) { - switch r := registry.(type) { - case *PrefixedRegistry: - return findPrefix(r.underlying, r.prefix+prefix) - case *StandardRegistry: - return r, prefix - } - return nil, "" -} - -// Get the metric by the given name or nil if none is registered. -func (r *PrefixedRegistry) Get(name string) interface{} { - realName := r.prefix + name - return r.underlying.Get(realName) -} - -// Gets an existing metric or registers the given one. -// The interface can be the metric to register if not found in registry, -// or a function returning the metric for lazy instantiation. -func (r *PrefixedRegistry) GetOrRegister(name string, metric interface{}) interface{} { - realName := r.prefix + name - return r.underlying.GetOrRegister(realName, metric) -} - -// Register the given metric under the given name. The name will be prefixed. -func (r *PrefixedRegistry) Register(name string, metric interface{}) error { - realName := r.prefix + name - return r.underlying.Register(realName, metric) -} - -// Run all registered healthchecks. 
-func (r *PrefixedRegistry) RunHealthchecks() { - r.underlying.RunHealthchecks() -} - -// GetAll metrics in the Registry -func (r *PrefixedRegistry) GetAll() map[string]map[string]interface{} { - return r.underlying.GetAll() -} - -// Unregister the metric with the given name. The name will be prefixed. -func (r *PrefixedRegistry) Unregister(name string) { - realName := r.prefix + name - r.underlying.Unregister(realName) -} - -// Unregister all metrics. (Mostly for testing.) -func (r *PrefixedRegistry) UnregisterAll() { - r.underlying.UnregisterAll() -} - -var ( - DefaultRegistry = NewRegistry() - EphemeralRegistry = NewRegistry() - AccountingRegistry = NewRegistry() // registry used in swarm -) - -// Call the given function for each registered metric. -func Each(f func(string, interface{})) { - DefaultRegistry.Each(f) -} - -// Get the metric by the given name or nil if none is registered. -func Get(name string) interface{} { - return DefaultRegistry.Get(name) -} - -// Gets an existing metric or creates and registers a new one. Threadsafe -// alternative to calling Get and Register on failure. -func GetOrRegister(name string, i interface{}) interface{} { - return DefaultRegistry.GetOrRegister(name, i) -} - -// Register the given metric under the given name. Returns a DuplicateMetric -// if a metric by the given name is already registered. -func Register(name string, i interface{}) error { - return DefaultRegistry.Register(name, i) -} - -// Register the given metric under the given name. Panics if a metric by the -// given name is already registered. -func MustRegister(name string, i interface{}) { - if err := Register(name, i); err != nil { - panic(err) - } -} - -// Run all registered healthchecks. -func RunHealthchecks() { - DefaultRegistry.RunHealthchecks() -} - -// Unregister the metric with the given name. 
-func Unregister(name string) { - DefaultRegistry.Unregister(name) -} diff --git a/vendor/github.com/ethereum/go-ethereum/metrics/resetting_timer.go b/vendor/github.com/ethereum/go-ethereum/metrics/resetting_timer.go deleted file mode 100644 index e5327d3..0000000 --- a/vendor/github.com/ethereum/go-ethereum/metrics/resetting_timer.go +++ /dev/null @@ -1,241 +0,0 @@ -package metrics - -import ( - "math" - "sort" - "sync" - "time" -) - -// Initial slice capacity for the values stored in a ResettingTimer -const InitialResettingTimerSliceCap = 10 - -// ResettingTimer is used for storing aggregated values for timers, which are reset on every flush interval. -type ResettingTimer interface { - Values() []int64 - Snapshot() ResettingTimer - Percentiles([]float64) []int64 - Mean() float64 - Time(func()) - Update(time.Duration) - UpdateSince(time.Time) -} - -// GetOrRegisterResettingTimer returns an existing ResettingTimer or constructs and registers a -// new StandardResettingTimer. -func GetOrRegisterResettingTimer(name string, r Registry) ResettingTimer { - if nil == r { - r = DefaultRegistry - } - return r.GetOrRegister(name, NewResettingTimer).(ResettingTimer) -} - -// NewRegisteredResettingTimer constructs and registers a new StandardResettingTimer. -func NewRegisteredResettingTimer(name string, r Registry) ResettingTimer { - c := NewResettingTimer() - if nil == r { - r = DefaultRegistry - } - r.Register(name, c) - return c -} - -// NewResettingTimer constructs a new StandardResettingTimer -func NewResettingTimer() ResettingTimer { - if !Enabled { - return NilResettingTimer{} - } - return &StandardResettingTimer{ - values: make([]int64, 0, InitialResettingTimerSliceCap), - } -} - -// NilResettingTimer is a no-op ResettingTimer. -type NilResettingTimer struct { -} - -// Values is a no-op. -func (NilResettingTimer) Values() []int64 { return nil } - -// Snapshot is a no-op. 
-func (NilResettingTimer) Snapshot() ResettingTimer { - return &ResettingTimerSnapshot{ - values: []int64{}, - } -} - -// Time is a no-op. -func (NilResettingTimer) Time(func()) {} - -// Update is a no-op. -func (NilResettingTimer) Update(time.Duration) {} - -// Percentiles panics. -func (NilResettingTimer) Percentiles([]float64) []int64 { - panic("Percentiles called on a NilResettingTimer") -} - -// Mean panics. -func (NilResettingTimer) Mean() float64 { - panic("Mean called on a NilResettingTimer") -} - -// UpdateSince is a no-op. -func (NilResettingTimer) UpdateSince(time.Time) {} - -// StandardResettingTimer is the standard implementation of a ResettingTimer. -// and Meter. -type StandardResettingTimer struct { - values []int64 - mutex sync.Mutex -} - -// Values returns a slice with all measurements. -func (t *StandardResettingTimer) Values() []int64 { - return t.values -} - -// Snapshot resets the timer and returns a read-only copy of its contents. -func (t *StandardResettingTimer) Snapshot() ResettingTimer { - t.mutex.Lock() - defer t.mutex.Unlock() - currentValues := t.values - t.values = make([]int64, 0, InitialResettingTimerSliceCap) - - return &ResettingTimerSnapshot{ - values: currentValues, - } -} - -// Percentiles panics. -func (t *StandardResettingTimer) Percentiles([]float64) []int64 { - panic("Percentiles called on a StandardResettingTimer") -} - -// Mean panics. -func (t *StandardResettingTimer) Mean() float64 { - panic("Mean called on a StandardResettingTimer") -} - -// Record the duration of the execution of the given function. -func (t *StandardResettingTimer) Time(f func()) { - ts := time.Now() - f() - t.Update(time.Since(ts)) -} - -// Record the duration of an event. -func (t *StandardResettingTimer) Update(d time.Duration) { - t.mutex.Lock() - defer t.mutex.Unlock() - t.values = append(t.values, int64(d)) -} - -// Record the duration of an event that started at a time and ends now. 
-func (t *StandardResettingTimer) UpdateSince(ts time.Time) { - t.mutex.Lock() - defer t.mutex.Unlock() - t.values = append(t.values, int64(time.Since(ts))) -} - -// ResettingTimerSnapshot is a point-in-time copy of another ResettingTimer. -type ResettingTimerSnapshot struct { - values []int64 - mean float64 - thresholdBoundaries []int64 - calculated bool -} - -// Snapshot returns the snapshot. -func (t *ResettingTimerSnapshot) Snapshot() ResettingTimer { return t } - -// Time panics. -func (*ResettingTimerSnapshot) Time(func()) { - panic("Time called on a ResettingTimerSnapshot") -} - -// Update panics. -func (*ResettingTimerSnapshot) Update(time.Duration) { - panic("Update called on a ResettingTimerSnapshot") -} - -// UpdateSince panics. -func (*ResettingTimerSnapshot) UpdateSince(time.Time) { - panic("UpdateSince called on a ResettingTimerSnapshot") -} - -// Values returns all values from snapshot. -func (t *ResettingTimerSnapshot) Values() []int64 { - return t.values -} - -// Percentiles returns the boundaries for the input percentiles. 
-func (t *ResettingTimerSnapshot) Percentiles(percentiles []float64) []int64 { - t.calc(percentiles) - - return t.thresholdBoundaries -} - -// Mean returns the mean of the snapshotted values -func (t *ResettingTimerSnapshot) Mean() float64 { - if !t.calculated { - t.calc([]float64{}) - } - - return t.mean -} - -func (t *ResettingTimerSnapshot) calc(percentiles []float64) { - sort.Sort(Int64Slice(t.values)) - - count := len(t.values) - if count > 0 { - min := t.values[0] - max := t.values[count-1] - - cumulativeValues := make([]int64, count) - cumulativeValues[0] = min - for i := 1; i < count; i++ { - cumulativeValues[i] = t.values[i] + cumulativeValues[i-1] - } - - t.thresholdBoundaries = make([]int64, len(percentiles)) - - thresholdBoundary := max - - for i, pct := range percentiles { - if count > 1 { - var abs float64 - if pct >= 0 { - abs = pct - } else { - abs = 100 + pct - } - // poor man's math.Round(x): - // math.Floor(x + 0.5) - indexOfPerc := int(math.Floor(((abs / 100.0) * float64(count)) + 0.5)) - if pct >= 0 && indexOfPerc > 0 { - indexOfPerc -= 1 // index offset=0 - } - thresholdBoundary = t.values[indexOfPerc] - } - - t.thresholdBoundaries[i] = thresholdBoundary - } - - sum := cumulativeValues[count-1] - t.mean = float64(sum) / float64(count) - } else { - t.thresholdBoundaries = make([]int64, len(percentiles)) - t.mean = 0 - } - - t.calculated = true -} - -// Int64Slice attaches the methods of sort.Interface to []int64, sorting in increasing order. 
-type Int64Slice []int64 - -func (s Int64Slice) Len() int { return len(s) } -func (s Int64Slice) Less(i, j int) bool { return s[i] < s[j] } -func (s Int64Slice) Swap(i, j int) { s[i], s[j] = s[j], s[i] } diff --git a/vendor/github.com/ethereum/go-ethereum/metrics/runtime.go b/vendor/github.com/ethereum/go-ethereum/metrics/runtime.go deleted file mode 100644 index 9450c47..0000000 --- a/vendor/github.com/ethereum/go-ethereum/metrics/runtime.go +++ /dev/null @@ -1,212 +0,0 @@ -package metrics - -import ( - "runtime" - "runtime/pprof" - "time" -) - -var ( - memStats runtime.MemStats - runtimeMetrics struct { - MemStats struct { - Alloc Gauge - BuckHashSys Gauge - DebugGC Gauge - EnableGC Gauge - Frees Gauge - HeapAlloc Gauge - HeapIdle Gauge - HeapInuse Gauge - HeapObjects Gauge - HeapReleased Gauge - HeapSys Gauge - LastGC Gauge - Lookups Gauge - Mallocs Gauge - MCacheInuse Gauge - MCacheSys Gauge - MSpanInuse Gauge - MSpanSys Gauge - NextGC Gauge - NumGC Gauge - GCCPUFraction GaugeFloat64 - PauseNs Histogram - PauseTotalNs Gauge - StackInuse Gauge - StackSys Gauge - Sys Gauge - TotalAlloc Gauge - } - NumCgoCall Gauge - NumGoroutine Gauge - NumThread Gauge - ReadMemStats Timer - } - frees uint64 - lookups uint64 - mallocs uint64 - numGC uint32 - numCgoCalls int64 - - threadCreateProfile = pprof.Lookup("threadcreate") -) - -// Capture new values for the Go runtime statistics exported in -// runtime.MemStats. This is designed to be called as a goroutine. -func CaptureRuntimeMemStats(r Registry, d time.Duration) { - for range time.Tick(d) { - CaptureRuntimeMemStatsOnce(r) - } -} - -// Capture new values for the Go runtime statistics exported in -// runtime.MemStats. This is designed to be called in a background -// goroutine. Giving a registry which has not been given to -// RegisterRuntimeMemStats will panic. 
-// -// Be very careful with this because runtime.ReadMemStats calls the C -// functions runtime·semacquire(&runtime·worldsema) and runtime·stoptheworld() -// and that last one does what it says on the tin. -func CaptureRuntimeMemStatsOnce(r Registry) { - t := time.Now() - runtime.ReadMemStats(&memStats) // This takes 50-200us. - runtimeMetrics.ReadMemStats.UpdateSince(t) - - runtimeMetrics.MemStats.Alloc.Update(int64(memStats.Alloc)) - runtimeMetrics.MemStats.BuckHashSys.Update(int64(memStats.BuckHashSys)) - if memStats.DebugGC { - runtimeMetrics.MemStats.DebugGC.Update(1) - } else { - runtimeMetrics.MemStats.DebugGC.Update(0) - } - if memStats.EnableGC { - runtimeMetrics.MemStats.EnableGC.Update(1) - } else { - runtimeMetrics.MemStats.EnableGC.Update(0) - } - - runtimeMetrics.MemStats.Frees.Update(int64(memStats.Frees - frees)) - runtimeMetrics.MemStats.HeapAlloc.Update(int64(memStats.HeapAlloc)) - runtimeMetrics.MemStats.HeapIdle.Update(int64(memStats.HeapIdle)) - runtimeMetrics.MemStats.HeapInuse.Update(int64(memStats.HeapInuse)) - runtimeMetrics.MemStats.HeapObjects.Update(int64(memStats.HeapObjects)) - runtimeMetrics.MemStats.HeapReleased.Update(int64(memStats.HeapReleased)) - runtimeMetrics.MemStats.HeapSys.Update(int64(memStats.HeapSys)) - runtimeMetrics.MemStats.LastGC.Update(int64(memStats.LastGC)) - runtimeMetrics.MemStats.Lookups.Update(int64(memStats.Lookups - lookups)) - runtimeMetrics.MemStats.Mallocs.Update(int64(memStats.Mallocs - mallocs)) - runtimeMetrics.MemStats.MCacheInuse.Update(int64(memStats.MCacheInuse)) - runtimeMetrics.MemStats.MCacheSys.Update(int64(memStats.MCacheSys)) - runtimeMetrics.MemStats.MSpanInuse.Update(int64(memStats.MSpanInuse)) - runtimeMetrics.MemStats.MSpanSys.Update(int64(memStats.MSpanSys)) - runtimeMetrics.MemStats.NextGC.Update(int64(memStats.NextGC)) - runtimeMetrics.MemStats.NumGC.Update(int64(memStats.NumGC - numGC)) - runtimeMetrics.MemStats.GCCPUFraction.Update(gcCPUFraction(&memStats)) - - // - i := numGC % 
uint32(len(memStats.PauseNs)) - ii := memStats.NumGC % uint32(len(memStats.PauseNs)) - if memStats.NumGC-numGC >= uint32(len(memStats.PauseNs)) { - for i = 0; i < uint32(len(memStats.PauseNs)); i++ { - runtimeMetrics.MemStats.PauseNs.Update(int64(memStats.PauseNs[i])) - } - } else { - if i > ii { - for ; i < uint32(len(memStats.PauseNs)); i++ { - runtimeMetrics.MemStats.PauseNs.Update(int64(memStats.PauseNs[i])) - } - i = 0 - } - for ; i < ii; i++ { - runtimeMetrics.MemStats.PauseNs.Update(int64(memStats.PauseNs[i])) - } - } - frees = memStats.Frees - lookups = memStats.Lookups - mallocs = memStats.Mallocs - numGC = memStats.NumGC - - runtimeMetrics.MemStats.PauseTotalNs.Update(int64(memStats.PauseTotalNs)) - runtimeMetrics.MemStats.StackInuse.Update(int64(memStats.StackInuse)) - runtimeMetrics.MemStats.StackSys.Update(int64(memStats.StackSys)) - runtimeMetrics.MemStats.Sys.Update(int64(memStats.Sys)) - runtimeMetrics.MemStats.TotalAlloc.Update(int64(memStats.TotalAlloc)) - - currentNumCgoCalls := numCgoCall() - runtimeMetrics.NumCgoCall.Update(currentNumCgoCalls - numCgoCalls) - numCgoCalls = currentNumCgoCalls - - runtimeMetrics.NumGoroutine.Update(int64(runtime.NumGoroutine())) - - runtimeMetrics.NumThread.Update(int64(threadCreateProfile.Count())) -} - -// Register runtimeMetrics for the Go runtime statistics exported in runtime and -// specifically runtime.MemStats. The runtimeMetrics are named by their -// fully-qualified Go symbols, i.e. runtime.MemStats.Alloc. 
-func RegisterRuntimeMemStats(r Registry) { - runtimeMetrics.MemStats.Alloc = NewGauge() - runtimeMetrics.MemStats.BuckHashSys = NewGauge() - runtimeMetrics.MemStats.DebugGC = NewGauge() - runtimeMetrics.MemStats.EnableGC = NewGauge() - runtimeMetrics.MemStats.Frees = NewGauge() - runtimeMetrics.MemStats.HeapAlloc = NewGauge() - runtimeMetrics.MemStats.HeapIdle = NewGauge() - runtimeMetrics.MemStats.HeapInuse = NewGauge() - runtimeMetrics.MemStats.HeapObjects = NewGauge() - runtimeMetrics.MemStats.HeapReleased = NewGauge() - runtimeMetrics.MemStats.HeapSys = NewGauge() - runtimeMetrics.MemStats.LastGC = NewGauge() - runtimeMetrics.MemStats.Lookups = NewGauge() - runtimeMetrics.MemStats.Mallocs = NewGauge() - runtimeMetrics.MemStats.MCacheInuse = NewGauge() - runtimeMetrics.MemStats.MCacheSys = NewGauge() - runtimeMetrics.MemStats.MSpanInuse = NewGauge() - runtimeMetrics.MemStats.MSpanSys = NewGauge() - runtimeMetrics.MemStats.NextGC = NewGauge() - runtimeMetrics.MemStats.NumGC = NewGauge() - runtimeMetrics.MemStats.GCCPUFraction = NewGaugeFloat64() - runtimeMetrics.MemStats.PauseNs = NewHistogram(NewExpDecaySample(1028, 0.015)) - runtimeMetrics.MemStats.PauseTotalNs = NewGauge() - runtimeMetrics.MemStats.StackInuse = NewGauge() - runtimeMetrics.MemStats.StackSys = NewGauge() - runtimeMetrics.MemStats.Sys = NewGauge() - runtimeMetrics.MemStats.TotalAlloc = NewGauge() - runtimeMetrics.NumCgoCall = NewGauge() - runtimeMetrics.NumGoroutine = NewGauge() - runtimeMetrics.NumThread = NewGauge() - runtimeMetrics.ReadMemStats = NewTimer() - - r.Register("runtime.MemStats.Alloc", runtimeMetrics.MemStats.Alloc) - r.Register("runtime.MemStats.BuckHashSys", runtimeMetrics.MemStats.BuckHashSys) - r.Register("runtime.MemStats.DebugGC", runtimeMetrics.MemStats.DebugGC) - r.Register("runtime.MemStats.EnableGC", runtimeMetrics.MemStats.EnableGC) - r.Register("runtime.MemStats.Frees", runtimeMetrics.MemStats.Frees) - r.Register("runtime.MemStats.HeapAlloc", 
runtimeMetrics.MemStats.HeapAlloc) - r.Register("runtime.MemStats.HeapIdle", runtimeMetrics.MemStats.HeapIdle) - r.Register("runtime.MemStats.HeapInuse", runtimeMetrics.MemStats.HeapInuse) - r.Register("runtime.MemStats.HeapObjects", runtimeMetrics.MemStats.HeapObjects) - r.Register("runtime.MemStats.HeapReleased", runtimeMetrics.MemStats.HeapReleased) - r.Register("runtime.MemStats.HeapSys", runtimeMetrics.MemStats.HeapSys) - r.Register("runtime.MemStats.LastGC", runtimeMetrics.MemStats.LastGC) - r.Register("runtime.MemStats.Lookups", runtimeMetrics.MemStats.Lookups) - r.Register("runtime.MemStats.Mallocs", runtimeMetrics.MemStats.Mallocs) - r.Register("runtime.MemStats.MCacheInuse", runtimeMetrics.MemStats.MCacheInuse) - r.Register("runtime.MemStats.MCacheSys", runtimeMetrics.MemStats.MCacheSys) - r.Register("runtime.MemStats.MSpanInuse", runtimeMetrics.MemStats.MSpanInuse) - r.Register("runtime.MemStats.MSpanSys", runtimeMetrics.MemStats.MSpanSys) - r.Register("runtime.MemStats.NextGC", runtimeMetrics.MemStats.NextGC) - r.Register("runtime.MemStats.NumGC", runtimeMetrics.MemStats.NumGC) - r.Register("runtime.MemStats.GCCPUFraction", runtimeMetrics.MemStats.GCCPUFraction) - r.Register("runtime.MemStats.PauseNs", runtimeMetrics.MemStats.PauseNs) - r.Register("runtime.MemStats.PauseTotalNs", runtimeMetrics.MemStats.PauseTotalNs) - r.Register("runtime.MemStats.StackInuse", runtimeMetrics.MemStats.StackInuse) - r.Register("runtime.MemStats.StackSys", runtimeMetrics.MemStats.StackSys) - r.Register("runtime.MemStats.Sys", runtimeMetrics.MemStats.Sys) - r.Register("runtime.MemStats.TotalAlloc", runtimeMetrics.MemStats.TotalAlloc) - r.Register("runtime.NumCgoCall", runtimeMetrics.NumCgoCall) - r.Register("runtime.NumGoroutine", runtimeMetrics.NumGoroutine) - r.Register("runtime.NumThread", runtimeMetrics.NumThread) - r.Register("runtime.ReadMemStats", runtimeMetrics.ReadMemStats) -} diff --git a/vendor/github.com/ethereum/go-ethereum/metrics/runtime_cgo.go 
b/vendor/github.com/ethereum/go-ethereum/metrics/runtime_cgo.go deleted file mode 100644 index e3391f4..0000000 --- a/vendor/github.com/ethereum/go-ethereum/metrics/runtime_cgo.go +++ /dev/null @@ -1,10 +0,0 @@ -// +build cgo -// +build !appengine - -package metrics - -import "runtime" - -func numCgoCall() int64 { - return runtime.NumCgoCall() -} diff --git a/vendor/github.com/ethereum/go-ethereum/metrics/runtime_gccpufraction.go b/vendor/github.com/ethereum/go-ethereum/metrics/runtime_gccpufraction.go deleted file mode 100644 index ca12c05..0000000 --- a/vendor/github.com/ethereum/go-ethereum/metrics/runtime_gccpufraction.go +++ /dev/null @@ -1,9 +0,0 @@ -// +build go1.5 - -package metrics - -import "runtime" - -func gcCPUFraction(memStats *runtime.MemStats) float64 { - return memStats.GCCPUFraction -} diff --git a/vendor/github.com/ethereum/go-ethereum/metrics/runtime_no_cgo.go b/vendor/github.com/ethereum/go-ethereum/metrics/runtime_no_cgo.go deleted file mode 100644 index 616a3b4..0000000 --- a/vendor/github.com/ethereum/go-ethereum/metrics/runtime_no_cgo.go +++ /dev/null @@ -1,7 +0,0 @@ -// +build !cgo appengine - -package metrics - -func numCgoCall() int64 { - return 0 -} diff --git a/vendor/github.com/ethereum/go-ethereum/metrics/runtime_no_gccpufraction.go b/vendor/github.com/ethereum/go-ethereum/metrics/runtime_no_gccpufraction.go deleted file mode 100644 index be96aa6..0000000 --- a/vendor/github.com/ethereum/go-ethereum/metrics/runtime_no_gccpufraction.go +++ /dev/null @@ -1,9 +0,0 @@ -// +build !go1.5 - -package metrics - -import "runtime" - -func gcCPUFraction(memStats *runtime.MemStats) float64 { - return 0 -} diff --git a/vendor/github.com/ethereum/go-ethereum/metrics/sample.go b/vendor/github.com/ethereum/go-ethereum/metrics/sample.go deleted file mode 100644 index fa2bfb2..0000000 --- a/vendor/github.com/ethereum/go-ethereum/metrics/sample.go +++ /dev/null @@ -1,616 +0,0 @@ -package metrics - -import ( - "math" - "math/rand" - "sort" - "sync" - 
"time" -) - -const rescaleThreshold = time.Hour - -// Samples maintain a statistically-significant selection of values from -// a stream. -type Sample interface { - Clear() - Count() int64 - Max() int64 - Mean() float64 - Min() int64 - Percentile(float64) float64 - Percentiles([]float64) []float64 - Size() int - Snapshot() Sample - StdDev() float64 - Sum() int64 - Update(int64) - Values() []int64 - Variance() float64 -} - -// ExpDecaySample is an exponentially-decaying sample using a forward-decaying -// priority reservoir. See Cormode et al's "Forward Decay: A Practical Time -// Decay Model for Streaming Systems". -// -// -type ExpDecaySample struct { - alpha float64 - count int64 - mutex sync.Mutex - reservoirSize int - t0, t1 time.Time - values *expDecaySampleHeap -} - -// NewExpDecaySample constructs a new exponentially-decaying sample with the -// given reservoir size and alpha. -func NewExpDecaySample(reservoirSize int, alpha float64) Sample { - if !Enabled { - return NilSample{} - } - s := &ExpDecaySample{ - alpha: alpha, - reservoirSize: reservoirSize, - t0: time.Now(), - values: newExpDecaySampleHeap(reservoirSize), - } - s.t1 = s.t0.Add(rescaleThreshold) - return s -} - -// Clear clears all samples. -func (s *ExpDecaySample) Clear() { - s.mutex.Lock() - defer s.mutex.Unlock() - s.count = 0 - s.t0 = time.Now() - s.t1 = s.t0.Add(rescaleThreshold) - s.values.Clear() -} - -// Count returns the number of samples recorded, which may exceed the -// reservoir size. -func (s *ExpDecaySample) Count() int64 { - s.mutex.Lock() - defer s.mutex.Unlock() - return s.count -} - -// Max returns the maximum value in the sample, which may not be the maximum -// value ever to be part of the sample. -func (s *ExpDecaySample) Max() int64 { - return SampleMax(s.Values()) -} - -// Mean returns the mean of the values in the sample. 
-func (s *ExpDecaySample) Mean() float64 { - return SampleMean(s.Values()) -} - -// Min returns the minimum value in the sample, which may not be the minimum -// value ever to be part of the sample. -func (s *ExpDecaySample) Min() int64 { - return SampleMin(s.Values()) -} - -// Percentile returns an arbitrary percentile of values in the sample. -func (s *ExpDecaySample) Percentile(p float64) float64 { - return SamplePercentile(s.Values(), p) -} - -// Percentiles returns a slice of arbitrary percentiles of values in the -// sample. -func (s *ExpDecaySample) Percentiles(ps []float64) []float64 { - return SamplePercentiles(s.Values(), ps) -} - -// Size returns the size of the sample, which is at most the reservoir size. -func (s *ExpDecaySample) Size() int { - s.mutex.Lock() - defer s.mutex.Unlock() - return s.values.Size() -} - -// Snapshot returns a read-only copy of the sample. -func (s *ExpDecaySample) Snapshot() Sample { - s.mutex.Lock() - defer s.mutex.Unlock() - vals := s.values.Values() - values := make([]int64, len(vals)) - for i, v := range vals { - values[i] = v.v - } - return &SampleSnapshot{ - count: s.count, - values: values, - } -} - -// StdDev returns the standard deviation of the values in the sample. -func (s *ExpDecaySample) StdDev() float64 { - return SampleStdDev(s.Values()) -} - -// Sum returns the sum of the values in the sample. -func (s *ExpDecaySample) Sum() int64 { - return SampleSum(s.Values()) -} - -// Update samples a new value. -func (s *ExpDecaySample) Update(v int64) { - s.update(time.Now(), v) -} - -// Values returns a copy of the values in the sample. -func (s *ExpDecaySample) Values() []int64 { - s.mutex.Lock() - defer s.mutex.Unlock() - vals := s.values.Values() - values := make([]int64, len(vals)) - for i, v := range vals { - values[i] = v.v - } - return values -} - -// Variance returns the variance of the values in the sample. 
-func (s *ExpDecaySample) Variance() float64 { - return SampleVariance(s.Values()) -} - -// update samples a new value at a particular timestamp. This is a method all -// its own to facilitate testing. -func (s *ExpDecaySample) update(t time.Time, v int64) { - s.mutex.Lock() - defer s.mutex.Unlock() - s.count++ - if s.values.Size() == s.reservoirSize { - s.values.Pop() - } - s.values.Push(expDecaySample{ - k: math.Exp(t.Sub(s.t0).Seconds()*s.alpha) / rand.Float64(), - v: v, - }) - if t.After(s.t1) { - values := s.values.Values() - t0 := s.t0 - s.values.Clear() - s.t0 = t - s.t1 = s.t0.Add(rescaleThreshold) - for _, v := range values { - v.k = v.k * math.Exp(-s.alpha*s.t0.Sub(t0).Seconds()) - s.values.Push(v) - } - } -} - -// NilSample is a no-op Sample. -type NilSample struct{} - -// Clear is a no-op. -func (NilSample) Clear() {} - -// Count is a no-op. -func (NilSample) Count() int64 { return 0 } - -// Max is a no-op. -func (NilSample) Max() int64 { return 0 } - -// Mean is a no-op. -func (NilSample) Mean() float64 { return 0.0 } - -// Min is a no-op. -func (NilSample) Min() int64 { return 0 } - -// Percentile is a no-op. -func (NilSample) Percentile(p float64) float64 { return 0.0 } - -// Percentiles is a no-op. -func (NilSample) Percentiles(ps []float64) []float64 { - return make([]float64, len(ps)) -} - -// Size is a no-op. -func (NilSample) Size() int { return 0 } - -// Sample is a no-op. -func (NilSample) Snapshot() Sample { return NilSample{} } - -// StdDev is a no-op. -func (NilSample) StdDev() float64 { return 0.0 } - -// Sum is a no-op. -func (NilSample) Sum() int64 { return 0 } - -// Update is a no-op. -func (NilSample) Update(v int64) {} - -// Values is a no-op. -func (NilSample) Values() []int64 { return []int64{} } - -// Variance is a no-op. -func (NilSample) Variance() float64 { return 0.0 } - -// SampleMax returns the maximum value of the slice of int64. 
-func SampleMax(values []int64) int64 { - if len(values) == 0 { - return 0 - } - var max int64 = math.MinInt64 - for _, v := range values { - if max < v { - max = v - } - } - return max -} - -// SampleMean returns the mean value of the slice of int64. -func SampleMean(values []int64) float64 { - if len(values) == 0 { - return 0.0 - } - return float64(SampleSum(values)) / float64(len(values)) -} - -// SampleMin returns the minimum value of the slice of int64. -func SampleMin(values []int64) int64 { - if len(values) == 0 { - return 0 - } - var min int64 = math.MaxInt64 - for _, v := range values { - if min > v { - min = v - } - } - return min -} - -// SamplePercentiles returns an arbitrary percentile of the slice of int64. -func SamplePercentile(values int64Slice, p float64) float64 { - return SamplePercentiles(values, []float64{p})[0] -} - -// SamplePercentiles returns a slice of arbitrary percentiles of the slice of -// int64. -func SamplePercentiles(values int64Slice, ps []float64) []float64 { - scores := make([]float64, len(ps)) - size := len(values) - if size > 0 { - sort.Sort(values) - for i, p := range ps { - pos := p * float64(size+1) - if pos < 1.0 { - scores[i] = float64(values[0]) - } else if pos >= float64(size) { - scores[i] = float64(values[size-1]) - } else { - lower := float64(values[int(pos)-1]) - upper := float64(values[int(pos)]) - scores[i] = lower + (pos-math.Floor(pos))*(upper-lower) - } - } - } - return scores -} - -// SampleSnapshot is a read-only copy of another Sample. -type SampleSnapshot struct { - count int64 - values []int64 -} - -func NewSampleSnapshot(count int64, values []int64) *SampleSnapshot { - return &SampleSnapshot{ - count: count, - values: values, - } -} - -// Clear panics. -func (*SampleSnapshot) Clear() { - panic("Clear called on a SampleSnapshot") -} - -// Count returns the count of inputs at the time the snapshot was taken. 
-func (s *SampleSnapshot) Count() int64 { return s.count } - -// Max returns the maximal value at the time the snapshot was taken. -func (s *SampleSnapshot) Max() int64 { return SampleMax(s.values) } - -// Mean returns the mean value at the time the snapshot was taken. -func (s *SampleSnapshot) Mean() float64 { return SampleMean(s.values) } - -// Min returns the minimal value at the time the snapshot was taken. -func (s *SampleSnapshot) Min() int64 { return SampleMin(s.values) } - -// Percentile returns an arbitrary percentile of values at the time the -// snapshot was taken. -func (s *SampleSnapshot) Percentile(p float64) float64 { - return SamplePercentile(s.values, p) -} - -// Percentiles returns a slice of arbitrary percentiles of values at the time -// the snapshot was taken. -func (s *SampleSnapshot) Percentiles(ps []float64) []float64 { - return SamplePercentiles(s.values, ps) -} - -// Size returns the size of the sample at the time the snapshot was taken. -func (s *SampleSnapshot) Size() int { return len(s.values) } - -// Snapshot returns the snapshot. -func (s *SampleSnapshot) Snapshot() Sample { return s } - -// StdDev returns the standard deviation of values at the time the snapshot was -// taken. -func (s *SampleSnapshot) StdDev() float64 { return SampleStdDev(s.values) } - -// Sum returns the sum of values at the time the snapshot was taken. -func (s *SampleSnapshot) Sum() int64 { return SampleSum(s.values) } - -// Update panics. -func (*SampleSnapshot) Update(int64) { - panic("Update called on a SampleSnapshot") -} - -// Values returns a copy of the values in the sample. -func (s *SampleSnapshot) Values() []int64 { - values := make([]int64, len(s.values)) - copy(values, s.values) - return values -} - -// Variance returns the variance of values at the time the snapshot was taken. -func (s *SampleSnapshot) Variance() float64 { return SampleVariance(s.values) } - -// SampleStdDev returns the standard deviation of the slice of int64. 
-func SampleStdDev(values []int64) float64 { - return math.Sqrt(SampleVariance(values)) -} - -// SampleSum returns the sum of the slice of int64. -func SampleSum(values []int64) int64 { - var sum int64 - for _, v := range values { - sum += v - } - return sum -} - -// SampleVariance returns the variance of the slice of int64. -func SampleVariance(values []int64) float64 { - if len(values) == 0 { - return 0.0 - } - m := SampleMean(values) - var sum float64 - for _, v := range values { - d := float64(v) - m - sum += d * d - } - return sum / float64(len(values)) -} - -// A uniform sample using Vitter's Algorithm R. -// -// -type UniformSample struct { - count int64 - mutex sync.Mutex - reservoirSize int - values []int64 -} - -// NewUniformSample constructs a new uniform sample with the given reservoir -// size. -func NewUniformSample(reservoirSize int) Sample { - if !Enabled { - return NilSample{} - } - return &UniformSample{ - reservoirSize: reservoirSize, - values: make([]int64, 0, reservoirSize), - } -} - -// Clear clears all samples. -func (s *UniformSample) Clear() { - s.mutex.Lock() - defer s.mutex.Unlock() - s.count = 0 - s.values = make([]int64, 0, s.reservoirSize) -} - -// Count returns the number of samples recorded, which may exceed the -// reservoir size. -func (s *UniformSample) Count() int64 { - s.mutex.Lock() - defer s.mutex.Unlock() - return s.count -} - -// Max returns the maximum value in the sample, which may not be the maximum -// value ever to be part of the sample. -func (s *UniformSample) Max() int64 { - s.mutex.Lock() - defer s.mutex.Unlock() - return SampleMax(s.values) -} - -// Mean returns the mean of the values in the sample. -func (s *UniformSample) Mean() float64 { - s.mutex.Lock() - defer s.mutex.Unlock() - return SampleMean(s.values) -} - -// Min returns the minimum value in the sample, which may not be the minimum -// value ever to be part of the sample. 
-func (s *UniformSample) Min() int64 { - s.mutex.Lock() - defer s.mutex.Unlock() - return SampleMin(s.values) -} - -// Percentile returns an arbitrary percentile of values in the sample. -func (s *UniformSample) Percentile(p float64) float64 { - s.mutex.Lock() - defer s.mutex.Unlock() - return SamplePercentile(s.values, p) -} - -// Percentiles returns a slice of arbitrary percentiles of values in the -// sample. -func (s *UniformSample) Percentiles(ps []float64) []float64 { - s.mutex.Lock() - defer s.mutex.Unlock() - return SamplePercentiles(s.values, ps) -} - -// Size returns the size of the sample, which is at most the reservoir size. -func (s *UniformSample) Size() int { - s.mutex.Lock() - defer s.mutex.Unlock() - return len(s.values) -} - -// Snapshot returns a read-only copy of the sample. -func (s *UniformSample) Snapshot() Sample { - s.mutex.Lock() - defer s.mutex.Unlock() - values := make([]int64, len(s.values)) - copy(values, s.values) - return &SampleSnapshot{ - count: s.count, - values: values, - } -} - -// StdDev returns the standard deviation of the values in the sample. -func (s *UniformSample) StdDev() float64 { - s.mutex.Lock() - defer s.mutex.Unlock() - return SampleStdDev(s.values) -} - -// Sum returns the sum of the values in the sample. -func (s *UniformSample) Sum() int64 { - s.mutex.Lock() - defer s.mutex.Unlock() - return SampleSum(s.values) -} - -// Update samples a new value. -func (s *UniformSample) Update(v int64) { - s.mutex.Lock() - defer s.mutex.Unlock() - s.count++ - if len(s.values) < s.reservoirSize { - s.values = append(s.values, v) - } else { - r := rand.Int63n(s.count) - if r < int64(len(s.values)) { - s.values[int(r)] = v - } - } -} - -// Values returns a copy of the values in the sample. -func (s *UniformSample) Values() []int64 { - s.mutex.Lock() - defer s.mutex.Unlock() - values := make([]int64, len(s.values)) - copy(values, s.values) - return values -} - -// Variance returns the variance of the values in the sample. 
-func (s *UniformSample) Variance() float64 { - s.mutex.Lock() - defer s.mutex.Unlock() - return SampleVariance(s.values) -} - -// expDecaySample represents an individual sample in a heap. -type expDecaySample struct { - k float64 - v int64 -} - -func newExpDecaySampleHeap(reservoirSize int) *expDecaySampleHeap { - return &expDecaySampleHeap{make([]expDecaySample, 0, reservoirSize)} -} - -// expDecaySampleHeap is a min-heap of expDecaySamples. -// The internal implementation is copied from the standard library's container/heap -type expDecaySampleHeap struct { - s []expDecaySample -} - -func (h *expDecaySampleHeap) Clear() { - h.s = h.s[:0] -} - -func (h *expDecaySampleHeap) Push(s expDecaySample) { - n := len(h.s) - h.s = h.s[0 : n+1] - h.s[n] = s - h.up(n) -} - -func (h *expDecaySampleHeap) Pop() expDecaySample { - n := len(h.s) - 1 - h.s[0], h.s[n] = h.s[n], h.s[0] - h.down(0, n) - - n = len(h.s) - s := h.s[n-1] - h.s = h.s[0 : n-1] - return s -} - -func (h *expDecaySampleHeap) Size() int { - return len(h.s) -} - -func (h *expDecaySampleHeap) Values() []expDecaySample { - return h.s -} - -func (h *expDecaySampleHeap) up(j int) { - for { - i := (j - 1) / 2 // parent - if i == j || !(h.s[j].k < h.s[i].k) { - break - } - h.s[i], h.s[j] = h.s[j], h.s[i] - j = i - } -} - -func (h *expDecaySampleHeap) down(i, n int) { - for { - j1 := 2*i + 1 - if j1 >= n || j1 < 0 { // j1 < 0 after int overflow - break - } - j := j1 // left child - if j2 := j1 + 1; j2 < n && !(h.s[j1].k < h.s[j2].k) { - j = j2 // = 2*i + 2 // right child - } - if !(h.s[j].k < h.s[i].k) { - break - } - h.s[i], h.s[j] = h.s[j], h.s[i] - i = j - } -} - -type int64Slice []int64 - -func (p int64Slice) Len() int { return len(p) } -func (p int64Slice) Less(i, j int) bool { return p[i] < p[j] } -func (p int64Slice) Swap(i, j int) { p[i], p[j] = p[j], p[i] } diff --git a/vendor/github.com/ethereum/go-ethereum/metrics/syslog.go b/vendor/github.com/ethereum/go-ethereum/metrics/syslog.go deleted file mode 100644 
index a0ed4b1..0000000 --- a/vendor/github.com/ethereum/go-ethereum/metrics/syslog.go +++ /dev/null @@ -1,78 +0,0 @@ -// +build !windows - -package metrics - -import ( - "fmt" - "log/syslog" - "time" -) - -// Output each metric in the given registry to syslog periodically using -// the given syslogger. -func Syslog(r Registry, d time.Duration, w *syslog.Writer) { - for range time.Tick(d) { - r.Each(func(name string, i interface{}) { - switch metric := i.(type) { - case Counter: - w.Info(fmt.Sprintf("counter %s: count: %d", name, metric.Count())) - case Gauge: - w.Info(fmt.Sprintf("gauge %s: value: %d", name, metric.Value())) - case GaugeFloat64: - w.Info(fmt.Sprintf("gauge %s: value: %f", name, metric.Value())) - case Healthcheck: - metric.Check() - w.Info(fmt.Sprintf("healthcheck %s: error: %v", name, metric.Error())) - case Histogram: - h := metric.Snapshot() - ps := h.Percentiles([]float64{0.5, 0.75, 0.95, 0.99, 0.999}) - w.Info(fmt.Sprintf( - "histogram %s: count: %d min: %d max: %d mean: %.2f stddev: %.2f median: %.2f 75%%: %.2f 95%%: %.2f 99%%: %.2f 99.9%%: %.2f", - name, - h.Count(), - h.Min(), - h.Max(), - h.Mean(), - h.StdDev(), - ps[0], - ps[1], - ps[2], - ps[3], - ps[4], - )) - case Meter: - m := metric.Snapshot() - w.Info(fmt.Sprintf( - "meter %s: count: %d 1-min: %.2f 5-min: %.2f 15-min: %.2f mean: %.2f", - name, - m.Count(), - m.Rate1(), - m.Rate5(), - m.Rate15(), - m.RateMean(), - )) - case Timer: - t := metric.Snapshot() - ps := t.Percentiles([]float64{0.5, 0.75, 0.95, 0.99, 0.999}) - w.Info(fmt.Sprintf( - "timer %s: count: %d min: %d max: %d mean: %.2f stddev: %.2f median: %.2f 75%%: %.2f 95%%: %.2f 99%%: %.2f 99.9%%: %.2f 1-min: %.2f 5-min: %.2f 15-min: %.2f mean-rate: %.2f", - name, - t.Count(), - t.Min(), - t.Max(), - t.Mean(), - t.StdDev(), - ps[0], - ps[1], - ps[2], - ps[3], - ps[4], - t.Rate1(), - t.Rate5(), - t.Rate15(), - t.RateMean(), - )) - } - }) - } -} diff --git a/vendor/github.com/ethereum/go-ethereum/metrics/timer.go 
b/vendor/github.com/ethereum/go-ethereum/metrics/timer.go deleted file mode 100644 index a63c9df..0000000 --- a/vendor/github.com/ethereum/go-ethereum/metrics/timer.go +++ /dev/null @@ -1,326 +0,0 @@ -package metrics - -import ( - "sync" - "time" -) - -// Timers capture the duration and rate of events. -type Timer interface { - Count() int64 - Max() int64 - Mean() float64 - Min() int64 - Percentile(float64) float64 - Percentiles([]float64) []float64 - Rate1() float64 - Rate5() float64 - Rate15() float64 - RateMean() float64 - Snapshot() Timer - StdDev() float64 - Stop() - Sum() int64 - Time(func()) - Update(time.Duration) - UpdateSince(time.Time) - Variance() float64 -} - -// GetOrRegisterTimer returns an existing Timer or constructs and registers a -// new StandardTimer. -// Be sure to unregister the meter from the registry once it is of no use to -// allow for garbage collection. -func GetOrRegisterTimer(name string, r Registry) Timer { - if nil == r { - r = DefaultRegistry - } - return r.GetOrRegister(name, NewTimer).(Timer) -} - -// NewCustomTimer constructs a new StandardTimer from a Histogram and a Meter. -// Be sure to call Stop() once the timer is of no use to allow for garbage collection. -func NewCustomTimer(h Histogram, m Meter) Timer { - if !Enabled { - return NilTimer{} - } - return &StandardTimer{ - histogram: h, - meter: m, - } -} - -// NewRegisteredTimer constructs and registers a new StandardTimer. -// Be sure to unregister the meter from the registry once it is of no use to -// allow for garbage collection. -func NewRegisteredTimer(name string, r Registry) Timer { - c := NewTimer() - if nil == r { - r = DefaultRegistry - } - r.Register(name, c) - return c -} - -// NewTimer constructs a new StandardTimer using an exponentially-decaying -// sample with the same reservoir size and alpha as UNIX load averages. -// Be sure to call Stop() once the timer is of no use to allow for garbage collection. 
-func NewTimer() Timer { - if !Enabled { - return NilTimer{} - } - return &StandardTimer{ - histogram: NewHistogram(NewExpDecaySample(1028, 0.015)), - meter: NewMeter(), - } -} - -// NilTimer is a no-op Timer. -type NilTimer struct{} - -// Count is a no-op. -func (NilTimer) Count() int64 { return 0 } - -// Max is a no-op. -func (NilTimer) Max() int64 { return 0 } - -// Mean is a no-op. -func (NilTimer) Mean() float64 { return 0.0 } - -// Min is a no-op. -func (NilTimer) Min() int64 { return 0 } - -// Percentile is a no-op. -func (NilTimer) Percentile(p float64) float64 { return 0.0 } - -// Percentiles is a no-op. -func (NilTimer) Percentiles(ps []float64) []float64 { - return make([]float64, len(ps)) -} - -// Rate1 is a no-op. -func (NilTimer) Rate1() float64 { return 0.0 } - -// Rate5 is a no-op. -func (NilTimer) Rate5() float64 { return 0.0 } - -// Rate15 is a no-op. -func (NilTimer) Rate15() float64 { return 0.0 } - -// RateMean is a no-op. -func (NilTimer) RateMean() float64 { return 0.0 } - -// Snapshot is a no-op. -func (NilTimer) Snapshot() Timer { return NilTimer{} } - -// StdDev is a no-op. -func (NilTimer) StdDev() float64 { return 0.0 } - -// Stop is a no-op. -func (NilTimer) Stop() {} - -// Sum is a no-op. -func (NilTimer) Sum() int64 { return 0 } - -// Time is a no-op. -func (NilTimer) Time(func()) {} - -// Update is a no-op. -func (NilTimer) Update(time.Duration) {} - -// UpdateSince is a no-op. -func (NilTimer) UpdateSince(time.Time) {} - -// Variance is a no-op. -func (NilTimer) Variance() float64 { return 0.0 } - -// StandardTimer is the standard implementation of a Timer and uses a Histogram -// and Meter. -type StandardTimer struct { - histogram Histogram - meter Meter - mutex sync.Mutex -} - -// Count returns the number of events recorded. -func (t *StandardTimer) Count() int64 { - return t.histogram.Count() -} - -// Max returns the maximum value in the sample. 
-func (t *StandardTimer) Max() int64 { - return t.histogram.Max() -} - -// Mean returns the mean of the values in the sample. -func (t *StandardTimer) Mean() float64 { - return t.histogram.Mean() -} - -// Min returns the minimum value in the sample. -func (t *StandardTimer) Min() int64 { - return t.histogram.Min() -} - -// Percentile returns an arbitrary percentile of the values in the sample. -func (t *StandardTimer) Percentile(p float64) float64 { - return t.histogram.Percentile(p) -} - -// Percentiles returns a slice of arbitrary percentiles of the values in the -// sample. -func (t *StandardTimer) Percentiles(ps []float64) []float64 { - return t.histogram.Percentiles(ps) -} - -// Rate1 returns the one-minute moving average rate of events per second. -func (t *StandardTimer) Rate1() float64 { - return t.meter.Rate1() -} - -// Rate5 returns the five-minute moving average rate of events per second. -func (t *StandardTimer) Rate5() float64 { - return t.meter.Rate5() -} - -// Rate15 returns the fifteen-minute moving average rate of events per second. -func (t *StandardTimer) Rate15() float64 { - return t.meter.Rate15() -} - -// RateMean returns the meter's mean rate of events per second. -func (t *StandardTimer) RateMean() float64 { - return t.meter.RateMean() -} - -// Snapshot returns a read-only copy of the timer. -func (t *StandardTimer) Snapshot() Timer { - t.mutex.Lock() - defer t.mutex.Unlock() - return &TimerSnapshot{ - histogram: t.histogram.Snapshot().(*HistogramSnapshot), - meter: t.meter.Snapshot().(*MeterSnapshot), - } -} - -// StdDev returns the standard deviation of the values in the sample. -func (t *StandardTimer) StdDev() float64 { - return t.histogram.StdDev() -} - -// Stop stops the meter. -func (t *StandardTimer) Stop() { - t.meter.Stop() -} - -// Sum returns the sum in the sample. -func (t *StandardTimer) Sum() int64 { - return t.histogram.Sum() -} - -// Record the duration of the execution of the given function. 
-func (t *StandardTimer) Time(f func()) { - ts := time.Now() - f() - t.Update(time.Since(ts)) -} - -// Record the duration of an event. -func (t *StandardTimer) Update(d time.Duration) { - t.mutex.Lock() - defer t.mutex.Unlock() - t.histogram.Update(int64(d)) - t.meter.Mark(1) -} - -// Record the duration of an event that started at a time and ends now. -func (t *StandardTimer) UpdateSince(ts time.Time) { - t.mutex.Lock() - defer t.mutex.Unlock() - t.histogram.Update(int64(time.Since(ts))) - t.meter.Mark(1) -} - -// Variance returns the variance of the values in the sample. -func (t *StandardTimer) Variance() float64 { - return t.histogram.Variance() -} - -// TimerSnapshot is a read-only copy of another Timer. -type TimerSnapshot struct { - histogram *HistogramSnapshot - meter *MeterSnapshot -} - -// Count returns the number of events recorded at the time the snapshot was -// taken. -func (t *TimerSnapshot) Count() int64 { return t.histogram.Count() } - -// Max returns the maximum value at the time the snapshot was taken. -func (t *TimerSnapshot) Max() int64 { return t.histogram.Max() } - -// Mean returns the mean value at the time the snapshot was taken. -func (t *TimerSnapshot) Mean() float64 { return t.histogram.Mean() } - -// Min returns the minimum value at the time the snapshot was taken. -func (t *TimerSnapshot) Min() int64 { return t.histogram.Min() } - -// Percentile returns an arbitrary percentile of sampled values at the time the -// snapshot was taken. -func (t *TimerSnapshot) Percentile(p float64) float64 { - return t.histogram.Percentile(p) -} - -// Percentiles returns a slice of arbitrary percentiles of sampled values at -// the time the snapshot was taken. -func (t *TimerSnapshot) Percentiles(ps []float64) []float64 { - return t.histogram.Percentiles(ps) -} - -// Rate1 returns the one-minute moving average rate of events per second at the -// time the snapshot was taken. 
-func (t *TimerSnapshot) Rate1() float64 { return t.meter.Rate1() } - -// Rate5 returns the five-minute moving average rate of events per second at -// the time the snapshot was taken. -func (t *TimerSnapshot) Rate5() float64 { return t.meter.Rate5() } - -// Rate15 returns the fifteen-minute moving average rate of events per second -// at the time the snapshot was taken. -func (t *TimerSnapshot) Rate15() float64 { return t.meter.Rate15() } - -// RateMean returns the meter's mean rate of events per second at the time the -// snapshot was taken. -func (t *TimerSnapshot) RateMean() float64 { return t.meter.RateMean() } - -// Snapshot returns the snapshot. -func (t *TimerSnapshot) Snapshot() Timer { return t } - -// StdDev returns the standard deviation of the values at the time the snapshot -// was taken. -func (t *TimerSnapshot) StdDev() float64 { return t.histogram.StdDev() } - -// Stop is a no-op. -func (t *TimerSnapshot) Stop() {} - -// Sum returns the sum at the time the snapshot was taken. -func (t *TimerSnapshot) Sum() int64 { return t.histogram.Sum() } - -// Time panics. -func (*TimerSnapshot) Time(func()) { - panic("Time called on a TimerSnapshot") -} - -// Update panics. -func (*TimerSnapshot) Update(time.Duration) { - panic("Update called on a TimerSnapshot") -} - -// UpdateSince panics. -func (*TimerSnapshot) UpdateSince(time.Time) { - panic("UpdateSince called on a TimerSnapshot") -} - -// Variance returns the variance of the values at the time the snapshot was -// taken. -func (t *TimerSnapshot) Variance() float64 { return t.histogram.Variance() } diff --git a/vendor/github.com/ethereum/go-ethereum/metrics/validate.sh b/vendor/github.com/ethereum/go-ethereum/metrics/validate.sh deleted file mode 100644 index c4ae91e..0000000 --- a/vendor/github.com/ethereum/go-ethereum/metrics/validate.sh +++ /dev/null @@ -1,10 +0,0 @@ -#!/bin/bash - -set -e - -# check there are no formatting issues -GOFMT_LINES=`gofmt -l . 
| wc -l | xargs` -test $GOFMT_LINES -eq 0 || echo "gofmt needs to be run, ${GOFMT_LINES} files have issues" - -# run the tests for the root package -go test -race . diff --git a/vendor/github.com/ethereum/go-ethereum/metrics/writer.go b/vendor/github.com/ethereum/go-ethereum/metrics/writer.go deleted file mode 100644 index 88521a8..0000000 --- a/vendor/github.com/ethereum/go-ethereum/metrics/writer.go +++ /dev/null @@ -1,100 +0,0 @@ -package metrics - -import ( - "fmt" - "io" - "sort" - "time" -) - -// Write sorts writes each metric in the given registry periodically to the -// given io.Writer. -func Write(r Registry, d time.Duration, w io.Writer) { - for range time.Tick(d) { - WriteOnce(r, w) - } -} - -// WriteOnce sorts and writes metrics in the given registry to the given -// io.Writer. -func WriteOnce(r Registry, w io.Writer) { - var namedMetrics namedMetricSlice - r.Each(func(name string, i interface{}) { - namedMetrics = append(namedMetrics, namedMetric{name, i}) - }) - - sort.Sort(namedMetrics) - for _, namedMetric := range namedMetrics { - switch metric := namedMetric.m.(type) { - case Counter: - fmt.Fprintf(w, "counter %s\n", namedMetric.name) - fmt.Fprintf(w, " count: %9d\n", metric.Count()) - case Gauge: - fmt.Fprintf(w, "gauge %s\n", namedMetric.name) - fmt.Fprintf(w, " value: %9d\n", metric.Value()) - case GaugeFloat64: - fmt.Fprintf(w, "gauge %s\n", namedMetric.name) - fmt.Fprintf(w, " value: %f\n", metric.Value()) - case Healthcheck: - metric.Check() - fmt.Fprintf(w, "healthcheck %s\n", namedMetric.name) - fmt.Fprintf(w, " error: %v\n", metric.Error()) - case Histogram: - h := metric.Snapshot() - ps := h.Percentiles([]float64{0.5, 0.75, 0.95, 0.99, 0.999}) - fmt.Fprintf(w, "histogram %s\n", namedMetric.name) - fmt.Fprintf(w, " count: %9d\n", h.Count()) - fmt.Fprintf(w, " min: %9d\n", h.Min()) - fmt.Fprintf(w, " max: %9d\n", h.Max()) - fmt.Fprintf(w, " mean: %12.2f\n", h.Mean()) - fmt.Fprintf(w, " stddev: %12.2f\n", h.StdDev()) - fmt.Fprintf(w, " 
median: %12.2f\n", ps[0]) - fmt.Fprintf(w, " 75%%: %12.2f\n", ps[1]) - fmt.Fprintf(w, " 95%%: %12.2f\n", ps[2]) - fmt.Fprintf(w, " 99%%: %12.2f\n", ps[3]) - fmt.Fprintf(w, " 99.9%%: %12.2f\n", ps[4]) - case Meter: - m := metric.Snapshot() - fmt.Fprintf(w, "meter %s\n", namedMetric.name) - fmt.Fprintf(w, " count: %9d\n", m.Count()) - fmt.Fprintf(w, " 1-min rate: %12.2f\n", m.Rate1()) - fmt.Fprintf(w, " 5-min rate: %12.2f\n", m.Rate5()) - fmt.Fprintf(w, " 15-min rate: %12.2f\n", m.Rate15()) - fmt.Fprintf(w, " mean rate: %12.2f\n", m.RateMean()) - case Timer: - t := metric.Snapshot() - ps := t.Percentiles([]float64{0.5, 0.75, 0.95, 0.99, 0.999}) - fmt.Fprintf(w, "timer %s\n", namedMetric.name) - fmt.Fprintf(w, " count: %9d\n", t.Count()) - fmt.Fprintf(w, " min: %9d\n", t.Min()) - fmt.Fprintf(w, " max: %9d\n", t.Max()) - fmt.Fprintf(w, " mean: %12.2f\n", t.Mean()) - fmt.Fprintf(w, " stddev: %12.2f\n", t.StdDev()) - fmt.Fprintf(w, " median: %12.2f\n", ps[0]) - fmt.Fprintf(w, " 75%%: %12.2f\n", ps[1]) - fmt.Fprintf(w, " 95%%: %12.2f\n", ps[2]) - fmt.Fprintf(w, " 99%%: %12.2f\n", ps[3]) - fmt.Fprintf(w, " 99.9%%: %12.2f\n", ps[4]) - fmt.Fprintf(w, " 1-min rate: %12.2f\n", t.Rate1()) - fmt.Fprintf(w, " 5-min rate: %12.2f\n", t.Rate5()) - fmt.Fprintf(w, " 15-min rate: %12.2f\n", t.Rate15()) - fmt.Fprintf(w, " mean rate: %12.2f\n", t.RateMean()) - } - } -} - -type namedMetric struct { - name string - m interface{} -} - -// namedMetricSlice is a slice of namedMetrics that implements sort.Interface. 
-type namedMetricSlice []namedMetric - -func (nms namedMetricSlice) Len() int { return len(nms) } - -func (nms namedMetricSlice) Swap(i, j int) { nms[i], nms[j] = nms[j], nms[i] } - -func (nms namedMetricSlice) Less(i, j int) bool { - return nms[i].name < nms[j].name -} diff --git a/vendor/github.com/ethereum/go-ethereum/params/bootnodes.go b/vendor/github.com/ethereum/go-ethereum/params/bootnodes.go deleted file mode 100644 index f27e5b7..0000000 --- a/vendor/github.com/ethereum/go-ethereum/params/bootnodes.go +++ /dev/null @@ -1,84 +0,0 @@ -// Copyright 2015 The go-ethereum Authors -// This file is part of the go-ethereum library. -// -// The go-ethereum library is free software: you can redistribute it and/or modify -// it under the terms of the GNU Lesser General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// The go-ethereum library is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Lesser General Public License for more details. -// -// You should have received a copy of the GNU Lesser General Public License -// along with the go-ethereum library. If not, see . - -package params - -import "github.com/ethereum/go-ethereum/common" - -// MainnetBootnodes are the enode URLs of the P2P bootstrap nodes running on -// the main Ethereum network. 
-var MainnetBootnodes = []string{ - // Ethereum Foundation Go Bootnodes - "enode://d860a01f9722d78051619d1e2351aba3f43f943f6f00718d1b9baa4101932a1f5011f16bb2b1bb35db20d6fe28fa0bf09636d26a87d31de9ec6203eeedb1f666@18.138.108.67:30303", // bootnode-aws-ap-southeast-1-001 - "enode://22a8232c3abc76a16ae9d6c3b164f98775fe226f0917b0ca871128a74a8e9630b458460865bab457221f1d448dd9791d24c4e5d88786180ac185df813a68d4de@3.209.45.79:30303", // bootnode-aws-us-east-1-001 - "enode://ca6de62fce278f96aea6ec5a2daadb877e51651247cb96ee310a318def462913b653963c155a0ef6c7d50048bba6e6cea881130857413d9f50a621546b590758@34.255.23.113:30303", // bootnode-aws-eu-west-1-001 - "enode://279944d8dcd428dffaa7436f25ca0ca43ae19e7bcf94a8fb7d1641651f92d121e972ac2e8f381414b80cc8e5555811c2ec6e1a99bb009b3f53c4c69923e11bd8@35.158.244.151:30303", // bootnode-aws-eu-central-1-001 - "enode://8499da03c47d637b20eee24eec3c356c9a2e6148d6fe25ca195c7949ab8ec2c03e3556126b0d7ed644675e78c4318b08691b7b57de10e5f0d40d05b09238fa0a@52.187.207.27:30303", // bootnode-azure-australiaeast-001 - "enode://103858bdb88756c71f15e9b5e09b56dc1be52f0a5021d46301dbbfb7e130029cc9d0d6f73f693bc29b665770fff7da4d34f3c6379fe12721b5d7a0bcb5ca1fc1@191.234.162.198:30303", // bootnode-azure-brazilsouth-001 - "enode://715171f50508aba88aecd1250af392a45a330af91d7b90701c436b618c86aaa1589c9184561907bebbb56439b8f8787bc01f49a7c77276c58c1b09822d75e8e8@52.231.165.108:30303", // bootnode-azure-koreasouth-001 - "enode://5d6d7cd20d6da4bb83a1d28cadb5d409b64edf314c0335df658c1a54e32c7c4a7ab7823d57c39b6a757556e68ff1df17c748b698544a55cb488b52479a92b60f@104.42.217.25:30303", // bootnode-azure-westus-001 -} - -// TestnetBootnodes are the enode URLs of the P2P bootstrap nodes running on the -// Ropsten test network. 
-var TestnetBootnodes = []string{ - "enode://30b7ab30a01c124a6cceca36863ece12c4f5fa68e3ba9b0b51407ccc002eeed3b3102d20a88f1c1d3c3154e2449317b8ef95090e77b312d5cc39354f86d5d606@52.176.7.10:30303", // US-Azure geth - "enode://865a63255b3bb68023b6bffd5095118fcc13e79dcf014fe4e47e065c350c7cc72af2e53eff895f11ba1bbb6a2b33271c1116ee870f266618eadfc2e78aa7349c@52.176.100.77:30303", // US-Azure parity - "enode://6332792c4a00e3e4ee0926ed89e0d27ef985424d97b6a45bf0f23e51f0dcb5e66b875777506458aea7af6f9e4ffb69f43f3778ee73c81ed9d34c51c4b16b0b0f@52.232.243.152:30303", // Parity - "enode://94c15d1b9e2fe7ce56e458b9a3b672ef11894ddedd0c6f247e0f1d3487f52b66208fb4aeb8179fce6e3a749ea93ed147c37976d67af557508d199d9594c35f09@192.81.208.223:30303", // @gpip -} - -// RinkebyBootnodes are the enode URLs of the P2P bootstrap nodes running on the -// Rinkeby test network. -var RinkebyBootnodes = []string{ - "enode://a24ac7c5484ef4ed0c5eb2d36620ba4e4aa13b8c84684e1b4aab0cebea2ae45cb4d375b77eab56516d34bfbd3c1a833fc51296ff084b770b94fb9028c4d25ccf@52.169.42.101:30303", // IE - "enode://343149e4feefa15d882d9fe4ac7d88f885bd05ebb735e547f12e12080a9fa07c8014ca6fd7f373123488102fe5e34111f8509cf0b7de3f5b44339c9f25e87cb8@52.3.158.184:30303", // INFURA - "enode://b6b28890b006743680c52e64e0d16db57f28124885595fa03a562be1d2bf0f3a1da297d56b13da25fb992888fd556d4c1a27b1f39d531bde7de1921c90061cc6@159.89.28.211:30303", // AKASHA -} - -// GoerliBootnodes are the enode URLs of the P2P bootstrap nodes running on the -// Görli test network. 
-var GoerliBootnodes = []string{ - // Upstream bootnodes - "enode://011f758e6552d105183b1761c5e2dea0111bc20fd5f6422bc7f91e0fabbec9a6595caf6239b37feb773dddd3f87240d99d859431891e4a642cf2a0a9e6cbb98a@51.141.78.53:30303", - "enode://176b9417f511d05b6b2cf3e34b756cf0a7096b3094572a8f6ef4cdcb9d1f9d00683bf0f83347eebdf3b81c3521c2332086d9592802230bf528eaf606a1d9677b@13.93.54.137:30303", - "enode://46add44b9f13965f7b9875ac6b85f016f341012d84f975377573800a863526f4da19ae2c620ec73d11591fa9510e992ecc03ad0751f53cc02f7c7ed6d55c7291@94.237.54.114:30313", - "enode://c1f8b7c2ac4453271fa07d8e9ecf9a2e8285aa0bd0c07df0131f47153306b0736fd3db8924e7a9bf0bed6b1d8d4f87362a71b033dc7c64547728d953e43e59b2@52.64.155.147:30303", - "enode://f4a9c6ee28586009fb5a96c8af13a58ed6d8315a9eee4772212c1d4d9cebe5a8b8a78ea4434f318726317d04a3f531a1ef0420cf9752605a562cfe858c46e263@213.186.16.82:30303", - - // Ethereum Foundation bootnode - "enode://a61215641fb8714a373c80edbfa0ea8878243193f57c96eeb44d0bc019ef295abd4e044fd619bfc4c59731a73fb79afe84e9ab6da0c743ceb479cbb6d263fa91@3.11.147.67:30303", -} - -// DiscoveryV5Bootnodes are the enode URLs of the P2P bootstrap nodes for the -// experimental RLPx v5 topic-discovery network. 
-var DiscoveryV5Bootnodes = []string{ - "enode://06051a5573c81934c9554ef2898eb13b33a34b94cf36b202b69fde139ca17a85051979867720d4bdae4323d4943ddf9aeeb6643633aa656e0be843659795007a@35.177.226.168:30303", - "enode://0cc5f5ffb5d9098c8b8c62325f3797f56509bff942704687b6530992ac706e2cb946b90a34f1f19548cd3c7baccbcaea354531e5983c7d1bc0dee16ce4b6440b@40.118.3.223:30304", - "enode://1c7a64d76c0334b0418c004af2f67c50e36a3be60b5e4790bdac0439d21603469a85fad36f2473c9a80eb043ae60936df905fa28f1ff614c3e5dc34f15dcd2dc@40.118.3.223:30306", - "enode://85c85d7143ae8bb96924f2b54f1b3e70d8c4d367af305325d30a61385a432f247d2c75c45c6b4a60335060d072d7f5b35dd1d4c45f76941f62a4f83b6e75daaf@40.118.3.223:30307", -} - -const dnsPrefix = "enrtree://AKA3AM6LPBYEUDMVNU3BSVQJ5AD45Y7YPOHJLEF6W26QOE4VTUDPE@" - -// These DNS names provide bootstrap connectivity for public testnets and the mainnet. -// See https://github.com/ethereum/discv4-dns-lists for more information. -var KnownDNSNetworks = map[common.Hash]string{ - MainnetGenesisHash: dnsPrefix + "all.mainnet.ethdisco.net", - TestnetGenesisHash: dnsPrefix + "all.ropsten.ethdisco.net", - RinkebyGenesisHash: dnsPrefix + "all.rinkeby.ethdisco.net", - GoerliGenesisHash: dnsPrefix + "all.goerli.ethdisco.net", -} diff --git a/vendor/github.com/ethereum/go-ethereum/params/config.go b/vendor/github.com/ethereum/go-ethereum/params/config.go deleted file mode 100644 index bcbde40..0000000 --- a/vendor/github.com/ethereum/go-ethereum/params/config.go +++ /dev/null @@ -1,590 +0,0 @@ -// Copyright 2016 The go-ethereum Authors -// This file is part of the go-ethereum library. -// -// The go-ethereum library is free software: you can redistribute it and/or modify -// it under the terms of the GNU Lesser General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. 
-// -// The go-ethereum library is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Lesser General Public License for more details. -// -// You should have received a copy of the GNU Lesser General Public License -// along with the go-ethereum library. If not, see . - -package params - -import ( - "encoding/binary" - "fmt" - "math/big" - - "github.com/ethereum/go-ethereum/common" - "github.com/ethereum/go-ethereum/crypto" -) - -// Genesis hashes to enforce below configs on. -var ( - MainnetGenesisHash = common.HexToHash("0xd4e56740f876aef8c010b86a40d5f56745a118d0906a34e69aec8c0db1cb8fa3") - TestnetGenesisHash = common.HexToHash("0x41941023680923e0fe4d74a34bdac8141f2540e3ae90623718e47d66d1ca4a2d") - RinkebyGenesisHash = common.HexToHash("0x6341fd3daf94b748c72ced5a5b26028f2474f5f00d824504e4fa37a75767e177") - GoerliGenesisHash = common.HexToHash("0xbf7e331f7f7c1dd2e05159666b3bf8bc7a8a3a9eb1d518969eab529dd9b88c1a") -) - -// TrustedCheckpoints associates each known checkpoint with the genesis hash of -// the chain it belongs to. -var TrustedCheckpoints = map[common.Hash]*TrustedCheckpoint{ - MainnetGenesisHash: MainnetTrustedCheckpoint, - TestnetGenesisHash: TestnetTrustedCheckpoint, - RinkebyGenesisHash: RinkebyTrustedCheckpoint, - GoerliGenesisHash: GoerliTrustedCheckpoint, -} - -// CheckpointOracles associates each known checkpoint oracles with the genesis hash of -// the chain it belongs to. -var CheckpointOracles = map[common.Hash]*CheckpointOracleConfig{ - MainnetGenesisHash: MainnetCheckpointOracle, - TestnetGenesisHash: TestnetCheckpointOracle, - RinkebyGenesisHash: RinkebyCheckpointOracle, - GoerliGenesisHash: GoerliCheckpointOracle, -} - -var ( - // MainnetChainConfig is the chain parameters to run a node on the main network. 
- MainnetChainConfig = &ChainConfig{ - ChainID: big.NewInt(1), - HomesteadBlock: big.NewInt(1150000), - DAOForkBlock: big.NewInt(1920000), - DAOForkSupport: true, - EIP150Block: big.NewInt(2463000), - EIP150Hash: common.HexToHash("0x2086799aeebeae135c246c65021c82b4e15a2c451340993aacfd2751886514f0"), - EIP155Block: big.NewInt(2675000), - EIP158Block: big.NewInt(2675000), - ByzantiumBlock: big.NewInt(4370000), - ConstantinopleBlock: big.NewInt(7280000), - PetersburgBlock: big.NewInt(7280000), - IstanbulBlock: big.NewInt(9069000), - MuirGlacierBlock: big.NewInt(9200000), - Ethash: new(EthashConfig), - } - - // MainnetTrustedCheckpoint contains the light client trusted checkpoint for the main network. - MainnetTrustedCheckpoint = &TrustedCheckpoint{ - SectionIndex: 289, - SectionHead: common.HexToHash("0x5a95eed1a6e01d58b59f86c754cda88e8d6bede65428530eb0bec03267cda6a9"), - CHTRoot: common.HexToHash("0x6d4abf2b0f3c015952e6a3cbd5cc9885aacc29b8e55d4de662d29783c74a62bf"), - BloomRoot: common.HexToHash("0x1af2a8abbaca8048136b02f782cb6476ab546313186a1d1bd2b02df88ea48e7e"), - } - - // MainnetCheckpointOracle contains a set of configs for the main network oracle. - MainnetCheckpointOracle = &CheckpointOracleConfig{ - Address: common.HexToAddress("0x9a9070028361F7AAbeB3f2F2Dc07F82C4a98A02a"), - Signers: []common.Address{ - common.HexToAddress("0x1b2C260efc720BE89101890E4Db589b44E950527"), // Peter - common.HexToAddress("0x78d1aD571A1A09D60D9BBf25894b44e4C8859595"), // Martin - common.HexToAddress("0x286834935f4A8Cfb4FF4C77D5770C2775aE2b0E7"), // Zsolt - common.HexToAddress("0xb86e2B0Ab5A4B1373e40c51A7C712c70Ba2f9f8E"), // Gary - common.HexToAddress("0x0DF8fa387C602AE62559cC4aFa4972A7045d6707"), // Guillaume - }, - Threshold: 2, - } - - // TestnetChainConfig contains the chain parameters to run a node on the Ropsten test network. 
- TestnetChainConfig = &ChainConfig{ - ChainID: big.NewInt(3), - HomesteadBlock: big.NewInt(0), - DAOForkBlock: nil, - DAOForkSupport: true, - EIP150Block: big.NewInt(0), - EIP150Hash: common.HexToHash("0x41941023680923e0fe4d74a34bdac8141f2540e3ae90623718e47d66d1ca4a2d"), - EIP155Block: big.NewInt(10), - EIP158Block: big.NewInt(10), - ByzantiumBlock: big.NewInt(1700000), - ConstantinopleBlock: big.NewInt(4230000), - PetersburgBlock: big.NewInt(4939394), - IstanbulBlock: big.NewInt(6485846), - MuirGlacierBlock: big.NewInt(7117117), - Ethash: new(EthashConfig), - } - - // TestnetTrustedCheckpoint contains the light client trusted checkpoint for the Ropsten test network. - TestnetTrustedCheckpoint = &TrustedCheckpoint{ - SectionIndex: 223, - SectionHead: common.HexToHash("0x9aa51ca383f5075f816e0b8ce7125075cd562b918839ee286c03770722147661"), - CHTRoot: common.HexToHash("0x755c6a5931b7bd36e55e47f3f1e81fa79c930ae15c55682d3a85931eedaf8cf2"), - BloomRoot: common.HexToHash("0xabc37762d11b29dc7dde11b89846e2308ba681eeb015b6a202ef5e242bc107e8"), - } - - // TestnetCheckpointOracle contains a set of configs for the Ropsten test network oracle. - TestnetCheckpointOracle = &CheckpointOracleConfig{ - Address: common.HexToAddress("0xEF79475013f154E6A65b54cB2742867791bf0B84"), - Signers: []common.Address{ - common.HexToAddress("0x32162F3581E88a5f62e8A61892B42C46E2c18f7b"), // Peter - common.HexToAddress("0x78d1aD571A1A09D60D9BBf25894b44e4C8859595"), // Martin - common.HexToAddress("0x286834935f4A8Cfb4FF4C77D5770C2775aE2b0E7"), // Zsolt - common.HexToAddress("0xb86e2B0Ab5A4B1373e40c51A7C712c70Ba2f9f8E"), // Gary - common.HexToAddress("0x0DF8fa387C602AE62559cC4aFa4972A7045d6707"), // Guillaume - }, - Threshold: 2, - } - - // RinkebyChainConfig contains the chain parameters to run a node on the Rinkeby test network. 
- RinkebyChainConfig = &ChainConfig{ - ChainID: big.NewInt(4), - HomesteadBlock: big.NewInt(1), - DAOForkBlock: nil, - DAOForkSupport: true, - EIP150Block: big.NewInt(2), - EIP150Hash: common.HexToHash("0x9b095b36c15eaf13044373aef8ee0bd3a382a5abb92e402afa44b8249c3a90e9"), - EIP155Block: big.NewInt(3), - EIP158Block: big.NewInt(3), - ByzantiumBlock: big.NewInt(1035301), - ConstantinopleBlock: big.NewInt(3660663), - PetersburgBlock: big.NewInt(4321234), - IstanbulBlock: big.NewInt(5435345), - Clique: &CliqueConfig{ - Period: 15, - Epoch: 30000, - }, - } - - // RinkebyTrustedCheckpoint contains the light client trusted checkpoint for the Rinkeby test network. - RinkebyTrustedCheckpoint = &TrustedCheckpoint{ - SectionIndex: 181, - SectionHead: common.HexToHash("0xdda275f3e9ecadf4834a6a682db1ca3db6945fa4014c82dadcad032fc5c1aefa"), - CHTRoot: common.HexToHash("0x0fdfdbdb12e947e838fe26dd3ada4cc3092d6fa22aefec719b83f16004b5e596"), - BloomRoot: common.HexToHash("0xfd8dc404a438eaa5cf93dd58dbaeed648aa49d563b511892262acff77c5db7db"), - } - - // RinkebyCheckpointOracle contains a set of configs for the Rinkeby test network oracle. - RinkebyCheckpointOracle = &CheckpointOracleConfig{ - Address: common.HexToAddress("0xebe8eFA441B9302A0d7eaECc277c09d20D684540"), - Signers: []common.Address{ - common.HexToAddress("0xd9c9cd5f6779558b6e0ed4e6acf6b1947e7fa1f3"), // Peter - common.HexToAddress("0x78d1aD571A1A09D60D9BBf25894b44e4C8859595"), // Martin - common.HexToAddress("0x286834935f4A8Cfb4FF4C77D5770C2775aE2b0E7"), // Zsolt - common.HexToAddress("0xb86e2B0Ab5A4B1373e40c51A7C712c70Ba2f9f8E"), // Gary - }, - Threshold: 2, - } - - // GoerliChainConfig contains the chain parameters to run a node on the Görli test network. 
- GoerliChainConfig = &ChainConfig{ - ChainID: big.NewInt(5), - HomesteadBlock: big.NewInt(0), - DAOForkBlock: nil, - DAOForkSupport: true, - EIP150Block: big.NewInt(0), - EIP155Block: big.NewInt(0), - EIP158Block: big.NewInt(0), - ByzantiumBlock: big.NewInt(0), - ConstantinopleBlock: big.NewInt(0), - PetersburgBlock: big.NewInt(0), - IstanbulBlock: big.NewInt(1561651), - Clique: &CliqueConfig{ - Period: 15, - Epoch: 30000, - }, - } - - // GoerliTrustedCheckpoint contains the light client trusted checkpoint for the Görli test network. - GoerliTrustedCheckpoint = &TrustedCheckpoint{ - SectionIndex: 66, - SectionHead: common.HexToHash("0xeea3a7b2cb275956f3049dd27e6cdacd8a6ef86738d593d556efee5361019475"), - CHTRoot: common.HexToHash("0x11712af50b4083dc5910e452ca69fbfc0f2940770b9846200a573f87a0af94e6"), - BloomRoot: common.HexToHash("0x331b7a7b273e81daeac8cafb9952a16669d7facc7be3b0ebd3a792b4d8b95cc5"), - } - - // GoerliCheckpointOracle contains a set of configs for the Goerli test network oracle. - GoerliCheckpointOracle = &CheckpointOracleConfig{ - Address: common.HexToAddress("0x18CA0E045F0D772a851BC7e48357Bcaab0a0795D"), - Signers: []common.Address{ - common.HexToAddress("0x4769bcaD07e3b938B7f43EB7D278Bc7Cb9efFb38"), // Peter - common.HexToAddress("0x78d1aD571A1A09D60D9BBf25894b44e4C8859595"), // Martin - common.HexToAddress("0x286834935f4A8Cfb4FF4C77D5770C2775aE2b0E7"), // Zsolt - common.HexToAddress("0xb86e2B0Ab5A4B1373e40c51A7C712c70Ba2f9f8E"), // Gary - common.HexToAddress("0x0DF8fa387C602AE62559cC4aFa4972A7045d6707"), // Guillaume - }, - Threshold: 2, - } - - // AllEthashProtocolChanges contains every protocol change (EIPs) introduced - // and accepted by the Ethereum core developers into the Ethash consensus. - // - // This configuration is intentionally not using keyed fields to force anyone - // adding flags to the config to also have to set these fields. 
- AllEthashProtocolChanges = &ChainConfig{big.NewInt(1337), big.NewInt(0), nil, false, big.NewInt(0), common.Hash{}, big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), nil, nil, new(EthashConfig), nil} - - // AllCliqueProtocolChanges contains every protocol change (EIPs) introduced - // and accepted by the Ethereum core developers into the Clique consensus. - // - // This configuration is intentionally not using keyed fields to force anyone - // adding flags to the config to also have to set these fields. - AllCliqueProtocolChanges = &ChainConfig{big.NewInt(1337), big.NewInt(0), nil, false, big.NewInt(0), common.Hash{}, big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), nil, nil, nil, &CliqueConfig{Period: 0, Epoch: 30000}} - - TestChainConfig = &ChainConfig{big.NewInt(1), big.NewInt(0), nil, false, big.NewInt(0), common.Hash{}, big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), nil, nil, new(EthashConfig), nil} - TestRules = TestChainConfig.Rules(new(big.Int)) -) - -// TrustedCheckpoint represents a set of post-processed trie roots (CHT and -// BloomTrie) associated with the appropriate section index and head hash. It is -// used to start light syncing from this checkpoint and avoid downloading the -// entire header chain while still being able to securely access old headers/logs. -type TrustedCheckpoint struct { - SectionIndex uint64 `json:"sectionIndex"` - SectionHead common.Hash `json:"sectionHead"` - CHTRoot common.Hash `json:"chtRoot"` - BloomRoot common.Hash `json:"bloomRoot"` -} - -// HashEqual returns an indicator comparing the itself hash with given one. -func (c *TrustedCheckpoint) HashEqual(hash common.Hash) bool { - if c.Empty() { - return hash == common.Hash{} - } - return c.Hash() == hash -} - -// Hash returns the hash of checkpoint's four key fields(index, sectionHead, chtRoot and bloomTrieRoot). 
-func (c *TrustedCheckpoint) Hash() common.Hash { - buf := make([]byte, 8+3*common.HashLength) - binary.BigEndian.PutUint64(buf, c.SectionIndex) - copy(buf[8:], c.SectionHead.Bytes()) - copy(buf[8+common.HashLength:], c.CHTRoot.Bytes()) - copy(buf[8+2*common.HashLength:], c.BloomRoot.Bytes()) - return crypto.Keccak256Hash(buf) -} - -// Empty returns an indicator whether the checkpoint is regarded as empty. -func (c *TrustedCheckpoint) Empty() bool { - return c.SectionHead == (common.Hash{}) || c.CHTRoot == (common.Hash{}) || c.BloomRoot == (common.Hash{}) -} - -// CheckpointOracleConfig represents a set of checkpoint contract(which acts as an oracle) -// config which used for light client checkpoint syncing. -type CheckpointOracleConfig struct { - Address common.Address `json:"address"` - Signers []common.Address `json:"signers"` - Threshold uint64 `json:"threshold"` -} - -// ChainConfig is the core config which determines the blockchain settings. -// -// ChainConfig is stored in the database on a per block basis. This means -// that any network, identified by its genesis block, can have its own -// set of configuration options. 
-type ChainConfig struct { - ChainID *big.Int `json:"chainId"` // chainId identifies the current chain and is used for replay protection - - HomesteadBlock *big.Int `json:"homesteadBlock,omitempty"` // Homestead switch block (nil = no fork, 0 = already homestead) - - DAOForkBlock *big.Int `json:"daoForkBlock,omitempty"` // TheDAO hard-fork switch block (nil = no fork) - DAOForkSupport bool `json:"daoForkSupport,omitempty"` // Whether the nodes supports or opposes the DAO hard-fork - - // EIP150 implements the Gas price changes (https://github.com/ethereum/EIPs/issues/150) - EIP150Block *big.Int `json:"eip150Block,omitempty"` // EIP150 HF block (nil = no fork) - EIP150Hash common.Hash `json:"eip150Hash,omitempty"` // EIP150 HF hash (needed for header only clients as only gas pricing changed) - - EIP155Block *big.Int `json:"eip155Block,omitempty"` // EIP155 HF block - EIP158Block *big.Int `json:"eip158Block,omitempty"` // EIP158 HF block - - ByzantiumBlock *big.Int `json:"byzantiumBlock,omitempty"` // Byzantium switch block (nil = no fork, 0 = already on byzantium) - ConstantinopleBlock *big.Int `json:"constantinopleBlock,omitempty"` // Constantinople switch block (nil = no fork, 0 = already activated) - PetersburgBlock *big.Int `json:"petersburgBlock,omitempty"` // Petersburg switch block (nil = same as Constantinople) - IstanbulBlock *big.Int `json:"istanbulBlock,omitempty"` // Istanbul switch block (nil = no fork, 0 = already on istanbul) - MuirGlacierBlock *big.Int `json:"muirGlacierBlock,omitempty"` // Eip-2384 (bomb delay) switch block (nil = no fork, 0 = already activated) - EWASMBlock *big.Int `json:"ewasmBlock,omitempty"` // EWASM switch block (nil = no fork, 0 = already activated) - - // Various consensus engines - Ethash *EthashConfig `json:"ethash,omitempty"` - Clique *CliqueConfig `json:"clique,omitempty"` -} - -// EthashConfig is the consensus engine configs for proof-of-work based sealing. 
-type EthashConfig struct{} - -// String implements the stringer interface, returning the consensus engine details. -func (c *EthashConfig) String() string { - return "ethash" -} - -// CliqueConfig is the consensus engine configs for proof-of-authority based sealing. -type CliqueConfig struct { - Period uint64 `json:"period"` // Number of seconds between blocks to enforce - Epoch uint64 `json:"epoch"` // Epoch length to reset votes and checkpoint -} - -// String implements the stringer interface, returning the consensus engine details. -func (c *CliqueConfig) String() string { - return "clique" -} - -// String implements the fmt.Stringer interface. -func (c *ChainConfig) String() string { - var engine interface{} - switch { - case c.Ethash != nil: - engine = c.Ethash - case c.Clique != nil: - engine = c.Clique - default: - engine = "unknown" - } - return fmt.Sprintf("{ChainID: %v Homestead: %v DAO: %v DAOSupport: %v EIP150: %v EIP155: %v EIP158: %v Byzantium: %v Constantinople: %v Petersburg: %v Istanbul: %v, Muir Glacier: %v, Engine: %v}", - c.ChainID, - c.HomesteadBlock, - c.DAOForkBlock, - c.DAOForkSupport, - c.EIP150Block, - c.EIP155Block, - c.EIP158Block, - c.ByzantiumBlock, - c.ConstantinopleBlock, - c.PetersburgBlock, - c.IstanbulBlock, - c.MuirGlacierBlock, - engine, - ) -} - -// IsHomestead returns whether num is either equal to the homestead block or greater. -func (c *ChainConfig) IsHomestead(num *big.Int) bool { - return isForked(c.HomesteadBlock, num) -} - -// IsDAOFork returns whether num is either equal to the DAO fork block or greater. -func (c *ChainConfig) IsDAOFork(num *big.Int) bool { - return isForked(c.DAOForkBlock, num) -} - -// IsEIP150 returns whether num is either equal to the EIP150 fork block or greater. -func (c *ChainConfig) IsEIP150(num *big.Int) bool { - return isForked(c.EIP150Block, num) -} - -// IsEIP155 returns whether num is either equal to the EIP155 fork block or greater. 
-func (c *ChainConfig) IsEIP155(num *big.Int) bool { - return isForked(c.EIP155Block, num) -} - -// IsEIP158 returns whether num is either equal to the EIP158 fork block or greater. -func (c *ChainConfig) IsEIP158(num *big.Int) bool { - return isForked(c.EIP158Block, num) -} - -// IsByzantium returns whether num is either equal to the Byzantium fork block or greater. -func (c *ChainConfig) IsByzantium(num *big.Int) bool { - return isForked(c.ByzantiumBlock, num) -} - -// IsConstantinople returns whether num is either equal to the Constantinople fork block or greater. -func (c *ChainConfig) IsConstantinople(num *big.Int) bool { - return isForked(c.ConstantinopleBlock, num) -} - -// IsMuirGlacier returns whether num is either equal to the Muir Glacier (EIP-2384) fork block or greater. -func (c *ChainConfig) IsMuirGlacier(num *big.Int) bool { - return isForked(c.MuirGlacierBlock, num) -} - -// IsPetersburg returns whether num is either -// - equal to or greater than the PetersburgBlock fork block, -// - OR is nil, and Constantinople is active -func (c *ChainConfig) IsPetersburg(num *big.Int) bool { - return isForked(c.PetersburgBlock, num) || c.PetersburgBlock == nil && isForked(c.ConstantinopleBlock, num) -} - -// IsIstanbul returns whether num is either equal to the Istanbul fork block or greater. -func (c *ChainConfig) IsIstanbul(num *big.Int) bool { - return isForked(c.IstanbulBlock, num) -} - -// IsEWASM returns whether num represents a block number after the EWASM fork -func (c *ChainConfig) IsEWASM(num *big.Int) bool { - return isForked(c.EWASMBlock, num) -} - -// CheckCompatible checks whether scheduled fork transitions have been imported -// with a mismatching chain configuration. -func (c *ChainConfig) CheckCompatible(newcfg *ChainConfig, height uint64) *ConfigCompatError { - bhead := new(big.Int).SetUint64(height) - - // Iterate checkCompatible to find the lowest conflict. 
- var lasterr *ConfigCompatError - for { - err := c.checkCompatible(newcfg, bhead) - if err == nil || (lasterr != nil && err.RewindTo == lasterr.RewindTo) { - break - } - lasterr = err - bhead.SetUint64(err.RewindTo) - } - return lasterr -} - -// CheckConfigForkOrder checks that we don't "skip" any forks, geth isn't pluggable enough -// to guarantee that forks can be implemented in a different order than on official networks -func (c *ChainConfig) CheckConfigForkOrder() error { - type fork struct { - name string - block *big.Int - } - var lastFork fork - for _, cur := range []fork{ - {"homesteadBlock", c.HomesteadBlock}, - {"eip150Block", c.EIP150Block}, - {"eip155Block", c.EIP155Block}, - {"eip158Block", c.EIP158Block}, - {"byzantiumBlock", c.ByzantiumBlock}, - {"constantinopleBlock", c.ConstantinopleBlock}, - {"petersburgBlock", c.PetersburgBlock}, - {"istanbulBlock", c.IstanbulBlock}, - {"muirGlacierBlock", c.MuirGlacierBlock}, - } { - if lastFork.name != "" { - // Next one must be higher number - if lastFork.block == nil && cur.block != nil { - return fmt.Errorf("unsupported fork ordering: %v not enabled, but %v enabled at %v", - lastFork.name, cur.name, cur.block) - } - if lastFork.block != nil && cur.block != nil { - if lastFork.block.Cmp(cur.block) > 0 { - return fmt.Errorf("unsupported fork ordering: %v enabled at %v, but %v enabled at %v", - lastFork.name, lastFork.block, cur.name, cur.block) - } - } - } - lastFork = cur - } - return nil -} - -func (c *ChainConfig) checkCompatible(newcfg *ChainConfig, head *big.Int) *ConfigCompatError { - if isForkIncompatible(c.HomesteadBlock, newcfg.HomesteadBlock, head) { - return newCompatError("Homestead fork block", c.HomesteadBlock, newcfg.HomesteadBlock) - } - if isForkIncompatible(c.DAOForkBlock, newcfg.DAOForkBlock, head) { - return newCompatError("DAO fork block", c.DAOForkBlock, newcfg.DAOForkBlock) - } - if c.IsDAOFork(head) && c.DAOForkSupport != newcfg.DAOForkSupport { - return newCompatError("DAO fork 
support flag", c.DAOForkBlock, newcfg.DAOForkBlock) - } - if isForkIncompatible(c.EIP150Block, newcfg.EIP150Block, head) { - return newCompatError("EIP150 fork block", c.EIP150Block, newcfg.EIP150Block) - } - if isForkIncompatible(c.EIP155Block, newcfg.EIP155Block, head) { - return newCompatError("EIP155 fork block", c.EIP155Block, newcfg.EIP155Block) - } - if isForkIncompatible(c.EIP158Block, newcfg.EIP158Block, head) { - return newCompatError("EIP158 fork block", c.EIP158Block, newcfg.EIP158Block) - } - if c.IsEIP158(head) && !configNumEqual(c.ChainID, newcfg.ChainID) { - return newCompatError("EIP158 chain ID", c.EIP158Block, newcfg.EIP158Block) - } - if isForkIncompatible(c.ByzantiumBlock, newcfg.ByzantiumBlock, head) { - return newCompatError("Byzantium fork block", c.ByzantiumBlock, newcfg.ByzantiumBlock) - } - if isForkIncompatible(c.ConstantinopleBlock, newcfg.ConstantinopleBlock, head) { - return newCompatError("Constantinople fork block", c.ConstantinopleBlock, newcfg.ConstantinopleBlock) - } - if isForkIncompatible(c.PetersburgBlock, newcfg.PetersburgBlock, head) { - return newCompatError("Petersburg fork block", c.PetersburgBlock, newcfg.PetersburgBlock) - } - if isForkIncompatible(c.IstanbulBlock, newcfg.IstanbulBlock, head) { - return newCompatError("Istanbul fork block", c.IstanbulBlock, newcfg.IstanbulBlock) - } - if isForkIncompatible(c.MuirGlacierBlock, newcfg.MuirGlacierBlock, head) { - return newCompatError("Muir Glacier fork block", c.MuirGlacierBlock, newcfg.MuirGlacierBlock) - } - if isForkIncompatible(c.EWASMBlock, newcfg.EWASMBlock, head) { - return newCompatError("ewasm fork block", c.EWASMBlock, newcfg.EWASMBlock) - } - return nil -} - -// isForkIncompatible returns true if a fork scheduled at s1 cannot be rescheduled to -// block s2 because head is already past the fork. 
-func isForkIncompatible(s1, s2, head *big.Int) bool { - return (isForked(s1, head) || isForked(s2, head)) && !configNumEqual(s1, s2) -} - -// isForked returns whether a fork scheduled at block s is active at the given head block. -func isForked(s, head *big.Int) bool { - if s == nil || head == nil { - return false - } - return s.Cmp(head) <= 0 -} - -func configNumEqual(x, y *big.Int) bool { - if x == nil { - return y == nil - } - if y == nil { - return x == nil - } - return x.Cmp(y) == 0 -} - -// ConfigCompatError is raised if the locally-stored blockchain is initialised with a -// ChainConfig that would alter the past. -type ConfigCompatError struct { - What string - // block numbers of the stored and new configurations - StoredConfig, NewConfig *big.Int - // the block number to which the local chain must be rewound to correct the error - RewindTo uint64 -} - -func newCompatError(what string, storedblock, newblock *big.Int) *ConfigCompatError { - var rew *big.Int - switch { - case storedblock == nil: - rew = newblock - case newblock == nil || storedblock.Cmp(newblock) < 0: - rew = storedblock - default: - rew = newblock - } - err := &ConfigCompatError{what, storedblock, newblock, 0} - if rew != nil && rew.Sign() > 0 { - err.RewindTo = rew.Uint64() - 1 - } - return err -} - -func (err *ConfigCompatError) Error() string { - return fmt.Sprintf("mismatching %s in database (have %d, want %d, rewindto %d)", err.What, err.StoredConfig, err.NewConfig, err.RewindTo) -} - -// Rules wraps ChainConfig and is merely syntactic sugar or can be used for functions -// that do not have or require information about the block. -// -// Rules is a one time interface meaning that it shouldn't be used in between transition -// phases. -type Rules struct { - ChainID *big.Int - IsHomestead, IsEIP150, IsEIP155, IsEIP158 bool - IsByzantium, IsConstantinople, IsPetersburg, IsIstanbul bool -} - -// Rules ensures c's ChainID is not nil. 
-func (c *ChainConfig) Rules(num *big.Int) Rules { - chainID := c.ChainID - if chainID == nil { - chainID = new(big.Int) - } - return Rules{ - ChainID: new(big.Int).Set(chainID), - IsHomestead: c.IsHomestead(num), - IsEIP150: c.IsEIP150(num), - IsEIP155: c.IsEIP155(num), - IsEIP158: c.IsEIP158(num), - IsByzantium: c.IsByzantium(num), - IsConstantinople: c.IsConstantinople(num), - IsPetersburg: c.IsPetersburg(num), - IsIstanbul: c.IsIstanbul(num), - } -} diff --git a/vendor/github.com/ethereum/go-ethereum/params/dao.go b/vendor/github.com/ethereum/go-ethereum/params/dao.go deleted file mode 100644 index da3c8df..0000000 --- a/vendor/github.com/ethereum/go-ethereum/params/dao.go +++ /dev/null @@ -1,158 +0,0 @@ -// Copyright 2016 The go-ethereum Authors -// This file is part of the go-ethereum library. -// -// The go-ethereum library is free software: you can redistribute it and/or modify -// it under the terms of the GNU Lesser General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// The go-ethereum library is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Lesser General Public License for more details. -// -// You should have received a copy of the GNU Lesser General Public License -// along with the go-ethereum library. If not, see . - -package params - -import ( - "math/big" - - "github.com/ethereum/go-ethereum/common" -) - -// DAOForkBlockExtra is the block header extra-data field to set for the DAO fork -// point and a number of consecutive blocks to allow fast/light syncers to correctly -// pick the side they want ("dao-hard-fork"). 
-var DAOForkBlockExtra = common.FromHex("0x64616f2d686172642d666f726b") - -// DAOForkExtraRange is the number of consecutive blocks from the DAO fork point -// to override the extra-data in to prevent no-fork attacks. -var DAOForkExtraRange = big.NewInt(10) - -// DAORefundContract is the address of the refund contract to send DAO balances to. -var DAORefundContract = common.HexToAddress("0xbf4ed7b27f1d666546e30d74d50d173d20bca754") - -// DAODrainList is the list of accounts whose full balances will be moved into a -// refund contract at the beginning of the dao-fork block. -func DAODrainList() []common.Address { - return []common.Address{ - common.HexToAddress("0xd4fe7bc31cedb7bfb8a345f31e668033056b2728"), - common.HexToAddress("0xb3fb0e5aba0e20e5c49d252dfd30e102b171a425"), - common.HexToAddress("0x2c19c7f9ae8b751e37aeb2d93a699722395ae18f"), - common.HexToAddress("0xecd135fa4f61a655311e86238c92adcd779555d2"), - common.HexToAddress("0x1975bd06d486162d5dc297798dfc41edd5d160a7"), - common.HexToAddress("0xa3acf3a1e16b1d7c315e23510fdd7847b48234f6"), - common.HexToAddress("0x319f70bab6845585f412ec7724b744fec6095c85"), - common.HexToAddress("0x06706dd3f2c9abf0a21ddcc6941d9b86f0596936"), - common.HexToAddress("0x5c8536898fbb74fc7445814902fd08422eac56d0"), - common.HexToAddress("0x6966ab0d485353095148a2155858910e0965b6f9"), - common.HexToAddress("0x779543a0491a837ca36ce8c635d6154e3c4911a6"), - common.HexToAddress("0x2a5ed960395e2a49b1c758cef4aa15213cfd874c"), - common.HexToAddress("0x5c6e67ccd5849c0d29219c4f95f1a7a93b3f5dc5"), - common.HexToAddress("0x9c50426be05db97f5d64fc54bf89eff947f0a321"), - common.HexToAddress("0x200450f06520bdd6c527622a273333384d870efb"), - common.HexToAddress("0xbe8539bfe837b67d1282b2b1d61c3f723966f049"), - common.HexToAddress("0x6b0c4d41ba9ab8d8cfb5d379c69a612f2ced8ecb"), - common.HexToAddress("0xf1385fb24aad0cd7432824085e42aff90886fef5"), - common.HexToAddress("0xd1ac8b1ef1b69ff51d1d401a476e7e612414f091"), - 
common.HexToAddress("0x8163e7fb499e90f8544ea62bbf80d21cd26d9efd"), - common.HexToAddress("0x51e0ddd9998364a2eb38588679f0d2c42653e4a6"), - common.HexToAddress("0x627a0a960c079c21c34f7612d5d230e01b4ad4c7"), - common.HexToAddress("0xf0b1aa0eb660754448a7937c022e30aa692fe0c5"), - common.HexToAddress("0x24c4d950dfd4dd1902bbed3508144a54542bba94"), - common.HexToAddress("0x9f27daea7aca0aa0446220b98d028715e3bc803d"), - common.HexToAddress("0xa5dc5acd6a7968a4554d89d65e59b7fd3bff0f90"), - common.HexToAddress("0xd9aef3a1e38a39c16b31d1ace71bca8ef58d315b"), - common.HexToAddress("0x63ed5a272de2f6d968408b4acb9024f4cc208ebf"), - common.HexToAddress("0x6f6704e5a10332af6672e50b3d9754dc460dfa4d"), - common.HexToAddress("0x77ca7b50b6cd7e2f3fa008e24ab793fd56cb15f6"), - common.HexToAddress("0x492ea3bb0f3315521c31f273e565b868fc090f17"), - common.HexToAddress("0x0ff30d6de14a8224aa97b78aea5388d1c51c1f00"), - common.HexToAddress("0x9ea779f907f0b315b364b0cfc39a0fde5b02a416"), - common.HexToAddress("0xceaeb481747ca6c540a000c1f3641f8cef161fa7"), - common.HexToAddress("0xcc34673c6c40e791051898567a1222daf90be287"), - common.HexToAddress("0x579a80d909f346fbfb1189493f521d7f48d52238"), - common.HexToAddress("0xe308bd1ac5fda103967359b2712dd89deffb7973"), - common.HexToAddress("0x4cb31628079fb14e4bc3cd5e30c2f7489b00960c"), - common.HexToAddress("0xac1ecab32727358dba8962a0f3b261731aad9723"), - common.HexToAddress("0x4fd6ace747f06ece9c49699c7cabc62d02211f75"), - common.HexToAddress("0x440c59b325d2997a134c2c7c60a8c61611212bad"), - common.HexToAddress("0x4486a3d68fac6967006d7a517b889fd3f98c102b"), - common.HexToAddress("0x9c15b54878ba618f494b38f0ae7443db6af648ba"), - common.HexToAddress("0x27b137a85656544b1ccb5a0f2e561a5703c6a68f"), - common.HexToAddress("0x21c7fdb9ed8d291d79ffd82eb2c4356ec0d81241"), - common.HexToAddress("0x23b75c2f6791eef49c69684db4c6c1f93bf49a50"), - common.HexToAddress("0x1ca6abd14d30affe533b24d7a21bff4c2d5e1f3b"), - common.HexToAddress("0xb9637156d330c0d605a791f1c31ba5890582fe1c"), 
- common.HexToAddress("0x6131c42fa982e56929107413a9d526fd99405560"), - common.HexToAddress("0x1591fc0f688c81fbeb17f5426a162a7024d430c2"), - common.HexToAddress("0x542a9515200d14b68e934e9830d91645a980dd7a"), - common.HexToAddress("0xc4bbd073882dd2add2424cf47d35213405b01324"), - common.HexToAddress("0x782495b7b3355efb2833d56ecb34dc22ad7dfcc4"), - common.HexToAddress("0x58b95c9a9d5d26825e70a82b6adb139d3fd829eb"), - common.HexToAddress("0x3ba4d81db016dc2890c81f3acec2454bff5aada5"), - common.HexToAddress("0xb52042c8ca3f8aa246fa79c3feaa3d959347c0ab"), - common.HexToAddress("0xe4ae1efdfc53b73893af49113d8694a057b9c0d1"), - common.HexToAddress("0x3c02a7bc0391e86d91b7d144e61c2c01a25a79c5"), - common.HexToAddress("0x0737a6b837f97f46ebade41b9bc3e1c509c85c53"), - common.HexToAddress("0x97f43a37f595ab5dd318fb46e7a155eae057317a"), - common.HexToAddress("0x52c5317c848ba20c7504cb2c8052abd1fde29d03"), - common.HexToAddress("0x4863226780fe7c0356454236d3b1c8792785748d"), - common.HexToAddress("0x5d2b2e6fcbe3b11d26b525e085ff818dae332479"), - common.HexToAddress("0x5f9f3392e9f62f63b8eac0beb55541fc8627f42c"), - common.HexToAddress("0x057b56736d32b86616a10f619859c6cd6f59092a"), - common.HexToAddress("0x9aa008f65de0b923a2a4f02012ad034a5e2e2192"), - common.HexToAddress("0x304a554a310c7e546dfe434669c62820b7d83490"), - common.HexToAddress("0x914d1b8b43e92723e64fd0a06f5bdb8dd9b10c79"), - common.HexToAddress("0x4deb0033bb26bc534b197e61d19e0733e5679784"), - common.HexToAddress("0x07f5c1e1bc2c93e0402f23341973a0e043f7bf8a"), - common.HexToAddress("0x35a051a0010aba705c9008d7a7eff6fb88f6ea7b"), - common.HexToAddress("0x4fa802324e929786dbda3b8820dc7834e9134a2a"), - common.HexToAddress("0x9da397b9e80755301a3b32173283a91c0ef6c87e"), - common.HexToAddress("0x8d9edb3054ce5c5774a420ac37ebae0ac02343c6"), - common.HexToAddress("0x0101f3be8ebb4bbd39a2e3b9a3639d4259832fd9"), - common.HexToAddress("0x5dc28b15dffed94048d73806ce4b7a4612a1d48f"), - 
common.HexToAddress("0xbcf899e6c7d9d5a215ab1e3444c86806fa854c76"), - common.HexToAddress("0x12e626b0eebfe86a56d633b9864e389b45dcb260"), - common.HexToAddress("0xa2f1ccba9395d7fcb155bba8bc92db9bafaeade7"), - common.HexToAddress("0xec8e57756626fdc07c63ad2eafbd28d08e7b0ca5"), - common.HexToAddress("0xd164b088bd9108b60d0ca3751da4bceb207b0782"), - common.HexToAddress("0x6231b6d0d5e77fe001c2a460bd9584fee60d409b"), - common.HexToAddress("0x1cba23d343a983e9b5cfd19496b9a9701ada385f"), - common.HexToAddress("0xa82f360a8d3455c5c41366975bde739c37bfeb8a"), - common.HexToAddress("0x9fcd2deaff372a39cc679d5c5e4de7bafb0b1339"), - common.HexToAddress("0x005f5cee7a43331d5a3d3eec71305925a62f34b6"), - common.HexToAddress("0x0e0da70933f4c7849fc0d203f5d1d43b9ae4532d"), - common.HexToAddress("0xd131637d5275fd1a68a3200f4ad25c71a2a9522e"), - common.HexToAddress("0xbc07118b9ac290e4622f5e77a0853539789effbe"), - common.HexToAddress("0x47e7aa56d6bdf3f36be34619660de61275420af8"), - common.HexToAddress("0xacd87e28b0c9d1254e868b81cba4cc20d9a32225"), - common.HexToAddress("0xadf80daec7ba8dcf15392f1ac611fff65d94f880"), - common.HexToAddress("0x5524c55fb03cf21f549444ccbecb664d0acad706"), - common.HexToAddress("0x40b803a9abce16f50f36a77ba41180eb90023925"), - common.HexToAddress("0xfe24cdd8648121a43a7c86d289be4dd2951ed49f"), - common.HexToAddress("0x17802f43a0137c506ba92291391a8a8f207f487d"), - common.HexToAddress("0x253488078a4edf4d6f42f113d1e62836a942cf1a"), - common.HexToAddress("0x86af3e9626fce1957c82e88cbf04ddf3a2ed7915"), - common.HexToAddress("0xb136707642a4ea12fb4bae820f03d2562ebff487"), - common.HexToAddress("0xdbe9b615a3ae8709af8b93336ce9b477e4ac0940"), - common.HexToAddress("0xf14c14075d6c4ed84b86798af0956deef67365b5"), - common.HexToAddress("0xca544e5c4687d109611d0f8f928b53a25af72448"), - common.HexToAddress("0xaeeb8ff27288bdabc0fa5ebb731b6f409507516c"), - common.HexToAddress("0xcbb9d3703e651b0d496cdefb8b92c25aeb2171f7"), - common.HexToAddress("0x6d87578288b6cb5549d5076a207456a1f6a63dc0"), 
- common.HexToAddress("0xb2c6f0dfbb716ac562e2d85d6cb2f8d5ee87603e"), - common.HexToAddress("0xaccc230e8a6e5be9160b8cdf2864dd2a001c28b6"), - common.HexToAddress("0x2b3455ec7fedf16e646268bf88846bd7a2319bb2"), - common.HexToAddress("0x4613f3bca5c44ea06337a9e439fbc6d42e501d0a"), - common.HexToAddress("0xd343b217de44030afaa275f54d31a9317c7f441e"), - common.HexToAddress("0x84ef4b2357079cd7a7c69fd7a37cd0609a679106"), - common.HexToAddress("0xda2fef9e4a3230988ff17df2165440f37e8b1708"), - common.HexToAddress("0xf4c64518ea10f995918a454158c6b61407ea345c"), - common.HexToAddress("0x7602b46df5390e432ef1c307d4f2c9ff6d65cc97"), - common.HexToAddress("0xbb9bc244d798123fde783fcc1c72d3bb8c189413"), - common.HexToAddress("0x807640a13483f8ac783c557fcdf27be11ea4ac7a"), - } -} diff --git a/vendor/github.com/ethereum/go-ethereum/params/denomination.go b/vendor/github.com/ethereum/go-ethereum/params/denomination.go deleted file mode 100644 index fb4da7f..0000000 --- a/vendor/github.com/ethereum/go-ethereum/params/denomination.go +++ /dev/null @@ -1,28 +0,0 @@ -// Copyright 2017 The go-ethereum Authors -// This file is part of the go-ethereum library. -// -// The go-ethereum library is free software: you can redistribute it and/or modify -// it under the terms of the GNU Lesser General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// The go-ethereum library is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Lesser General Public License for more details. -// -// You should have received a copy of the GNU Lesser General Public License -// along with the go-ethereum library. If not, see . - -package params - -// These are the multipliers for ether denominations. 
-// Example: To get the wei value of an amount in 'gwei', use -// -// new(big.Int).Mul(value, big.NewInt(params.GWei)) -// -const ( - Wei = 1 - GWei = 1e9 - Ether = 1e18 -) diff --git a/vendor/github.com/ethereum/go-ethereum/params/network_params.go b/vendor/github.com/ethereum/go-ethereum/params/network_params.go deleted file mode 100644 index bba2472..0000000 --- a/vendor/github.com/ethereum/go-ethereum/params/network_params.go +++ /dev/null @@ -1,61 +0,0 @@ -// Copyright 2017 The go-ethereum Authors -// This file is part of the go-ethereum library. -// -// The go-ethereum library is free software: you can redistribute it and/or modify -// it under the terms of the GNU Lesser General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// The go-ethereum library is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Lesser General Public License for more details. -// -// You should have received a copy of the GNU Lesser General Public License -// along with the go-ethereum library. If not, see . - -package params - -// These are network parameters that need to be constant between clients, but -// aren't necessarily consensus related. - -const ( - // BloomBitsBlocks is the number of blocks a single bloom bit section vector - // contains on the server side. - BloomBitsBlocks uint64 = 4096 - - // BloomBitsBlocksClient is the number of blocks a single bloom bit section vector - // contains on the light client side - BloomBitsBlocksClient uint64 = 32768 - - // BloomConfirms is the number of confirmation blocks before a bloom section is - // considered probably final and its rotated bits are calculated. 
- BloomConfirms = 256 - - // CHTFrequency is the block frequency for creating CHTs - CHTFrequency = 32768 - - // BloomTrieFrequency is the block frequency for creating BloomTrie on both - // server/client sides. - BloomTrieFrequency = 32768 - - // HelperTrieConfirmations is the number of confirmations before a client is expected - // to have the given HelperTrie available. - HelperTrieConfirmations = 2048 - - // HelperTrieProcessConfirmations is the number of confirmations before a HelperTrie - // is generated - HelperTrieProcessConfirmations = 256 - - // CheckpointFrequency is the block frequency for creating checkpoint - CheckpointFrequency = 32768 - - // CheckpointProcessConfirmations is the number before a checkpoint is generated - CheckpointProcessConfirmations = 256 - - // ImmutabilityThreshold is the number of blocks after which a chain segment is - // considered immutable (i.e. soft finality). It is used by the downloader as a - // hard limit against deep ancestors, by the blockchain against deep reorgs, by - // the freezer as the cutoff treshold and by clique as the snapshot trust limit. - ImmutabilityThreshold = 90000 -) diff --git a/vendor/github.com/ethereum/go-ethereum/params/protocol_params.go b/vendor/github.com/ethereum/go-ethereum/params/protocol_params.go deleted file mode 100644 index ad2c5db..0000000 --- a/vendor/github.com/ethereum/go-ethereum/params/protocol_params.go +++ /dev/null @@ -1,140 +0,0 @@ -// Copyright 2015 The go-ethereum Authors -// This file is part of the go-ethereum library. -// -// The go-ethereum library is free software: you can redistribute it and/or modify -// it under the terms of the GNU Lesser General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. 
-// -// The go-ethereum library is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Lesser General Public License for more details. -// -// You should have received a copy of the GNU Lesser General Public License -// along with the go-ethereum library. If not, see . - -package params - -import "math/big" - -const ( - GasLimitBoundDivisor uint64 = 1024 // The bound divisor of the gas limit, used in update calculations. - MinGasLimit uint64 = 5000 // Minimum the gas limit may ever be. - GenesisGasLimit uint64 = 4712388 // Gas limit of the Genesis block. - - MaximumExtraDataSize uint64 = 32 // Maximum size extra data may be after Genesis. - ExpByteGas uint64 = 10 // Times ceil(log256(exponent)) for the EXP instruction. - SloadGas uint64 = 50 // Multiplied by the number of 32-byte words that are copied (round up) for any *COPY operation and added. - CallValueTransferGas uint64 = 9000 // Paid for CALL when the value transfer is non-zero. - CallNewAccountGas uint64 = 25000 // Paid for CALL when the destination address didn't exist prior. - TxGas uint64 = 21000 // Per transaction not creating a contract. NOTE: Not payable on data of calls between transactions. - TxGasContractCreation uint64 = 53000 // Per transaction that creates a contract. NOTE: Not payable on data of calls between transactions. - TxDataZeroGas uint64 = 4 // Per byte of data attached to a transaction that equals zero. NOTE: Not payable on data of calls between transactions. - QuadCoeffDiv uint64 = 512 // Divisor for the quadratic particle of the memory cost equation. - LogDataGas uint64 = 8 // Per byte in a LOG* operation's data. - CallStipend uint64 = 2300 // Free gas given at beginning of call. - - Sha3Gas uint64 = 30 // Once per SHA3 operation. - Sha3WordGas uint64 = 6 // Once per word of the SHA3 operation's data. 
- - SstoreSetGas uint64 = 20000 // Once per SLOAD operation. - SstoreResetGas uint64 = 5000 // Once per SSTORE operation if the zeroness changes from zero. - SstoreClearGas uint64 = 5000 // Once per SSTORE operation if the zeroness doesn't change. - SstoreRefundGas uint64 = 15000 // Once per SSTORE operation if the zeroness changes to zero. - - NetSstoreNoopGas uint64 = 200 // Once per SSTORE operation if the value doesn't change. - NetSstoreInitGas uint64 = 20000 // Once per SSTORE operation from clean zero. - NetSstoreCleanGas uint64 = 5000 // Once per SSTORE operation from clean non-zero. - NetSstoreDirtyGas uint64 = 200 // Once per SSTORE operation from dirty. - - NetSstoreClearRefund uint64 = 15000 // Once per SSTORE operation for clearing an originally existing storage slot - NetSstoreResetRefund uint64 = 4800 // Once per SSTORE operation for resetting to the original non-zero value - NetSstoreResetClearRefund uint64 = 19800 // Once per SSTORE operation for resetting to the original zero value - - SstoreSentryGasEIP2200 uint64 = 2300 // Minimum gas required to be present for an SSTORE call, not consumed - SstoreNoopGasEIP2200 uint64 = 800 // Once per SSTORE operation if the value doesn't change. - SstoreDirtyGasEIP2200 uint64 = 800 // Once per SSTORE operation if a dirty value is changed. - SstoreInitGasEIP2200 uint64 = 20000 // Once per SSTORE operation from clean zero to non-zero - SstoreInitRefundEIP2200 uint64 = 19200 // Once per SSTORE operation for resetting to the original zero value - SstoreCleanGasEIP2200 uint64 = 5000 // Once per SSTORE operation from clean non-zero to something else - SstoreCleanRefundEIP2200 uint64 = 4200 // Once per SSTORE operation for resetting to the original non-zero value - SstoreClearRefundEIP2200 uint64 = 15000 // Once per SSTORE operation for clearing an originally existing storage slot - - JumpdestGas uint64 = 1 // Once per JUMPDEST operation. - EpochDuration uint64 = 30000 // Duration between proof-of-work epochs. 
- - CreateDataGas uint64 = 200 // - CallCreateDepth uint64 = 1024 // Maximum depth of call/create stack. - ExpGas uint64 = 10 // Once per EXP instruction - LogGas uint64 = 375 // Per LOG* operation. - CopyGas uint64 = 3 // - StackLimit uint64 = 1024 // Maximum size of VM stack allowed. - TierStepGas uint64 = 0 // Once per operation, for a selection of them. - LogTopicGas uint64 = 375 // Multiplied by the * of the LOG*, per LOG transaction. e.g. LOG0 incurs 0 * c_txLogTopicGas, LOG4 incurs 4 * c_txLogTopicGas. - CreateGas uint64 = 32000 // Once per CREATE operation & contract-creation transaction. - Create2Gas uint64 = 32000 // Once per CREATE2 operation - SelfdestructRefundGas uint64 = 24000 // Refunded following a selfdestruct operation. - MemoryGas uint64 = 3 // Times the address of the (highest referenced byte in memory + 1). NOTE: referencing happens on read, write and in instructions such as RETURN and CALL. - TxDataNonZeroGasFrontier uint64 = 68 // Per byte of data attached to a transaction that is not equal to zero. NOTE: Not payable on data of calls between transactions. - TxDataNonZeroGasEIP2028 uint64 = 16 // Per byte of non zero data attached to a transaction after EIP 2028 (part in Istanbul) - - // These have been changed during the course of the chain - CallGasFrontier uint64 = 40 // Once per CALL operation & message call transaction. 
- CallGasEIP150 uint64 = 700 // Static portion of gas for CALL-derivates after EIP 150 (Tangerine) - BalanceGasFrontier uint64 = 20 // The cost of a BALANCE operation - BalanceGasEIP150 uint64 = 400 // The cost of a BALANCE operation after Tangerine - BalanceGasEIP1884 uint64 = 700 // The cost of a BALANCE operation after EIP 1884 (part of Istanbul) - ExtcodeSizeGasFrontier uint64 = 20 // Cost of EXTCODESIZE before EIP 150 (Tangerine) - ExtcodeSizeGasEIP150 uint64 = 700 // Cost of EXTCODESIZE after EIP 150 (Tangerine) - SloadGasFrontier uint64 = 50 - SloadGasEIP150 uint64 = 200 - SloadGasEIP1884 uint64 = 800 // Cost of SLOAD after EIP 1884 (part of Istanbul) - SloadGasEIP2200 uint64 = 800 // Cost of SLOAD after EIP 2200 (part of Istanbul) - ExtcodeHashGasConstantinople uint64 = 400 // Cost of EXTCODEHASH (introduced in Constantinople) - ExtcodeHashGasEIP1884 uint64 = 700 // Cost of EXTCODEHASH after EIP 1884 (part in Istanbul) - SelfdestructGasEIP150 uint64 = 5000 // Cost of SELFDESTRUCT post EIP 150 (Tangerine) - - // EXP has a dynamic portion depending on the size of the exponent - ExpByteFrontier uint64 = 10 // was set to 10 in Frontier - ExpByteEIP158 uint64 = 50 // was raised to 50 during Eip158 (Spurious Dragon) - - // Extcodecopy has a dynamic AND a static cost. This represents only the - // static portion of the gas. It was changed during EIP 150 (Tangerine) - ExtcodeCopyBaseFrontier uint64 = 20 - ExtcodeCopyBaseEIP150 uint64 = 700 - - // CreateBySelfdestructGas is used when the refunded account is one that does - // not exist. This logic is similar to call. 
- // Introduced in Tangerine Whistle (Eip 150) - CreateBySelfdestructGas uint64 = 25000 - - MaxCodeSize = 24576 // Maximum bytecode to permit for a contract - - // Precompiled contract gas prices - - EcrecoverGas uint64 = 3000 // Elliptic curve sender recovery gas price - Sha256BaseGas uint64 = 60 // Base price for a SHA256 operation - Sha256PerWordGas uint64 = 12 // Per-word price for a SHA256 operation - Ripemd160BaseGas uint64 = 600 // Base price for a RIPEMD160 operation - Ripemd160PerWordGas uint64 = 120 // Per-word price for a RIPEMD160 operation - IdentityBaseGas uint64 = 15 // Base price for a data copy operation - IdentityPerWordGas uint64 = 3 // Per-work price for a data copy operation - ModExpQuadCoeffDiv uint64 = 20 // Divisor for the quadratic particle of the big int modular exponentiation - - Bn256AddGasByzantium uint64 = 500 // Byzantium gas needed for an elliptic curve addition - Bn256AddGasIstanbul uint64 = 150 // Gas needed for an elliptic curve addition - Bn256ScalarMulGasByzantium uint64 = 40000 // Byzantium gas needed for an elliptic curve scalar multiplication - Bn256ScalarMulGasIstanbul uint64 = 6000 // Gas needed for an elliptic curve scalar multiplication - Bn256PairingBaseGasByzantium uint64 = 100000 // Byzantium base price for an elliptic curve pairing check - Bn256PairingBaseGasIstanbul uint64 = 45000 // Base price for an elliptic curve pairing check - Bn256PairingPerPointGasByzantium uint64 = 80000 // Byzantium per-point price for an elliptic curve pairing check - Bn256PairingPerPointGasIstanbul uint64 = 34000 // Per-point price for an elliptic curve pairing check -) - -var ( - DifficultyBoundDivisor = big.NewInt(2048) // The bound divisor of the difficulty, used in the update calculations. - GenesisDifficulty = big.NewInt(131072) // Difficulty of the Genesis block. - MinimumDifficulty = big.NewInt(131072) // The minimum that the difficulty may ever be. 
- DurationLimit = big.NewInt(13) // The decision boundary on the blocktime duration used to determine whether difficulty should go up or not. -) diff --git a/vendor/github.com/ethereum/go-ethereum/params/version.go b/vendor/github.com/ethereum/go-ethereum/params/version.go deleted file mode 100644 index 97df554..0000000 --- a/vendor/github.com/ethereum/go-ethereum/params/version.go +++ /dev/null @@ -1,67 +0,0 @@ -// Copyright 2016 The go-ethereum Authors -// This file is part of the go-ethereum library. -// -// The go-ethereum library is free software: you can redistribute it and/or modify -// it under the terms of the GNU Lesser General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// The go-ethereum library is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Lesser General Public License for more details. -// -// You should have received a copy of the GNU Lesser General Public License -// along with the go-ethereum library. If not, see . - -package params - -import ( - "fmt" -) - -const ( - VersionMajor = 1 // Major version component of the current release - VersionMinor = 9 // Minor version component of the current release - VersionPatch = 12 // Patch version component of the current release - VersionMeta = "stable" // Version metadata to append to the version string -) - -// Version holds the textual version string. -var Version = func() string { - return fmt.Sprintf("%d.%d.%d", VersionMajor, VersionMinor, VersionPatch) -}() - -// VersionWithMeta holds the textual version string including the metadata. -var VersionWithMeta = func() string { - v := Version - if VersionMeta != "" { - v += "-" + VersionMeta - } - return v -}() - -// ArchiveVersion holds the textual version string used for Geth archives. -// e.g. 
"1.8.11-dea1ce05" for stable releases, or -// "1.8.13-unstable-21c059b6" for unstable releases -func ArchiveVersion(gitCommit string) string { - vsn := Version - if VersionMeta != "stable" { - vsn += "-" + VersionMeta - } - if len(gitCommit) >= 8 { - vsn += "-" + gitCommit[:8] - } - return vsn -} - -func VersionWithCommit(gitCommit, gitDate string) string { - vsn := VersionWithMeta - if len(gitCommit) >= 8 { - vsn += "-" + gitCommit[:8] - } - if (VersionMeta != "stable") && (gitDate != "") { - vsn += "-" + gitDate - } - return vsn -} diff --git a/vendor/github.com/ethereum/go-ethereum/trie/committer.go b/vendor/github.com/ethereum/go-ethereum/trie/committer.go deleted file mode 100644 index eacefdf..0000000 --- a/vendor/github.com/ethereum/go-ethereum/trie/committer.go +++ /dev/null @@ -1,279 +0,0 @@ -// Copyright 2019 The go-ethereum Authors -// This file is part of the go-ethereum library. -// -// The go-ethereum library is free software: you can redistribute it and/or modify -// it under the terms of the GNU Lesser General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// The go-ethereum library is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Lesser General Public License for more details. -// -// You should have received a copy of the GNU Lesser General Public License -// along with the go-ethereum library. If not, see . - -package trie - -import ( - "errors" - "fmt" - "sync" - - "github.com/ethereum/go-ethereum/common" - "github.com/ethereum/go-ethereum/rlp" - "golang.org/x/crypto/sha3" -) - -// leafChanSize is the size of the leafCh. It's a pretty arbitrary number, to allow -// some paralellism but not incur too much memory overhead. 
-const leafChanSize = 200 - -// leaf represents a trie leaf value -type leaf struct { - size int // size of the rlp data (estimate) - hash common.Hash // hash of rlp data - node node // the node to commit - vnodes bool // set to true if the node (possibly) contains a valueNode -} - -// committer is a type used for the trie Commit operation. A committer has some -// internal preallocated temp space, and also a callback that is invoked when -// leaves are committed. The leafs are passed through the `leafCh`, to allow -// some level of paralellism. -// By 'some level' of parallelism, it's still the case that all leaves will be -// processed sequentially - onleaf will never be called in parallel or out of order. -type committer struct { - tmp sliceBuffer - sha keccakState - - onleaf LeafCallback - leafCh chan *leaf -} - -// committers live in a global sync.Pool -var committerPool = sync.Pool{ - New: func() interface{} { - return &committer{ - tmp: make(sliceBuffer, 0, 550), // cap is as large as a full fullNode. - sha: sha3.NewLegacyKeccak256().(keccakState), - } - }, -} - -// newCommitter creates a new committer or picks one from the pool. 
-func newCommitter() *committer { - return committerPool.Get().(*committer) -} - -func returnCommitterToPool(h *committer) { - h.onleaf = nil - h.leafCh = nil - committerPool.Put(h) -} - -// commitNeeded returns 'false' if the given node is already in sync with db -func (c *committer) commitNeeded(n node) bool { - hash, dirty := n.cache() - return hash == nil || dirty -} - -// commit collapses a node down into a hash node and inserts it into the database -func (c *committer) Commit(n node, db *Database) (hashNode, error) { - if db == nil { - return nil, errors.New("no db provided") - } - h, err := c.commit(n, db, true) - if err != nil { - return nil, err - } - return h.(hashNode), nil -} - -// commit collapses a node down into a hash node and inserts it into the database -func (c *committer) commit(n node, db *Database, force bool) (node, error) { - // if this path is clean, use available cached data - hash, dirty := n.cache() - if hash != nil && !dirty { - return hash, nil - } - // Commit children, then parent, and remove remove the dirty flag. 
- switch cn := n.(type) { - case *shortNode: - // Commit child - collapsed := cn.copy() - if _, ok := cn.Val.(valueNode); !ok { - if childV, err := c.commit(cn.Val, db, false); err != nil { - return nil, err - } else { - collapsed.Val = childV - } - } - // The key needs to be copied, since we're delivering it to database - collapsed.Key = hexToCompact(cn.Key) - hashedNode := c.store(collapsed, db, force, true) - if hn, ok := hashedNode.(hashNode); ok { - return hn, nil - } else { - return collapsed, nil - } - case *fullNode: - hashedKids, hasVnodes, err := c.commitChildren(cn, db, force) - if err != nil { - return nil, err - } - collapsed := cn.copy() - collapsed.Children = hashedKids - - hashedNode := c.store(collapsed, db, force, hasVnodes) - if hn, ok := hashedNode.(hashNode); ok { - return hn, nil - } else { - return collapsed, nil - } - case valueNode: - return c.store(cn, db, force, false), nil - // hashnodes aren't stored - case hashNode: - return cn, nil - } - return hash, nil -} - -// commitChildren commits the children of the given fullnode -func (c *committer) commitChildren(n *fullNode, db *Database, force bool) ([17]node, bool, error) { - var children [17]node - var hasValueNodeChildren = false - for i, child := range n.Children { - if child == nil { - continue - } - hnode, err := c.commit(child, db, false) - if err != nil { - return children, false, err - } - children[i] = hnode - if _, ok := hnode.(valueNode); ok { - hasValueNodeChildren = true - } - } - return children, hasValueNodeChildren, nil -} - -// store hashes the node n and if we have a storage layer specified, it writes -// the key/value pair to it and tracks any node->child references as well as any -// node->external trie references. -func (c *committer) store(n node, db *Database, force bool, hasVnodeChildren bool) node { - // Larger nodes are replaced by their hash and stored in the database. 
- var ( - hash, _ = n.cache() - size int - ) - if hash == nil { - if vn, ok := n.(valueNode); ok { - c.tmp.Reset() - if err := rlp.Encode(&c.tmp, vn); err != nil { - panic("encode error: " + err.Error()) - } - size = len(c.tmp) - if size < 32 && !force { - return n // Nodes smaller than 32 bytes are stored inside their parent - } - hash = c.makeHashNode(c.tmp) - } else { - // This was not generated - must be a small node stored in the parent - // No need to do anything here - return n - } - } else { - // We have the hash already, estimate the RLP encoding-size of the node. - // The size is used for mem tracking, does not need to be exact - size = estimateSize(n) - } - // If we're using channel-based leaf-reporting, send to channel. - // The leaf channel will be active only when there an active leaf-callback - if c.leafCh != nil { - c.leafCh <- &leaf{ - size: size, - hash: common.BytesToHash(hash), - node: n, - vnodes: hasVnodeChildren, - } - } else if db != nil { - // No leaf-callback used, but there's still a database. 
Do serial - // insertion - db.lock.Lock() - db.insert(common.BytesToHash(hash), size, n) - db.lock.Unlock() - } - return hash -} - -// commitLoop does the actual insert + leaf callback for nodes -func (c *committer) commitLoop(db *Database) { - for item := range c.leafCh { - var ( - hash = item.hash - size = item.size - n = item.node - hasVnodes = item.vnodes - ) - // We are pooling the trie nodes into an intermediate memory cache - db.lock.Lock() - db.insert(hash, size, n) - db.lock.Unlock() - if c.onleaf != nil && hasVnodes { - switch n := n.(type) { - case *shortNode: - if child, ok := n.Val.(valueNode); ok { - c.onleaf(child, hash) - } - case *fullNode: - for i := 0; i < 16; i++ { - if child, ok := n.Children[i].(valueNode); ok { - c.onleaf(child, hash) - } - } - } - } - } -} - -func (c *committer) makeHashNode(data []byte) hashNode { - n := make(hashNode, c.sha.Size()) - c.sha.Reset() - c.sha.Write(data) - c.sha.Read(n) - return n -} - -// estimateSize estimates the size of an rlp-encoded node, without actually -// rlp-encoding it (zero allocs). This method has been experimentally tried, and with a trie -// with 1000 leafs, the only errors above 1% are on small shortnodes, where this -// method overestimates by 2 or 3 bytes (e.g. 37 instead of 35) -func estimateSize(n node) int { - switch n := n.(type) { - case *shortNode: - // A short node contains a compacted key, and a value. 
- return 3 + len(n.Key) + estimateSize(n.Val) - case *fullNode: - // A full node contains up to 16 hashes (some nils), and a key - s := 3 - for i := 0; i < 16; i++ { - if child := n.Children[i]; child != nil { - s += estimateSize(child) - } else { - s += 1 - } - } - return s - case valueNode: - return 1 + len(n) - case hashNode: - return 1 + len(n) - default: - panic(fmt.Sprintf("node type %T", n)) - - } -} diff --git a/vendor/github.com/ethereum/go-ethereum/trie/database.go b/vendor/github.com/ethereum/go-ethereum/trie/database.go deleted file mode 100644 index 5b67393..0000000 --- a/vendor/github.com/ethereum/go-ethereum/trie/database.go +++ /dev/null @@ -1,851 +0,0 @@ -// Copyright 2018 The go-ethereum Authors -// This file is part of the go-ethereum library. -// -// The go-ethereum library is free software: you can redistribute it and/or modify -// it under the terms of the GNU Lesser General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// The go-ethereum library is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Lesser General Public License for more details. -// -// You should have received a copy of the GNU Lesser General Public License -// along with the go-ethereum library. If not, see . 
- -package trie - -import ( - "errors" - "fmt" - "io" - "reflect" - "sync" - "time" - - "github.com/VictoriaMetrics/fastcache" - "github.com/ethereum/go-ethereum/common" - "github.com/ethereum/go-ethereum/ethdb" - "github.com/ethereum/go-ethereum/log" - "github.com/ethereum/go-ethereum/metrics" - "github.com/ethereum/go-ethereum/rlp" -) - -var ( - memcacheCleanHitMeter = metrics.NewRegisteredMeter("trie/memcache/clean/hit", nil) - memcacheCleanMissMeter = metrics.NewRegisteredMeter("trie/memcache/clean/miss", nil) - memcacheCleanReadMeter = metrics.NewRegisteredMeter("trie/memcache/clean/read", nil) - memcacheCleanWriteMeter = metrics.NewRegisteredMeter("trie/memcache/clean/write", nil) - - memcacheDirtyHitMeter = metrics.NewRegisteredMeter("trie/memcache/dirty/hit", nil) - memcacheDirtyMissMeter = metrics.NewRegisteredMeter("trie/memcache/dirty/miss", nil) - memcacheDirtyReadMeter = metrics.NewRegisteredMeter("trie/memcache/dirty/read", nil) - memcacheDirtyWriteMeter = metrics.NewRegisteredMeter("trie/memcache/dirty/write", nil) - - memcacheFlushTimeTimer = metrics.NewRegisteredResettingTimer("trie/memcache/flush/time", nil) - memcacheFlushNodesMeter = metrics.NewRegisteredMeter("trie/memcache/flush/nodes", nil) - memcacheFlushSizeMeter = metrics.NewRegisteredMeter("trie/memcache/flush/size", nil) - - memcacheGCTimeTimer = metrics.NewRegisteredResettingTimer("trie/memcache/gc/time", nil) - memcacheGCNodesMeter = metrics.NewRegisteredMeter("trie/memcache/gc/nodes", nil) - memcacheGCSizeMeter = metrics.NewRegisteredMeter("trie/memcache/gc/size", nil) - - memcacheCommitTimeTimer = metrics.NewRegisteredResettingTimer("trie/memcache/commit/time", nil) - memcacheCommitNodesMeter = metrics.NewRegisteredMeter("trie/memcache/commit/nodes", nil) - memcacheCommitSizeMeter = metrics.NewRegisteredMeter("trie/memcache/commit/size", nil) -) - -// secureKeyPrefix is the database key prefix used to store trie node preimages. 
-var secureKeyPrefix = []byte("secure-key-") - -// secureKeyLength is the length of the above prefix + 32byte hash. -const secureKeyLength = 11 + 32 - -// Database is an intermediate write layer between the trie data structures and -// the disk database. The aim is to accumulate trie writes in-memory and only -// periodically flush a couple tries to disk, garbage collecting the remainder. -// -// Note, the trie Database is **not** thread safe in its mutations, but it **is** -// thread safe in providing individual, independent node access. The rationale -// behind this split design is to provide read access to RPC handlers and sync -// servers even while the trie is executing expensive garbage collection. -type Database struct { - diskdb ethdb.KeyValueStore // Persistent storage for matured trie nodes - - cleans *fastcache.Cache // GC friendly memory cache of clean node RLPs - dirties map[common.Hash]*cachedNode // Data and references relationships of dirty nodes - oldest common.Hash // Oldest tracked node, flush-list head - newest common.Hash // Newest tracked node, flush-list tail - - preimages map[common.Hash][]byte // Preimages of nodes from the secure trie - seckeybuf [secureKeyLength]byte // Ephemeral buffer for calculating preimage keys - - gctime time.Duration // Time spent on garbage collection since last commit - gcnodes uint64 // Nodes garbage collected since last commit - gcsize common.StorageSize // Data storage garbage collected since last commit - - flushtime time.Duration // Time spent on data flushing since last commit - flushnodes uint64 // Nodes flushed since last commit - flushsize common.StorageSize // Data storage flushed since last commit - - dirtiesSize common.StorageSize // Storage size of the dirty node cache (exc. 
metadata) - childrenSize common.StorageSize // Storage size of the external children tracking - preimagesSize common.StorageSize // Storage size of the preimages cache - - lock sync.RWMutex -} - -// rawNode is a simple binary blob used to differentiate between collapsed trie -// nodes and already encoded RLP binary blobs (while at the same time store them -// in the same cache fields). -type rawNode []byte - -func (n rawNode) cache() (hashNode, bool) { panic("this should never end up in a live trie") } -func (n rawNode) fstring(ind string) string { panic("this should never end up in a live trie") } - -// rawFullNode represents only the useful data content of a full node, with the -// caches and flags stripped out to minimize its data storage. This type honors -// the same RLP encoding as the original parent. -type rawFullNode [17]node - -func (n rawFullNode) cache() (hashNode, bool) { panic("this should never end up in a live trie") } -func (n rawFullNode) fstring(ind string) string { panic("this should never end up in a live trie") } - -func (n rawFullNode) EncodeRLP(w io.Writer) error { - var nodes [17]node - - for i, child := range n { - if child != nil { - nodes[i] = child - } else { - nodes[i] = nilValueNode - } - } - return rlp.Encode(w, nodes) -} - -// rawShortNode represents only the useful data content of a short node, with the -// caches and flags stripped out to minimize its data storage. This type honors -// the same RLP encoding as the original parent. -type rawShortNode struct { - Key []byte - Val node -} - -func (n rawShortNode) cache() (hashNode, bool) { panic("this should never end up in a live trie") } -func (n rawShortNode) fstring(ind string) string { panic("this should never end up in a live trie") } - -// cachedNode is all the information we know about a single cached node in the -// memory database write layer. 
-type cachedNode struct { - node node // Cached collapsed trie node, or raw rlp data - size uint16 // Byte size of the useful cached data - - parents uint32 // Number of live nodes referencing this one - children map[common.Hash]uint16 // External children referenced by this node - - flushPrev common.Hash // Previous node in the flush-list - flushNext common.Hash // Next node in the flush-list -} - -// cachedNodeSize is the raw size of a cachedNode data structure without any -// node data included. It's an approximate size, but should be a lot better -// than not counting them. -var cachedNodeSize = int(reflect.TypeOf(cachedNode{}).Size()) - -// cachedNodeChildrenSize is the raw size of an initialized but empty external -// reference map. -const cachedNodeChildrenSize = 48 - -// rlp returns the raw rlp encoded blob of the cached node, either directly from -// the cache, or by regenerating it from the collapsed node. -func (n *cachedNode) rlp() []byte { - if node, ok := n.node.(rawNode); ok { - return node - } - blob, err := rlp.EncodeToBytes(n.node) - if err != nil { - panic(err) - } - return blob -} - -// obj returns the decoded and expanded trie node, either directly from the cache, -// or by regenerating it from the rlp encoded blob. -func (n *cachedNode) obj(hash common.Hash) node { - if node, ok := n.node.(rawNode); ok { - return mustDecodeNode(hash[:], node) - } - return expandNode(hash[:], n.node) -} - -// forChilds invokes the callback for all the tracked children of this node, -// both the implicit ones from inside the node as well as the explicit ones -//from outside the node. -func (n *cachedNode) forChilds(onChild func(hash common.Hash)) { - for child := range n.children { - onChild(child) - } - if _, ok := n.node.(rawNode); !ok { - forGatherChildren(n.node, onChild) - } -} - -// forGatherChildren traverses the node hierarchy of a collapsed storage node and -// invokes the callback for all the hashnode children. 
-func forGatherChildren(n node, onChild func(hash common.Hash)) { - switch n := n.(type) { - case *rawShortNode: - forGatherChildren(n.Val, onChild) - case rawFullNode: - for i := 0; i < 16; i++ { - forGatherChildren(n[i], onChild) - } - case hashNode: - onChild(common.BytesToHash(n)) - case valueNode, nil: - default: - panic(fmt.Sprintf("unknown node type: %T", n)) - } -} - -// simplifyNode traverses the hierarchy of an expanded memory node and discards -// all the internal caches, returning a node that only contains the raw data. -func simplifyNode(n node) node { - switch n := n.(type) { - case *shortNode: - // Short nodes discard the flags and cascade - return &rawShortNode{Key: n.Key, Val: simplifyNode(n.Val)} - - case *fullNode: - // Full nodes discard the flags and cascade - node := rawFullNode(n.Children) - for i := 0; i < len(node); i++ { - if node[i] != nil { - node[i] = simplifyNode(node[i]) - } - } - return node - - case valueNode, hashNode, rawNode: - return n - - default: - panic(fmt.Sprintf("unknown node type: %T", n)) - } -} - -// expandNode traverses the node hierarchy of a collapsed storage node and converts -// all fields and keys into expanded memory form. -func expandNode(hash hashNode, n node) node { - switch n := n.(type) { - case *rawShortNode: - // Short nodes need key and child expansion - return &shortNode{ - Key: compactToHex(n.Key), - Val: expandNode(nil, n.Val), - flags: nodeFlag{ - hash: hash, - }, - } - - case rawFullNode: - // Full nodes need child expansion - node := &fullNode{ - flags: nodeFlag{ - hash: hash, - }, - } - for i := 0; i < len(node.Children); i++ { - if n[i] != nil { - node.Children[i] = expandNode(nil, n[i]) - } - } - return node - - case valueNode, hashNode: - return n - - default: - panic(fmt.Sprintf("unknown node type: %T", n)) - } -} - -// NewDatabase creates a new trie database to store ephemeral trie content before -// its written out to disk or garbage collected. 
No read cache is created, so all -// data retrievals will hit the underlying disk database. -func NewDatabase(diskdb ethdb.KeyValueStore) *Database { - return NewDatabaseWithCache(diskdb, 0) -} - -// NewDatabaseWithCache creates a new trie database to store ephemeral trie content -// before its written out to disk or garbage collected. It also acts as a read cache -// for nodes loaded from disk. -func NewDatabaseWithCache(diskdb ethdb.KeyValueStore, cache int) *Database { - var cleans *fastcache.Cache - if cache > 0 { - cleans = fastcache.New(cache * 1024 * 1024) - } - return &Database{ - diskdb: diskdb, - cleans: cleans, - dirties: map[common.Hash]*cachedNode{{}: { - children: make(map[common.Hash]uint16), - }}, - preimages: make(map[common.Hash][]byte), - } -} - -// DiskDB retrieves the persistent storage backing the trie database. -func (db *Database) DiskDB() ethdb.KeyValueReader { - return db.diskdb -} - -// InsertBlob writes a new reference tracked blob to the memory database if it's -// yet unknown. This method should only be used for non-trie nodes that require -// reference counting, since trie nodes are garbage collected directly through -// their embedded children. -func (db *Database) InsertBlob(hash common.Hash, blob []byte) { - db.lock.Lock() - defer db.lock.Unlock() - - db.insert(hash, len(blob), rawNode(blob)) -} - -// insert inserts a collapsed trie node into the memory database. This method is -// a more generic version of InsertBlob, supporting both raw blob insertions as -// well ex trie node insertions. The blob size must be specified to allow proper -// size tracking. 
-func (db *Database) insert(hash common.Hash, size int, node node) { - // If the node's already cached, skip - if _, ok := db.dirties[hash]; ok { - return - } - memcacheDirtyWriteMeter.Mark(int64(size)) - - // Create the cached entry for this node - entry := &cachedNode{ - node: simplifyNode(node), - size: uint16(size), - flushPrev: db.newest, - } - entry.forChilds(func(child common.Hash) { - if c := db.dirties[child]; c != nil { - c.parents++ - } - }) - db.dirties[hash] = entry - - // Update the flush-list endpoints - if db.oldest == (common.Hash{}) { - db.oldest, db.newest = hash, hash - } else { - db.dirties[db.newest].flushNext, db.newest = hash, hash - } - db.dirtiesSize += common.StorageSize(common.HashLength + entry.size) -} - -// insertPreimage writes a new trie node pre-image to the memory database if it's -// yet unknown. The method will make a copy of the slice. -// -// Note, this method assumes that the database's lock is held! -func (db *Database) insertPreimage(hash common.Hash, preimage []byte) { - if _, ok := db.preimages[hash]; ok { - return - } - db.preimages[hash] = common.CopyBytes(preimage) - db.preimagesSize += common.StorageSize(common.HashLength + len(preimage)) -} - -// node retrieves a cached trie node from memory, or returns nil if none can be -// found in the memory cache. 
-func (db *Database) node(hash common.Hash) node { - // Retrieve the node from the clean cache if available - if db.cleans != nil { - if enc := db.cleans.Get(nil, hash[:]); enc != nil { - memcacheCleanHitMeter.Mark(1) - memcacheCleanReadMeter.Mark(int64(len(enc))) - return mustDecodeNode(hash[:], enc) - } - } - // Retrieve the node from the dirty cache if available - db.lock.RLock() - dirty := db.dirties[hash] - db.lock.RUnlock() - - if dirty != nil { - memcacheDirtyHitMeter.Mark(1) - memcacheDirtyReadMeter.Mark(int64(dirty.size)) - return dirty.obj(hash) - } - memcacheDirtyMissMeter.Mark(1) - - // Content unavailable in memory, attempt to retrieve from disk - enc, err := db.diskdb.Get(hash[:]) - if err != nil || enc == nil { - return nil - } - if db.cleans != nil { - db.cleans.Set(hash[:], enc) - memcacheCleanMissMeter.Mark(1) - memcacheCleanWriteMeter.Mark(int64(len(enc))) - } - return mustDecodeNode(hash[:], enc) -} - -// Node retrieves an encoded cached trie node from memory. If it cannot be found -// cached, the method queries the persistent database for the content. 
-func (db *Database) Node(hash common.Hash) ([]byte, error) { - // It doens't make sense to retrieve the metaroot - if hash == (common.Hash{}) { - return nil, errors.New("not found") - } - // Retrieve the node from the clean cache if available - if db.cleans != nil { - if enc := db.cleans.Get(nil, hash[:]); enc != nil { - memcacheCleanHitMeter.Mark(1) - memcacheCleanReadMeter.Mark(int64(len(enc))) - return enc, nil - } - } - // Retrieve the node from the dirty cache if available - db.lock.RLock() - dirty := db.dirties[hash] - db.lock.RUnlock() - - if dirty != nil { - memcacheDirtyHitMeter.Mark(1) - memcacheDirtyReadMeter.Mark(int64(dirty.size)) - return dirty.rlp(), nil - } - memcacheDirtyMissMeter.Mark(1) - - // Content unavailable in memory, attempt to retrieve from disk - enc, err := db.diskdb.Get(hash[:]) - if err == nil && enc != nil { - if db.cleans != nil { - db.cleans.Set(hash[:], enc) - memcacheCleanMissMeter.Mark(1) - memcacheCleanWriteMeter.Mark(int64(len(enc))) - } - } - return enc, err -} - -// preimage retrieves a cached trie node pre-image from memory. If it cannot be -// found cached, the method queries the persistent database for the content. -func (db *Database) preimage(hash common.Hash) ([]byte, error) { - // Retrieve the node from cache if available - db.lock.RLock() - preimage := db.preimages[hash] - db.lock.RUnlock() - - if preimage != nil { - return preimage, nil - } - // Content unavailable in memory, attempt to retrieve from disk - return db.diskdb.Get(db.secureKey(hash[:])) -} - -// secureKey returns the database key for the preimage of key, as an ephemeral -// buffer. The caller must not hold onto the return value because it will become -// invalid on the next call. -func (db *Database) secureKey(key []byte) []byte { - buf := append(db.seckeybuf[:0], secureKeyPrefix...) - buf = append(buf, key...) - return buf -} - -// Nodes retrieves the hashes of all the nodes cached within the memory database. 
-// This method is extremely expensive and should only be used to validate internal -// states in test code. -func (db *Database) Nodes() []common.Hash { - db.lock.RLock() - defer db.lock.RUnlock() - - var hashes = make([]common.Hash, 0, len(db.dirties)) - for hash := range db.dirties { - if hash != (common.Hash{}) { // Special case for "root" references/nodes - hashes = append(hashes, hash) - } - } - return hashes -} - -// Reference adds a new reference from a parent node to a child node. -func (db *Database) Reference(child common.Hash, parent common.Hash) { - db.lock.Lock() - defer db.lock.Unlock() - - db.reference(child, parent) -} - -// reference is the private locked version of Reference. -func (db *Database) reference(child common.Hash, parent common.Hash) { - // If the node does not exist, it's a node pulled from disk, skip - node, ok := db.dirties[child] - if !ok { - return - } - // If the reference already exists, only duplicate for roots - if db.dirties[parent].children == nil { - db.dirties[parent].children = make(map[common.Hash]uint16) - db.childrenSize += cachedNodeChildrenSize - } else if _, ok = db.dirties[parent].children[child]; ok && parent != (common.Hash{}) { - return - } - node.parents++ - db.dirties[parent].children[child]++ - if db.dirties[parent].children[child] == 1 { - db.childrenSize += common.HashLength + 2 // uint16 counter - } -} - -// Dereference removes an existing reference from a root node. 
-func (db *Database) Dereference(root common.Hash) { - // Sanity check to ensure that the meta-root is not removed - if root == (common.Hash{}) { - log.Error("Attempted to dereference the trie cache meta root") - return - } - db.lock.Lock() - defer db.lock.Unlock() - - nodes, storage, start := len(db.dirties), db.dirtiesSize, time.Now() - db.dereference(root, common.Hash{}) - - db.gcnodes += uint64(nodes - len(db.dirties)) - db.gcsize += storage - db.dirtiesSize - db.gctime += time.Since(start) - - memcacheGCTimeTimer.Update(time.Since(start)) - memcacheGCSizeMeter.Mark(int64(storage - db.dirtiesSize)) - memcacheGCNodesMeter.Mark(int64(nodes - len(db.dirties))) - - log.Debug("Dereferenced trie from memory database", "nodes", nodes-len(db.dirties), "size", storage-db.dirtiesSize, "time", time.Since(start), - "gcnodes", db.gcnodes, "gcsize", db.gcsize, "gctime", db.gctime, "livenodes", len(db.dirties), "livesize", db.dirtiesSize) -} - -// dereference is the private locked version of Dereference. -func (db *Database) dereference(child common.Hash, parent common.Hash) { - // Dereference the parent-child - node := db.dirties[parent] - - if node.children != nil && node.children[child] > 0 { - node.children[child]-- - if node.children[child] == 0 { - delete(node.children, child) - db.childrenSize -= (common.HashLength + 2) // uint16 counter - } - } - // If the child does not exist, it's a previously committed node. - node, ok := db.dirties[child] - if !ok { - return - } - // If there are no more references to the child, delete it and cascade - if node.parents > 0 { - // This is a special cornercase where a node loaded from disk (i.e. not in the - // memcache any more) gets reinjected as a new node (short node split into full, - // then reverted into short), causing a cached node to have no parents. That is - // no problem in itself, but don't make maxint parents out of it. 
- node.parents-- - } - if node.parents == 0 { - // Remove the node from the flush-list - switch child { - case db.oldest: - db.oldest = node.flushNext - db.dirties[node.flushNext].flushPrev = common.Hash{} - case db.newest: - db.newest = node.flushPrev - db.dirties[node.flushPrev].flushNext = common.Hash{} - default: - db.dirties[node.flushPrev].flushNext = node.flushNext - db.dirties[node.flushNext].flushPrev = node.flushPrev - } - // Dereference all children and delete the node - node.forChilds(func(hash common.Hash) { - db.dereference(hash, child) - }) - delete(db.dirties, child) - db.dirtiesSize -= common.StorageSize(common.HashLength + int(node.size)) - if node.children != nil { - db.childrenSize -= cachedNodeChildrenSize - } - } -} - -// Cap iteratively flushes old but still referenced trie nodes until the total -// memory usage goes below the given threshold. -// -// Note, this method is a non-synchronized mutator. It is unsafe to call this -// concurrently with other mutators. -func (db *Database) Cap(limit common.StorageSize) error { - // Create a database batch to flush persistent data out. It is important that - // outside code doesn't see an inconsistent state (referenced data removed from - // memory cache during commit but not yet in persistent storage). This is ensured - // by only uncaching existing data when the database write finalizes. - nodes, storage, start := len(db.dirties), db.dirtiesSize, time.Now() - batch := db.diskdb.NewBatch() - - // db.dirtiesSize only contains the useful data in the cache, but when reporting - // the total memory consumption, the maintenance metadata is also needed to be - // counted. - size := db.dirtiesSize + common.StorageSize((len(db.dirties)-1)*cachedNodeSize) - size += db.childrenSize - common.StorageSize(len(db.dirties[common.Hash{}].children)*(common.HashLength+2)) - - // If the preimage cache got large enough, push to disk. If it's still small - // leave for later to deduplicate writes. 
- flushPreimages := db.preimagesSize > 4*1024*1024 - if flushPreimages { - for hash, preimage := range db.preimages { - if err := batch.Put(db.secureKey(hash[:]), preimage); err != nil { - log.Error("Failed to commit preimage from trie database", "err", err) - return err - } - if batch.ValueSize() > ethdb.IdealBatchSize { - if err := batch.Write(); err != nil { - return err - } - batch.Reset() - } - } - } - // Keep committing nodes from the flush-list until we're below allowance - oldest := db.oldest - for size > limit && oldest != (common.Hash{}) { - // Fetch the oldest referenced node and push into the batch - node := db.dirties[oldest] - if err := batch.Put(oldest[:], node.rlp()); err != nil { - return err - } - // If we exceeded the ideal batch size, commit and reset - if batch.ValueSize() >= ethdb.IdealBatchSize { - if err := batch.Write(); err != nil { - log.Error("Failed to write flush list to disk", "err", err) - return err - } - batch.Reset() - } - // Iterate to the next flush item, or abort if the size cap was achieved. Size - // is the total size, including the useful cached data (hash -> blob), the - // cache item metadata, as well as external children mappings. 
- size -= common.StorageSize(common.HashLength + int(node.size) + cachedNodeSize) - if node.children != nil { - size -= common.StorageSize(cachedNodeChildrenSize + len(node.children)*(common.HashLength+2)) - } - oldest = node.flushNext - } - // Flush out any remainder data from the last batch - if err := batch.Write(); err != nil { - log.Error("Failed to write flush list to disk", "err", err) - return err - } - // Write successful, clear out the flushed data - db.lock.Lock() - defer db.lock.Unlock() - - if flushPreimages { - db.preimages = make(map[common.Hash][]byte) - db.preimagesSize = 0 - } - for db.oldest != oldest { - node := db.dirties[db.oldest] - delete(db.dirties, db.oldest) - db.oldest = node.flushNext - - db.dirtiesSize -= common.StorageSize(common.HashLength + int(node.size)) - if node.children != nil { - db.childrenSize -= common.StorageSize(cachedNodeChildrenSize + len(node.children)*(common.HashLength+2)) - } - } - if db.oldest != (common.Hash{}) { - db.dirties[db.oldest].flushPrev = common.Hash{} - } - db.flushnodes += uint64(nodes - len(db.dirties)) - db.flushsize += storage - db.dirtiesSize - db.flushtime += time.Since(start) - - memcacheFlushTimeTimer.Update(time.Since(start)) - memcacheFlushSizeMeter.Mark(int64(storage - db.dirtiesSize)) - memcacheFlushNodesMeter.Mark(int64(nodes - len(db.dirties))) - - log.Debug("Persisted nodes from memory database", "nodes", nodes-len(db.dirties), "size", storage-db.dirtiesSize, "time", time.Since(start), - "flushnodes", db.flushnodes, "flushsize", db.flushsize, "flushtime", db.flushtime, "livenodes", len(db.dirties), "livesize", db.dirtiesSize) - - return nil -} - -// Commit iterates over all the children of a particular node, writes them out -// to disk, forcefully tearing down all references in both directions. As a side -// effect, all pre-images accumulated up to this point are also written. -// -// Note, this method is a non-synchronized mutator. 
It is unsafe to call this -// concurrently with other mutators. -func (db *Database) Commit(node common.Hash, report bool) error { - // Create a database batch to flush persistent data out. It is important that - // outside code doesn't see an inconsistent state (referenced data removed from - // memory cache during commit but not yet in persistent storage). This is ensured - // by only uncaching existing data when the database write finalizes. - start := time.Now() - batch := db.diskdb.NewBatch() - - // Move all of the accumulated preimages into a write batch - for hash, preimage := range db.preimages { - if err := batch.Put(db.secureKey(hash[:]), preimage); err != nil { - log.Error("Failed to commit preimage from trie database", "err", err) - return err - } - // If the batch is too large, flush to disk - if batch.ValueSize() > ethdb.IdealBatchSize { - if err := batch.Write(); err != nil { - return err - } - batch.Reset() - } - } - // Since we're going to replay trie node writes into the clean cache, flush out - // any batched pre-images before continuing. 
- if err := batch.Write(); err != nil { - return err - } - batch.Reset() - - // Move the trie itself into the batch, flushing if enough data is accumulated - nodes, storage := len(db.dirties), db.dirtiesSize - - uncacher := &cleaner{db} - if err := db.commit(node, batch, uncacher); err != nil { - log.Error("Failed to commit trie from trie database", "err", err) - return err - } - // Trie mostly committed to disk, flush any batch leftovers - if err := batch.Write(); err != nil { - log.Error("Failed to write trie to disk", "err", err) - return err - } - // Uncache any leftovers in the last batch - db.lock.Lock() - defer db.lock.Unlock() - - batch.Replay(uncacher) - batch.Reset() - - // Reset the storage counters and bumpd metrics - db.preimages = make(map[common.Hash][]byte) - db.preimagesSize = 0 - - memcacheCommitTimeTimer.Update(time.Since(start)) - memcacheCommitSizeMeter.Mark(int64(storage - db.dirtiesSize)) - memcacheCommitNodesMeter.Mark(int64(nodes - len(db.dirties))) - - logger := log.Info - if !report { - logger = log.Debug - } - logger("Persisted trie from memory database", "nodes", nodes-len(db.dirties)+int(db.flushnodes), "size", storage-db.dirtiesSize+db.flushsize, "time", time.Since(start)+db.flushtime, - "gcnodes", db.gcnodes, "gcsize", db.gcsize, "gctime", db.gctime, "livenodes", len(db.dirties), "livesize", db.dirtiesSize) - - // Reset the garbage collection statistics - db.gcnodes, db.gcsize, db.gctime = 0, 0, 0 - db.flushnodes, db.flushsize, db.flushtime = 0, 0, 0 - - return nil -} - -// commit is the private locked version of Commit. 
-func (db *Database) commit(hash common.Hash, batch ethdb.Batch, uncacher *cleaner) error { - // If the node does not exist, it's a previously committed node - node, ok := db.dirties[hash] - if !ok { - return nil - } - var err error - node.forChilds(func(child common.Hash) { - if err == nil { - err = db.commit(child, batch, uncacher) - } - }) - if err != nil { - return err - } - if err := batch.Put(hash[:], node.rlp()); err != nil { - return err - } - // If we've reached an optimal batch size, commit and start over - if batch.ValueSize() >= ethdb.IdealBatchSize { - if err := batch.Write(); err != nil { - return err - } - db.lock.Lock() - batch.Replay(uncacher) - batch.Reset() - db.lock.Unlock() - } - return nil -} - -// cleaner is a database batch replayer that takes a batch of write operations -// and cleans up the trie database from anything written to disk. -type cleaner struct { - db *Database -} - -// Put reacts to database writes and implements dirty data uncaching. This is the -// post-processing step of a commit operation where the already persisted trie is -// removed from the dirty cache and moved into the clean cache. The reason behind -// the two-phase commit is to ensure ensure data availability while moving from -// memory to disk. 
-func (c *cleaner) Put(key []byte, rlp []byte) error { - hash := common.BytesToHash(key) - - // If the node does not exist, we're done on this path - node, ok := c.db.dirties[hash] - if !ok { - return nil - } - // Node still exists, remove it from the flush-list - switch hash { - case c.db.oldest: - c.db.oldest = node.flushNext - c.db.dirties[node.flushNext].flushPrev = common.Hash{} - case c.db.newest: - c.db.newest = node.flushPrev - c.db.dirties[node.flushPrev].flushNext = common.Hash{} - default: - c.db.dirties[node.flushPrev].flushNext = node.flushNext - c.db.dirties[node.flushNext].flushPrev = node.flushPrev - } - // Remove the node from the dirty cache - delete(c.db.dirties, hash) - c.db.dirtiesSize -= common.StorageSize(common.HashLength + int(node.size)) - if node.children != nil { - c.db.dirtiesSize -= common.StorageSize(cachedNodeChildrenSize + len(node.children)*(common.HashLength+2)) - } - // Move the flushed node into the clean cache to prevent insta-reloads - if c.db.cleans != nil { - c.db.cleans.Set(hash[:], rlp) - memcacheCleanWriteMeter.Mark(int64(len(rlp))) - } - return nil -} - -func (c *cleaner) Delete(key []byte) error { - panic("not implemented") -} - -// Size returns the current storage size of the memory cache in front of the -// persistent database layer. -func (db *Database) Size() (common.StorageSize, common.StorageSize) { - db.lock.RLock() - defer db.lock.RUnlock() - - // db.dirtiesSize only contains the useful data in the cache, but when reporting - // the total memory consumption, the maintenance metadata is also needed to be - // counted. 
- var metadataSize = common.StorageSize((len(db.dirties) - 1) * cachedNodeSize) - var metarootRefs = common.StorageSize(len(db.dirties[common.Hash{}].children) * (common.HashLength + 2)) - return db.dirtiesSize + db.childrenSize + metadataSize - metarootRefs, db.preimagesSize -} diff --git a/vendor/github.com/ethereum/go-ethereum/trie/encoding.go b/vendor/github.com/ethereum/go-ethereum/trie/encoding.go deleted file mode 100644 index 1955a3e..0000000 --- a/vendor/github.com/ethereum/go-ethereum/trie/encoding.go +++ /dev/null @@ -1,116 +0,0 @@ -// Copyright 2014 The go-ethereum Authors -// This file is part of the go-ethereum library. -// -// The go-ethereum library is free software: you can redistribute it and/or modify -// it under the terms of the GNU Lesser General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// The go-ethereum library is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Lesser General Public License for more details. -// -// You should have received a copy of the GNU Lesser General Public License -// along with the go-ethereum library. If not, see . - -package trie - -// Trie keys are dealt with in three distinct encodings: -// -// KEYBYTES encoding contains the actual key and nothing else. This encoding is the -// input to most API functions. -// -// HEX encoding contains one byte for each nibble of the key and an optional trailing -// 'terminator' byte of value 0x10 which indicates whether or not the node at the key -// contains a value. Hex key encoding is used for nodes loaded in memory because it's -// convenient to access. -// -// COMPACT encoding is defined by the Ethereum Yellow Paper (it's called "hex prefix -// encoding" there) and contains the bytes of the key and a flag. 
The high nibble of the -// first byte contains the flag; the lowest bit encoding the oddness of the length and -// the second-lowest encoding whether the node at the key is a value node. The low nibble -// of the first byte is zero in the case of an even number of nibbles and the first nibble -// in the case of an odd number. All remaining nibbles (now an even number) fit properly -// into the remaining bytes. Compact encoding is used for nodes stored on disk. - -func hexToCompact(hex []byte) []byte { - terminator := byte(0) - if hasTerm(hex) { - terminator = 1 - hex = hex[:len(hex)-1] - } - buf := make([]byte, len(hex)/2+1) - buf[0] = terminator << 5 // the flag byte - if len(hex)&1 == 1 { - buf[0] |= 1 << 4 // odd flag - buf[0] |= hex[0] // first nibble is contained in the first byte - hex = hex[1:] - } - decodeNibbles(hex, buf[1:]) - return buf -} - -func compactToHex(compact []byte) []byte { - if len(compact) == 0 { - return compact - } - base := keybytesToHex(compact) - // delete terminator flag - if base[0] < 2 { - base = base[:len(base)-1] - } - // apply odd flag - chop := 2 - base[0]&1 - return base[chop:] -} - -func keybytesToHex(str []byte) []byte { - l := len(str)*2 + 1 - var nibbles = make([]byte, l) - for i, b := range str { - nibbles[i*2] = b / 16 - nibbles[i*2+1] = b % 16 - } - nibbles[l-1] = 16 - return nibbles -} - -// hexToKeybytes turns hex nibbles into key bytes. -// This can only be used for keys of even length. -func hexToKeybytes(hex []byte) []byte { - if hasTerm(hex) { - hex = hex[:len(hex)-1] - } - if len(hex)&1 != 0 { - panic("can't convert hex key of odd length") - } - key := make([]byte, len(hex)/2) - decodeNibbles(hex, key) - return key -} - -func decodeNibbles(nibbles []byte, bytes []byte) { - for bi, ni := 0, 0; ni < len(nibbles); bi, ni = bi+1, ni+2 { - bytes[bi] = nibbles[ni]<<4 | nibbles[ni+1] - } -} - -// prefixLen returns the length of the common prefix of a and b. 
-func prefixLen(a, b []byte) int { - var i, length = 0, len(a) - if len(b) < length { - length = len(b) - } - for ; i < length; i++ { - if a[i] != b[i] { - break - } - } - return i -} - -// hasTerm returns whether a hex key has the terminator flag. -func hasTerm(s []byte) bool { - return len(s) > 0 && s[len(s)-1] == 16 -} diff --git a/vendor/github.com/ethereum/go-ethereum/trie/errors.go b/vendor/github.com/ethereum/go-ethereum/trie/errors.go deleted file mode 100644 index 567b800..0000000 --- a/vendor/github.com/ethereum/go-ethereum/trie/errors.go +++ /dev/null @@ -1,35 +0,0 @@ -// Copyright 2015 The go-ethereum Authors -// This file is part of the go-ethereum library. -// -// The go-ethereum library is free software: you can redistribute it and/or modify -// it under the terms of the GNU Lesser General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// The go-ethereum library is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Lesser General Public License for more details. -// -// You should have received a copy of the GNU Lesser General Public License -// along with the go-ethereum library. If not, see . - -package trie - -import ( - "fmt" - - "github.com/ethereum/go-ethereum/common" -) - -// MissingNodeError is returned by the trie functions (TryGet, TryUpdate, TryDelete) -// in the case where a trie node is not present in the local database. It contains -// information necessary for retrieving the missing node. 
-type MissingNodeError struct { - NodeHash common.Hash // hash of the missing node - Path []byte // hex-encoded path to the missing node -} - -func (err *MissingNodeError) Error() string { - return fmt.Sprintf("missing trie node %x (path %x)", err.NodeHash, err.Path) -} diff --git a/vendor/github.com/ethereum/go-ethereum/trie/hasher.go b/vendor/github.com/ethereum/go-ethereum/trie/hasher.go deleted file mode 100644 index 8e8eec9..0000000 --- a/vendor/github.com/ethereum/go-ethereum/trie/hasher.go +++ /dev/null @@ -1,215 +0,0 @@ -// Copyright 2019 The go-ethereum Authors -// This file is part of the go-ethereum library. -// -// The go-ethereum library is free software: you can redistribute it and/or modify -// it under the terms of the GNU Lesser General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// The go-ethereum library is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Lesser General Public License for more details. -// -// You should have received a copy of the GNU Lesser General Public License -// along with the go-ethereum library. If not, see . - -package trie - -import ( - "hash" - "sync" - - "github.com/ethereum/go-ethereum/rlp" - "golang.org/x/crypto/sha3" -) - -// keccakState wraps sha3.state. In addition to the usual hash methods, it also supports -// Read to get a variable amount of data from the hash state. Read is faster than Sum -// because it doesn't copy the internal state, but also modifies the internal state. -type keccakState interface { - hash.Hash - Read([]byte) (int, error) -} - -type sliceBuffer []byte - -func (b *sliceBuffer) Write(data []byte) (n int, err error) { - *b = append(*b, data...) 
- return len(data), nil -} - -func (b *sliceBuffer) Reset() { - *b = (*b)[:0] -} - -// hasher is a type used for the trie Hash operation. A hasher has some -// internal preallocated temp space -type hasher struct { - sha keccakState - tmp sliceBuffer - parallel bool // Whether to use paralallel threads when hashing -} - -// hasherPool holds pureHashers -var hasherPool = sync.Pool{ - New: func() interface{} { - return &hasher{ - tmp: make(sliceBuffer, 0, 550), // cap is as large as a full fullNode. - sha: sha3.NewLegacyKeccak256().(keccakState), - } - }, -} - -func newHasher(parallel bool) *hasher { - h := hasherPool.Get().(*hasher) - h.parallel = parallel - return h -} - -func returnHasherToPool(h *hasher) { - hasherPool.Put(h) -} - -// hash collapses a node down into a hash node, also returning a copy of the -// original node initialized with the computed hash to replace the original one. -func (h *hasher) hash(n node, force bool) (hashed node, cached node) { - // We're not storing the node, just hashing, use available cached data - if hash, _ := n.cache(); hash != nil { - return hash, n - } - // Trie not processed yet or needs storage, walk the children - switch n := n.(type) { - case *shortNode: - collapsed, cached := h.hashShortNodeChildren(n) - hashed := h.shortnodeToHash(collapsed, force) - // We need to retain the possibly _not_ hashed node, in case it was too - // small to be hashed - if hn, ok := hashed.(hashNode); ok { - cached.flags.hash = hn - } else { - cached.flags.hash = nil - } - return hashed, cached - case *fullNode: - collapsed, cached := h.hashFullNodeChildren(n) - hashed = h.fullnodeToHash(collapsed, force) - if hn, ok := hashed.(hashNode); ok { - cached.flags.hash = hn - } else { - cached.flags.hash = nil - } - return hashed, cached - default: - // Value and hash nodes don't have children so they're left as were - return n, n - } -} - -// hashShortNodeChildren collapses the short node. 
The returned collapsed node -// holds a live reference to the Key, and must not be modified. -// The cached -func (h *hasher) hashShortNodeChildren(n *shortNode) (collapsed, cached *shortNode) { - // Hash the short node's child, caching the newly hashed subtree - collapsed, cached = n.copy(), n.copy() - // Previously, we did copy this one. We don't seem to need to actually - // do that, since we don't overwrite/reuse keys - //cached.Key = common.CopyBytes(n.Key) - collapsed.Key = hexToCompact(n.Key) - // Unless the child is a valuenode or hashnode, hash it - switch n.Val.(type) { - case *fullNode, *shortNode: - collapsed.Val, cached.Val = h.hash(n.Val, false) - } - return collapsed, cached -} - -func (h *hasher) hashFullNodeChildren(n *fullNode) (collapsed *fullNode, cached *fullNode) { - // Hash the full node's children, caching the newly hashed subtrees - cached = n.copy() - collapsed = n.copy() - if h.parallel { - var wg sync.WaitGroup - wg.Add(16) - for i := 0; i < 16; i++ { - go func(i int) { - hasher := newHasher(false) - if child := n.Children[i]; child != nil { - collapsed.Children[i], cached.Children[i] = hasher.hash(child, false) - } else { - collapsed.Children[i] = nilValueNode - } - returnHasherToPool(hasher) - wg.Done() - }(i) - } - wg.Wait() - } else { - for i := 0; i < 16; i++ { - if child := n.Children[i]; child != nil { - collapsed.Children[i], cached.Children[i] = h.hash(child, false) - } else { - collapsed.Children[i] = nilValueNode - } - } - } - return collapsed, cached -} - -// shortnodeToHash creates a hashNode from a shortNode. The supplied shortnode -// should have hex-type Key, which will be converted (without modification) -// into compact form for RLP encoding. -// If the rlp data is smaller than 32 bytes, `nil` is returned. 
-func (h *hasher) shortnodeToHash(n *shortNode, force bool) node { - h.tmp.Reset() - if err := rlp.Encode(&h.tmp, n); err != nil { - panic("encode error: " + err.Error()) - } - - if len(h.tmp) < 32 && !force { - return n // Nodes smaller than 32 bytes are stored inside their parent - } - return h.hashData(h.tmp) -} - -// shortnodeToHash is used to creates a hashNode from a set of hashNodes, (which -// may contain nil values) -func (h *hasher) fullnodeToHash(n *fullNode, force bool) node { - h.tmp.Reset() - // Generate the RLP encoding of the node - if err := n.EncodeRLP(&h.tmp); err != nil { - panic("encode error: " + err.Error()) - } - - if len(h.tmp) < 32 && !force { - return n // Nodes smaller than 32 bytes are stored inside their parent - } - return h.hashData(h.tmp) -} - -// hashData hashes the provided data -func (h *hasher) hashData(data []byte) hashNode { - n := make(hashNode, 32) - h.sha.Reset() - h.sha.Write(data) - h.sha.Read(n) - return n -} - -// proofHash is used to construct trie proofs, and returns the 'collapsed' -// node (for later RLP encoding) aswell as the hashed node -- unless the -// node is smaller than 32 bytes, in which case it will be returned as is. -// This method does not do anything on value- or hash-nodes. 
-func (h *hasher) proofHash(original node) (collapsed, hashed node) { - switch n := original.(type) { - case *shortNode: - sn, _ := h.hashShortNodeChildren(n) - return sn, h.shortnodeToHash(sn, false) - case *fullNode: - fn, _ := h.hashFullNodeChildren(n) - return fn, h.fullnodeToHash(fn, false) - default: - // Value and hash nodes don't have children so they're left as were - return n, n - } -} diff --git a/vendor/github.com/ethereum/go-ethereum/trie/iterator.go b/vendor/github.com/ethereum/go-ethereum/trie/iterator.go deleted file mode 100644 index bb4025d..0000000 --- a/vendor/github.com/ethereum/go-ethereum/trie/iterator.go +++ /dev/null @@ -1,577 +0,0 @@ -// Copyright 2014 The go-ethereum Authors -// This file is part of the go-ethereum library. -// -// The go-ethereum library is free software: you can redistribute it and/or modify -// it under the terms of the GNU Lesser General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// The go-ethereum library is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Lesser General Public License for more details. -// -// You should have received a copy of the GNU Lesser General Public License -// along with the go-ethereum library. If not, see . - -package trie - -import ( - "bytes" - "container/heap" - "errors" - - "github.com/ethereum/go-ethereum/common" - "github.com/ethereum/go-ethereum/rlp" -) - -// Iterator is a key-value trie iterator that traverses a Trie. -type Iterator struct { - nodeIt NodeIterator - - Key []byte // Current data key on which the iterator is positioned on - Value []byte // Current data value on which the iterator is positioned on - Err error -} - -// NewIterator creates a new key-value iterator from a node iterator. 
-// Note that the value returned by the iterator is raw. If the content is encoded -// (e.g. storage value is RLP-encoded), it's caller's duty to decode it. -func NewIterator(it NodeIterator) *Iterator { - return &Iterator{ - nodeIt: it, - } -} - -// Next moves the iterator forward one key-value entry. -func (it *Iterator) Next() bool { - for it.nodeIt.Next(true) { - if it.nodeIt.Leaf() { - it.Key = it.nodeIt.LeafKey() - it.Value = it.nodeIt.LeafBlob() - return true - } - } - it.Key = nil - it.Value = nil - it.Err = it.nodeIt.Error() - return false -} - -// Prove generates the Merkle proof for the leaf node the iterator is currently -// positioned on. -func (it *Iterator) Prove() [][]byte { - return it.nodeIt.LeafProof() -} - -// NodeIterator is an iterator to traverse the trie pre-order. -type NodeIterator interface { - // Next moves the iterator to the next node. If the parameter is false, any child - // nodes will be skipped. - Next(bool) bool - - // Error returns the error status of the iterator. - Error() error - - // Hash returns the hash of the current node. - Hash() common.Hash - - // Parent returns the hash of the parent of the current node. The hash may be the one - // grandparent if the immediate parent is an internal node with no hash. - Parent() common.Hash - - // Path returns the hex-encoded path to the current node. - // Callers must not retain references to the return value after calling Next. - // For leaf nodes, the last element of the path is the 'terminator symbol' 0x10. - Path() []byte - - // Leaf returns true iff the current node is a leaf node. - Leaf() bool - - // LeafKey returns the key of the leaf. The method panics if the iterator is not - // positioned at a leaf. Callers must not retain references to the value after - // calling Next. - LeafKey() []byte - - // LeafBlob returns the content of the leaf. The method panics if the iterator - // is not positioned at a leaf. 
Callers must not retain references to the value - // after calling Next. - LeafBlob() []byte - - // LeafProof returns the Merkle proof of the leaf. The method panics if the - // iterator is not positioned at a leaf. Callers must not retain references - // to the value after calling Next. - LeafProof() [][]byte -} - -// nodeIteratorState represents the iteration state at one particular node of the -// trie, which can be resumed at a later invocation. -type nodeIteratorState struct { - hash common.Hash // Hash of the node being iterated (nil if not standalone) - node node // Trie node being iterated - parent common.Hash // Hash of the first full ancestor node (nil if current is the root) - index int // Child to be processed next - pathlen int // Length of the path to this node -} - -type nodeIterator struct { - trie *Trie // Trie being iterated - stack []*nodeIteratorState // Hierarchy of trie nodes persisting the iteration state - path []byte // Path to the current node - err error // Failure set in case of an internal error in the iterator -} - -// errIteratorEnd is stored in nodeIterator.err when iteration is done. -var errIteratorEnd = errors.New("end of iteration") - -// seekError is stored in nodeIterator.err if the initial seek has failed. 
-type seekError struct { - key []byte - err error -} - -func (e seekError) Error() string { - return "seek error: " + e.err.Error() -} - -func newNodeIterator(trie *Trie, start []byte) NodeIterator { - if trie.Hash() == emptyState { - return new(nodeIterator) - } - it := &nodeIterator{trie: trie} - it.err = it.seek(start) - return it -} - -func (it *nodeIterator) Hash() common.Hash { - if len(it.stack) == 0 { - return common.Hash{} - } - return it.stack[len(it.stack)-1].hash -} - -func (it *nodeIterator) Parent() common.Hash { - if len(it.stack) == 0 { - return common.Hash{} - } - return it.stack[len(it.stack)-1].parent -} - -func (it *nodeIterator) Leaf() bool { - return hasTerm(it.path) -} - -func (it *nodeIterator) LeafKey() []byte { - if len(it.stack) > 0 { - if _, ok := it.stack[len(it.stack)-1].node.(valueNode); ok { - return hexToKeybytes(it.path) - } - } - panic("not at leaf") -} - -func (it *nodeIterator) LeafBlob() []byte { - if len(it.stack) > 0 { - if node, ok := it.stack[len(it.stack)-1].node.(valueNode); ok { - return []byte(node) - } - } - panic("not at leaf") -} - -func (it *nodeIterator) LeafProof() [][]byte { - if len(it.stack) > 0 { - if _, ok := it.stack[len(it.stack)-1].node.(valueNode); ok { - hasher := newHasher(false) - defer returnHasherToPool(hasher) - proofs := make([][]byte, 0, len(it.stack)) - - for i, item := range it.stack[:len(it.stack)-1] { - // Gather nodes that end up as hash nodes (or the root) - node, hashed := hasher.proofHash(item.node) - if _, ok := hashed.(hashNode); ok || i == 0 { - enc, _ := rlp.EncodeToBytes(node) - proofs = append(proofs, enc) - } - } - return proofs - } - } - panic("not at leaf") -} - -func (it *nodeIterator) Path() []byte { - return it.path -} - -func (it *nodeIterator) Error() error { - if it.err == errIteratorEnd { - return nil - } - if seek, ok := it.err.(seekError); ok { - return seek.err - } - return it.err -} - -// Next moves the iterator to the next node, returning whether there are any -// 
further nodes. In case of an internal error this method returns false and -// sets the Error field to the encountered failure. If `descend` is false, -// skips iterating over any subnodes of the current node. -func (it *nodeIterator) Next(descend bool) bool { - if it.err == errIteratorEnd { - return false - } - if seek, ok := it.err.(seekError); ok { - if it.err = it.seek(seek.key); it.err != nil { - return false - } - } - // Otherwise step forward with the iterator and report any errors. - state, parentIndex, path, err := it.peek(descend) - it.err = err - if it.err != nil { - return false - } - it.push(state, parentIndex, path) - return true -} - -func (it *nodeIterator) seek(prefix []byte) error { - // The path we're looking for is the hex encoded key without terminator. - key := keybytesToHex(prefix) - key = key[:len(key)-1] - // Move forward until we're just before the closest match to key. - for { - state, parentIndex, path, err := it.peek(bytes.HasPrefix(key, it.path)) - if err == errIteratorEnd { - return errIteratorEnd - } else if err != nil { - return seekError{prefix, err} - } else if bytes.Compare(path, key) >= 0 { - return nil - } - it.push(state, parentIndex, path) - } -} - -// peek creates the next state of the iterator. -func (it *nodeIterator) peek(descend bool) (*nodeIteratorState, *int, []byte, error) { - if len(it.stack) == 0 { - // Initialize the iterator if we've just started. 
- root := it.trie.Hash() - state := &nodeIteratorState{node: it.trie.root, index: -1} - if root != emptyRoot { - state.hash = root - } - err := state.resolve(it.trie, nil) - return state, nil, nil, err - } - if !descend { - // If we're skipping children, pop the current node first - it.pop() - } - - // Continue iteration to the next child - for len(it.stack) > 0 { - parent := it.stack[len(it.stack)-1] - ancestor := parent.hash - if (ancestor == common.Hash{}) { - ancestor = parent.parent - } - state, path, ok := it.nextChild(parent, ancestor) - if ok { - if err := state.resolve(it.trie, path); err != nil { - return parent, &parent.index, path, err - } - return state, &parent.index, path, nil - } - // No more child nodes, move back up. - it.pop() - } - return nil, nil, nil, errIteratorEnd -} - -func (st *nodeIteratorState) resolve(tr *Trie, path []byte) error { - if hash, ok := st.node.(hashNode); ok { - resolved, err := tr.resolveHash(hash, path) - if err != nil { - return err - } - st.node = resolved - st.hash = common.BytesToHash(hash) - } - return nil -} - -func (it *nodeIterator) nextChild(parent *nodeIteratorState, ancestor common.Hash) (*nodeIteratorState, []byte, bool) { - switch node := parent.node.(type) { - case *fullNode: - // Full node, move to the first non-nil child. - for i := parent.index + 1; i < len(node.Children); i++ { - child := node.Children[i] - if child != nil { - hash, _ := child.cache() - state := &nodeIteratorState{ - hash: common.BytesToHash(hash), - node: child, - parent: ancestor, - index: -1, - pathlen: len(it.path), - } - path := append(it.path, byte(i)) - parent.index = i - 1 - return state, path, true - } - } - case *shortNode: - // Short node, return the pointer singleton child - if parent.index < 0 { - hash, _ := node.Val.cache() - state := &nodeIteratorState{ - hash: common.BytesToHash(hash), - node: node.Val, - parent: ancestor, - index: -1, - pathlen: len(it.path), - } - path := append(it.path, node.Key...) 
- return state, path, true - } - } - return parent, it.path, false -} - -func (it *nodeIterator) push(state *nodeIteratorState, parentIndex *int, path []byte) { - it.path = path - it.stack = append(it.stack, state) - if parentIndex != nil { - *parentIndex++ - } -} - -func (it *nodeIterator) pop() { - parent := it.stack[len(it.stack)-1] - it.path = it.path[:parent.pathlen] - it.stack = it.stack[:len(it.stack)-1] -} - -func compareNodes(a, b NodeIterator) int { - if cmp := bytes.Compare(a.Path(), b.Path()); cmp != 0 { - return cmp - } - if a.Leaf() && !b.Leaf() { - return -1 - } else if b.Leaf() && !a.Leaf() { - return 1 - } - if cmp := bytes.Compare(a.Hash().Bytes(), b.Hash().Bytes()); cmp != 0 { - return cmp - } - if a.Leaf() && b.Leaf() { - return bytes.Compare(a.LeafBlob(), b.LeafBlob()) - } - return 0 -} - -type differenceIterator struct { - a, b NodeIterator // Nodes returned are those in b - a. - eof bool // Indicates a has run out of elements - count int // Number of nodes scanned on either trie -} - -// NewDifferenceIterator constructs a NodeIterator that iterates over elements in b that -// are not in a. Returns the iterator, and a pointer to an integer recording the number -// of nodes seen. 
-func NewDifferenceIterator(a, b NodeIterator) (NodeIterator, *int) { - a.Next(true) - it := &differenceIterator{ - a: a, - b: b, - } - return it, &it.count -} - -func (it *differenceIterator) Hash() common.Hash { - return it.b.Hash() -} - -func (it *differenceIterator) Parent() common.Hash { - return it.b.Parent() -} - -func (it *differenceIterator) Leaf() bool { - return it.b.Leaf() -} - -func (it *differenceIterator) LeafKey() []byte { - return it.b.LeafKey() -} - -func (it *differenceIterator) LeafBlob() []byte { - return it.b.LeafBlob() -} - -func (it *differenceIterator) LeafProof() [][]byte { - return it.b.LeafProof() -} - -func (it *differenceIterator) Path() []byte { - return it.b.Path() -} - -func (it *differenceIterator) Next(bool) bool { - // Invariants: - // - We always advance at least one element in b. - // - At the start of this function, a's path is lexically greater than b's. - if !it.b.Next(true) { - return false - } - it.count++ - - if it.eof { - // a has reached eof, so we just return all elements from b - return true - } - - for { - switch compareNodes(it.a, it.b) { - case -1: - // b jumped past a; advance a - if !it.a.Next(true) { - it.eof = true - return true - } - it.count++ - case 1: - // b is before a - return true - case 0: - // a and b are identical; skip this whole subtree if the nodes have hashes - hasHash := it.a.Hash() == common.Hash{} - if !it.b.Next(hasHash) { - return false - } - it.count++ - if !it.a.Next(hasHash) { - it.eof = true - return true - } - it.count++ - } - } -} - -func (it *differenceIterator) Error() error { - if err := it.a.Error(); err != nil { - return err - } - return it.b.Error() -} - -type nodeIteratorHeap []NodeIterator - -func (h nodeIteratorHeap) Len() int { return len(h) } -func (h nodeIteratorHeap) Less(i, j int) bool { return compareNodes(h[i], h[j]) < 0 } -func (h nodeIteratorHeap) Swap(i, j int) { h[i], h[j] = h[j], h[i] } -func (h *nodeIteratorHeap) Push(x interface{}) { *h = append(*h, 
x.(NodeIterator)) } -func (h *nodeIteratorHeap) Pop() interface{} { - n := len(*h) - x := (*h)[n-1] - *h = (*h)[0 : n-1] - return x -} - -type unionIterator struct { - items *nodeIteratorHeap // Nodes returned are the union of the ones in these iterators - count int // Number of nodes scanned across all tries -} - -// NewUnionIterator constructs a NodeIterator that iterates over elements in the union -// of the provided NodeIterators. Returns the iterator, and a pointer to an integer -// recording the number of nodes visited. -func NewUnionIterator(iters []NodeIterator) (NodeIterator, *int) { - h := make(nodeIteratorHeap, len(iters)) - copy(h, iters) - heap.Init(&h) - - ui := &unionIterator{items: &h} - return ui, &ui.count -} - -func (it *unionIterator) Hash() common.Hash { - return (*it.items)[0].Hash() -} - -func (it *unionIterator) Parent() common.Hash { - return (*it.items)[0].Parent() -} - -func (it *unionIterator) Leaf() bool { - return (*it.items)[0].Leaf() -} - -func (it *unionIterator) LeafKey() []byte { - return (*it.items)[0].LeafKey() -} - -func (it *unionIterator) LeafBlob() []byte { - return (*it.items)[0].LeafBlob() -} - -func (it *unionIterator) LeafProof() [][]byte { - return (*it.items)[0].LeafProof() -} - -func (it *unionIterator) Path() []byte { - return (*it.items)[0].Path() -} - -// Next returns the next node in the union of tries being iterated over. -// -// It does this by maintaining a heap of iterators, sorted by the iteration -// order of their next elements, with one entry for each source trie. Each -// time Next() is called, it takes the least element from the heap to return, -// advancing any other iterators that also point to that same element. These -// iterators are called with descend=false, since we know that any nodes under -// these nodes will also be duplicates, found in the currently selected iterator. -// Whenever an iterator is advanced, it is pushed back into the heap if it still -// has elements remaining. 
-// -// In the case that descend=false - eg, we're asked to ignore all subnodes of the -// current node - we also advance any iterators in the heap that have the current -// path as a prefix. -func (it *unionIterator) Next(descend bool) bool { - if len(*it.items) == 0 { - return false - } - - // Get the next key from the union - least := heap.Pop(it.items).(NodeIterator) - - // Skip over other nodes as long as they're identical, or, if we're not descending, as - // long as they have the same prefix as the current node. - for len(*it.items) > 0 && ((!descend && bytes.HasPrefix((*it.items)[0].Path(), least.Path())) || compareNodes(least, (*it.items)[0]) == 0) { - skipped := heap.Pop(it.items).(NodeIterator) - // Skip the whole subtree if the nodes have hashes; otherwise just skip this node - if skipped.Next(skipped.Hash() == common.Hash{}) { - it.count++ - // If there are more elements, push the iterator back on the heap - heap.Push(it.items, skipped) - } - } - if least.Next(descend) { - it.count++ - heap.Push(it.items, least) - } - return len(*it.items) > 0 -} - -func (it *unionIterator) Error() error { - for i := 0; i < len(*it.items); i++ { - if err := (*it.items)[i].Error(); err != nil { - return err - } - } - return nil -} diff --git a/vendor/github.com/ethereum/go-ethereum/trie/node.go b/vendor/github.com/ethereum/go-ethereum/trie/node.go deleted file mode 100644 index f4055e7..0000000 --- a/vendor/github.com/ethereum/go-ethereum/trie/node.go +++ /dev/null @@ -1,225 +0,0 @@ -// Copyright 2014 The go-ethereum Authors -// This file is part of the go-ethereum library. -// -// The go-ethereum library is free software: you can redistribute it and/or modify -// it under the terms of the GNU Lesser General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. 
-// -// The go-ethereum library is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Lesser General Public License for more details. -// -// You should have received a copy of the GNU Lesser General Public License -// along with the go-ethereum library. If not, see . - -package trie - -import ( - "fmt" - "io" - "strings" - - "github.com/ethereum/go-ethereum/common" - "github.com/ethereum/go-ethereum/rlp" -) - -var indices = []string{"0", "1", "2", "3", "4", "5", "6", "7", "8", "9", "a", "b", "c", "d", "e", "f", "[17]"} - -type node interface { - fstring(string) string - cache() (hashNode, bool) -} - -type ( - fullNode struct { - Children [17]node // Actual trie node data to encode/decode (needs custom encoder) - flags nodeFlag - } - shortNode struct { - Key []byte - Val node - flags nodeFlag - } - hashNode []byte - valueNode []byte -) - -// nilValueNode is used when collapsing internal trie nodes for hashing, since -// unset children need to serialize correctly. -var nilValueNode = valueNode(nil) - -// EncodeRLP encodes a full node into the consensus RLP format. -func (n *fullNode) EncodeRLP(w io.Writer) error { - var nodes [17]node - - for i, child := range &n.Children { - if child != nil { - nodes[i] = child - } else { - nodes[i] = nilValueNode - } - } - return rlp.Encode(w, nodes) -} - -func (n *fullNode) copy() *fullNode { copy := *n; return © } -func (n *shortNode) copy() *shortNode { copy := *n; return © } - -// nodeFlag contains caching-related metadata about a node. 
-type nodeFlag struct { - hash hashNode // cached hash of the node (may be nil) - dirty bool // whether the node has changes that must be written to the database -} - -func (n *fullNode) cache() (hashNode, bool) { return n.flags.hash, n.flags.dirty } -func (n *shortNode) cache() (hashNode, bool) { return n.flags.hash, n.flags.dirty } -func (n hashNode) cache() (hashNode, bool) { return nil, true } -func (n valueNode) cache() (hashNode, bool) { return nil, true } - -// Pretty printing. -func (n *fullNode) String() string { return n.fstring("") } -func (n *shortNode) String() string { return n.fstring("") } -func (n hashNode) String() string { return n.fstring("") } -func (n valueNode) String() string { return n.fstring("") } - -func (n *fullNode) fstring(ind string) string { - resp := fmt.Sprintf("[\n%s ", ind) - for i, node := range &n.Children { - if node == nil { - resp += fmt.Sprintf("%s: ", indices[i]) - } else { - resp += fmt.Sprintf("%s: %v", indices[i], node.fstring(ind+" ")) - } - } - return resp + fmt.Sprintf("\n%s] ", ind) -} -func (n *shortNode) fstring(ind string) string { - return fmt.Sprintf("{%x: %v} ", n.Key, n.Val.fstring(ind+" ")) -} -func (n hashNode) fstring(ind string) string { - return fmt.Sprintf("<%x> ", []byte(n)) -} -func (n valueNode) fstring(ind string) string { - return fmt.Sprintf("%x ", []byte(n)) -} - -func mustDecodeNode(hash, buf []byte) node { - n, err := decodeNode(hash, buf) - if err != nil { - panic(fmt.Sprintf("node %x: %v", hash, err)) - } - return n -} - -// decodeNode parses the RLP encoding of a trie node. 
-func decodeNode(hash, buf []byte) (node, error) { - if len(buf) == 0 { - return nil, io.ErrUnexpectedEOF - } - elems, _, err := rlp.SplitList(buf) - if err != nil { - return nil, fmt.Errorf("decode error: %v", err) - } - switch c, _ := rlp.CountValues(elems); c { - case 2: - n, err := decodeShort(hash, elems) - return n, wrapError(err, "short") - case 17: - n, err := decodeFull(hash, elems) - return n, wrapError(err, "full") - default: - return nil, fmt.Errorf("invalid number of list elements: %v", c) - } -} - -func decodeShort(hash, elems []byte) (node, error) { - kbuf, rest, err := rlp.SplitString(elems) - if err != nil { - return nil, err - } - flag := nodeFlag{hash: hash} - key := compactToHex(kbuf) - if hasTerm(key) { - // value node - val, _, err := rlp.SplitString(rest) - if err != nil { - return nil, fmt.Errorf("invalid value node: %v", err) - } - return &shortNode{key, append(valueNode{}, val...), flag}, nil - } - r, _, err := decodeRef(rest) - if err != nil { - return nil, wrapError(err, "val") - } - return &shortNode{key, r, flag}, nil -} - -func decodeFull(hash, elems []byte) (*fullNode, error) { - n := &fullNode{flags: nodeFlag{hash: hash}} - for i := 0; i < 16; i++ { - cld, rest, err := decodeRef(elems) - if err != nil { - return n, wrapError(err, fmt.Sprintf("[%d]", i)) - } - n.Children[i], elems = cld, rest - } - val, _, err := rlp.SplitString(elems) - if err != nil { - return n, err - } - if len(val) > 0 { - n.Children[16] = append(valueNode{}, val...) - } - return n, nil -} - -const hashLen = len(common.Hash{}) - -func decodeRef(buf []byte) (node, []byte, error) { - kind, val, rest, err := rlp.Split(buf) - if err != nil { - return nil, buf, err - } - switch { - case kind == rlp.List: - // 'embedded' node reference. The encoding must be smaller - // than a hash in order to be valid. 
- if size := len(buf) - len(rest); size > hashLen { - err := fmt.Errorf("oversized embedded node (size is %d bytes, want size < %d)", size, hashLen) - return nil, buf, err - } - n, err := decodeNode(nil, buf) - return n, rest, err - case kind == rlp.String && len(val) == 0: - // empty node - return nil, rest, nil - case kind == rlp.String && len(val) == 32: - return append(hashNode{}, val...), rest, nil - default: - return nil, nil, fmt.Errorf("invalid RLP string size %d (want 0 or 32)", len(val)) - } -} - -// wraps a decoding error with information about the path to the -// invalid child node (for debugging encoding issues). -type decodeError struct { - what error - stack []string -} - -func wrapError(err error, ctx string) error { - if err == nil { - return nil - } - if decErr, ok := err.(*decodeError); ok { - decErr.stack = append(decErr.stack, ctx) - return decErr - } - return &decodeError{err, []string{ctx}} -} - -func (err *decodeError) Error() string { - return fmt.Sprintf("%v (decode path: %s)", err.what, strings.Join(err.stack, "<-")) -} diff --git a/vendor/github.com/ethereum/go-ethereum/trie/proof.go b/vendor/github.com/ethereum/go-ethereum/trie/proof.go deleted file mode 100644 index 58ca69c..0000000 --- a/vendor/github.com/ethereum/go-ethereum/trie/proof.go +++ /dev/null @@ -1,152 +0,0 @@ -// Copyright 2015 The go-ethereum Authors -// This file is part of the go-ethereum library. -// -// The go-ethereum library is free software: you can redistribute it and/or modify -// it under the terms of the GNU Lesser General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// The go-ethereum library is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Lesser General Public License for more details. 
-// -// You should have received a copy of the GNU Lesser General Public License -// along with the go-ethereum library. If not, see . - -package trie - -import ( - "bytes" - "fmt" - - "github.com/ethereum/go-ethereum/common" - "github.com/ethereum/go-ethereum/ethdb" - "github.com/ethereum/go-ethereum/log" - "github.com/ethereum/go-ethereum/rlp" -) - -// Prove constructs a merkle proof for key. The result contains all encoded nodes -// on the path to the value at key. The value itself is also included in the last -// node and can be retrieved by verifying the proof. -// -// If the trie does not contain a value for key, the returned proof contains all -// nodes of the longest existing prefix of the key (at least the root node), ending -// with the node that proves the absence of the key. -func (t *Trie) Prove(key []byte, fromLevel uint, proofDb ethdb.KeyValueWriter) error { - // Collect all nodes on the path to key. - key = keybytesToHex(key) - var nodes []node - tn := t.root - for len(key) > 0 && tn != nil { - switch n := tn.(type) { - case *shortNode: - if len(key) < len(n.Key) || !bytes.Equal(n.Key, key[:len(n.Key)]) { - // The trie doesn't contain the key. - tn = nil - } else { - tn = n.Val - key = key[len(n.Key):] - } - nodes = append(nodes, n) - case *fullNode: - tn = n.Children[key[0]] - key = key[1:] - nodes = append(nodes, n) - case hashNode: - var err error - tn, err = t.resolveHash(n, nil) - if err != nil { - log.Error(fmt.Sprintf("Unhandled trie error: %v", err)) - return err - } - default: - panic(fmt.Sprintf("%T: invalid node: %v", tn, tn)) - } - } - hasher := newHasher(false) - defer returnHasherToPool(hasher) - - for i, n := range nodes { - if fromLevel > 0 { - fromLevel-- - continue - } - var hn node - n, hn = hasher.proofHash(n) - if hash, ok := hn.(hashNode); ok || i == 0 { - // If the node's database encoding is a hash (or is the - // root node), it becomes a proof element. 
- enc, _ := rlp.EncodeToBytes(n) - if !ok { - hash = hasher.hashData(enc) - } - proofDb.Put(hash, enc) - } - } - return nil -} - -// Prove constructs a merkle proof for key. The result contains all encoded nodes -// on the path to the value at key. The value itself is also included in the last -// node and can be retrieved by verifying the proof. -// -// If the trie does not contain a value for key, the returned proof contains all -// nodes of the longest existing prefix of the key (at least the root node), ending -// with the node that proves the absence of the key. -func (t *SecureTrie) Prove(key []byte, fromLevel uint, proofDb ethdb.KeyValueWriter) error { - return t.trie.Prove(key, fromLevel, proofDb) -} - -// VerifyProof checks merkle proofs. The given proof must contain the value for -// key in a trie with the given root hash. VerifyProof returns an error if the -// proof contains invalid trie nodes or the wrong value. -func VerifyProof(rootHash common.Hash, key []byte, proofDb ethdb.KeyValueReader) (value []byte, nodes int, err error) { - key = keybytesToHex(key) - wantHash := rootHash - for i := 0; ; i++ { - buf, _ := proofDb.Get(wantHash[:]) - if buf == nil { - return nil, i, fmt.Errorf("proof node %d (hash %064x) missing", i, wantHash) - } - n, err := decodeNode(wantHash[:], buf) - if err != nil { - return nil, i, fmt.Errorf("bad proof node %d: %v", i, err) - } - keyrest, cld := get(n, key) - switch cld := cld.(type) { - case nil: - // The trie doesn't contain the key. 
- return nil, i, nil - case hashNode: - key = keyrest - copy(wantHash[:], cld) - case valueNode: - return cld, i + 1, nil - } - } -} - -func get(tn node, key []byte) ([]byte, node) { - for { - switch n := tn.(type) { - case *shortNode: - if len(key) < len(n.Key) || !bytes.Equal(n.Key, key[:len(n.Key)]) { - return nil, nil - } - tn = n.Val - key = key[len(n.Key):] - case *fullNode: - tn = n.Children[key[0]] - key = key[1:] - case hashNode: - return key, n - case nil: - return key, nil - case valueNode: - return nil, n - default: - panic(fmt.Sprintf("%T: invalid node: %v", tn, tn)) - } - } -} diff --git a/vendor/github.com/ethereum/go-ethereum/trie/secure_trie.go b/vendor/github.com/ethereum/go-ethereum/trie/secure_trie.go deleted file mode 100644 index 9557714..0000000 --- a/vendor/github.com/ethereum/go-ethereum/trie/secure_trie.go +++ /dev/null @@ -1,196 +0,0 @@ -// Copyright 2015 The go-ethereum Authors -// This file is part of the go-ethereum library. -// -// The go-ethereum library is free software: you can redistribute it and/or modify -// it under the terms of the GNU Lesser General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// The go-ethereum library is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Lesser General Public License for more details. -// -// You should have received a copy of the GNU Lesser General Public License -// along with the go-ethereum library. If not, see . - -package trie - -import ( - "fmt" - - "github.com/ethereum/go-ethereum/common" - "github.com/ethereum/go-ethereum/log" -) - -// SecureTrie wraps a trie with key hashing. In a secure trie, all -// access operations hash the key using keccak256. This prevents -// calling code from creating long chains of nodes that -// increase the access time. 
-// -// Contrary to a regular trie, a SecureTrie can only be created with -// New and must have an attached database. The database also stores -// the preimage of each key. -// -// SecureTrie is not safe for concurrent use. -type SecureTrie struct { - trie Trie - hashKeyBuf [common.HashLength]byte - secKeyCache map[string][]byte - secKeyCacheOwner *SecureTrie // Pointer to self, replace the key cache on mismatch -} - -// NewSecure creates a trie with an existing root node from a backing database -// and optional intermediate in-memory node pool. -// -// If root is the zero hash or the sha3 hash of an empty string, the -// trie is initially empty. Otherwise, New will panic if db is nil -// and returns MissingNodeError if the root node cannot be found. -// -// Accessing the trie loads nodes from the database or node pool on demand. -// Loaded nodes are kept around until their 'cache generation' expires. -// A new cache generation is created by each call to Commit. -// cachelimit sets the number of past cache generations to keep. -func NewSecure(root common.Hash, db *Database) (*SecureTrie, error) { - if db == nil { - panic("trie.NewSecure called without a database") - } - trie, err := New(root, db) - if err != nil { - return nil, err - } - return &SecureTrie{trie: *trie}, nil -} - -// Get returns the value for key stored in the trie. -// The value bytes must not be modified by the caller. -func (t *SecureTrie) Get(key []byte) []byte { - res, err := t.TryGet(key) - if err != nil { - log.Error(fmt.Sprintf("Unhandled trie error: %v", err)) - } - return res -} - -// TryGet returns the value for key stored in the trie. -// The value bytes must not be modified by the caller. -// If a node was not found in the database, a MissingNodeError is returned. -func (t *SecureTrie) TryGet(key []byte) ([]byte, error) { - return t.trie.TryGet(t.hashKey(key)) -} - -// Update associates key with value in the trie. Subsequent calls to -// Get will return value. 
If value has length zero, any existing value -// is deleted from the trie and calls to Get will return nil. -// -// The value bytes must not be modified by the caller while they are -// stored in the trie. -func (t *SecureTrie) Update(key, value []byte) { - if err := t.TryUpdate(key, value); err != nil { - log.Error(fmt.Sprintf("Unhandled trie error: %v", err)) - } -} - -// TryUpdate associates key with value in the trie. Subsequent calls to -// Get will return value. If value has length zero, any existing value -// is deleted from the trie and calls to Get will return nil. -// -// The value bytes must not be modified by the caller while they are -// stored in the trie. -// -// If a node was not found in the database, a MissingNodeError is returned. -func (t *SecureTrie) TryUpdate(key, value []byte) error { - hk := t.hashKey(key) - err := t.trie.TryUpdate(hk, value) - if err != nil { - return err - } - t.getSecKeyCache()[string(hk)] = common.CopyBytes(key) - return nil -} - -// Delete removes any existing value for key from the trie. -func (t *SecureTrie) Delete(key []byte) { - if err := t.TryDelete(key); err != nil { - log.Error(fmt.Sprintf("Unhandled trie error: %v", err)) - } -} - -// TryDelete removes any existing value for key from the trie. -// If a node was not found in the database, a MissingNodeError is returned. -func (t *SecureTrie) TryDelete(key []byte) error { - hk := t.hashKey(key) - delete(t.getSecKeyCache(), string(hk)) - return t.trie.TryDelete(hk) -} - -// GetKey returns the sha3 preimage of a hashed key that was -// previously used to store a value. -func (t *SecureTrie) GetKey(shaKey []byte) []byte { - if key, ok := t.getSecKeyCache()[string(shaKey)]; ok { - return key - } - key, _ := t.trie.db.preimage(common.BytesToHash(shaKey)) - return key -} - -// Commit writes all nodes and the secure hash pre-images to the trie's database. -// Nodes are stored with their sha3 hash as the key. -// -// Committing flushes nodes from memory. 
Subsequent Get calls will load nodes -// from the database. -func (t *SecureTrie) Commit(onleaf LeafCallback) (root common.Hash, err error) { - // Write all the pre-images to the actual disk database - if len(t.getSecKeyCache()) > 0 { - t.trie.db.lock.Lock() - for hk, key := range t.secKeyCache { - t.trie.db.insertPreimage(common.BytesToHash([]byte(hk)), key) - } - t.trie.db.lock.Unlock() - - t.secKeyCache = make(map[string][]byte) - } - // Commit the trie to its intermediate node database - return t.trie.Commit(onleaf) -} - -// Hash returns the root hash of SecureTrie. It does not write to the -// database and can be used even if the trie doesn't have one. -func (t *SecureTrie) Hash() common.Hash { - return t.trie.Hash() -} - -// Copy returns a copy of SecureTrie. -func (t *SecureTrie) Copy() *SecureTrie { - cpy := *t - return &cpy -} - -// NodeIterator returns an iterator that returns nodes of the underlying trie. Iteration -// starts at the key after the given start key. -func (t *SecureTrie) NodeIterator(start []byte) NodeIterator { - return t.trie.NodeIterator(start) -} - -// hashKey returns the hash of key as an ephemeral buffer. -// The caller must not hold onto the return value because it will become -// invalid on the next call to hashKey or secKey. -func (t *SecureTrie) hashKey(key []byte) []byte { - h := newHasher(false) - h.sha.Reset() - h.sha.Write(key) - buf := h.sha.Sum(t.hashKeyBuf[:0]) - returnHasherToPool(h) - return buf -} - -// getSecKeyCache returns the current secure key cache, creating a new one if -// ownership changed (i.e. the current secure trie is a copy of another owning -// the actual cache). 
-func (t *SecureTrie) getSecKeyCache() map[string][]byte { - if t != t.secKeyCacheOwner { - t.secKeyCacheOwner = t - t.secKeyCache = make(map[string][]byte) - } - return t.secKeyCache -} diff --git a/vendor/github.com/ethereum/go-ethereum/trie/sync.go b/vendor/github.com/ethereum/go-ethereum/trie/sync.go deleted file mode 100644 index e5a0c17..0000000 --- a/vendor/github.com/ethereum/go-ethereum/trie/sync.go +++ /dev/null @@ -1,342 +0,0 @@ -// Copyright 2015 The go-ethereum Authors -// This file is part of the go-ethereum library. -// -// The go-ethereum library is free software: you can redistribute it and/or modify -// it under the terms of the GNU Lesser General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// The go-ethereum library is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Lesser General Public License for more details. -// -// You should have received a copy of the GNU Lesser General Public License -// along with the go-ethereum library. If not, see . - -package trie - -import ( - "errors" - "fmt" - - "github.com/ethereum/go-ethereum/common" - "github.com/ethereum/go-ethereum/common/prque" - "github.com/ethereum/go-ethereum/ethdb" -) - -// ErrNotRequested is returned by the trie sync when it's requested to process a -// node it did not request. -var ErrNotRequested = errors.New("not requested") - -// ErrAlreadyProcessed is returned by the trie sync when it's requested to process a -// node it already processed previously. -var ErrAlreadyProcessed = errors.New("already processed") - -// request represents a scheduled or already in-flight state retrieval request. 
-type request struct { - hash common.Hash // Hash of the node data content to retrieve - data []byte // Data content of the node, cached until all subtrees complete - raw bool // Whether this is a raw entry (code) or a trie node - - parents []*request // Parent state nodes referencing this entry (notify all upon completion) - depth int // Depth level within the trie the node is located to prioritise DFS - deps int // Number of dependencies before allowed to commit this node - - callback LeafCallback // Callback to invoke if a leaf node it reached on this branch -} - -// SyncResult is a simple list to return missing nodes along with their request -// hashes. -type SyncResult struct { - Hash common.Hash // Hash of the originally unknown trie node - Data []byte // Data content of the retrieved node -} - -// syncMemBatch is an in-memory buffer of successfully downloaded but not yet -// persisted data items. -type syncMemBatch struct { - batch map[common.Hash][]byte // In-memory membatch of recently completed items -} - -// newSyncMemBatch allocates a new memory-buffer for not-yet persisted trie nodes. -func newSyncMemBatch() *syncMemBatch { - return &syncMemBatch{ - batch: make(map[common.Hash][]byte), - } -} - -// Sync is the main state trie synchronisation scheduler, which provides yet -// unknown trie hashes to retrieve, accepts node data associated with said hashes -// and reconstructs the trie step by step until all is done. -type Sync struct { - database ethdb.KeyValueReader // Persistent database to check for existing entries - membatch *syncMemBatch // Memory buffer to avoid frequent database writes - requests map[common.Hash]*request // Pending requests pertaining to a key hash - queue *prque.Prque // Priority queue with the pending requests - bloom *SyncBloom // Bloom filter for fast node existence checks -} - -// NewSync creates a new trie data download scheduler. 
-func NewSync(root common.Hash, database ethdb.KeyValueReader, callback LeafCallback, bloom *SyncBloom) *Sync { - ts := &Sync{ - database: database, - membatch: newSyncMemBatch(), - requests: make(map[common.Hash]*request), - queue: prque.New(nil), - bloom: bloom, - } - ts.AddSubTrie(root, 0, common.Hash{}, callback) - return ts -} - -// AddSubTrie registers a new trie to the sync code, rooted at the designated parent. -func (s *Sync) AddSubTrie(root common.Hash, depth int, parent common.Hash, callback LeafCallback) { - // Short circuit if the trie is empty or already known - if root == emptyRoot { - return - } - if _, ok := s.membatch.batch[root]; ok { - return - } - if s.bloom.Contains(root[:]) { - // Bloom filter says this might be a duplicate, double check - blob, _ := s.database.Get(root[:]) - if local, err := decodeNode(root[:], blob); local != nil && err == nil { - return - } - // False positive, bump fault meter - bloomFaultMeter.Mark(1) - } - // Assemble the new sub-trie sync request - req := &request{ - hash: root, - depth: depth, - callback: callback, - } - // If this sub-trie has a designated parent, link them together - if parent != (common.Hash{}) { - ancestor := s.requests[parent] - if ancestor == nil { - panic(fmt.Sprintf("sub-trie ancestor not found: %x", parent)) - } - ancestor.deps++ - req.parents = append(req.parents, ancestor) - } - s.schedule(req) -} - -// AddRawEntry schedules the direct retrieval of a state entry that should not be -// interpreted as a trie node, but rather accepted and stored into the database -// as is. This method's goal is to support misc state metadata retrievals (e.g. -// contract code). 
-func (s *Sync) AddRawEntry(hash common.Hash, depth int, parent common.Hash) { - // Short circuit if the entry is empty or already known - if hash == emptyState { - return - } - if _, ok := s.membatch.batch[hash]; ok { - return - } - if s.bloom.Contains(hash[:]) { - // Bloom filter says this might be a duplicate, double check - if ok, _ := s.database.Has(hash[:]); ok { - return - } - // False positive, bump fault meter - bloomFaultMeter.Mark(1) - } - // Assemble the new sub-trie sync request - req := &request{ - hash: hash, - raw: true, - depth: depth, - } - // If this sub-trie has a designated parent, link them together - if parent != (common.Hash{}) { - ancestor := s.requests[parent] - if ancestor == nil { - panic(fmt.Sprintf("raw-entry ancestor not found: %x", parent)) - } - ancestor.deps++ - req.parents = append(req.parents, ancestor) - } - s.schedule(req) -} - -// Missing retrieves the known missing nodes from the trie for retrieval. -func (s *Sync) Missing(max int) []common.Hash { - var requests []common.Hash - for !s.queue.Empty() && (max == 0 || len(requests) < max) { - requests = append(requests, s.queue.PopItem().(common.Hash)) - } - return requests -} - -// Process injects a batch of retrieved trie nodes data, returning if something -// was committed to the database and also the index of an entry if processing of -// it failed. 
-func (s *Sync) Process(results []SyncResult) (bool, int, error) { - committed := false - - for i, item := range results { - // If the item was not requested, bail out - request := s.requests[item.Hash] - if request == nil { - return committed, i, ErrNotRequested - } - if request.data != nil { - return committed, i, ErrAlreadyProcessed - } - // If the item is a raw entry request, commit directly - if request.raw { - request.data = item.Data - s.commit(request) - committed = true - continue - } - // Decode the node data content and update the request - node, err := decodeNode(item.Hash[:], item.Data) - if err != nil { - return committed, i, err - } - request.data = item.Data - - // Create and schedule a request for all the children nodes - requests, err := s.children(request, node) - if err != nil { - return committed, i, err - } - if len(requests) == 0 && request.deps == 0 { - s.commit(request) - committed = true - continue - } - request.deps += len(requests) - for _, child := range requests { - s.schedule(child) - } - } - return committed, 0, nil -} - -// Commit flushes the data stored in the internal membatch out to persistent -// storage, returning any occurred error. -func (s *Sync) Commit(dbw ethdb.Batch) error { - // Dump the membatch into a database dbw - for key, value := range s.membatch.batch { - if err := dbw.Put(key[:], value); err != nil { - return err - } - s.bloom.Add(key[:]) - } - // Drop the membatch data and return - s.membatch = newSyncMemBatch() - return nil -} - -// Pending returns the number of state entries currently pending for download. -func (s *Sync) Pending() int { - return len(s.requests) -} - -// schedule inserts a new state retrieval request into the fetch queue. If there -// is already a pending request for this node, the new request will be discarded -// and only a parent reference added to the old one. 
-func (s *Sync) schedule(req *request) { - // If we're already requesting this node, add a new reference and stop - if old, ok := s.requests[req.hash]; ok { - old.parents = append(old.parents, req.parents...) - return - } - // Schedule the request for future retrieval - s.queue.Push(req.hash, int64(req.depth)) - s.requests[req.hash] = req -} - -// children retrieves all the missing children of a state trie entry for future -// retrieval scheduling. -func (s *Sync) children(req *request, object node) ([]*request, error) { - // Gather all the children of the node, irrelevant whether known or not - type child struct { - node node - depth int - } - var children []child - - switch node := (object).(type) { - case *shortNode: - children = []child{{ - node: node.Val, - depth: req.depth + len(node.Key), - }} - case *fullNode: - for i := 0; i < 17; i++ { - if node.Children[i] != nil { - children = append(children, child{ - node: node.Children[i], - depth: req.depth + 1, - }) - } - } - default: - panic(fmt.Sprintf("unknown node: %+v", node)) - } - // Iterate over the children, and request all unknown ones - requests := make([]*request, 0, len(children)) - for _, child := range children { - // Notify any external watcher of a new key/value node - if req.callback != nil { - if node, ok := (child.node).(valueNode); ok { - if err := req.callback(node, req.hash); err != nil { - return nil, err - } - } - } - // If the child references another node, resolve or schedule - if node, ok := (child.node).(hashNode); ok { - // Try to resolve the node from the local database - hash := common.BytesToHash(node) - if _, ok := s.membatch.batch[hash]; ok { - continue - } - if s.bloom.Contains(node) { - // Bloom filter says this might be a duplicate, double check - if ok, _ := s.database.Has(node); ok { - continue - } - // False positive, bump fault meter - bloomFaultMeter.Mark(1) - } - // Locally unknown node, schedule for retrieval - requests = append(requests, &request{ - hash: hash, - 
parents: []*request{req}, - depth: child.depth, - callback: req.callback, - }) - } - } - return requests, nil -} - -// commit finalizes a retrieval request and stores it into the membatch. If any -// of the referencing parent requests complete due to this commit, they are also -// committed themselves. -func (s *Sync) commit(req *request) (err error) { - // Write the node content to the membatch - s.membatch.batch[req.hash] = req.data - - delete(s.requests, req.hash) - - // Check all parents for completion - for _, parent := range req.parents { - parent.deps-- - if parent.deps == 0 { - if err := s.commit(parent); err != nil { - return err - } - } - } - return nil -} diff --git a/vendor/github.com/ethereum/go-ethereum/trie/sync_bloom.go b/vendor/github.com/ethereum/go-ethereum/trie/sync_bloom.go deleted file mode 100644 index 2182d1c..0000000 --- a/vendor/github.com/ethereum/go-ethereum/trie/sync_bloom.go +++ /dev/null @@ -1,207 +0,0 @@ -// Copyright 2019 The go-ethereum Authors -// This file is part of the go-ethereum library. -// -// The go-ethereum library is free software: you can redistribute it and/or modify -// it under the terms of the GNU Lesser General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// The go-ethereum library is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Lesser General Public License for more details. -// -// You should have received a copy of the GNU Lesser General Public License -// along with the go-ethereum library. If not, see . 
- -package trie - -import ( - "encoding/binary" - "fmt" - "math" - "sync" - "sync/atomic" - "time" - - "github.com/ethereum/go-ethereum/common" - "github.com/ethereum/go-ethereum/ethdb" - "github.com/ethereum/go-ethereum/log" - "github.com/ethereum/go-ethereum/metrics" - "github.com/steakknife/bloomfilter" -) - -var ( - bloomAddMeter = metrics.NewRegisteredMeter("trie/bloom/add", nil) - bloomLoadMeter = metrics.NewRegisteredMeter("trie/bloom/load", nil) - bloomTestMeter = metrics.NewRegisteredMeter("trie/bloom/test", nil) - bloomMissMeter = metrics.NewRegisteredMeter("trie/bloom/miss", nil) - bloomFaultMeter = metrics.NewRegisteredMeter("trie/bloom/fault", nil) - bloomErrorGauge = metrics.NewRegisteredGauge("trie/bloom/error", nil) -) - -// syncBloomHasher is a wrapper around a byte blob to satisfy the interface API -// requirements of the bloom library used. It's used to convert a trie hash into -// a 64 bit mini hash. -type syncBloomHasher []byte - -func (f syncBloomHasher) Write(p []byte) (n int, err error) { panic("not implemented") } -func (f syncBloomHasher) Sum(b []byte) []byte { panic("not implemented") } -func (f syncBloomHasher) Reset() { panic("not implemented") } -func (f syncBloomHasher) BlockSize() int { panic("not implemented") } -func (f syncBloomHasher) Size() int { return 8 } -func (f syncBloomHasher) Sum64() uint64 { return binary.BigEndian.Uint64(f) } - -// SyncBloom is a bloom filter used during fast sync to quickly decide if a trie -// node already exists on disk or not. It self populates from the provided disk -// database on creation in a background thread and will only start returning live -// results once that's finished. -type SyncBloom struct { - bloom *bloomfilter.Filter - inited uint32 - closer sync.Once - closed uint32 - pend sync.WaitGroup -} - -// NewSyncBloom creates a new bloom filter of the given size (in megabytes) and -// initializes it from the database. The bloom is hard coded to use 3 filters. 
-func NewSyncBloom(memory uint64, database ethdb.Iteratee) *SyncBloom { - // Create the bloom filter to track known trie nodes - bloom, err := bloomfilter.New(memory*1024*1024*8, 3) - if err != nil { - panic(fmt.Sprintf("failed to create bloom: %v", err)) - } - log.Info("Allocated fast sync bloom", "size", common.StorageSize(memory*1024*1024)) - - // Assemble the fast sync bloom and init it from previous sessions - b := &SyncBloom{ - bloom: bloom, - } - b.pend.Add(2) - go func() { - defer b.pend.Done() - b.init(database) - }() - go func() { - defer b.pend.Done() - b.meter() - }() - return b -} - -// init iterates over the database, pushing every trie hash into the bloom filter. -func (b *SyncBloom) init(database ethdb.Iteratee) { - // Iterate over the database, but restart every now and again to avoid holding - // a persistent snapshot since fast sync can push a ton of data concurrently, - // bloating the disk. - // - // Note, this is fine, because everything inserted into leveldb by fast sync is - // also pushed into the bloom directly, so we're not missing anything when the - // iterator is swapped out for a new one. 
- it := database.NewIterator() - - var ( - start = time.Now() - swap = time.Now() - ) - for it.Next() && atomic.LoadUint32(&b.closed) == 0 { - // If the database entry is a trie node, add it to the bloom - if key := it.Key(); len(key) == common.HashLength { - b.bloom.Add(syncBloomHasher(key)) - bloomLoadMeter.Mark(1) - } - // If enough time elapsed since the last iterator swap, restart - if time.Since(swap) > 8*time.Second { - key := common.CopyBytes(it.Key()) - - it.Release() - it = database.NewIteratorWithStart(key) - - log.Info("Initializing fast sync bloom", "items", b.bloom.N(), "errorrate", b.errorRate(), "elapsed", common.PrettyDuration(time.Since(start))) - swap = time.Now() - } - } - it.Release() - - // Mark the bloom filter inited and return - log.Info("Initialized fast sync bloom", "items", b.bloom.N(), "errorrate", b.errorRate(), "elapsed", common.PrettyDuration(time.Since(start))) - atomic.StoreUint32(&b.inited, 1) -} - -// meter periodically recalculates the false positive error rate of the bloom -// filter and reports it in a metric. -func (b *SyncBloom) meter() { - for { - // Report the current error ration. No floats, lame, scale it up. - bloomErrorGauge.Update(int64(b.errorRate() * 100000)) - - // Wait one second, but check termination more frequently - for i := 0; i < 10; i++ { - if atomic.LoadUint32(&b.closed) == 1 { - return - } - time.Sleep(100 * time.Millisecond) - } - } -} - -// Close terminates any background initializer still running and releases all the -// memory allocated for the bloom. 
-func (b *SyncBloom) Close() error { - b.closer.Do(func() { - // Ensure the initializer is stopped - atomic.StoreUint32(&b.closed, 1) - b.pend.Wait() - - // Wipe the bloom, but mark it "uninited" just in case someone attempts an access - log.Info("Deallocated fast sync bloom", "items", b.bloom.N(), "errorrate", b.errorRate()) - - atomic.StoreUint32(&b.inited, 0) - b.bloom = nil - }) - return nil -} - -// Add inserts a new trie node hash into the bloom filter. -func (b *SyncBloom) Add(hash []byte) { - if atomic.LoadUint32(&b.closed) == 1 { - return - } - b.bloom.Add(syncBloomHasher(hash)) - bloomAddMeter.Mark(1) -} - -// Contains tests if the bloom filter contains the given hash: -// - false: the bloom definitely does not contain hash -// - true: the bloom maybe contains hash -// -// While the bloom is being initialized, any query will return true. -func (b *SyncBloom) Contains(hash []byte) bool { - bloomTestMeter.Mark(1) - if atomic.LoadUint32(&b.inited) == 0 { - // We didn't load all the trie nodes from the previous run of Geth yet. As - // such, we can't say for sure if a hash is not present for anything. Until - // the init is done, we're faking "possible presence" for everything. - return true - } - // Bloom initialized, check the real one and report any successful misses - maybe := b.bloom.Contains(syncBloomHasher(hash)) - if !maybe { - bloomMissMeter.Mark(1) - } - return maybe -} - -// errorRate calculates the probability of a random containment test returning a -// false positive. -// -// We're calculating it ourselves because the bloom library we used missed a -// parentheses in the formula and calculates it wrong. And it's discontinued... 
-func (b *SyncBloom) errorRate() float64 { - k := float64(b.bloom.K()) - n := float64(b.bloom.N()) - m := float64(b.bloom.M()) - - return math.Pow(1.0-math.Exp((-k)*(n+0.5)/(m-1)), k) -} diff --git a/vendor/github.com/ethereum/go-ethereum/trie/trie.go b/vendor/github.com/ethereum/go-ethereum/trie/trie.go deleted file mode 100644 index 78e2eff..0000000 --- a/vendor/github.com/ethereum/go-ethereum/trie/trie.go +++ /dev/null @@ -1,475 +0,0 @@ -// Copyright 2014 The go-ethereum Authors -// This file is part of the go-ethereum library. -// -// The go-ethereum library is free software: you can redistribute it and/or modify -// it under the terms of the GNU Lesser General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// The go-ethereum library is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Lesser General Public License for more details. -// -// You should have received a copy of the GNU Lesser General Public License -// along with the go-ethereum library. If not, see . - -// Package trie implements Merkle Patricia Tries. -package trie - -import ( - "bytes" - "fmt" - "sync" - - "github.com/ethereum/go-ethereum/common" - "github.com/ethereum/go-ethereum/crypto" - "github.com/ethereum/go-ethereum/log" -) - -var ( - // emptyRoot is the known root hash of an empty trie. - emptyRoot = common.HexToHash("56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421") - - // emptyState is the known hash of an empty state trie entry. - emptyState = crypto.Keccak256Hash(nil) -) - -// LeafCallback is a callback type invoked when a trie operation reaches a leaf -// node. It's used by state sync and commit to allow handling external references -// between account and storage tries. 
-type LeafCallback func(leaf []byte, parent common.Hash) error - -// Trie is a Merkle Patricia Trie. -// The zero value is an empty trie with no database. -// Use New to create a trie that sits on top of a database. -// -// Trie is not safe for concurrent use. -type Trie struct { - db *Database - root node - // Keep track of the number leafs which have been inserted since the last - // hashing operation. This number will not directly map to the number of - // actually unhashed nodes - unhashed int -} - -// newFlag returns the cache flag value for a newly created node. -func (t *Trie) newFlag() nodeFlag { - return nodeFlag{dirty: true} -} - -// New creates a trie with an existing root node from db. -// -// If root is the zero hash or the sha3 hash of an empty string, the -// trie is initially empty and does not require a database. Otherwise, -// New will panic if db is nil and returns a MissingNodeError if root does -// not exist in the database. Accessing the trie loads nodes from db on demand. -func New(root common.Hash, db *Database) (*Trie, error) { - if db == nil { - panic("trie.New called without a database") - } - trie := &Trie{ - db: db, - } - if root != (common.Hash{}) && root != emptyRoot { - rootnode, err := trie.resolveHash(root[:], nil) - if err != nil { - return nil, err - } - trie.root = rootnode - } - return trie, nil -} - -// NodeIterator returns an iterator that returns nodes of the trie. Iteration starts at -// the key after the given start key. -func (t *Trie) NodeIterator(start []byte) NodeIterator { - return newNodeIterator(t, start) -} - -// Get returns the value for key stored in the trie. -// The value bytes must not be modified by the caller. -func (t *Trie) Get(key []byte) []byte { - res, err := t.TryGet(key) - if err != nil { - log.Error(fmt.Sprintf("Unhandled trie error: %v", err)) - } - return res -} - -// TryGet returns the value for key stored in the trie. -// The value bytes must not be modified by the caller. 
-// If a node was not found in the database, a MissingNodeError is returned. -func (t *Trie) TryGet(key []byte) ([]byte, error) { - key = keybytesToHex(key) - value, newroot, didResolve, err := t.tryGet(t.root, key, 0) - if err == nil && didResolve { - t.root = newroot - } - return value, err -} - -func (t *Trie) tryGet(origNode node, key []byte, pos int) (value []byte, newnode node, didResolve bool, err error) { - switch n := (origNode).(type) { - case nil: - return nil, nil, false, nil - case valueNode: - return n, n, false, nil - case *shortNode: - if len(key)-pos < len(n.Key) || !bytes.Equal(n.Key, key[pos:pos+len(n.Key)]) { - // key not found in trie - return nil, n, false, nil - } - value, newnode, didResolve, err = t.tryGet(n.Val, key, pos+len(n.Key)) - if err == nil && didResolve { - n = n.copy() - n.Val = newnode - } - return value, n, didResolve, err - case *fullNode: - value, newnode, didResolve, err = t.tryGet(n.Children[key[pos]], key, pos+1) - if err == nil && didResolve { - n = n.copy() - n.Children[key[pos]] = newnode - } - return value, n, didResolve, err - case hashNode: - child, err := t.resolveHash(n, key[:pos]) - if err != nil { - return nil, n, true, err - } - value, newnode, _, err := t.tryGet(child, key, pos) - return value, newnode, true, err - default: - panic(fmt.Sprintf("%T: invalid node: %v", origNode, origNode)) - } -} - -// Update associates key with value in the trie. Subsequent calls to -// Get will return value. If value has length zero, any existing value -// is deleted from the trie and calls to Get will return nil. -// -// The value bytes must not be modified by the caller while they are -// stored in the trie. -func (t *Trie) Update(key, value []byte) { - if err := t.TryUpdate(key, value); err != nil { - log.Error(fmt.Sprintf("Unhandled trie error: %v", err)) - } -} - -// TryUpdate associates key with value in the trie. Subsequent calls to -// Get will return value. 
If value has length zero, any existing value -// is deleted from the trie and calls to Get will return nil. -// -// The value bytes must not be modified by the caller while they are -// stored in the trie. -// -// If a node was not found in the database, a MissingNodeError is returned. -func (t *Trie) TryUpdate(key, value []byte) error { - t.unhashed++ - k := keybytesToHex(key) - if len(value) != 0 { - _, n, err := t.insert(t.root, nil, k, valueNode(value)) - if err != nil { - return err - } - t.root = n - } else { - _, n, err := t.delete(t.root, nil, k) - if err != nil { - return err - } - t.root = n - } - return nil -} - -func (t *Trie) insert(n node, prefix, key []byte, value node) (bool, node, error) { - if len(key) == 0 { - if v, ok := n.(valueNode); ok { - return !bytes.Equal(v, value.(valueNode)), value, nil - } - return true, value, nil - } - switch n := n.(type) { - case *shortNode: - matchlen := prefixLen(key, n.Key) - // If the whole key matches, keep this short node as is - // and only update the value. - if matchlen == len(n.Key) { - dirty, nn, err := t.insert(n.Val, append(prefix, key[:matchlen]...), key[matchlen:], value) - if !dirty || err != nil { - return false, n, err - } - return true, &shortNode{n.Key, nn, t.newFlag()}, nil - } - // Otherwise branch out at the index where they differ. - branch := &fullNode{flags: t.newFlag()} - var err error - _, branch.Children[n.Key[matchlen]], err = t.insert(nil, append(prefix, n.Key[:matchlen+1]...), n.Key[matchlen+1:], n.Val) - if err != nil { - return false, nil, err - } - _, branch.Children[key[matchlen]], err = t.insert(nil, append(prefix, key[:matchlen+1]...), key[matchlen+1:], value) - if err != nil { - return false, nil, err - } - // Replace this shortNode with the branch if it occurs at index 0. - if matchlen == 0 { - return true, branch, nil - } - // Otherwise, replace it with a short node leading up to the branch. 
- return true, &shortNode{key[:matchlen], branch, t.newFlag()}, nil - - case *fullNode: - dirty, nn, err := t.insert(n.Children[key[0]], append(prefix, key[0]), key[1:], value) - if !dirty || err != nil { - return false, n, err - } - n = n.copy() - n.flags = t.newFlag() - n.Children[key[0]] = nn - return true, n, nil - - case nil: - return true, &shortNode{key, value, t.newFlag()}, nil - - case hashNode: - // We've hit a part of the trie that isn't loaded yet. Load - // the node and insert into it. This leaves all child nodes on - // the path to the value in the trie. - rn, err := t.resolveHash(n, prefix) - if err != nil { - return false, nil, err - } - dirty, nn, err := t.insert(rn, prefix, key, value) - if !dirty || err != nil { - return false, rn, err - } - return true, nn, nil - - default: - panic(fmt.Sprintf("%T: invalid node: %v", n, n)) - } -} - -// Delete removes any existing value for key from the trie. -func (t *Trie) Delete(key []byte) { - if err := t.TryDelete(key); err != nil { - log.Error(fmt.Sprintf("Unhandled trie error: %v", err)) - } -} - -// TryDelete removes any existing value for key from the trie. -// If a node was not found in the database, a MissingNodeError is returned. -func (t *Trie) TryDelete(key []byte) error { - t.unhashed++ - k := keybytesToHex(key) - _, n, err := t.delete(t.root, nil, k) - if err != nil { - return err - } - t.root = n - return nil -} - -// delete returns the new root of the trie with key deleted. -// It reduces the trie to minimal form by simplifying -// nodes on the way up after deleting recursively. -func (t *Trie) delete(n node, prefix, key []byte) (bool, node, error) { - switch n := n.(type) { - case *shortNode: - matchlen := prefixLen(key, n.Key) - if matchlen < len(n.Key) { - return false, n, nil // don't replace n on mismatch - } - if matchlen == len(key) { - return true, nil, nil // remove n entirely for whole matches - } - // The key is longer than n.Key. Remove the remaining suffix - // from the subtrie. 
Child can never be nil here since the - // subtrie must contain at least two other values with keys - // longer than n.Key. - dirty, child, err := t.delete(n.Val, append(prefix, key[:len(n.Key)]...), key[len(n.Key):]) - if !dirty || err != nil { - return false, n, err - } - switch child := child.(type) { - case *shortNode: - // Deleting from the subtrie reduced it to another - // short node. Merge the nodes to avoid creating a - // shortNode{..., shortNode{...}}. Use concat (which - // always creates a new slice) instead of append to - // avoid modifying n.Key since it might be shared with - // other nodes. - return true, &shortNode{concat(n.Key, child.Key...), child.Val, t.newFlag()}, nil - default: - return true, &shortNode{n.Key, child, t.newFlag()}, nil - } - - case *fullNode: - dirty, nn, err := t.delete(n.Children[key[0]], append(prefix, key[0]), key[1:]) - if !dirty || err != nil { - return false, n, err - } - n = n.copy() - n.flags = t.newFlag() - n.Children[key[0]] = nn - - // Check how many non-nil entries are left after deleting and - // reduce the full node to a short node if only one entry is - // left. Since n must've contained at least two children - // before deletion (otherwise it would not be a full node) n - // can never be reduced to nil. - // - // When the loop is done, pos contains the index of the single - // value that is left in n or -2 if n contains at least two - // values. - pos := -1 - for i, cld := range &n.Children { - if cld != nil { - if pos == -1 { - pos = i - } else { - pos = -2 - break - } - } - } - if pos >= 0 { - if pos != 16 { - // If the remaining entry is a short node, it replaces - // n and its key gets the missing nibble tacked to the - // front. This avoids creating an invalid - // shortNode{..., shortNode{...}}. Since the entry - // might not be loaded yet, resolve it just for this - // check. 
- cnode, err := t.resolve(n.Children[pos], prefix) - if err != nil { - return false, nil, err - } - if cnode, ok := cnode.(*shortNode); ok { - k := append([]byte{byte(pos)}, cnode.Key...) - return true, &shortNode{k, cnode.Val, t.newFlag()}, nil - } - } - // Otherwise, n is replaced by a one-nibble short node - // containing the child. - return true, &shortNode{[]byte{byte(pos)}, n.Children[pos], t.newFlag()}, nil - } - // n still contains at least two values and cannot be reduced. - return true, n, nil - - case valueNode: - return true, nil, nil - - case nil: - return false, nil, nil - - case hashNode: - // We've hit a part of the trie that isn't loaded yet. Load - // the node and delete from it. This leaves all child nodes on - // the path to the value in the trie. - rn, err := t.resolveHash(n, prefix) - if err != nil { - return false, nil, err - } - dirty, nn, err := t.delete(rn, prefix, key) - if !dirty || err != nil { - return false, rn, err - } - return true, nn, nil - - default: - panic(fmt.Sprintf("%T: invalid node: %v (%v)", n, n, key)) - } -} - -func concat(s1 []byte, s2 ...byte) []byte { - r := make([]byte, len(s1)+len(s2)) - copy(r, s1) - copy(r[len(s1):], s2) - return r -} - -func (t *Trie) resolve(n node, prefix []byte) (node, error) { - if n, ok := n.(hashNode); ok { - return t.resolveHash(n, prefix) - } - return n, nil -} - -func (t *Trie) resolveHash(n hashNode, prefix []byte) (node, error) { - hash := common.BytesToHash(n) - if node := t.db.node(hash); node != nil { - return node, nil - } - return nil, &MissingNodeError{NodeHash: hash, Path: prefix} -} - -// Hash returns the root hash of the trie. It does not write to the -// database and can be used even if the trie doesn't have one. 
-func (t *Trie) Hash() common.Hash { - hash, cached, _ := t.hashRoot(nil) - t.root = cached - return common.BytesToHash(hash.(hashNode)) -} - -// Commit writes all nodes to the trie's memory database, tracking the internal -// and external (for account tries) references. -func (t *Trie) Commit(onleaf LeafCallback) (root common.Hash, err error) { - if t.db == nil { - panic("commit called on trie with nil database") - } - if t.root == nil { - return emptyRoot, nil - } - rootHash := t.Hash() - h := newCommitter() - defer returnCommitterToPool(h) - // Do a quick check if we really need to commit, before we spin - // up goroutines. This can happen e.g. if we load a trie for reading storage - // values, but don't write to it. - if !h.commitNeeded(t.root) { - return rootHash, nil - } - var wg sync.WaitGroup - if onleaf != nil { - h.onleaf = onleaf - h.leafCh = make(chan *leaf, leafChanSize) - wg.Add(1) - go func() { - defer wg.Done() - h.commitLoop(t.db) - }() - } - var newRoot hashNode - newRoot, err = h.Commit(t.root, t.db) - if onleaf != nil { - // The leafch is created in newCommitter if there was an onleaf callback - // provided. The commitLoop only _reads_ from it, and the commit - // operation was the sole writer. Therefore, it's safe to close this - // channel here. 
- close(h.leafCh) - wg.Wait() - } - if err != nil { - return common.Hash{}, err - } - t.root = newRoot - return rootHash, nil -} - -// hashRoot calculates the root hash of the given trie -func (t *Trie) hashRoot(db *Database) (node, node, error) { - if t.root == nil { - return hashNode(emptyRoot.Bytes()), nil, nil - } - // If the number of changes is below 100, we let one thread handle it - h := newHasher(t.unhashed >= 100) - defer returnHasherToPool(h) - hashed, cached := h.hash(t.root, true) - t.unhashed = 0 - return hashed, cached, nil -} diff --git a/vendor/github.com/go-ole/go-ole/.travis.yml b/vendor/github.com/go-ole/go-ole/.travis.yml deleted file mode 100644 index 0c2c02b..0000000 --- a/vendor/github.com/go-ole/go-ole/.travis.yml +++ /dev/null @@ -1,9 +0,0 @@ -language: go -sudo: false - -go: - - 1.1 - - 1.2 - - 1.3 - - 1.4 - - tip diff --git a/vendor/github.com/go-ole/go-ole/ChangeLog.md b/vendor/github.com/go-ole/go-ole/ChangeLog.md deleted file mode 100644 index 4ba6a8c..0000000 --- a/vendor/github.com/go-ole/go-ole/ChangeLog.md +++ /dev/null @@ -1,49 +0,0 @@ -# Version 1.x.x - -* **Add more test cases and reference new test COM server project.** (Placeholder for future additions) - -# Version 1.2.0-alphaX - -**Minimum supported version is now Go 1.4. Go 1.1 support is deprecated, but should still build.** - - * Added CI configuration for Travis-CI and AppVeyor. - * Added test InterfaceID and ClassID for the COM Test Server project. - * Added more inline documentation (#83). - * Added IEnumVARIANT implementation (#88). - * Added IEnumVARIANT test cases (#99, #100, #101). - * Added support for retrieving `time.Time` from VARIANT (#92). - * Added test case for IUnknown (#64). - * Added test case for IDispatch (#64). - * Added test cases for scalar variants (#64, #76). - -# Version 1.1.1 - - * Fixes for Linux build. - * Fixes for Windows build. - -# Version 1.1.0 - -The change to provide building on all platforms is a new feature. 
The increase in minor version reflects that and allows those who wish to stay on 1.0.x to continue to do so. Support for 1.0.x will be limited to bug fixes. - - * Move GUID out of variables.go into its own file to make new documentation available. - * Move OleError out of ole.go into its own file to make new documentation available. - * Add documentation to utility functions. - * Add documentation to variant receiver functions. - * Add documentation to ole structures. - * Make variant available to other systems outside of Windows. - * Make OLE structures available to other systems outside of Windows. - -## New Features - - * Library should now be built on all platforms supported by Go. Library will NOOP on any platform that is not Windows. - * More functions are now documented and available on godoc.org. - -# Version 1.0.1 - - 1. Fix package references from repository location change. - -# Version 1.0.0 - -This version is stable enough for use. The COM API is still incomplete, but provides enough functionality for accessing COM servers using IDispatch interface. - -There is no changelog for this version. Check commits for history. 
diff --git a/vendor/github.com/go-ole/go-ole/LICENSE b/vendor/github.com/go-ole/go-ole/LICENSE deleted file mode 100644 index 623ec06..0000000 --- a/vendor/github.com/go-ole/go-ole/LICENSE +++ /dev/null @@ -1,21 +0,0 @@ -The MIT License (MIT) - -Copyright © 2013-2017 Yasuhiro Matsumoto, - -Permission is hereby granted, free of charge, to any person obtaining a copy of -this software and associated documentation files (the “Software”), to deal in -the Software without restriction, including without limitation the rights to -use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies -of the Software, and to permit persons to whom the Software is furnished to do -so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. 
diff --git a/vendor/github.com/go-ole/go-ole/README.md b/vendor/github.com/go-ole/go-ole/README.md deleted file mode 100644 index 0ea9db3..0000000 --- a/vendor/github.com/go-ole/go-ole/README.md +++ /dev/null @@ -1,46 +0,0 @@ -#Go OLE - -[![Build status](https://ci.appveyor.com/api/projects/status/qr0u2sf7q43us9fj?svg=true)](https://ci.appveyor.com/project/jacobsantos/go-ole-jgs28) -[![Build Status](https://travis-ci.org/go-ole/go-ole.svg?branch=master)](https://travis-ci.org/go-ole/go-ole) -[![GoDoc](https://godoc.org/github.com/go-ole/go-ole?status.svg)](https://godoc.org/github.com/go-ole/go-ole) - -Go bindings for Windows COM using shared libraries instead of cgo. - -By Yasuhiro Matsumoto. - -## Install - -To experiment with go-ole, you can just compile and run the example program: - -``` -go get github.com/go-ole/go-ole -cd /path/to/go-ole/ -go test - -cd /path/to/go-ole/example/excel -go run excel.go -``` - -## Continuous Integration - -Continuous integration configuration has been added for both Travis-CI and AppVeyor. You will have to add these to your own account for your fork in order for it to run. - -**Travis-CI** - -Travis-CI was added to check builds on Linux to ensure that `go get` works when cross building. Currently, Travis-CI is not used to test cross-building, but this may be changed in the future. It is also not currently possible to test the library on Linux, since COM API is specific to Windows and it is not currently possible to run a COM server on Linux or even connect to a remote COM server. - -**AppVeyor** - -AppVeyor is used to build on Windows using the (in-development) test COM server. It is currently only used to test the build and ensure that the code works on Windows. It will be used to register a COM server and then run the test cases based on the test COM server. - -The tests currently do run and do pass and this should be maintained with commits. 
- -##Versioning - -Go OLE uses [semantic versioning](http://semver.org) for version numbers, which is similar to the version contract of the Go language. Which means that the major version will always maintain backwards compatibility with minor versions. Minor versions will only add new additions and changes. Fixes will always be in patch. - -This contract should allow you to upgrade to new minor and patch versions without breakage or modifications to your existing code. Leave a ticket, if there is breakage, so that it could be fixed. - -##LICENSE - -Under the MIT License: http://mattn.mit-license.org/2013 diff --git a/vendor/github.com/go-ole/go-ole/appveyor.yml b/vendor/github.com/go-ole/go-ole/appveyor.yml deleted file mode 100644 index 0d557ac..0000000 --- a/vendor/github.com/go-ole/go-ole/appveyor.yml +++ /dev/null @@ -1,54 +0,0 @@ -# Notes: -# - Minimal appveyor.yml file is an empty file. All sections are optional. -# - Indent each level of configuration with 2 spaces. Do not use tabs! -# - All section names are case-sensitive. -# - Section names should be unique on each level. 
- -version: "1.3.0.{build}-alpha-{branch}" - -os: Windows Server 2012 R2 - -branches: - only: - - master - - v1.2 - - v1.1 - - v1.0 - -skip_tags: true - -clone_folder: c:\gopath\src\github.com\go-ole\go-ole - -environment: - GOPATH: c:\gopath - matrix: - - GOARCH: amd64 - GOVERSION: 1.5 - GOROOT: c:\go - DOWNLOADPLATFORM: "x64" - -install: - - choco install mingw - - SET PATH=c:\tools\mingw64\bin;%PATH% - # - Download COM Server - - ps: Start-FileDownload "https://github.com/go-ole/test-com-server/releases/download/v1.0.2/test-com-server-${env:DOWNLOADPLATFORM}.zip" - - 7z e test-com-server-%DOWNLOADPLATFORM%.zip -oc:\gopath\src\github.com\go-ole\go-ole > NUL - - c:\gopath\src\github.com\go-ole\go-ole\build\register-assembly.bat - # - set - - go version - - go env - - go get -u golang.org/x/tools/cmd/cover - - go get -u golang.org/x/tools/cmd/godoc - - go get -u golang.org/x/tools/cmd/stringer - -build_script: - - cd c:\gopath\src\github.com\go-ole\go-ole - - go get -v -t ./... - - go build - - go test -v -cover ./... 
- -# disable automatic tests -test: off - -# disable deployment -deploy: off diff --git a/vendor/github.com/go-ole/go-ole/com.go b/vendor/github.com/go-ole/go-ole/com.go deleted file mode 100644 index 75ebbf1..0000000 --- a/vendor/github.com/go-ole/go-ole/com.go +++ /dev/null @@ -1,329 +0,0 @@ -// +build windows - -package ole - -import ( - "errors" - "syscall" - "time" - "unicode/utf16" - "unsafe" -) - -var ( - procCoInitialize, _ = modole32.FindProc("CoInitialize") - procCoInitializeEx, _ = modole32.FindProc("CoInitializeEx") - procCoUninitialize, _ = modole32.FindProc("CoUninitialize") - procCoCreateInstance, _ = modole32.FindProc("CoCreateInstance") - procCoTaskMemFree, _ = modole32.FindProc("CoTaskMemFree") - procCLSIDFromProgID, _ = modole32.FindProc("CLSIDFromProgID") - procCLSIDFromString, _ = modole32.FindProc("CLSIDFromString") - procStringFromCLSID, _ = modole32.FindProc("StringFromCLSID") - procStringFromIID, _ = modole32.FindProc("StringFromIID") - procIIDFromString, _ = modole32.FindProc("IIDFromString") - procGetUserDefaultLCID, _ = modkernel32.FindProc("GetUserDefaultLCID") - procCopyMemory, _ = modkernel32.FindProc("RtlMoveMemory") - procVariantInit, _ = modoleaut32.FindProc("VariantInit") - procVariantClear, _ = modoleaut32.FindProc("VariantClear") - procVariantTimeToSystemTime, _ = modoleaut32.FindProc("VariantTimeToSystemTime") - procSysAllocString, _ = modoleaut32.FindProc("SysAllocString") - procSysAllocStringLen, _ = modoleaut32.FindProc("SysAllocStringLen") - procSysFreeString, _ = modoleaut32.FindProc("SysFreeString") - procSysStringLen, _ = modoleaut32.FindProc("SysStringLen") - procCreateDispTypeInfo, _ = modoleaut32.FindProc("CreateDispTypeInfo") - procCreateStdDispatch, _ = modoleaut32.FindProc("CreateStdDispatch") - procGetActiveObject, _ = modoleaut32.FindProc("GetActiveObject") - - procGetMessageW, _ = moduser32.FindProc("GetMessageW") - procDispatchMessageW, _ = moduser32.FindProc("DispatchMessageW") -) - -// coInitialize 
initializes COM library on current thread. -// -// MSDN documentation suggests that this function should not be called. Call -// CoInitializeEx() instead. The reason has to do with threading and this -// function is only for single-threaded apartments. -// -// That said, most users of the library have gotten away with just this -// function. If you are experiencing threading issues, then use -// CoInitializeEx(). -func coInitialize() (err error) { - // http://msdn.microsoft.com/en-us/library/windows/desktop/ms678543(v=vs.85).aspx - // Suggests that no value should be passed to CoInitialized. - // Could just be Call() since the parameter is optional. <-- Needs testing to be sure. - hr, _, _ := procCoInitialize.Call(uintptr(0)) - if hr != 0 { - err = NewError(hr) - } - return -} - -// coInitializeEx initializes COM library with concurrency model. -func coInitializeEx(coinit uint32) (err error) { - // http://msdn.microsoft.com/en-us/library/windows/desktop/ms695279(v=vs.85).aspx - // Suggests that the first parameter is not only optional but should always be NULL. - hr, _, _ := procCoInitializeEx.Call(uintptr(0), uintptr(coinit)) - if hr != 0 { - err = NewError(hr) - } - return -} - -// CoInitialize initializes COM library on current thread. -// -// MSDN documentation suggests that this function should not be called. Call -// CoInitializeEx() instead. The reason has to do with threading and this -// function is only for single-threaded apartments. -// -// That said, most users of the library have gotten away with just this -// function. If you are experiencing threading issues, then use -// CoInitializeEx(). -func CoInitialize(p uintptr) (err error) { - // p is ignored and won't be used. - // Avoid any variable not used errors. - p = uintptr(0) - return coInitialize() -} - -// CoInitializeEx initializes COM library with concurrency model. -func CoInitializeEx(p uintptr, coinit uint32) (err error) { - // Avoid any variable not used errors. 
- p = uintptr(0) - return coInitializeEx(coinit) -} - -// CoUninitialize uninitializes COM Library. -func CoUninitialize() { - procCoUninitialize.Call() -} - -// CoTaskMemFree frees memory pointer. -func CoTaskMemFree(memptr uintptr) { - procCoTaskMemFree.Call(memptr) -} - -// CLSIDFromProgID retrieves Class Identifier with the given Program Identifier. -// -// The Programmatic Identifier must be registered, because it will be looked up -// in the Windows Registry. The registry entry has the following keys: CLSID, -// Insertable, Protocol and Shell -// (https://msdn.microsoft.com/en-us/library/dd542719(v=vs.85).aspx). -// -// programID identifies the class id with less precision and is not guaranteed -// to be unique. These are usually found in the registry under -// HKEY_LOCAL_MACHINE\SOFTWARE\Classes, usually with the format of -// "Program.Component.Version" with version being optional. -// -// CLSIDFromProgID in Windows API. -func CLSIDFromProgID(progId string) (clsid *GUID, err error) { - var guid GUID - lpszProgID := uintptr(unsafe.Pointer(syscall.StringToUTF16Ptr(progId))) - hr, _, _ := procCLSIDFromProgID.Call(lpszProgID, uintptr(unsafe.Pointer(&guid))) - if hr != 0 { - err = NewError(hr) - } - clsid = &guid - return -} - -// CLSIDFromString retrieves Class ID from string representation. -// -// This is technically the string version of the GUID and will convert the -// string to object. -// -// CLSIDFromString in Windows API. -func CLSIDFromString(str string) (clsid *GUID, err error) { - var guid GUID - lpsz := uintptr(unsafe.Pointer(syscall.StringToUTF16Ptr(str))) - hr, _, _ := procCLSIDFromString.Call(lpsz, uintptr(unsafe.Pointer(&guid))) - if hr != 0 { - err = NewError(hr) - } - clsid = &guid - return -} - -// StringFromCLSID returns GUID formated string from GUID object. 
-func StringFromCLSID(clsid *GUID) (str string, err error) { - var p *uint16 - hr, _, _ := procStringFromCLSID.Call(uintptr(unsafe.Pointer(clsid)), uintptr(unsafe.Pointer(&p))) - if hr != 0 { - err = NewError(hr) - } - str = LpOleStrToString(p) - return -} - -// IIDFromString returns GUID from program ID. -func IIDFromString(progId string) (clsid *GUID, err error) { - var guid GUID - lpsz := uintptr(unsafe.Pointer(syscall.StringToUTF16Ptr(progId))) - hr, _, _ := procIIDFromString.Call(lpsz, uintptr(unsafe.Pointer(&guid))) - if hr != 0 { - err = NewError(hr) - } - clsid = &guid - return -} - -// StringFromIID returns GUID formatted string from GUID object. -func StringFromIID(iid *GUID) (str string, err error) { - var p *uint16 - hr, _, _ := procStringFromIID.Call(uintptr(unsafe.Pointer(iid)), uintptr(unsafe.Pointer(&p))) - if hr != 0 { - err = NewError(hr) - } - str = LpOleStrToString(p) - return -} - -// CreateInstance of single uninitialized object with GUID. -func CreateInstance(clsid *GUID, iid *GUID) (unk *IUnknown, err error) { - if iid == nil { - iid = IID_IUnknown - } - hr, _, _ := procCoCreateInstance.Call( - uintptr(unsafe.Pointer(clsid)), - 0, - CLSCTX_SERVER, - uintptr(unsafe.Pointer(iid)), - uintptr(unsafe.Pointer(&unk))) - if hr != 0 { - err = NewError(hr) - } - return -} - -// GetActiveObject retrieves pointer to active object. -func GetActiveObject(clsid *GUID, iid *GUID) (unk *IUnknown, err error) { - if iid == nil { - iid = IID_IUnknown - } - hr, _, _ := procGetActiveObject.Call( - uintptr(unsafe.Pointer(clsid)), - uintptr(unsafe.Pointer(iid)), - uintptr(unsafe.Pointer(&unk))) - if hr != 0 { - err = NewError(hr) - } - return -} - -// VariantInit initializes variant. -func VariantInit(v *VARIANT) (err error) { - hr, _, _ := procVariantInit.Call(uintptr(unsafe.Pointer(v))) - if hr != 0 { - err = NewError(hr) - } - return -} - -// VariantClear clears value in Variant settings to VT_EMPTY. 
-func VariantClear(v *VARIANT) (err error) { - hr, _, _ := procVariantClear.Call(uintptr(unsafe.Pointer(v))) - if hr != 0 { - err = NewError(hr) - } - return -} - -// SysAllocString allocates memory for string and copies string into memory. -func SysAllocString(v string) (ss *int16) { - pss, _, _ := procSysAllocString.Call(uintptr(unsafe.Pointer(syscall.StringToUTF16Ptr(v)))) - ss = (*int16)(unsafe.Pointer(pss)) - return -} - -// SysAllocStringLen copies up to length of given string returning pointer. -func SysAllocStringLen(v string) (ss *int16) { - utf16 := utf16.Encode([]rune(v + "\x00")) - ptr := &utf16[0] - - pss, _, _ := procSysAllocStringLen.Call(uintptr(unsafe.Pointer(ptr)), uintptr(len(utf16)-1)) - ss = (*int16)(unsafe.Pointer(pss)) - return -} - -// SysFreeString frees string system memory. This must be called with SysAllocString. -func SysFreeString(v *int16) (err error) { - hr, _, _ := procSysFreeString.Call(uintptr(unsafe.Pointer(v))) - if hr != 0 { - err = NewError(hr) - } - return -} - -// SysStringLen is the length of the system allocated string. -func SysStringLen(v *int16) uint32 { - l, _, _ := procSysStringLen.Call(uintptr(unsafe.Pointer(v))) - return uint32(l) -} - -// CreateStdDispatch provides default IDispatch implementation for IUnknown. -// -// This handles default IDispatch implementation for objects. It haves a few -// limitations with only supporting one language. It will also only return -// default exception codes. -func CreateStdDispatch(unk *IUnknown, v uintptr, ptinfo *IUnknown) (disp *IDispatch, err error) { - hr, _, _ := procCreateStdDispatch.Call( - uintptr(unsafe.Pointer(unk)), - v, - uintptr(unsafe.Pointer(ptinfo)), - uintptr(unsafe.Pointer(&disp))) - if hr != 0 { - err = NewError(hr) - } - return -} - -// CreateDispTypeInfo provides default ITypeInfo implementation for IDispatch. -// -// This will not handle the full implementation of the interface. 
-func CreateDispTypeInfo(idata *INTERFACEDATA) (pptinfo *IUnknown, err error) { - hr, _, _ := procCreateDispTypeInfo.Call( - uintptr(unsafe.Pointer(idata)), - uintptr(GetUserDefaultLCID()), - uintptr(unsafe.Pointer(&pptinfo))) - if hr != 0 { - err = NewError(hr) - } - return -} - -// copyMemory moves location of a block of memory. -func copyMemory(dest unsafe.Pointer, src unsafe.Pointer, length uint32) { - procCopyMemory.Call(uintptr(dest), uintptr(src), uintptr(length)) -} - -// GetUserDefaultLCID retrieves current user default locale. -func GetUserDefaultLCID() (lcid uint32) { - ret, _, _ := procGetUserDefaultLCID.Call() - lcid = uint32(ret) - return -} - -// GetMessage in message queue from runtime. -// -// This function appears to block. PeekMessage does not block. -func GetMessage(msg *Msg, hwnd uint32, MsgFilterMin uint32, MsgFilterMax uint32) (ret int32, err error) { - r0, _, err := procGetMessageW.Call(uintptr(unsafe.Pointer(msg)), uintptr(hwnd), uintptr(MsgFilterMin), uintptr(MsgFilterMax)) - ret = int32(r0) - return -} - -// DispatchMessage to window procedure. -func DispatchMessage(msg *Msg) (ret int32) { - r0, _, _ := procDispatchMessageW.Call(uintptr(unsafe.Pointer(msg))) - ret = int32(r0) - return -} - -// GetVariantDate converts COM Variant Time value to Go time.Time. 
-func GetVariantDate(value float64) (time.Time, error) { - var st syscall.Systemtime - r, _, _ := procVariantTimeToSystemTime.Call(uintptr(value), uintptr(unsafe.Pointer(&st))) - if r != 0 { - return time.Date(int(st.Year), time.Month(st.Month), int(st.Day), int(st.Hour), int(st.Minute), int(st.Second), int(st.Milliseconds/1000), time.UTC), nil - } - return time.Now(), errors.New("Could not convert to time, passing current time.") -} diff --git a/vendor/github.com/go-ole/go-ole/com_func.go b/vendor/github.com/go-ole/go-ole/com_func.go deleted file mode 100644 index 425aad3..0000000 --- a/vendor/github.com/go-ole/go-ole/com_func.go +++ /dev/null @@ -1,174 +0,0 @@ -// +build !windows - -package ole - -import ( - "time" - "unsafe" -) - -// coInitialize initializes COM library on current thread. -// -// MSDN documentation suggests that this function should not be called. Call -// CoInitializeEx() instead. The reason has to do with threading and this -// function is only for single-threaded apartments. -// -// That said, most users of the library have gotten away with just this -// function. If you are experiencing threading issues, then use -// CoInitializeEx(). -func coInitialize() error { - return NewError(E_NOTIMPL) -} - -// coInitializeEx initializes COM library with concurrency model. -func coInitializeEx(coinit uint32) error { - return NewError(E_NOTIMPL) -} - -// CoInitialize initializes COM library on current thread. -// -// MSDN documentation suggests that this function should not be called. Call -// CoInitializeEx() instead. The reason has to do with threading and this -// function is only for single-threaded apartments. -// -// That said, most users of the library have gotten away with just this -// function. If you are experiencing threading issues, then use -// CoInitializeEx(). -func CoInitialize(p uintptr) error { - return NewError(E_NOTIMPL) -} - -// CoInitializeEx initializes COM library with concurrency model. 
-func CoInitializeEx(p uintptr, coinit uint32) error { - return NewError(E_NOTIMPL) -} - -// CoUninitialize uninitializes COM Library. -func CoUninitialize() {} - -// CoTaskMemFree frees memory pointer. -func CoTaskMemFree(memptr uintptr) {} - -// CLSIDFromProgID retrieves Class Identifier with the given Program Identifier. -// -// The Programmatic Identifier must be registered, because it will be looked up -// in the Windows Registry. The registry entry has the following keys: CLSID, -// Insertable, Protocol and Shell -// (https://msdn.microsoft.com/en-us/library/dd542719(v=vs.85).aspx). -// -// programID identifies the class id with less precision and is not guaranteed -// to be unique. These are usually found in the registry under -// HKEY_LOCAL_MACHINE\SOFTWARE\Classes, usually with the format of -// "Program.Component.Version" with version being optional. -// -// CLSIDFromProgID in Windows API. -func CLSIDFromProgID(progId string) (*GUID, error) { - return nil, NewError(E_NOTIMPL) -} - -// CLSIDFromString retrieves Class ID from string representation. -// -// This is technically the string version of the GUID and will convert the -// string to object. -// -// CLSIDFromString in Windows API. -func CLSIDFromString(str string) (*GUID, error) { - return nil, NewError(E_NOTIMPL) -} - -// StringFromCLSID returns GUID formated string from GUID object. -func StringFromCLSID(clsid *GUID) (string, error) { - return "", NewError(E_NOTIMPL) -} - -// IIDFromString returns GUID from program ID. -func IIDFromString(progId string) (*GUID, error) { - return nil, NewError(E_NOTIMPL) -} - -// StringFromIID returns GUID formatted string from GUID object. -func StringFromIID(iid *GUID) (string, error) { - return "", NewError(E_NOTIMPL) -} - -// CreateInstance of single uninitialized object with GUID. -func CreateInstance(clsid *GUID, iid *GUID) (*IUnknown, error) { - return nil, NewError(E_NOTIMPL) -} - -// GetActiveObject retrieves pointer to active object. 
-func GetActiveObject(clsid *GUID, iid *GUID) (*IUnknown, error) { - return nil, NewError(E_NOTIMPL) -} - -// VariantInit initializes variant. -func VariantInit(v *VARIANT) error { - return NewError(E_NOTIMPL) -} - -// VariantClear clears value in Variant settings to VT_EMPTY. -func VariantClear(v *VARIANT) error { - return NewError(E_NOTIMPL) -} - -// SysAllocString allocates memory for string and copies string into memory. -func SysAllocString(v string) *int16 { - u := int16(0) - return &u -} - -// SysAllocStringLen copies up to length of given string returning pointer. -func SysAllocStringLen(v string) *int16 { - u := int16(0) - return &u -} - -// SysFreeString frees string system memory. This must be called with SysAllocString. -func SysFreeString(v *int16) error { - return NewError(E_NOTIMPL) -} - -// SysStringLen is the length of the system allocated string. -func SysStringLen(v *int16) uint32 { - return uint32(0) -} - -// CreateStdDispatch provides default IDispatch implementation for IUnknown. -// -// This handles default IDispatch implementation for objects. It haves a few -// limitations with only supporting one language. It will also only return -// default exception codes. -func CreateStdDispatch(unk *IUnknown, v uintptr, ptinfo *IUnknown) (*IDispatch, error) { - return nil, NewError(E_NOTIMPL) -} - -// CreateDispTypeInfo provides default ITypeInfo implementation for IDispatch. -// -// This will not handle the full implementation of the interface. -func CreateDispTypeInfo(idata *INTERFACEDATA) (*IUnknown, error) { - return nil, NewError(E_NOTIMPL) -} - -// copyMemory moves location of a block of memory. -func copyMemory(dest unsafe.Pointer, src unsafe.Pointer, length uint32) {} - -// GetUserDefaultLCID retrieves current user default locale. -func GetUserDefaultLCID() uint32 { - return uint32(0) -} - -// GetMessage in message queue from runtime. -// -// This function appears to block. PeekMessage does not block. 
-func GetMessage(msg *Msg, hwnd uint32, MsgFilterMin uint32, MsgFilterMax uint32) (int32, error) { - return int32(0), NewError(E_NOTIMPL) -} - -// DispatchMessage to window procedure. -func DispatchMessage(msg *Msg) int32 { - return int32(0) -} - -func GetVariantDate(value float64) (time.Time, error) { - return time.Now(), NewError(E_NOTIMPL) -} diff --git a/vendor/github.com/go-ole/go-ole/connect.go b/vendor/github.com/go-ole/go-ole/connect.go deleted file mode 100644 index b2ac2ec..0000000 --- a/vendor/github.com/go-ole/go-ole/connect.go +++ /dev/null @@ -1,192 +0,0 @@ -package ole - -// Connection contains IUnknown for fluent interface interaction. -// -// Deprecated. Use oleutil package instead. -type Connection struct { - Object *IUnknown // Access COM -} - -// Initialize COM. -func (*Connection) Initialize() (err error) { - return coInitialize() -} - -// Uninitialize COM. -func (*Connection) Uninitialize() { - CoUninitialize() -} - -// Create IUnknown object based first on ProgId and then from String. -func (c *Connection) Create(progId string) (err error) { - var clsid *GUID - clsid, err = CLSIDFromProgID(progId) - if err != nil { - clsid, err = CLSIDFromString(progId) - if err != nil { - return - } - } - - unknown, err := CreateInstance(clsid, IID_IUnknown) - if err != nil { - return - } - c.Object = unknown - - return -} - -// Release IUnknown object. -func (c *Connection) Release() { - c.Object.Release() -} - -// Load COM object from list of programIDs or strings. -func (c *Connection) Load(names ...string) (errors []error) { - var tempErrors []error = make([]error, len(names)) - var numErrors int = 0 - for _, name := range names { - err := c.Create(name) - if err != nil { - tempErrors = append(tempErrors, err) - numErrors += 1 - continue - } - break - } - - copy(errors, tempErrors[0:numErrors]) - return -} - -// Dispatch returns Dispatch object. 
-func (c *Connection) Dispatch() (object *Dispatch, err error) { - dispatch, err := c.Object.QueryInterface(IID_IDispatch) - if err != nil { - return - } - object = &Dispatch{dispatch} - return -} - -// Dispatch stores IDispatch object. -type Dispatch struct { - Object *IDispatch // Dispatch object. -} - -// Call method on IDispatch with parameters. -func (d *Dispatch) Call(method string, params ...interface{}) (result *VARIANT, err error) { - id, err := d.GetId(method) - if err != nil { - return - } - - result, err = d.Invoke(id, DISPATCH_METHOD, params) - return -} - -// MustCall method on IDispatch with parameters. -func (d *Dispatch) MustCall(method string, params ...interface{}) (result *VARIANT) { - id, err := d.GetId(method) - if err != nil { - panic(err) - } - - result, err = d.Invoke(id, DISPATCH_METHOD, params) - if err != nil { - panic(err) - } - - return -} - -// Get property on IDispatch with parameters. -func (d *Dispatch) Get(name string, params ...interface{}) (result *VARIANT, err error) { - id, err := d.GetId(name) - if err != nil { - return - } - result, err = d.Invoke(id, DISPATCH_PROPERTYGET, params) - return -} - -// MustGet property on IDispatch with parameters. -func (d *Dispatch) MustGet(name string, params ...interface{}) (result *VARIANT) { - id, err := d.GetId(name) - if err != nil { - panic(err) - } - - result, err = d.Invoke(id, DISPATCH_PROPERTYGET, params) - if err != nil { - panic(err) - } - return -} - -// Set property on IDispatch with parameters. -func (d *Dispatch) Set(name string, params ...interface{}) (result *VARIANT, err error) { - id, err := d.GetId(name) - if err != nil { - return - } - result, err = d.Invoke(id, DISPATCH_PROPERTYPUT, params) - return -} - -// MustSet property on IDispatch with parameters. 
-func (d *Dispatch) MustSet(name string, params ...interface{}) (result *VARIANT) { - id, err := d.GetId(name) - if err != nil { - panic(err) - } - - result, err = d.Invoke(id, DISPATCH_PROPERTYPUT, params) - if err != nil { - panic(err) - } - return -} - -// GetId retrieves ID of name on IDispatch. -func (d *Dispatch) GetId(name string) (id int32, err error) { - var dispid []int32 - dispid, err = d.Object.GetIDsOfName([]string{name}) - if err != nil { - return - } - id = dispid[0] - return -} - -// GetIds retrieves all IDs of names on IDispatch. -func (d *Dispatch) GetIds(names ...string) (dispid []int32, err error) { - dispid, err = d.Object.GetIDsOfName(names) - return -} - -// Invoke IDispatch on DisplayID of dispatch type with parameters. -// -// There have been problems where if send cascading params..., it would error -// out because the parameters would be empty. -func (d *Dispatch) Invoke(id int32, dispatch int16, params []interface{}) (result *VARIANT, err error) { - if len(params) < 1 { - result, err = d.Object.Invoke(id, dispatch) - } else { - result, err = d.Object.Invoke(id, dispatch, params...) - } - return -} - -// Release IDispatch object. -func (d *Dispatch) Release() { - d.Object.Release() -} - -// Connect initializes COM and attempts to load IUnknown based on given names. -func Connect(names ...string) (connection *Connection) { - connection.Initialize() - connection.Load(names...) 
- return -} diff --git a/vendor/github.com/go-ole/go-ole/constants.go b/vendor/github.com/go-ole/go-ole/constants.go deleted file mode 100644 index fd0c6d7..0000000 --- a/vendor/github.com/go-ole/go-ole/constants.go +++ /dev/null @@ -1,153 +0,0 @@ -package ole - -const ( - CLSCTX_INPROC_SERVER = 1 - CLSCTX_INPROC_HANDLER = 2 - CLSCTX_LOCAL_SERVER = 4 - CLSCTX_INPROC_SERVER16 = 8 - CLSCTX_REMOTE_SERVER = 16 - CLSCTX_ALL = CLSCTX_INPROC_SERVER | CLSCTX_INPROC_HANDLER | CLSCTX_LOCAL_SERVER - CLSCTX_INPROC = CLSCTX_INPROC_SERVER | CLSCTX_INPROC_HANDLER - CLSCTX_SERVER = CLSCTX_INPROC_SERVER | CLSCTX_LOCAL_SERVER | CLSCTX_REMOTE_SERVER -) - -const ( - COINIT_APARTMENTTHREADED = 0x2 - COINIT_MULTITHREADED = 0x0 - COINIT_DISABLE_OLE1DDE = 0x4 - COINIT_SPEED_OVER_MEMORY = 0x8 -) - -const ( - DISPATCH_METHOD = 1 - DISPATCH_PROPERTYGET = 2 - DISPATCH_PROPERTYPUT = 4 - DISPATCH_PROPERTYPUTREF = 8 -) - -const ( - S_OK = 0x00000000 - E_UNEXPECTED = 0x8000FFFF - E_NOTIMPL = 0x80004001 - E_OUTOFMEMORY = 0x8007000E - E_INVALIDARG = 0x80070057 - E_NOINTERFACE = 0x80004002 - E_POINTER = 0x80004003 - E_HANDLE = 0x80070006 - E_ABORT = 0x80004004 - E_FAIL = 0x80004005 - E_ACCESSDENIED = 0x80070005 - E_PENDING = 0x8000000A - - CO_E_CLASSSTRING = 0x800401F3 -) - -const ( - CC_FASTCALL = iota - CC_CDECL - CC_MSCPASCAL - CC_PASCAL = CC_MSCPASCAL - CC_MACPASCAL - CC_STDCALL - CC_FPFASTCALL - CC_SYSCALL - CC_MPWCDECL - CC_MPWPASCAL - CC_MAX = CC_MPWPASCAL -) - -type VT uint16 - -const ( - VT_EMPTY VT = 0x0 - VT_NULL VT = 0x1 - VT_I2 VT = 0x2 - VT_I4 VT = 0x3 - VT_R4 VT = 0x4 - VT_R8 VT = 0x5 - VT_CY VT = 0x6 - VT_DATE VT = 0x7 - VT_BSTR VT = 0x8 - VT_DISPATCH VT = 0x9 - VT_ERROR VT = 0xa - VT_BOOL VT = 0xb - VT_VARIANT VT = 0xc - VT_UNKNOWN VT = 0xd - VT_DECIMAL VT = 0xe - VT_I1 VT = 0x10 - VT_UI1 VT = 0x11 - VT_UI2 VT = 0x12 - VT_UI4 VT = 0x13 - VT_I8 VT = 0x14 - VT_UI8 VT = 0x15 - VT_INT VT = 0x16 - VT_UINT VT = 0x17 - VT_VOID VT = 0x18 - VT_HRESULT VT = 0x19 - VT_PTR VT = 0x1a - 
VT_SAFEARRAY VT = 0x1b - VT_CARRAY VT = 0x1c - VT_USERDEFINED VT = 0x1d - VT_LPSTR VT = 0x1e - VT_LPWSTR VT = 0x1f - VT_RECORD VT = 0x24 - VT_INT_PTR VT = 0x25 - VT_UINT_PTR VT = 0x26 - VT_FILETIME VT = 0x40 - VT_BLOB VT = 0x41 - VT_STREAM VT = 0x42 - VT_STORAGE VT = 0x43 - VT_STREAMED_OBJECT VT = 0x44 - VT_STORED_OBJECT VT = 0x45 - VT_BLOB_OBJECT VT = 0x46 - VT_CF VT = 0x47 - VT_CLSID VT = 0x48 - VT_BSTR_BLOB VT = 0xfff - VT_VECTOR VT = 0x1000 - VT_ARRAY VT = 0x2000 - VT_BYREF VT = 0x4000 - VT_RESERVED VT = 0x8000 - VT_ILLEGAL VT = 0xffff - VT_ILLEGALMASKED VT = 0xfff - VT_TYPEMASK VT = 0xfff -) - -const ( - DISPID_UNKNOWN = -1 - DISPID_VALUE = 0 - DISPID_PROPERTYPUT = -3 - DISPID_NEWENUM = -4 - DISPID_EVALUATE = -5 - DISPID_CONSTRUCTOR = -6 - DISPID_DESTRUCTOR = -7 - DISPID_COLLECT = -8 -) - -const ( - TKIND_ENUM = 1 - TKIND_RECORD = 2 - TKIND_MODULE = 3 - TKIND_INTERFACE = 4 - TKIND_DISPATCH = 5 - TKIND_COCLASS = 6 - TKIND_ALIAS = 7 - TKIND_UNION = 8 - TKIND_MAX = 9 -) - -// Safe Array Feature Flags - -const ( - FADF_AUTO = 0x0001 - FADF_STATIC = 0x0002 - FADF_EMBEDDED = 0x0004 - FADF_FIXEDSIZE = 0x0010 - FADF_RECORD = 0x0020 - FADF_HAVEIID = 0x0040 - FADF_HAVEVARTYPE = 0x0080 - FADF_BSTR = 0x0100 - FADF_UNKNOWN = 0x0200 - FADF_DISPATCH = 0x0400 - FADF_VARIANT = 0x0800 - FADF_RESERVED = 0xF008 -) diff --git a/vendor/github.com/go-ole/go-ole/error.go b/vendor/github.com/go-ole/go-ole/error.go deleted file mode 100644 index 096b456..0000000 --- a/vendor/github.com/go-ole/go-ole/error.go +++ /dev/null @@ -1,51 +0,0 @@ -package ole - -// OleError stores COM errors. -type OleError struct { - hr uintptr - description string - subError error -} - -// NewError creates new error with HResult. -func NewError(hr uintptr) *OleError { - return &OleError{hr: hr} -} - -// NewErrorWithDescription creates new COM error with HResult and description. 
-func NewErrorWithDescription(hr uintptr, description string) *OleError { - return &OleError{hr: hr, description: description} -} - -// NewErrorWithSubError creates new COM error with parent error. -func NewErrorWithSubError(hr uintptr, description string, err error) *OleError { - return &OleError{hr: hr, description: description, subError: err} -} - -// Code is the HResult. -func (v *OleError) Code() uintptr { - return uintptr(v.hr) -} - -// String description, either manually set or format message with error code. -func (v *OleError) String() string { - if v.description != "" { - return errstr(int(v.hr)) + " (" + v.description + ")" - } - return errstr(int(v.hr)) -} - -// Error implements error interface. -func (v *OleError) Error() string { - return v.String() -} - -// Description retrieves error summary, if there is one. -func (v *OleError) Description() string { - return v.description -} - -// SubError returns parent error, if there is one. -func (v *OleError) SubError() error { - return v.subError -} diff --git a/vendor/github.com/go-ole/go-ole/error_func.go b/vendor/github.com/go-ole/go-ole/error_func.go deleted file mode 100644 index 8a2ffaa..0000000 --- a/vendor/github.com/go-ole/go-ole/error_func.go +++ /dev/null @@ -1,8 +0,0 @@ -// +build !windows - -package ole - -// errstr converts error code to string. -func errstr(errno int) string { - return "" -} diff --git a/vendor/github.com/go-ole/go-ole/error_windows.go b/vendor/github.com/go-ole/go-ole/error_windows.go deleted file mode 100644 index d0e8e68..0000000 --- a/vendor/github.com/go-ole/go-ole/error_windows.go +++ /dev/null @@ -1,24 +0,0 @@ -// +build windows - -package ole - -import ( - "fmt" - "syscall" - "unicode/utf16" -) - -// errstr converts error code to string. 
-func errstr(errno int) string { - // ask windows for the remaining errors - var flags uint32 = syscall.FORMAT_MESSAGE_FROM_SYSTEM | syscall.FORMAT_MESSAGE_ARGUMENT_ARRAY | syscall.FORMAT_MESSAGE_IGNORE_INSERTS - b := make([]uint16, 300) - n, err := syscall.FormatMessage(flags, 0, uint32(errno), 0, b, nil) - if err != nil { - return fmt.Sprintf("error %d (FormatMessage failed with: %v)", errno, err) - } - // trim terminating \r and \n - for ; n > 0 && (b[n-1] == '\n' || b[n-1] == '\r'); n-- { - } - return string(utf16.Decode(b[:n])) -} diff --git a/vendor/github.com/go-ole/go-ole/guid.go b/vendor/github.com/go-ole/go-ole/guid.go deleted file mode 100644 index 8d20f68..0000000 --- a/vendor/github.com/go-ole/go-ole/guid.go +++ /dev/null @@ -1,284 +0,0 @@ -package ole - -var ( - // IID_NULL is null Interface ID, used when no other Interface ID is known. - IID_NULL = NewGUID("{00000000-0000-0000-0000-000000000000}") - - // IID_IUnknown is for IUnknown interfaces. - IID_IUnknown = NewGUID("{00000000-0000-0000-C000-000000000046}") - - // IID_IDispatch is for IDispatch interfaces. - IID_IDispatch = NewGUID("{00020400-0000-0000-C000-000000000046}") - - // IID_IEnumVariant is for IEnumVariant interfaces - IID_IEnumVariant = NewGUID("{00020404-0000-0000-C000-000000000046}") - - // IID_IConnectionPointContainer is for IConnectionPointContainer interfaces. - IID_IConnectionPointContainer = NewGUID("{B196B284-BAB4-101A-B69C-00AA00341D07}") - - // IID_IConnectionPoint is for IConnectionPoint interfaces. - IID_IConnectionPoint = NewGUID("{B196B286-BAB4-101A-B69C-00AA00341D07}") - - // IID_IInspectable is for IInspectable interfaces. - IID_IInspectable = NewGUID("{AF86E2E0-B12D-4C6A-9C5A-D7AA65101E90}") - - // IID_IProvideClassInfo is for IProvideClassInfo interfaces. - IID_IProvideClassInfo = NewGUID("{B196B283-BAB4-101A-B69C-00AA00341D07}") -) - -// These are for testing and not part of any library. -var ( - // IID_ICOMTestString is for ICOMTestString interfaces. 
- // - // {E0133EB4-C36F-469A-9D3D-C66B84BE19ED} - IID_ICOMTestString = NewGUID("{E0133EB4-C36F-469A-9D3D-C66B84BE19ED}") - - // IID_ICOMTestInt8 is for ICOMTestInt8 interfaces. - // - // {BEB06610-EB84-4155-AF58-E2BFF53680B4} - IID_ICOMTestInt8 = NewGUID("{BEB06610-EB84-4155-AF58-E2BFF53680B4}") - - // IID_ICOMTestInt16 is for ICOMTestInt16 interfaces. - // - // {DAA3F9FA-761E-4976-A860-8364CE55F6FC} - IID_ICOMTestInt16 = NewGUID("{DAA3F9FA-761E-4976-A860-8364CE55F6FC}") - - // IID_ICOMTestInt32 is for ICOMTestInt32 interfaces. - // - // {E3DEDEE7-38A2-4540-91D1-2EEF1D8891B0} - IID_ICOMTestInt32 = NewGUID("{E3DEDEE7-38A2-4540-91D1-2EEF1D8891B0}") - - // IID_ICOMTestInt64 is for ICOMTestInt64 interfaces. - // - // {8D437CBC-B3ED-485C-BC32-C336432A1623} - IID_ICOMTestInt64 = NewGUID("{8D437CBC-B3ED-485C-BC32-C336432A1623}") - - // IID_ICOMTestFloat is for ICOMTestFloat interfaces. - // - // {BF1ED004-EA02-456A-AA55-2AC8AC6B054C} - IID_ICOMTestFloat = NewGUID("{BF1ED004-EA02-456A-AA55-2AC8AC6B054C}") - - // IID_ICOMTestDouble is for ICOMTestDouble interfaces. - // - // {BF908A81-8687-4E93-999F-D86FAB284BA0} - IID_ICOMTestDouble = NewGUID("{BF908A81-8687-4E93-999F-D86FAB284BA0}") - - // IID_ICOMTestBoolean is for ICOMTestBoolean interfaces. - // - // {D530E7A6-4EE8-40D1-8931-3D63B8605010} - IID_ICOMTestBoolean = NewGUID("{D530E7A6-4EE8-40D1-8931-3D63B8605010}") - - // IID_ICOMEchoTestObject is for ICOMEchoTestObject interfaces. - // - // {6485B1EF-D780-4834-A4FE-1EBB51746CA3} - IID_ICOMEchoTestObject = NewGUID("{6485B1EF-D780-4834-A4FE-1EBB51746CA3}") - - // IID_ICOMTestTypes is for ICOMTestTypes interfaces. - // - // {CCA8D7AE-91C0-4277-A8B3-FF4EDF28D3C0} - IID_ICOMTestTypes = NewGUID("{CCA8D7AE-91C0-4277-A8B3-FF4EDF28D3C0}") - - // CLSID_COMEchoTestObject is for COMEchoTestObject class. 
- // - // {3C24506A-AE9E-4D50-9157-EF317281F1B0} - CLSID_COMEchoTestObject = NewGUID("{3C24506A-AE9E-4D50-9157-EF317281F1B0}") - - // CLSID_COMTestScalarClass is for COMTestScalarClass class. - // - // {865B85C5-0334-4AC6-9EF6-AACEC8FC5E86} - CLSID_COMTestScalarClass = NewGUID("{865B85C5-0334-4AC6-9EF6-AACEC8FC5E86}") -) - -const hextable = "0123456789ABCDEF" -const emptyGUID = "{00000000-0000-0000-0000-000000000000}" - -// GUID is Windows API specific GUID type. -// -// This exists to match Windows GUID type for direct passing for COM. -// Format is in xxxxxxxx-xxxx-xxxx-xxxxxxxxxxxxxxxx. -type GUID struct { - Data1 uint32 - Data2 uint16 - Data3 uint16 - Data4 [8]byte -} - -// NewGUID converts the given string into a globally unique identifier that is -// compliant with the Windows API. -// -// The supplied string may be in any of these formats: -// -// XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX -// XXXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX -// {XXXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX} -// -// The conversion of the supplied string is not case-sensitive. 
-func NewGUID(guid string) *GUID { - d := []byte(guid) - var d1, d2, d3, d4a, d4b []byte - - switch len(d) { - case 38: - if d[0] != '{' || d[37] != '}' { - return nil - } - d = d[1:37] - fallthrough - case 36: - if d[8] != '-' || d[13] != '-' || d[18] != '-' || d[23] != '-' { - return nil - } - d1 = d[0:8] - d2 = d[9:13] - d3 = d[14:18] - d4a = d[19:23] - d4b = d[24:36] - case 32: - d1 = d[0:8] - d2 = d[8:12] - d3 = d[12:16] - d4a = d[16:20] - d4b = d[20:32] - default: - return nil - } - - var g GUID - var ok1, ok2, ok3, ok4 bool - g.Data1, ok1 = decodeHexUint32(d1) - g.Data2, ok2 = decodeHexUint16(d2) - g.Data3, ok3 = decodeHexUint16(d3) - g.Data4, ok4 = decodeHexByte64(d4a, d4b) - if ok1 && ok2 && ok3 && ok4 { - return &g - } - return nil -} - -func decodeHexUint32(src []byte) (value uint32, ok bool) { - var b1, b2, b3, b4 byte - var ok1, ok2, ok3, ok4 bool - b1, ok1 = decodeHexByte(src[0], src[1]) - b2, ok2 = decodeHexByte(src[2], src[3]) - b3, ok3 = decodeHexByte(src[4], src[5]) - b4, ok4 = decodeHexByte(src[6], src[7]) - value = (uint32(b1) << 24) | (uint32(b2) << 16) | (uint32(b3) << 8) | uint32(b4) - ok = ok1 && ok2 && ok3 && ok4 - return -} - -func decodeHexUint16(src []byte) (value uint16, ok bool) { - var b1, b2 byte - var ok1, ok2 bool - b1, ok1 = decodeHexByte(src[0], src[1]) - b2, ok2 = decodeHexByte(src[2], src[3]) - value = (uint16(b1) << 8) | uint16(b2) - ok = ok1 && ok2 - return -} - -func decodeHexByte64(s1 []byte, s2 []byte) (value [8]byte, ok bool) { - var ok1, ok2, ok3, ok4, ok5, ok6, ok7, ok8 bool - value[0], ok1 = decodeHexByte(s1[0], s1[1]) - value[1], ok2 = decodeHexByte(s1[2], s1[3]) - value[2], ok3 = decodeHexByte(s2[0], s2[1]) - value[3], ok4 = decodeHexByte(s2[2], s2[3]) - value[4], ok5 = decodeHexByte(s2[4], s2[5]) - value[5], ok6 = decodeHexByte(s2[6], s2[7]) - value[6], ok7 = decodeHexByte(s2[8], s2[9]) - value[7], ok8 = decodeHexByte(s2[10], s2[11]) - ok = ok1 && ok2 && ok3 && ok4 && ok5 && ok6 && ok7 && ok8 - return -} - -func 
decodeHexByte(c1, c2 byte) (value byte, ok bool) { - var n1, n2 byte - var ok1, ok2 bool - n1, ok1 = decodeHexChar(c1) - n2, ok2 = decodeHexChar(c2) - value = (n1 << 4) | n2 - ok = ok1 && ok2 - return -} - -func decodeHexChar(c byte) (byte, bool) { - switch { - case '0' <= c && c <= '9': - return c - '0', true - case 'a' <= c && c <= 'f': - return c - 'a' + 10, true - case 'A' <= c && c <= 'F': - return c - 'A' + 10, true - } - - return 0, false -} - -// String converts the GUID to string form. It will adhere to this pattern: -// -// {XXXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX} -// -// If the GUID is nil, the string representation of an empty GUID is returned: -// -// {00000000-0000-0000-0000-000000000000} -func (guid *GUID) String() string { - if guid == nil { - return emptyGUID - } - - var c [38]byte - c[0] = '{' - putUint32Hex(c[1:9], guid.Data1) - c[9] = '-' - putUint16Hex(c[10:14], guid.Data2) - c[14] = '-' - putUint16Hex(c[15:19], guid.Data3) - c[19] = '-' - putByteHex(c[20:24], guid.Data4[0:2]) - c[24] = '-' - putByteHex(c[25:37], guid.Data4[2:8]) - c[37] = '}' - return string(c[:]) -} - -func putUint32Hex(b []byte, v uint32) { - b[0] = hextable[byte(v>>24)>>4] - b[1] = hextable[byte(v>>24)&0x0f] - b[2] = hextable[byte(v>>16)>>4] - b[3] = hextable[byte(v>>16)&0x0f] - b[4] = hextable[byte(v>>8)>>4] - b[5] = hextable[byte(v>>8)&0x0f] - b[6] = hextable[byte(v)>>4] - b[7] = hextable[byte(v)&0x0f] -} - -func putUint16Hex(b []byte, v uint16) { - b[0] = hextable[byte(v>>8)>>4] - b[1] = hextable[byte(v>>8)&0x0f] - b[2] = hextable[byte(v)>>4] - b[3] = hextable[byte(v)&0x0f] -} - -func putByteHex(dst, src []byte) { - for i := 0; i < len(src); i++ { - dst[i*2] = hextable[src[i]>>4] - dst[i*2+1] = hextable[src[i]&0x0f] - } -} - -// IsEqualGUID compares two GUID. -// -// Not constant time comparison. 
-func IsEqualGUID(guid1 *GUID, guid2 *GUID) bool { - return guid1.Data1 == guid2.Data1 && - guid1.Data2 == guid2.Data2 && - guid1.Data3 == guid2.Data3 && - guid1.Data4[0] == guid2.Data4[0] && - guid1.Data4[1] == guid2.Data4[1] && - guid1.Data4[2] == guid2.Data4[2] && - guid1.Data4[3] == guid2.Data4[3] && - guid1.Data4[4] == guid2.Data4[4] && - guid1.Data4[5] == guid2.Data4[5] && - guid1.Data4[6] == guid2.Data4[6] && - guid1.Data4[7] == guid2.Data4[7] -} diff --git a/vendor/github.com/go-ole/go-ole/iconnectionpoint.go b/vendor/github.com/go-ole/go-ole/iconnectionpoint.go deleted file mode 100644 index 9e6c49f..0000000 --- a/vendor/github.com/go-ole/go-ole/iconnectionpoint.go +++ /dev/null @@ -1,20 +0,0 @@ -package ole - -import "unsafe" - -type IConnectionPoint struct { - IUnknown -} - -type IConnectionPointVtbl struct { - IUnknownVtbl - GetConnectionInterface uintptr - GetConnectionPointContainer uintptr - Advise uintptr - Unadvise uintptr - EnumConnections uintptr -} - -func (v *IConnectionPoint) VTable() *IConnectionPointVtbl { - return (*IConnectionPointVtbl)(unsafe.Pointer(v.RawVTable)) -} diff --git a/vendor/github.com/go-ole/go-ole/iconnectionpoint_func.go b/vendor/github.com/go-ole/go-ole/iconnectionpoint_func.go deleted file mode 100644 index 5414dc3..0000000 --- a/vendor/github.com/go-ole/go-ole/iconnectionpoint_func.go +++ /dev/null @@ -1,21 +0,0 @@ -// +build !windows - -package ole - -import "unsafe" - -func (v *IConnectionPoint) GetConnectionInterface(piid **GUID) int32 { - return int32(0) -} - -func (v *IConnectionPoint) Advise(unknown *IUnknown) (uint32, error) { - return uint32(0), NewError(E_NOTIMPL) -} - -func (v *IConnectionPoint) Unadvise(cookie uint32) error { - return NewError(E_NOTIMPL) -} - -func (v *IConnectionPoint) EnumConnections(p *unsafe.Pointer) (err error) { - return NewError(E_NOTIMPL) -} diff --git a/vendor/github.com/go-ole/go-ole/iconnectionpoint_windows.go b/vendor/github.com/go-ole/go-ole/iconnectionpoint_windows.go deleted 
file mode 100644 index 32bc183..0000000 --- a/vendor/github.com/go-ole/go-ole/iconnectionpoint_windows.go +++ /dev/null @@ -1,43 +0,0 @@ -// +build windows - -package ole - -import ( - "syscall" - "unsafe" -) - -func (v *IConnectionPoint) GetConnectionInterface(piid **GUID) int32 { - // XXX: This doesn't look like it does what it's supposed to - return release((*IUnknown)(unsafe.Pointer(v))) -} - -func (v *IConnectionPoint) Advise(unknown *IUnknown) (cookie uint32, err error) { - hr, _, _ := syscall.Syscall( - v.VTable().Advise, - 3, - uintptr(unsafe.Pointer(v)), - uintptr(unsafe.Pointer(unknown)), - uintptr(unsafe.Pointer(&cookie))) - if hr != 0 { - err = NewError(hr) - } - return -} - -func (v *IConnectionPoint) Unadvise(cookie uint32) (err error) { - hr, _, _ := syscall.Syscall( - v.VTable().Unadvise, - 2, - uintptr(unsafe.Pointer(v)), - uintptr(cookie), - 0) - if hr != 0 { - err = NewError(hr) - } - return -} - -func (v *IConnectionPoint) EnumConnections(p *unsafe.Pointer) error { - return NewError(E_NOTIMPL) -} diff --git a/vendor/github.com/go-ole/go-ole/iconnectionpointcontainer.go b/vendor/github.com/go-ole/go-ole/iconnectionpointcontainer.go deleted file mode 100644 index 165860d..0000000 --- a/vendor/github.com/go-ole/go-ole/iconnectionpointcontainer.go +++ /dev/null @@ -1,17 +0,0 @@ -package ole - -import "unsafe" - -type IConnectionPointContainer struct { - IUnknown -} - -type IConnectionPointContainerVtbl struct { - IUnknownVtbl - EnumConnectionPoints uintptr - FindConnectionPoint uintptr -} - -func (v *IConnectionPointContainer) VTable() *IConnectionPointContainerVtbl { - return (*IConnectionPointContainerVtbl)(unsafe.Pointer(v.RawVTable)) -} diff --git a/vendor/github.com/go-ole/go-ole/iconnectionpointcontainer_func.go b/vendor/github.com/go-ole/go-ole/iconnectionpointcontainer_func.go deleted file mode 100644 index 5dfa42a..0000000 --- a/vendor/github.com/go-ole/go-ole/iconnectionpointcontainer_func.go +++ /dev/null @@ -1,11 +0,0 @@ -// +build 
!windows - -package ole - -func (v *IConnectionPointContainer) EnumConnectionPoints(points interface{}) error { - return NewError(E_NOTIMPL) -} - -func (v *IConnectionPointContainer) FindConnectionPoint(iid *GUID, point **IConnectionPoint) error { - return NewError(E_NOTIMPL) -} diff --git a/vendor/github.com/go-ole/go-ole/iconnectionpointcontainer_windows.go b/vendor/github.com/go-ole/go-ole/iconnectionpointcontainer_windows.go deleted file mode 100644 index ad30d79..0000000 --- a/vendor/github.com/go-ole/go-ole/iconnectionpointcontainer_windows.go +++ /dev/null @@ -1,25 +0,0 @@ -// +build windows - -package ole - -import ( - "syscall" - "unsafe" -) - -func (v *IConnectionPointContainer) EnumConnectionPoints(points interface{}) error { - return NewError(E_NOTIMPL) -} - -func (v *IConnectionPointContainer) FindConnectionPoint(iid *GUID, point **IConnectionPoint) (err error) { - hr, _, _ := syscall.Syscall( - v.VTable().FindConnectionPoint, - 3, - uintptr(unsafe.Pointer(v)), - uintptr(unsafe.Pointer(iid)), - uintptr(unsafe.Pointer(point))) - if hr != 0 { - err = NewError(hr) - } - return -} diff --git a/vendor/github.com/go-ole/go-ole/idispatch.go b/vendor/github.com/go-ole/go-ole/idispatch.go deleted file mode 100644 index d4af124..0000000 --- a/vendor/github.com/go-ole/go-ole/idispatch.go +++ /dev/null @@ -1,94 +0,0 @@ -package ole - -import "unsafe" - -type IDispatch struct { - IUnknown -} - -type IDispatchVtbl struct { - IUnknownVtbl - GetTypeInfoCount uintptr - GetTypeInfo uintptr - GetIDsOfNames uintptr - Invoke uintptr -} - -func (v *IDispatch) VTable() *IDispatchVtbl { - return (*IDispatchVtbl)(unsafe.Pointer(v.RawVTable)) -} - -func (v *IDispatch) GetIDsOfName(names []string) (dispid []int32, err error) { - dispid, err = getIDsOfName(v, names) - return -} - -func (v *IDispatch) Invoke(dispid int32, dispatch int16, params ...interface{}) (result *VARIANT, err error) { - result, err = invoke(v, dispid, dispatch, params...) 
- return -} - -func (v *IDispatch) GetTypeInfoCount() (c uint32, err error) { - c, err = getTypeInfoCount(v) - return -} - -func (v *IDispatch) GetTypeInfo() (tinfo *ITypeInfo, err error) { - tinfo, err = getTypeInfo(v) - return -} - -// GetSingleIDOfName is a helper that returns single display ID for IDispatch name. -// -// This replaces the common pattern of attempting to get a single name from the list of available -// IDs. It gives the first ID, if it is available. -func (v *IDispatch) GetSingleIDOfName(name string) (displayID int32, err error) { - var displayIDs []int32 - displayIDs, err = v.GetIDsOfName([]string{name}) - if err != nil { - return - } - displayID = displayIDs[0] - return -} - -// InvokeWithOptionalArgs accepts arguments as an array, works like Invoke. -// -// Accepts name and will attempt to retrieve Display ID to pass to Invoke. -// -// Passing params as an array is a workaround that could be fixed in later versions of Go that -// prevent passing empty params. During testing it was discovered that this is an acceptable way of -// getting around not being able to pass params normally. -func (v *IDispatch) InvokeWithOptionalArgs(name string, dispatch int16, params []interface{}) (result *VARIANT, err error) { - displayID, err := v.GetSingleIDOfName(name) - if err != nil { - return - } - - if len(params) < 1 { - result, err = v.Invoke(displayID, dispatch) - } else { - result, err = v.Invoke(displayID, dispatch, params...) - } - - return -} - -// CallMethod invokes named function with arguments on object. -func (v *IDispatch) CallMethod(name string, params ...interface{}) (*VARIANT, error) { - return v.InvokeWithOptionalArgs(name, DISPATCH_METHOD, params) -} - -// GetProperty retrieves the property with the name with the ability to pass arguments. -// -// Most of the time you will not need to pass arguments as most objects do not allow for this -// feature. Or at least, should not allow for this feature. 
Some servers don't follow best practices -// and this is provided for those edge cases. -func (v *IDispatch) GetProperty(name string, params ...interface{}) (*VARIANT, error) { - return v.InvokeWithOptionalArgs(name, DISPATCH_PROPERTYGET, params) -} - -// PutProperty attempts to mutate a property in the object. -func (v *IDispatch) PutProperty(name string, params ...interface{}) (*VARIANT, error) { - return v.InvokeWithOptionalArgs(name, DISPATCH_PROPERTYPUT, params) -} diff --git a/vendor/github.com/go-ole/go-ole/idispatch_func.go b/vendor/github.com/go-ole/go-ole/idispatch_func.go deleted file mode 100644 index b8fbbe3..0000000 --- a/vendor/github.com/go-ole/go-ole/idispatch_func.go +++ /dev/null @@ -1,19 +0,0 @@ -// +build !windows - -package ole - -func getIDsOfName(disp *IDispatch, names []string) ([]int32, error) { - return []int32{}, NewError(E_NOTIMPL) -} - -func getTypeInfoCount(disp *IDispatch) (uint32, error) { - return uint32(0), NewError(E_NOTIMPL) -} - -func getTypeInfo(disp *IDispatch) (*ITypeInfo, error) { - return nil, NewError(E_NOTIMPL) -} - -func invoke(disp *IDispatch, dispid int32, dispatch int16, params ...interface{}) (*VARIANT, error) { - return nil, NewError(E_NOTIMPL) -} diff --git a/vendor/github.com/go-ole/go-ole/idispatch_windows.go b/vendor/github.com/go-ole/go-ole/idispatch_windows.go deleted file mode 100644 index 020e4f5..0000000 --- a/vendor/github.com/go-ole/go-ole/idispatch_windows.go +++ /dev/null @@ -1,197 +0,0 @@ -// +build windows - -package ole - -import ( - "syscall" - "time" - "unsafe" -) - -func getIDsOfName(disp *IDispatch, names []string) (dispid []int32, err error) { - wnames := make([]*uint16, len(names)) - for i := 0; i < len(names); i++ { - wnames[i] = syscall.StringToUTF16Ptr(names[i]) - } - dispid = make([]int32, len(names)) - namelen := uint32(len(names)) - hr, _, _ := syscall.Syscall6( - disp.VTable().GetIDsOfNames, - 6, - uintptr(unsafe.Pointer(disp)), - uintptr(unsafe.Pointer(IID_NULL)), - 
uintptr(unsafe.Pointer(&wnames[0])), - uintptr(namelen), - uintptr(GetUserDefaultLCID()), - uintptr(unsafe.Pointer(&dispid[0]))) - if hr != 0 { - err = NewError(hr) - } - return -} - -func getTypeInfoCount(disp *IDispatch) (c uint32, err error) { - hr, _, _ := syscall.Syscall( - disp.VTable().GetTypeInfoCount, - 2, - uintptr(unsafe.Pointer(disp)), - uintptr(unsafe.Pointer(&c)), - 0) - if hr != 0 { - err = NewError(hr) - } - return -} - -func getTypeInfo(disp *IDispatch) (tinfo *ITypeInfo, err error) { - hr, _, _ := syscall.Syscall( - disp.VTable().GetTypeInfo, - 3, - uintptr(unsafe.Pointer(disp)), - uintptr(GetUserDefaultLCID()), - uintptr(unsafe.Pointer(&tinfo))) - if hr != 0 { - err = NewError(hr) - } - return -} - -func invoke(disp *IDispatch, dispid int32, dispatch int16, params ...interface{}) (result *VARIANT, err error) { - var dispparams DISPPARAMS - - if dispatch&DISPATCH_PROPERTYPUT != 0 { - dispnames := [1]int32{DISPID_PROPERTYPUT} - dispparams.rgdispidNamedArgs = uintptr(unsafe.Pointer(&dispnames[0])) - dispparams.cNamedArgs = 1 - } else if dispatch&DISPATCH_PROPERTYPUTREF != 0 { - dispnames := [1]int32{DISPID_PROPERTYPUT} - dispparams.rgdispidNamedArgs = uintptr(unsafe.Pointer(&dispnames[0])) - dispparams.cNamedArgs = 1 - } - var vargs []VARIANT - if len(params) > 0 { - vargs = make([]VARIANT, len(params)) - for i, v := range params { - //n := len(params)-i-1 - n := len(params) - i - 1 - VariantInit(&vargs[n]) - switch vv := v.(type) { - case bool: - if vv { - vargs[n] = NewVariant(VT_BOOL, 0xffff) - } else { - vargs[n] = NewVariant(VT_BOOL, 0) - } - case *bool: - vargs[n] = NewVariant(VT_BOOL|VT_BYREF, int64(uintptr(unsafe.Pointer(v.(*bool))))) - case uint8: - vargs[n] = NewVariant(VT_I1, int64(v.(uint8))) - case *uint8: - vargs[n] = NewVariant(VT_I1|VT_BYREF, int64(uintptr(unsafe.Pointer(v.(*uint8))))) - case int8: - vargs[n] = NewVariant(VT_I1, int64(v.(int8))) - case *int8: - vargs[n] = NewVariant(VT_I1|VT_BYREF, 
int64(uintptr(unsafe.Pointer(v.(*uint8))))) - case int16: - vargs[n] = NewVariant(VT_I2, int64(v.(int16))) - case *int16: - vargs[n] = NewVariant(VT_I2|VT_BYREF, int64(uintptr(unsafe.Pointer(v.(*int16))))) - case uint16: - vargs[n] = NewVariant(VT_UI2, int64(v.(uint16))) - case *uint16: - vargs[n] = NewVariant(VT_UI2|VT_BYREF, int64(uintptr(unsafe.Pointer(v.(*uint16))))) - case int32: - vargs[n] = NewVariant(VT_I4, int64(v.(int32))) - case *int32: - vargs[n] = NewVariant(VT_I4|VT_BYREF, int64(uintptr(unsafe.Pointer(v.(*int32))))) - case uint32: - vargs[n] = NewVariant(VT_UI4, int64(v.(uint32))) - case *uint32: - vargs[n] = NewVariant(VT_UI4|VT_BYREF, int64(uintptr(unsafe.Pointer(v.(*uint32))))) - case int64: - vargs[n] = NewVariant(VT_I8, int64(v.(int64))) - case *int64: - vargs[n] = NewVariant(VT_I8|VT_BYREF, int64(uintptr(unsafe.Pointer(v.(*int64))))) - case uint64: - vargs[n] = NewVariant(VT_UI8, int64(uintptr(v.(uint64)))) - case *uint64: - vargs[n] = NewVariant(VT_UI8|VT_BYREF, int64(uintptr(unsafe.Pointer(v.(*uint64))))) - case int: - vargs[n] = NewVariant(VT_I4, int64(v.(int))) - case *int: - vargs[n] = NewVariant(VT_I4|VT_BYREF, int64(uintptr(unsafe.Pointer(v.(*int))))) - case uint: - vargs[n] = NewVariant(VT_UI4, int64(v.(uint))) - case *uint: - vargs[n] = NewVariant(VT_UI4|VT_BYREF, int64(uintptr(unsafe.Pointer(v.(*uint))))) - case float32: - vargs[n] = NewVariant(VT_R4, *(*int64)(unsafe.Pointer(&vv))) - case *float32: - vargs[n] = NewVariant(VT_R4|VT_BYREF, int64(uintptr(unsafe.Pointer(v.(*float32))))) - case float64: - vargs[n] = NewVariant(VT_R8, *(*int64)(unsafe.Pointer(&vv))) - case *float64: - vargs[n] = NewVariant(VT_R8|VT_BYREF, int64(uintptr(unsafe.Pointer(v.(*float64))))) - case string: - vargs[n] = NewVariant(VT_BSTR, int64(uintptr(unsafe.Pointer(SysAllocStringLen(v.(string)))))) - case *string: - vargs[n] = NewVariant(VT_BSTR|VT_BYREF, int64(uintptr(unsafe.Pointer(v.(*string))))) - case time.Time: - s := vv.Format("2006-01-02 15:04:05") - 
vargs[n] = NewVariant(VT_BSTR, int64(uintptr(unsafe.Pointer(SysAllocStringLen(s))))) - case *time.Time: - s := vv.Format("2006-01-02 15:04:05") - vargs[n] = NewVariant(VT_BSTR|VT_BYREF, int64(uintptr(unsafe.Pointer(&s)))) - case *IDispatch: - vargs[n] = NewVariant(VT_DISPATCH, int64(uintptr(unsafe.Pointer(v.(*IDispatch))))) - case **IDispatch: - vargs[n] = NewVariant(VT_DISPATCH|VT_BYREF, int64(uintptr(unsafe.Pointer(v.(**IDispatch))))) - case nil: - vargs[n] = NewVariant(VT_NULL, 0) - case *VARIANT: - vargs[n] = NewVariant(VT_VARIANT|VT_BYREF, int64(uintptr(unsafe.Pointer(v.(*VARIANT))))) - case []byte: - safeByteArray := safeArrayFromByteSlice(v.([]byte)) - vargs[n] = NewVariant(VT_ARRAY|VT_UI1, int64(uintptr(unsafe.Pointer(safeByteArray)))) - defer VariantClear(&vargs[n]) - case []string: - safeByteArray := safeArrayFromStringSlice(v.([]string)) - vargs[n] = NewVariant(VT_ARRAY|VT_BSTR, int64(uintptr(unsafe.Pointer(safeByteArray)))) - defer VariantClear(&vargs[n]) - default: - panic("unknown type") - } - } - dispparams.rgvarg = uintptr(unsafe.Pointer(&vargs[0])) - dispparams.cArgs = uint32(len(params)) - } - - result = new(VARIANT) - var excepInfo EXCEPINFO - VariantInit(result) - hr, _, _ := syscall.Syscall9( - disp.VTable().Invoke, - 9, - uintptr(unsafe.Pointer(disp)), - uintptr(dispid), - uintptr(unsafe.Pointer(IID_NULL)), - uintptr(GetUserDefaultLCID()), - uintptr(dispatch), - uintptr(unsafe.Pointer(&dispparams)), - uintptr(unsafe.Pointer(result)), - uintptr(unsafe.Pointer(&excepInfo)), - 0) - if hr != 0 { - err = NewErrorWithSubError(hr, BstrToString(excepInfo.bstrDescription), excepInfo) - } - for i, varg := range vargs { - n := len(params) - i - 1 - if varg.VT == VT_BSTR && varg.Val != 0 { - SysFreeString(((*int16)(unsafe.Pointer(uintptr(varg.Val))))) - } - if varg.VT == (VT_BSTR|VT_BYREF) && varg.Val != 0 { - *(params[n].(*string)) = LpOleStrToString(*(**uint16)(unsafe.Pointer(uintptr(varg.Val)))) - } - } - return -} diff --git 
a/vendor/github.com/go-ole/go-ole/ienumvariant.go b/vendor/github.com/go-ole/go-ole/ienumvariant.go deleted file mode 100644 index 2433897..0000000 --- a/vendor/github.com/go-ole/go-ole/ienumvariant.go +++ /dev/null @@ -1,19 +0,0 @@ -package ole - -import "unsafe" - -type IEnumVARIANT struct { - IUnknown -} - -type IEnumVARIANTVtbl struct { - IUnknownVtbl - Next uintptr - Skip uintptr - Reset uintptr - Clone uintptr -} - -func (v *IEnumVARIANT) VTable() *IEnumVARIANTVtbl { - return (*IEnumVARIANTVtbl)(unsafe.Pointer(v.RawVTable)) -} diff --git a/vendor/github.com/go-ole/go-ole/ienumvariant_func.go b/vendor/github.com/go-ole/go-ole/ienumvariant_func.go deleted file mode 100644 index c148481..0000000 --- a/vendor/github.com/go-ole/go-ole/ienumvariant_func.go +++ /dev/null @@ -1,19 +0,0 @@ -// +build !windows - -package ole - -func (enum *IEnumVARIANT) Clone() (*IEnumVARIANT, error) { - return nil, NewError(E_NOTIMPL) -} - -func (enum *IEnumVARIANT) Reset() error { - return NewError(E_NOTIMPL) -} - -func (enum *IEnumVARIANT) Skip(celt uint) error { - return NewError(E_NOTIMPL) -} - -func (enum *IEnumVARIANT) Next(celt uint) (VARIANT, uint, error) { - return NewVariant(VT_NULL, int64(0)), 0, NewError(E_NOTIMPL) -} diff --git a/vendor/github.com/go-ole/go-ole/ienumvariant_windows.go b/vendor/github.com/go-ole/go-ole/ienumvariant_windows.go deleted file mode 100644 index 4781f3b..0000000 --- a/vendor/github.com/go-ole/go-ole/ienumvariant_windows.go +++ /dev/null @@ -1,63 +0,0 @@ -// +build windows - -package ole - -import ( - "syscall" - "unsafe" -) - -func (enum *IEnumVARIANT) Clone() (cloned *IEnumVARIANT, err error) { - hr, _, _ := syscall.Syscall( - enum.VTable().Clone, - 2, - uintptr(unsafe.Pointer(enum)), - uintptr(unsafe.Pointer(&cloned)), - 0) - if hr != 0 { - err = NewError(hr) - } - return -} - -func (enum *IEnumVARIANT) Reset() (err error) { - hr, _, _ := syscall.Syscall( - enum.VTable().Reset, - 1, - uintptr(unsafe.Pointer(enum)), - 0, - 0) - if hr != 0 { - 
err = NewError(hr) - } - return -} - -func (enum *IEnumVARIANT) Skip(celt uint) (err error) { - hr, _, _ := syscall.Syscall( - enum.VTable().Skip, - 2, - uintptr(unsafe.Pointer(enum)), - uintptr(celt), - 0) - if hr != 0 { - err = NewError(hr) - } - return -} - -func (enum *IEnumVARIANT) Next(celt uint) (array VARIANT, length uint, err error) { - hr, _, _ := syscall.Syscall6( - enum.VTable().Next, - 4, - uintptr(unsafe.Pointer(enum)), - uintptr(celt), - uintptr(unsafe.Pointer(&array)), - uintptr(unsafe.Pointer(&length)), - 0, - 0) - if hr != 0 { - err = NewError(hr) - } - return -} diff --git a/vendor/github.com/go-ole/go-ole/iinspectable.go b/vendor/github.com/go-ole/go-ole/iinspectable.go deleted file mode 100644 index f4a19e2..0000000 --- a/vendor/github.com/go-ole/go-ole/iinspectable.go +++ /dev/null @@ -1,18 +0,0 @@ -package ole - -import "unsafe" - -type IInspectable struct { - IUnknown -} - -type IInspectableVtbl struct { - IUnknownVtbl - GetIIds uintptr - GetRuntimeClassName uintptr - GetTrustLevel uintptr -} - -func (v *IInspectable) VTable() *IInspectableVtbl { - return (*IInspectableVtbl)(unsafe.Pointer(v.RawVTable)) -} diff --git a/vendor/github.com/go-ole/go-ole/iinspectable_func.go b/vendor/github.com/go-ole/go-ole/iinspectable_func.go deleted file mode 100644 index 348829b..0000000 --- a/vendor/github.com/go-ole/go-ole/iinspectable_func.go +++ /dev/null @@ -1,15 +0,0 @@ -// +build !windows - -package ole - -func (v *IInspectable) GetIids() ([]*GUID, error) { - return []*GUID{}, NewError(E_NOTIMPL) -} - -func (v *IInspectable) GetRuntimeClassName() (string, error) { - return "", NewError(E_NOTIMPL) -} - -func (v *IInspectable) GetTrustLevel() (uint32, error) { - return uint32(0), NewError(E_NOTIMPL) -} diff --git a/vendor/github.com/go-ole/go-ole/iinspectable_windows.go b/vendor/github.com/go-ole/go-ole/iinspectable_windows.go deleted file mode 100644 index 4519a4a..0000000 --- a/vendor/github.com/go-ole/go-ole/iinspectable_windows.go +++ /dev/null @@ 
-1,72 +0,0 @@ -// +build windows - -package ole - -import ( - "bytes" - "encoding/binary" - "reflect" - "syscall" - "unsafe" -) - -func (v *IInspectable) GetIids() (iids []*GUID, err error) { - var count uint32 - var array uintptr - hr, _, _ := syscall.Syscall( - v.VTable().GetIIds, - 3, - uintptr(unsafe.Pointer(v)), - uintptr(unsafe.Pointer(&count)), - uintptr(unsafe.Pointer(&array))) - if hr != 0 { - err = NewError(hr) - return - } - defer CoTaskMemFree(array) - - iids = make([]*GUID, count) - byteCount := count * uint32(unsafe.Sizeof(GUID{})) - slicehdr := reflect.SliceHeader{Data: array, Len: int(byteCount), Cap: int(byteCount)} - byteSlice := *(*[]byte)(unsafe.Pointer(&slicehdr)) - reader := bytes.NewReader(byteSlice) - for i := range iids { - guid := GUID{} - err = binary.Read(reader, binary.LittleEndian, &guid) - if err != nil { - return - } - iids[i] = &guid - } - return -} - -func (v *IInspectable) GetRuntimeClassName() (s string, err error) { - var hstring HString - hr, _, _ := syscall.Syscall( - v.VTable().GetRuntimeClassName, - 2, - uintptr(unsafe.Pointer(v)), - uintptr(unsafe.Pointer(&hstring)), - 0) - if hr != 0 { - err = NewError(hr) - return - } - s = hstring.String() - DeleteHString(hstring) - return -} - -func (v *IInspectable) GetTrustLevel() (level uint32, err error) { - hr, _, _ := syscall.Syscall( - v.VTable().GetTrustLevel, - 2, - uintptr(unsafe.Pointer(v)), - uintptr(unsafe.Pointer(&level)), - 0) - if hr != 0 { - err = NewError(hr) - } - return -} diff --git a/vendor/github.com/go-ole/go-ole/iprovideclassinfo.go b/vendor/github.com/go-ole/go-ole/iprovideclassinfo.go deleted file mode 100644 index 25f3a6f..0000000 --- a/vendor/github.com/go-ole/go-ole/iprovideclassinfo.go +++ /dev/null @@ -1,21 +0,0 @@ -package ole - -import "unsafe" - -type IProvideClassInfo struct { - IUnknown -} - -type IProvideClassInfoVtbl struct { - IUnknownVtbl - GetClassInfo uintptr -} - -func (v *IProvideClassInfo) VTable() *IProvideClassInfoVtbl { - return 
(*IProvideClassInfoVtbl)(unsafe.Pointer(v.RawVTable)) -} - -func (v *IProvideClassInfo) GetClassInfo() (cinfo *ITypeInfo, err error) { - cinfo, err = getClassInfo(v) - return -} diff --git a/vendor/github.com/go-ole/go-ole/iprovideclassinfo_func.go b/vendor/github.com/go-ole/go-ole/iprovideclassinfo_func.go deleted file mode 100644 index 7e3cb63..0000000 --- a/vendor/github.com/go-ole/go-ole/iprovideclassinfo_func.go +++ /dev/null @@ -1,7 +0,0 @@ -// +build !windows - -package ole - -func getClassInfo(disp *IProvideClassInfo) (tinfo *ITypeInfo, err error) { - return nil, NewError(E_NOTIMPL) -} diff --git a/vendor/github.com/go-ole/go-ole/iprovideclassinfo_windows.go b/vendor/github.com/go-ole/go-ole/iprovideclassinfo_windows.go deleted file mode 100644 index 2ad0163..0000000 --- a/vendor/github.com/go-ole/go-ole/iprovideclassinfo_windows.go +++ /dev/null @@ -1,21 +0,0 @@ -// +build windows - -package ole - -import ( - "syscall" - "unsafe" -) - -func getClassInfo(disp *IProvideClassInfo) (tinfo *ITypeInfo, err error) { - hr, _, _ := syscall.Syscall( - disp.VTable().GetClassInfo, - 2, - uintptr(unsafe.Pointer(disp)), - uintptr(unsafe.Pointer(&tinfo)), - 0) - if hr != 0 { - err = NewError(hr) - } - return -} diff --git a/vendor/github.com/go-ole/go-ole/itypeinfo.go b/vendor/github.com/go-ole/go-ole/itypeinfo.go deleted file mode 100644 index dd3c5e2..0000000 --- a/vendor/github.com/go-ole/go-ole/itypeinfo.go +++ /dev/null @@ -1,34 +0,0 @@ -package ole - -import "unsafe" - -type ITypeInfo struct { - IUnknown -} - -type ITypeInfoVtbl struct { - IUnknownVtbl - GetTypeAttr uintptr - GetTypeComp uintptr - GetFuncDesc uintptr - GetVarDesc uintptr - GetNames uintptr - GetRefTypeOfImplType uintptr - GetImplTypeFlags uintptr - GetIDsOfNames uintptr - Invoke uintptr - GetDocumentation uintptr - GetDllEntry uintptr - GetRefTypeInfo uintptr - AddressOfMember uintptr - CreateInstance uintptr - GetMops uintptr - GetContainingTypeLib uintptr - ReleaseTypeAttr uintptr - 
ReleaseFuncDesc uintptr - ReleaseVarDesc uintptr -} - -func (v *ITypeInfo) VTable() *ITypeInfoVtbl { - return (*ITypeInfoVtbl)(unsafe.Pointer(v.RawVTable)) -} diff --git a/vendor/github.com/go-ole/go-ole/itypeinfo_func.go b/vendor/github.com/go-ole/go-ole/itypeinfo_func.go deleted file mode 100644 index 8364a65..0000000 --- a/vendor/github.com/go-ole/go-ole/itypeinfo_func.go +++ /dev/null @@ -1,7 +0,0 @@ -// +build !windows - -package ole - -func (v *ITypeInfo) GetTypeAttr() (*TYPEATTR, error) { - return nil, NewError(E_NOTIMPL) -} diff --git a/vendor/github.com/go-ole/go-ole/itypeinfo_windows.go b/vendor/github.com/go-ole/go-ole/itypeinfo_windows.go deleted file mode 100644 index 54782b3..0000000 --- a/vendor/github.com/go-ole/go-ole/itypeinfo_windows.go +++ /dev/null @@ -1,21 +0,0 @@ -// +build windows - -package ole - -import ( - "syscall" - "unsafe" -) - -func (v *ITypeInfo) GetTypeAttr() (tattr *TYPEATTR, err error) { - hr, _, _ := syscall.Syscall( - uintptr(v.VTable().GetTypeAttr), - 2, - uintptr(unsafe.Pointer(v)), - uintptr(unsafe.Pointer(&tattr)), - 0) - if hr != 0 { - err = NewError(hr) - } - return -} diff --git a/vendor/github.com/go-ole/go-ole/iunknown.go b/vendor/github.com/go-ole/go-ole/iunknown.go deleted file mode 100644 index 108f28e..0000000 --- a/vendor/github.com/go-ole/go-ole/iunknown.go +++ /dev/null @@ -1,57 +0,0 @@ -package ole - -import "unsafe" - -type IUnknown struct { - RawVTable *interface{} -} - -type IUnknownVtbl struct { - QueryInterface uintptr - AddRef uintptr - Release uintptr -} - -type UnknownLike interface { - QueryInterface(iid *GUID) (disp *IDispatch, err error) - AddRef() int32 - Release() int32 -} - -func (v *IUnknown) VTable() *IUnknownVtbl { - return (*IUnknownVtbl)(unsafe.Pointer(v.RawVTable)) -} - -func (v *IUnknown) PutQueryInterface(interfaceID *GUID, obj interface{}) error { - return reflectQueryInterface(v, v.VTable().QueryInterface, interfaceID, obj) -} - -func (v *IUnknown) IDispatch(interfaceID *GUID) (dispatch 
*IDispatch, err error) { - err = v.PutQueryInterface(interfaceID, &dispatch) - return -} - -func (v *IUnknown) IEnumVARIANT(interfaceID *GUID) (enum *IEnumVARIANT, err error) { - err = v.PutQueryInterface(interfaceID, &enum) - return -} - -func (v *IUnknown) QueryInterface(iid *GUID) (*IDispatch, error) { - return queryInterface(v, iid) -} - -func (v *IUnknown) MustQueryInterface(iid *GUID) (disp *IDispatch) { - unk, err := queryInterface(v, iid) - if err != nil { - panic(err) - } - return unk -} - -func (v *IUnknown) AddRef() int32 { - return addRef(v) -} - -func (v *IUnknown) Release() int32 { - return release(v) -} diff --git a/vendor/github.com/go-ole/go-ole/iunknown_func.go b/vendor/github.com/go-ole/go-ole/iunknown_func.go deleted file mode 100644 index d0a62cf..0000000 --- a/vendor/github.com/go-ole/go-ole/iunknown_func.go +++ /dev/null @@ -1,19 +0,0 @@ -// +build !windows - -package ole - -func reflectQueryInterface(self interface{}, method uintptr, interfaceID *GUID, obj interface{}) (err error) { - return NewError(E_NOTIMPL) -} - -func queryInterface(unk *IUnknown, iid *GUID) (disp *IDispatch, err error) { - return nil, NewError(E_NOTIMPL) -} - -func addRef(unk *IUnknown) int32 { - return 0 -} - -func release(unk *IUnknown) int32 { - return 0 -} diff --git a/vendor/github.com/go-ole/go-ole/iunknown_windows.go b/vendor/github.com/go-ole/go-ole/iunknown_windows.go deleted file mode 100644 index ede5bb8..0000000 --- a/vendor/github.com/go-ole/go-ole/iunknown_windows.go +++ /dev/null @@ -1,58 +0,0 @@ -// +build windows - -package ole - -import ( - "reflect" - "syscall" - "unsafe" -) - -func reflectQueryInterface(self interface{}, method uintptr, interfaceID *GUID, obj interface{}) (err error) { - selfValue := reflect.ValueOf(self).Elem() - objValue := reflect.ValueOf(obj).Elem() - - hr, _, _ := syscall.Syscall( - method, - 3, - selfValue.UnsafeAddr(), - uintptr(unsafe.Pointer(interfaceID)), - objValue.Addr().Pointer()) - if hr != 0 { - err = NewError(hr) - } 
- return -} - -func queryInterface(unk *IUnknown, iid *GUID) (disp *IDispatch, err error) { - hr, _, _ := syscall.Syscall( - unk.VTable().QueryInterface, - 3, - uintptr(unsafe.Pointer(unk)), - uintptr(unsafe.Pointer(iid)), - uintptr(unsafe.Pointer(&disp))) - if hr != 0 { - err = NewError(hr) - } - return -} - -func addRef(unk *IUnknown) int32 { - ret, _, _ := syscall.Syscall( - unk.VTable().AddRef, - 1, - uintptr(unsafe.Pointer(unk)), - 0, - 0) - return int32(ret) -} - -func release(unk *IUnknown) int32 { - ret, _, _ := syscall.Syscall( - unk.VTable().Release, - 1, - uintptr(unsafe.Pointer(unk)), - 0, - 0) - return int32(ret) -} diff --git a/vendor/github.com/go-ole/go-ole/ole.go b/vendor/github.com/go-ole/go-ole/ole.go deleted file mode 100644 index e2ae4f4..0000000 --- a/vendor/github.com/go-ole/go-ole/ole.go +++ /dev/null @@ -1,157 +0,0 @@ -package ole - -import ( - "fmt" - "strings" -) - -// DISPPARAMS are the arguments that passed to methods or property. -type DISPPARAMS struct { - rgvarg uintptr - rgdispidNamedArgs uintptr - cArgs uint32 - cNamedArgs uint32 -} - -// EXCEPINFO defines exception info. -type EXCEPINFO struct { - wCode uint16 - wReserved uint16 - bstrSource *uint16 - bstrDescription *uint16 - bstrHelpFile *uint16 - dwHelpContext uint32 - pvReserved uintptr - pfnDeferredFillIn uintptr - scode uint32 -} - -// WCode return wCode in EXCEPINFO. -func (e EXCEPINFO) WCode() uint16 { - return e.wCode -} - -// SCODE return scode in EXCEPINFO. -func (e EXCEPINFO) SCODE() uint32 { - return e.scode -} - -// String convert EXCEPINFO to string. 
-func (e EXCEPINFO) String() string { - var src, desc, hlp string - if e.bstrSource == nil { - src = "" - } else { - src = BstrToString(e.bstrSource) - } - - if e.bstrDescription == nil { - desc = "" - } else { - desc = BstrToString(e.bstrDescription) - } - - if e.bstrHelpFile == nil { - hlp = "" - } else { - hlp = BstrToString(e.bstrHelpFile) - } - - return fmt.Sprintf( - "wCode: %#x, bstrSource: %v, bstrDescription: %v, bstrHelpFile: %v, dwHelpContext: %#x, scode: %#x", - e.wCode, src, desc, hlp, e.dwHelpContext, e.scode, - ) -} - -// Error implements error interface and returns error string. -func (e EXCEPINFO) Error() string { - if e.bstrDescription != nil { - return strings.TrimSpace(BstrToString(e.bstrDescription)) - } - - src := "Unknown" - if e.bstrSource != nil { - src = BstrToString(e.bstrSource) - } - - code := e.scode - if e.wCode != 0 { - code = uint32(e.wCode) - } - - return fmt.Sprintf("%v: %#x", src, code) -} - -// PARAMDATA defines parameter data type. -type PARAMDATA struct { - Name *int16 - Vt uint16 -} - -// METHODDATA defines method info. -type METHODDATA struct { - Name *uint16 - Data *PARAMDATA - Dispid int32 - Meth uint32 - CC int32 - CArgs uint32 - Flags uint16 - VtReturn uint32 -} - -// INTERFACEDATA defines interface info. -type INTERFACEDATA struct { - MethodData *METHODDATA - CMembers uint32 -} - -// Point is 2D vector type. -type Point struct { - X int32 - Y int32 -} - -// Msg is message between processes. -type Msg struct { - Hwnd uint32 - Message uint32 - Wparam int32 - Lparam int32 - Time uint32 - Pt Point -} - -// TYPEDESC defines data type. -type TYPEDESC struct { - Hreftype uint32 - VT uint16 -} - -// IDLDESC defines IDL info. -type IDLDESC struct { - DwReserved uint32 - WIDLFlags uint16 -} - -// TYPEATTR defines type info. 
-type TYPEATTR struct { - Guid GUID - Lcid uint32 - dwReserved uint32 - MemidConstructor int32 - MemidDestructor int32 - LpstrSchema *uint16 - CbSizeInstance uint32 - Typekind int32 - CFuncs uint16 - CVars uint16 - CImplTypes uint16 - CbSizeVft uint16 - CbAlignment uint16 - WTypeFlags uint16 - WMajorVerNum uint16 - WMinorVerNum uint16 - TdescAlias TYPEDESC - IdldescType IDLDESC -} diff --git a/vendor/github.com/go-ole/go-ole/oleutil/connection.go b/vendor/github.com/go-ole/go-ole/oleutil/connection.go deleted file mode 100644 index 60df73c..0000000 --- a/vendor/github.com/go-ole/go-ole/oleutil/connection.go +++ /dev/null @@ -1,100 +0,0 @@ -// +build windows - -package oleutil - -import ( - "reflect" - "unsafe" - - ole "github.com/go-ole/go-ole" -) - -type stdDispatch struct { - lpVtbl *stdDispatchVtbl - ref int32 - iid *ole.GUID - iface interface{} - funcMap map[string]int32 -} - -type stdDispatchVtbl struct { - pQueryInterface uintptr - pAddRef uintptr - pRelease uintptr - pGetTypeInfoCount uintptr - pGetTypeInfo uintptr - pGetIDsOfNames uintptr - pInvoke uintptr -} - -func dispQueryInterface(this *ole.IUnknown, iid *ole.GUID, punk **ole.IUnknown) uint32 { - pthis := (*stdDispatch)(unsafe.Pointer(this)) - *punk = nil - if ole.IsEqualGUID(iid, ole.IID_IUnknown) || - ole.IsEqualGUID(iid, ole.IID_IDispatch) { - dispAddRef(this) - *punk = this - return ole.S_OK - } - if ole.IsEqualGUID(iid, pthis.iid) { - dispAddRef(this) - *punk = this - return ole.S_OK - } - return ole.E_NOINTERFACE -} - -func dispAddRef(this *ole.IUnknown) int32 { - pthis := (*stdDispatch)(unsafe.Pointer(this)) - pthis.ref++ - return pthis.ref -} - -func dispRelease(this *ole.IUnknown) int32 { - pthis := (*stdDispatch)(unsafe.Pointer(this)) - pthis.ref-- - return pthis.ref -} - -func dispGetIDsOfNames(this *ole.IUnknown, iid *ole.GUID, wnames []*uint16, namelen int, lcid int, pdisp []int32) uintptr { - pthis := (*stdDispatch)(unsafe.Pointer(this)) - names := make([]string, len(wnames)) - for i := 
0; i < len(names); i++ { - names[i] = ole.LpOleStrToString(wnames[i]) - } - for n := 0; n < namelen; n++ { - if id, ok := pthis.funcMap[names[n]]; ok { - pdisp[n] = id - } - } - return ole.S_OK -} - -func dispGetTypeInfoCount(pcount *int) uintptr { - if pcount != nil { - *pcount = 0 - } - return ole.S_OK -} - -func dispGetTypeInfo(ptypeif *uintptr) uintptr { - return ole.E_NOTIMPL -} - -func dispInvoke(this *ole.IDispatch, dispid int32, riid *ole.GUID, lcid int, flags int16, dispparams *ole.DISPPARAMS, result *ole.VARIANT, pexcepinfo *ole.EXCEPINFO, nerr *uint) uintptr { - pthis := (*stdDispatch)(unsafe.Pointer(this)) - found := "" - for name, id := range pthis.funcMap { - if id == dispid { - found = name - } - } - if found != "" { - rv := reflect.ValueOf(pthis.iface).Elem() - rm := rv.MethodByName(found) - rr := rm.Call([]reflect.Value{}) - println(len(rr)) - return ole.S_OK - } - return ole.E_NOTIMPL -} diff --git a/vendor/github.com/go-ole/go-ole/oleutil/connection_func.go b/vendor/github.com/go-ole/go-ole/oleutil/connection_func.go deleted file mode 100644 index 8818fb8..0000000 --- a/vendor/github.com/go-ole/go-ole/oleutil/connection_func.go +++ /dev/null @@ -1,10 +0,0 @@ -// +build !windows - -package oleutil - -import ole "github.com/go-ole/go-ole" - -// ConnectObject creates a connection point between two services for communication. -func ConnectObject(disp *ole.IDispatch, iid *ole.GUID, idisp interface{}) (uint32, error) { - return 0, ole.NewError(ole.E_NOTIMPL) -} diff --git a/vendor/github.com/go-ole/go-ole/oleutil/connection_windows.go b/vendor/github.com/go-ole/go-ole/oleutil/connection_windows.go deleted file mode 100644 index ab9c0d8..0000000 --- a/vendor/github.com/go-ole/go-ole/oleutil/connection_windows.go +++ /dev/null @@ -1,58 +0,0 @@ -// +build windows - -package oleutil - -import ( - "reflect" - "syscall" - "unsafe" - - ole "github.com/go-ole/go-ole" -) - -// ConnectObject creates a connection point between two services for communication. 
-func ConnectObject(disp *ole.IDispatch, iid *ole.GUID, idisp interface{}) (cookie uint32, err error) { - unknown, err := disp.QueryInterface(ole.IID_IConnectionPointContainer) - if err != nil { - return - } - - container := (*ole.IConnectionPointContainer)(unsafe.Pointer(unknown)) - var point *ole.IConnectionPoint - err = container.FindConnectionPoint(iid, &point) - if err != nil { - return - } - if edisp, ok := idisp.(*ole.IUnknown); ok { - cookie, err = point.Advise(edisp) - container.Release() - if err != nil { - return - } - } - rv := reflect.ValueOf(disp).Elem() - if rv.Type().Kind() == reflect.Struct { - dest := &stdDispatch{} - dest.lpVtbl = &stdDispatchVtbl{} - dest.lpVtbl.pQueryInterface = syscall.NewCallback(dispQueryInterface) - dest.lpVtbl.pAddRef = syscall.NewCallback(dispAddRef) - dest.lpVtbl.pRelease = syscall.NewCallback(dispRelease) - dest.lpVtbl.pGetTypeInfoCount = syscall.NewCallback(dispGetTypeInfoCount) - dest.lpVtbl.pGetTypeInfo = syscall.NewCallback(dispGetTypeInfo) - dest.lpVtbl.pGetIDsOfNames = syscall.NewCallback(dispGetIDsOfNames) - dest.lpVtbl.pInvoke = syscall.NewCallback(dispInvoke) - dest.iface = disp - dest.iid = iid - cookie, err = point.Advise((*ole.IUnknown)(unsafe.Pointer(dest))) - container.Release() - if err != nil { - point.Release() - return - } - return - } - - container.Release() - - return 0, ole.NewError(ole.E_INVALIDARG) -} diff --git a/vendor/github.com/go-ole/go-ole/oleutil/go-get.go b/vendor/github.com/go-ole/go-ole/oleutil/go-get.go deleted file mode 100644 index 5834762..0000000 --- a/vendor/github.com/go-ole/go-ole/oleutil/go-get.go +++ /dev/null @@ -1,6 +0,0 @@ -// This file is here so go get succeeds as without it errors with: -// no buildable Go source files in ... 
-// -// +build !windows - -package oleutil diff --git a/vendor/github.com/go-ole/go-ole/oleutil/oleutil.go b/vendor/github.com/go-ole/go-ole/oleutil/oleutil.go deleted file mode 100644 index f7803c1..0000000 --- a/vendor/github.com/go-ole/go-ole/oleutil/oleutil.go +++ /dev/null @@ -1,127 +0,0 @@ -package oleutil - -import ole "github.com/go-ole/go-ole" - -// ClassIDFrom retrieves class ID whether given is program ID or application string. -func ClassIDFrom(programID string) (classID *ole.GUID, err error) { - return ole.ClassIDFrom(programID) -} - -// CreateObject creates object from programID based on interface type. -// -// Only supports IUnknown. -// -// Program ID can be either program ID or application string. -func CreateObject(programID string) (unknown *ole.IUnknown, err error) { - classID, err := ole.ClassIDFrom(programID) - if err != nil { - return - } - - unknown, err = ole.CreateInstance(classID, ole.IID_IUnknown) - if err != nil { - return - } - - return -} - -// GetActiveObject retrieves active object for program ID and interface ID based -// on interface type. -// -// Only supports IUnknown. -// -// Program ID can be either program ID or application string. -func GetActiveObject(programID string) (unknown *ole.IUnknown, err error) { - classID, err := ole.ClassIDFrom(programID) - if err != nil { - return - } - - unknown, err = ole.GetActiveObject(classID, ole.IID_IUnknown) - if err != nil { - return - } - - return -} - -// CallMethod calls method on IDispatch with parameters. -func CallMethod(disp *ole.IDispatch, name string, params ...interface{}) (result *ole.VARIANT, err error) { - return disp.InvokeWithOptionalArgs(name, ole.DISPATCH_METHOD, params) -} - -// MustCallMethod calls method on IDispatch with parameters or panics. -func MustCallMethod(disp *ole.IDispatch, name string, params ...interface{}) (result *ole.VARIANT) { - r, err := CallMethod(disp, name, params...) 
- if err != nil { - panic(err.Error()) - } - return r -} - -// GetProperty retrieves property from IDispatch. -func GetProperty(disp *ole.IDispatch, name string, params ...interface{}) (result *ole.VARIANT, err error) { - return disp.InvokeWithOptionalArgs(name, ole.DISPATCH_PROPERTYGET, params) -} - -// MustGetProperty retrieves property from IDispatch or panics. -func MustGetProperty(disp *ole.IDispatch, name string, params ...interface{}) (result *ole.VARIANT) { - r, err := GetProperty(disp, name, params...) - if err != nil { - panic(err.Error()) - } - return r -} - -// PutProperty mutates property. -func PutProperty(disp *ole.IDispatch, name string, params ...interface{}) (result *ole.VARIANT, err error) { - return disp.InvokeWithOptionalArgs(name, ole.DISPATCH_PROPERTYPUT, params) -} - -// MustPutProperty mutates property or panics. -func MustPutProperty(disp *ole.IDispatch, name string, params ...interface{}) (result *ole.VARIANT) { - r, err := PutProperty(disp, name, params...) - if err != nil { - panic(err.Error()) - } - return r -} - -// PutPropertyRef mutates property reference. -func PutPropertyRef(disp *ole.IDispatch, name string, params ...interface{}) (result *ole.VARIANT, err error) { - return disp.InvokeWithOptionalArgs(name, ole.DISPATCH_PROPERTYPUTREF, params) -} - -// MustPutPropertyRef mutates property reference or panics. -func MustPutPropertyRef(disp *ole.IDispatch, name string, params ...interface{}) (result *ole.VARIANT) { - r, err := PutPropertyRef(disp, name, params...) 
- if err != nil { - panic(err.Error()) - } - return r -} - -func ForEach(disp *ole.IDispatch, f func(v *ole.VARIANT) error) error { - newEnum, err := disp.GetProperty("_NewEnum") - if err != nil { - return err - } - defer newEnum.Clear() - - enum, err := newEnum.ToIUnknown().IEnumVARIANT(ole.IID_IEnumVariant) - if err != nil { - return err - } - defer enum.Release() - - for item, length, err := enum.Next(1); length > 0; item, length, err = enum.Next(1) { - if err != nil { - return err - } - if ferr := f(&item); ferr != nil { - return ferr - } - } - return nil -} diff --git a/vendor/github.com/go-ole/go-ole/safearray.go b/vendor/github.com/go-ole/go-ole/safearray.go deleted file mode 100644 index a5201b5..0000000 --- a/vendor/github.com/go-ole/go-ole/safearray.go +++ /dev/null @@ -1,27 +0,0 @@ -// Package is meant to retrieve and process safe array data returned from COM. - -package ole - -// SafeArrayBound defines the SafeArray boundaries. -type SafeArrayBound struct { - Elements uint32 - LowerBound int32 -} - -// SafeArray is how COM handles arrays. -type SafeArray struct { - Dimensions uint16 - FeaturesFlag uint16 - ElementsSize uint32 - LocksAmount uint32 - Data uint32 - Bounds [16]byte -} - -// SAFEARRAY is obsolete, exists for backwards compatibility. -// Use SafeArray -type SAFEARRAY SafeArray - -// SAFEARRAYBOUND is obsolete, exists for backwards compatibility. -// Use SafeArrayBound -type SAFEARRAYBOUND SafeArrayBound diff --git a/vendor/github.com/go-ole/go-ole/safearray_func.go b/vendor/github.com/go-ole/go-ole/safearray_func.go deleted file mode 100644 index 8ff0baa..0000000 --- a/vendor/github.com/go-ole/go-ole/safearray_func.go +++ /dev/null @@ -1,211 +0,0 @@ -// +build !windows - -package ole - -import ( - "unsafe" -) - -// safeArrayAccessData returns raw array pointer. -// -// AKA: SafeArrayAccessData in Windows API. 
-func safeArrayAccessData(safearray *SafeArray) (uintptr, error) { - return uintptr(0), NewError(E_NOTIMPL) -} - -// safeArrayUnaccessData releases raw array. -// -// AKA: SafeArrayUnaccessData in Windows API. -func safeArrayUnaccessData(safearray *SafeArray) error { - return NewError(E_NOTIMPL) -} - -// safeArrayAllocData allocates SafeArray. -// -// AKA: SafeArrayAllocData in Windows API. -func safeArrayAllocData(safearray *SafeArray) error { - return NewError(E_NOTIMPL) -} - -// safeArrayAllocDescriptor allocates SafeArray. -// -// AKA: SafeArrayAllocDescriptor in Windows API. -func safeArrayAllocDescriptor(dimensions uint32) (*SafeArray, error) { - return nil, NewError(E_NOTIMPL) -} - -// safeArrayAllocDescriptorEx allocates SafeArray. -// -// AKA: SafeArrayAllocDescriptorEx in Windows API. -func safeArrayAllocDescriptorEx(variantType VT, dimensions uint32) (*SafeArray, error) { - return nil, NewError(E_NOTIMPL) -} - -// safeArrayCopy returns copy of SafeArray. -// -// AKA: SafeArrayCopy in Windows API. -func safeArrayCopy(original *SafeArray) (*SafeArray, error) { - return nil, NewError(E_NOTIMPL) -} - -// safeArrayCopyData duplicates SafeArray into another SafeArray object. -// -// AKA: SafeArrayCopyData in Windows API. -func safeArrayCopyData(original *SafeArray, duplicate *SafeArray) error { - return NewError(E_NOTIMPL) -} - -// safeArrayCreate creates SafeArray. -// -// AKA: SafeArrayCreate in Windows API. -func safeArrayCreate(variantType VT, dimensions uint32, bounds *SafeArrayBound) (*SafeArray, error) { - return nil, NewError(E_NOTIMPL) -} - -// safeArrayCreateEx creates SafeArray. -// -// AKA: SafeArrayCreateEx in Windows API. -func safeArrayCreateEx(variantType VT, dimensions uint32, bounds *SafeArrayBound, extra uintptr) (*SafeArray, error) { - return nil, NewError(E_NOTIMPL) -} - -// safeArrayCreateVector creates SafeArray. -// -// AKA: SafeArrayCreateVector in Windows API. 
-func safeArrayCreateVector(variantType VT, lowerBound int32, length uint32) (*SafeArray, error) { - return nil, NewError(E_NOTIMPL) -} - -// safeArrayCreateVectorEx creates SafeArray. -// -// AKA: SafeArrayCreateVectorEx in Windows API. -func safeArrayCreateVectorEx(variantType VT, lowerBound int32, length uint32, extra uintptr) (*SafeArray, error) { - return nil, NewError(E_NOTIMPL) -} - -// safeArrayDestroy destroys SafeArray object. -// -// AKA: SafeArrayDestroy in Windows API. -func safeArrayDestroy(safearray *SafeArray) error { - return NewError(E_NOTIMPL) -} - -// safeArrayDestroyData destroys SafeArray object. -// -// AKA: SafeArrayDestroyData in Windows API. -func safeArrayDestroyData(safearray *SafeArray) error { - return NewError(E_NOTIMPL) -} - -// safeArrayDestroyDescriptor destroys SafeArray object. -// -// AKA: SafeArrayDestroyDescriptor in Windows API. -func safeArrayDestroyDescriptor(safearray *SafeArray) error { - return NewError(E_NOTIMPL) -} - -// safeArrayGetDim is the amount of dimensions in the SafeArray. -// -// SafeArrays may have multiple dimensions. Meaning, it could be -// multidimensional array. -// -// AKA: SafeArrayGetDim in Windows API. -func safeArrayGetDim(safearray *SafeArray) (*uint32, error) { - u := uint32(0) - return &u, NewError(E_NOTIMPL) -} - -// safeArrayGetElementSize is the element size in bytes. -// -// AKA: SafeArrayGetElemsize in Windows API. -func safeArrayGetElementSize(safearray *SafeArray) (*uint32, error) { - u := uint32(0) - return &u, NewError(E_NOTIMPL) -} - -// safeArrayGetElement retrieves element at given index. -func safeArrayGetElement(safearray *SafeArray, index int64, pv unsafe.Pointer) error { - return NewError(E_NOTIMPL) -} - -// safeArrayGetElement retrieves element at given index and converts to string. -func safeArrayGetElementString(safearray *SafeArray, index int64) (string, error) { - return "", NewError(E_NOTIMPL) -} - -// safeArrayGetIID is the InterfaceID of the elements in the SafeArray. 
-// -// AKA: SafeArrayGetIID in Windows API. -func safeArrayGetIID(safearray *SafeArray) (*GUID, error) { - return nil, NewError(E_NOTIMPL) -} - -// safeArrayGetLBound returns lower bounds of SafeArray. -// -// SafeArrays may have multiple dimensions. Meaning, it could be -// multidimensional array. -// -// AKA: SafeArrayGetLBound in Windows API. -func safeArrayGetLBound(safearray *SafeArray, dimension uint32) (int64, error) { - return int64(0), NewError(E_NOTIMPL) -} - -// safeArrayGetUBound returns upper bounds of SafeArray. -// -// SafeArrays may have multiple dimensions. Meaning, it could be -// multidimensional array. -// -// AKA: SafeArrayGetUBound in Windows API. -func safeArrayGetUBound(safearray *SafeArray, dimension uint32) (int64, error) { - return int64(0), NewError(E_NOTIMPL) -} - -// safeArrayGetVartype returns data type of SafeArray. -// -// AKA: SafeArrayGetVartype in Windows API. -func safeArrayGetVartype(safearray *SafeArray) (uint16, error) { - return uint16(0), NewError(E_NOTIMPL) -} - -// safeArrayLock locks SafeArray for reading to modify SafeArray. -// -// This must be called during some calls to ensure that another process does not -// read or write to the SafeArray during editing. -// -// AKA: SafeArrayLock in Windows API. -func safeArrayLock(safearray *SafeArray) error { - return NewError(E_NOTIMPL) -} - -// safeArrayUnlock unlocks SafeArray for reading. -// -// AKA: SafeArrayUnlock in Windows API. -func safeArrayUnlock(safearray *SafeArray) error { - return NewError(E_NOTIMPL) -} - -// safeArrayPutElement stores the data element at the specified location in the -// array. -// -// AKA: SafeArrayPutElement in Windows API. -func safeArrayPutElement(safearray *SafeArray, index int64, element uintptr) error { - return NewError(E_NOTIMPL) -} - -// safeArrayGetRecordInfo accesses IRecordInfo info for custom types. -// -// AKA: SafeArrayGetRecordInfo in Windows API. -// -// XXX: Must implement IRecordInfo interface for this to return. 
-func safeArrayGetRecordInfo(safearray *SafeArray) (interface{}, error) { - return nil, NewError(E_NOTIMPL) -} - -// safeArraySetRecordInfo mutates IRecordInfo info for custom types. -// -// AKA: SafeArraySetRecordInfo in Windows API. -// -// XXX: Must implement IRecordInfo interface for this to return. -func safeArraySetRecordInfo(safearray *SafeArray, recordInfo interface{}) error { - return NewError(E_NOTIMPL) -} diff --git a/vendor/github.com/go-ole/go-ole/safearray_windows.go b/vendor/github.com/go-ole/go-ole/safearray_windows.go deleted file mode 100644 index b27936e..0000000 --- a/vendor/github.com/go-ole/go-ole/safearray_windows.go +++ /dev/null @@ -1,337 +0,0 @@ -// +build windows - -package ole - -import ( - "unsafe" -) - -var ( - procSafeArrayAccessData, _ = modoleaut32.FindProc("SafeArrayAccessData") - procSafeArrayAllocData, _ = modoleaut32.FindProc("SafeArrayAllocData") - procSafeArrayAllocDescriptor, _ = modoleaut32.FindProc("SafeArrayAllocDescriptor") - procSafeArrayAllocDescriptorEx, _ = modoleaut32.FindProc("SafeArrayAllocDescriptorEx") - procSafeArrayCopy, _ = modoleaut32.FindProc("SafeArrayCopy") - procSafeArrayCopyData, _ = modoleaut32.FindProc("SafeArrayCopyData") - procSafeArrayCreate, _ = modoleaut32.FindProc("SafeArrayCreate") - procSafeArrayCreateEx, _ = modoleaut32.FindProc("SafeArrayCreateEx") - procSafeArrayCreateVector, _ = modoleaut32.FindProc("SafeArrayCreateVector") - procSafeArrayCreateVectorEx, _ = modoleaut32.FindProc("SafeArrayCreateVectorEx") - procSafeArrayDestroy, _ = modoleaut32.FindProc("SafeArrayDestroy") - procSafeArrayDestroyData, _ = modoleaut32.FindProc("SafeArrayDestroyData") - procSafeArrayDestroyDescriptor, _ = modoleaut32.FindProc("SafeArrayDestroyDescriptor") - procSafeArrayGetDim, _ = modoleaut32.FindProc("SafeArrayGetDim") - procSafeArrayGetElement, _ = modoleaut32.FindProc("SafeArrayGetElement") - procSafeArrayGetElemsize, _ = modoleaut32.FindProc("SafeArrayGetElemsize") - procSafeArrayGetIID, _ = 
modoleaut32.FindProc("SafeArrayGetIID") - procSafeArrayGetLBound, _ = modoleaut32.FindProc("SafeArrayGetLBound") - procSafeArrayGetUBound, _ = modoleaut32.FindProc("SafeArrayGetUBound") - procSafeArrayGetVartype, _ = modoleaut32.FindProc("SafeArrayGetVartype") - procSafeArrayLock, _ = modoleaut32.FindProc("SafeArrayLock") - procSafeArrayPtrOfIndex, _ = modoleaut32.FindProc("SafeArrayPtrOfIndex") - procSafeArrayUnaccessData, _ = modoleaut32.FindProc("SafeArrayUnaccessData") - procSafeArrayUnlock, _ = modoleaut32.FindProc("SafeArrayUnlock") - procSafeArrayPutElement, _ = modoleaut32.FindProc("SafeArrayPutElement") - //procSafeArrayRedim, _ = modoleaut32.FindProc("SafeArrayRedim") // TODO - //procSafeArraySetIID, _ = modoleaut32.FindProc("SafeArraySetIID") // TODO - procSafeArrayGetRecordInfo, _ = modoleaut32.FindProc("SafeArrayGetRecordInfo") - procSafeArraySetRecordInfo, _ = modoleaut32.FindProc("SafeArraySetRecordInfo") -) - -// safeArrayAccessData returns raw array pointer. -// -// AKA: SafeArrayAccessData in Windows API. -// Todo: Test -func safeArrayAccessData(safearray *SafeArray) (element uintptr, err error) { - err = convertHresultToError( - procSafeArrayAccessData.Call( - uintptr(unsafe.Pointer(safearray)), - uintptr(unsafe.Pointer(&element)))) - return -} - -// safeArrayUnaccessData releases raw array. -// -// AKA: SafeArrayUnaccessData in Windows API. -func safeArrayUnaccessData(safearray *SafeArray) (err error) { - err = convertHresultToError(procSafeArrayUnaccessData.Call(uintptr(unsafe.Pointer(safearray)))) - return -} - -// safeArrayAllocData allocates SafeArray. -// -// AKA: SafeArrayAllocData in Windows API. -func safeArrayAllocData(safearray *SafeArray) (err error) { - err = convertHresultToError(procSafeArrayAllocData.Call(uintptr(unsafe.Pointer(safearray)))) - return -} - -// safeArrayAllocDescriptor allocates SafeArray. -// -// AKA: SafeArrayAllocDescriptor in Windows API. 
-func safeArrayAllocDescriptor(dimensions uint32) (safearray *SafeArray, err error) { - err = convertHresultToError( - procSafeArrayAllocDescriptor.Call(uintptr(dimensions), uintptr(unsafe.Pointer(&safearray)))) - return -} - -// safeArrayAllocDescriptorEx allocates SafeArray. -// -// AKA: SafeArrayAllocDescriptorEx in Windows API. -func safeArrayAllocDescriptorEx(variantType VT, dimensions uint32) (safearray *SafeArray, err error) { - err = convertHresultToError( - procSafeArrayAllocDescriptorEx.Call( - uintptr(variantType), - uintptr(dimensions), - uintptr(unsafe.Pointer(&safearray)))) - return -} - -// safeArrayCopy returns copy of SafeArray. -// -// AKA: SafeArrayCopy in Windows API. -func safeArrayCopy(original *SafeArray) (safearray *SafeArray, err error) { - err = convertHresultToError( - procSafeArrayCopy.Call( - uintptr(unsafe.Pointer(original)), - uintptr(unsafe.Pointer(&safearray)))) - return -} - -// safeArrayCopyData duplicates SafeArray into another SafeArray object. -// -// AKA: SafeArrayCopyData in Windows API. -func safeArrayCopyData(original *SafeArray, duplicate *SafeArray) (err error) { - err = convertHresultToError( - procSafeArrayCopyData.Call( - uintptr(unsafe.Pointer(original)), - uintptr(unsafe.Pointer(duplicate)))) - return -} - -// safeArrayCreate creates SafeArray. -// -// AKA: SafeArrayCreate in Windows API. -func safeArrayCreate(variantType VT, dimensions uint32, bounds *SafeArrayBound) (safearray *SafeArray, err error) { - sa, _, err := procSafeArrayCreate.Call( - uintptr(variantType), - uintptr(dimensions), - uintptr(unsafe.Pointer(bounds))) - safearray = (*SafeArray)(unsafe.Pointer(&sa)) - return -} - -// safeArrayCreateEx creates SafeArray. -// -// AKA: SafeArrayCreateEx in Windows API. 
-func safeArrayCreateEx(variantType VT, dimensions uint32, bounds *SafeArrayBound, extra uintptr) (safearray *SafeArray, err error) { - sa, _, err := procSafeArrayCreateEx.Call( - uintptr(variantType), - uintptr(dimensions), - uintptr(unsafe.Pointer(bounds)), - extra) - safearray = (*SafeArray)(unsafe.Pointer(sa)) - return -} - -// safeArrayCreateVector creates SafeArray. -// -// AKA: SafeArrayCreateVector in Windows API. -func safeArrayCreateVector(variantType VT, lowerBound int32, length uint32) (safearray *SafeArray, err error) { - sa, _, err := procSafeArrayCreateVector.Call( - uintptr(variantType), - uintptr(lowerBound), - uintptr(length)) - safearray = (*SafeArray)(unsafe.Pointer(sa)) - return -} - -// safeArrayCreateVectorEx creates SafeArray. -// -// AKA: SafeArrayCreateVectorEx in Windows API. -func safeArrayCreateVectorEx(variantType VT, lowerBound int32, length uint32, extra uintptr) (safearray *SafeArray, err error) { - sa, _, err := procSafeArrayCreateVectorEx.Call( - uintptr(variantType), - uintptr(lowerBound), - uintptr(length), - extra) - safearray = (*SafeArray)(unsafe.Pointer(sa)) - return -} - -// safeArrayDestroy destroys SafeArray object. -// -// AKA: SafeArrayDestroy in Windows API. -func safeArrayDestroy(safearray *SafeArray) (err error) { - err = convertHresultToError(procSafeArrayDestroy.Call(uintptr(unsafe.Pointer(safearray)))) - return -} - -// safeArrayDestroyData destroys SafeArray object. -// -// AKA: SafeArrayDestroyData in Windows API. -func safeArrayDestroyData(safearray *SafeArray) (err error) { - err = convertHresultToError(procSafeArrayDestroyData.Call(uintptr(unsafe.Pointer(safearray)))) - return -} - -// safeArrayDestroyDescriptor destroys SafeArray object. -// -// AKA: SafeArrayDestroyDescriptor in Windows API. 
-func safeArrayDestroyDescriptor(safearray *SafeArray) (err error) { - err = convertHresultToError(procSafeArrayDestroyDescriptor.Call(uintptr(unsafe.Pointer(safearray)))) - return -} - -// safeArrayGetDim is the amount of dimensions in the SafeArray. -// -// SafeArrays may have multiple dimensions. Meaning, it could be -// multidimensional array. -// -// AKA: SafeArrayGetDim in Windows API. -func safeArrayGetDim(safearray *SafeArray) (dimensions *uint32, err error) { - l, _, err := procSafeArrayGetDim.Call(uintptr(unsafe.Pointer(safearray))) - dimensions = (*uint32)(unsafe.Pointer(l)) - return -} - -// safeArrayGetElementSize is the element size in bytes. -// -// AKA: SafeArrayGetElemsize in Windows API. -func safeArrayGetElementSize(safearray *SafeArray) (length *uint32, err error) { - l, _, err := procSafeArrayGetElemsize.Call(uintptr(unsafe.Pointer(safearray))) - length = (*uint32)(unsafe.Pointer(l)) - return -} - -// safeArrayGetElement retrieves element at given index. -func safeArrayGetElement(safearray *SafeArray, index int64, pv unsafe.Pointer) error { - return convertHresultToError( - procSafeArrayGetElement.Call( - uintptr(unsafe.Pointer(safearray)), - uintptr(unsafe.Pointer(&index)), - uintptr(pv))) -} - -// safeArrayGetElementString retrieves element at given index and converts to string. -func safeArrayGetElementString(safearray *SafeArray, index int64) (str string, err error) { - var element *int16 - err = convertHresultToError( - procSafeArrayGetElement.Call( - uintptr(unsafe.Pointer(safearray)), - uintptr(unsafe.Pointer(&index)), - uintptr(unsafe.Pointer(&element)))) - str = BstrToString(*(**uint16)(unsafe.Pointer(&element))) - SysFreeString(element) - return -} - -// safeArrayGetIID is the InterfaceID of the elements in the SafeArray. -// -// AKA: SafeArrayGetIID in Windows API. 
-func safeArrayGetIID(safearray *SafeArray) (guid *GUID, err error) { - err = convertHresultToError( - procSafeArrayGetIID.Call( - uintptr(unsafe.Pointer(safearray)), - uintptr(unsafe.Pointer(&guid)))) - return -} - -// safeArrayGetLBound returns lower bounds of SafeArray. -// -// SafeArrays may have multiple dimensions. Meaning, it could be -// multidimensional array. -// -// AKA: SafeArrayGetLBound in Windows API. -func safeArrayGetLBound(safearray *SafeArray, dimension uint32) (lowerBound int64, err error) { - err = convertHresultToError( - procSafeArrayGetLBound.Call( - uintptr(unsafe.Pointer(safearray)), - uintptr(dimension), - uintptr(unsafe.Pointer(&lowerBound)))) - return -} - -// safeArrayGetUBound returns upper bounds of SafeArray. -// -// SafeArrays may have multiple dimensions. Meaning, it could be -// multidimensional array. -// -// AKA: SafeArrayGetUBound in Windows API. -func safeArrayGetUBound(safearray *SafeArray, dimension uint32) (upperBound int64, err error) { - err = convertHresultToError( - procSafeArrayGetUBound.Call( - uintptr(unsafe.Pointer(safearray)), - uintptr(dimension), - uintptr(unsafe.Pointer(&upperBound)))) - return -} - -// safeArrayGetVartype returns data type of SafeArray. -// -// AKA: SafeArrayGetVartype in Windows API. -func safeArrayGetVartype(safearray *SafeArray) (varType uint16, err error) { - err = convertHresultToError( - procSafeArrayGetVartype.Call( - uintptr(unsafe.Pointer(safearray)), - uintptr(unsafe.Pointer(&varType)))) - return -} - -// safeArrayLock locks SafeArray for reading to modify SafeArray. -// -// This must be called during some calls to ensure that another process does not -// read or write to the SafeArray during editing. -// -// AKA: SafeArrayLock in Windows API. -func safeArrayLock(safearray *SafeArray) (err error) { - err = convertHresultToError(procSafeArrayLock.Call(uintptr(unsafe.Pointer(safearray)))) - return -} - -// safeArrayUnlock unlocks SafeArray for reading. 
-// -// AKA: SafeArrayUnlock in Windows API. -func safeArrayUnlock(safearray *SafeArray) (err error) { - err = convertHresultToError(procSafeArrayUnlock.Call(uintptr(unsafe.Pointer(safearray)))) - return -} - -// safeArrayPutElement stores the data element at the specified location in the -// array. -// -// AKA: SafeArrayPutElement in Windows API. -func safeArrayPutElement(safearray *SafeArray, index int64, element uintptr) (err error) { - err = convertHresultToError( - procSafeArrayPutElement.Call( - uintptr(unsafe.Pointer(safearray)), - uintptr(unsafe.Pointer(&index)), - uintptr(unsafe.Pointer(element)))) - return -} - -// safeArrayGetRecordInfo accesses IRecordInfo info for custom types. -// -// AKA: SafeArrayGetRecordInfo in Windows API. -// -// XXX: Must implement IRecordInfo interface for this to return. -func safeArrayGetRecordInfo(safearray *SafeArray) (recordInfo interface{}, err error) { - err = convertHresultToError( - procSafeArrayGetRecordInfo.Call( - uintptr(unsafe.Pointer(safearray)), - uintptr(unsafe.Pointer(&recordInfo)))) - return -} - -// safeArraySetRecordInfo mutates IRecordInfo info for custom types. -// -// AKA: SafeArraySetRecordInfo in Windows API. -// -// XXX: Must implement IRecordInfo interface for this to return. -func safeArraySetRecordInfo(safearray *SafeArray, recordInfo interface{}) (err error) { - err = convertHresultToError( - procSafeArraySetRecordInfo.Call( - uintptr(unsafe.Pointer(safearray)), - uintptr(unsafe.Pointer(&recordInfo)))) - return -} diff --git a/vendor/github.com/go-ole/go-ole/safearrayconversion.go b/vendor/github.com/go-ole/go-ole/safearrayconversion.go deleted file mode 100644 index ffeb2b9..0000000 --- a/vendor/github.com/go-ole/go-ole/safearrayconversion.go +++ /dev/null @@ -1,140 +0,0 @@ -// Helper for converting SafeArray to array of objects. 
- -package ole - -import ( - "unsafe" -) - -type SafeArrayConversion struct { - Array *SafeArray -} - -func (sac *SafeArrayConversion) ToStringArray() (strings []string) { - totalElements, _ := sac.TotalElements(0) - strings = make([]string, totalElements) - - for i := int64(0); i < totalElements; i++ { - strings[int32(i)], _ = safeArrayGetElementString(sac.Array, i) - } - - return -} - -func (sac *SafeArrayConversion) ToByteArray() (bytes []byte) { - totalElements, _ := sac.TotalElements(0) - bytes = make([]byte, totalElements) - - for i := int64(0); i < totalElements; i++ { - safeArrayGetElement(sac.Array, i, unsafe.Pointer(&bytes[int32(i)])) - } - - return -} - -func (sac *SafeArrayConversion) ToValueArray() (values []interface{}) { - totalElements, _ := sac.TotalElements(0) - values = make([]interface{}, totalElements) - vt, _ := safeArrayGetVartype(sac.Array) - - for i := 0; i < int(totalElements); i++ { - switch VT(vt) { - case VT_BOOL: - var v bool - safeArrayGetElement(sac.Array, int64(i), unsafe.Pointer(&v)) - values[i] = v - case VT_I1: - var v int8 - safeArrayGetElement(sac.Array, int64(i), unsafe.Pointer(&v)) - values[i] = v - case VT_I2: - var v int16 - safeArrayGetElement(sac.Array, int64(i), unsafe.Pointer(&v)) - values[i] = v - case VT_I4: - var v int32 - safeArrayGetElement(sac.Array, int64(i), unsafe.Pointer(&v)) - values[i] = v - case VT_I8: - var v int64 - safeArrayGetElement(sac.Array, int64(i), unsafe.Pointer(&v)) - values[i] = v - case VT_UI1: - var v uint8 - safeArrayGetElement(sac.Array, int64(i), unsafe.Pointer(&v)) - values[i] = v - case VT_UI2: - var v uint16 - safeArrayGetElement(sac.Array, int64(i), unsafe.Pointer(&v)) - values[i] = v - case VT_UI4: - var v uint32 - safeArrayGetElement(sac.Array, int64(i), unsafe.Pointer(&v)) - values[i] = v - case VT_UI8: - var v uint64 - safeArrayGetElement(sac.Array, int64(i), unsafe.Pointer(&v)) - values[i] = v - case VT_R4: - var v float32 - safeArrayGetElement(sac.Array, int64(i), 
unsafe.Pointer(&v)) - values[i] = v - case VT_R8: - var v float64 - safeArrayGetElement(sac.Array, int64(i), unsafe.Pointer(&v)) - values[i] = v - case VT_BSTR: - var v string - safeArrayGetElement(sac.Array, int64(i), unsafe.Pointer(&v)) - values[i] = v - case VT_VARIANT: - var v VARIANT - safeArrayGetElement(sac.Array, int64(i), unsafe.Pointer(&v)) - values[i] = v.Value() - default: - // TODO - } - } - - return -} - -func (sac *SafeArrayConversion) GetType() (varType uint16, err error) { - return safeArrayGetVartype(sac.Array) -} - -func (sac *SafeArrayConversion) GetDimensions() (dimensions *uint32, err error) { - return safeArrayGetDim(sac.Array) -} - -func (sac *SafeArrayConversion) GetSize() (length *uint32, err error) { - return safeArrayGetElementSize(sac.Array) -} - -func (sac *SafeArrayConversion) TotalElements(index uint32) (totalElements int64, err error) { - if index < 1 { - index = 1 - } - - // Get array bounds - var LowerBounds int64 - var UpperBounds int64 - - LowerBounds, err = safeArrayGetLBound(sac.Array, index) - if err != nil { - return - } - - UpperBounds, err = safeArrayGetUBound(sac.Array, index) - if err != nil { - return - } - - totalElements = UpperBounds - LowerBounds + 1 - return -} - -// Release Safe Array memory -func (sac *SafeArrayConversion) Release() { - safeArrayDestroy(sac.Array) -} diff --git a/vendor/github.com/go-ole/go-ole/safearrayslices.go b/vendor/github.com/go-ole/go-ole/safearrayslices.go deleted file mode 100644 index a9fa885..0000000 --- a/vendor/github.com/go-ole/go-ole/safearrayslices.go +++ /dev/null @@ -1,33 +0,0 @@ -// +build windows - -package ole - -import ( - "unsafe" -) - -func safeArrayFromByteSlice(slice []byte) *SafeArray { - array, _ := safeArrayCreateVector(VT_UI1, 0, uint32(len(slice))) - - if array == nil { - panic("Could not convert []byte to SAFEARRAY") - } - - for i, v := range slice { - safeArrayPutElement(array, int64(i), uintptr(unsafe.Pointer(&v))) - } - return array -} - -func 
safeArrayFromStringSlice(slice []string) *SafeArray { - array, _ := safeArrayCreateVector(VT_BSTR, 0, uint32(len(slice))) - - if array == nil { - panic("Could not convert []string to SAFEARRAY") - } - // SysAllocStringLen(s) - for i, v := range slice { - safeArrayPutElement(array, int64(i), uintptr(unsafe.Pointer(SysAllocStringLen(v)))) - } - return array -} diff --git a/vendor/github.com/go-ole/go-ole/utility.go b/vendor/github.com/go-ole/go-ole/utility.go deleted file mode 100644 index 99ee82d..0000000 --- a/vendor/github.com/go-ole/go-ole/utility.go +++ /dev/null @@ -1,101 +0,0 @@ -package ole - -import ( - "unicode/utf16" - "unsafe" -) - -// ClassIDFrom retrieves class ID whether given is program ID or application string. -// -// Helper that provides check against both Class ID from Program ID and Class ID from string. It is -// faster, if you know which you are using, to use the individual functions, but this will check -// against available functions for you. -func ClassIDFrom(programID string) (classID *GUID, err error) { - classID, err = CLSIDFromProgID(programID) - if err != nil { - classID, err = CLSIDFromString(programID) - if err != nil { - return - } - } - return -} - -// BytePtrToString converts byte pointer to a Go string. -func BytePtrToString(p *byte) string { - a := (*[10000]uint8)(unsafe.Pointer(p)) - i := 0 - for a[i] != 0 { - i++ - } - return string(a[:i]) -} - -// UTF16PtrToString is alias for LpOleStrToString. -// -// Kept for compatibility reasons. -func UTF16PtrToString(p *uint16) string { - return LpOleStrToString(p) -} - -// LpOleStrToString converts COM Unicode to Go string. -func LpOleStrToString(p *uint16) string { - if p == nil { - return "" - } - - length := lpOleStrLen(p) - a := make([]uint16, length) - - ptr := unsafe.Pointer(p) - - for i := 0; i < int(length); i++ { - a[i] = *(*uint16)(ptr) - ptr = unsafe.Pointer(uintptr(ptr) + 2) - } - - return string(utf16.Decode(a)) -} - -// BstrToString converts COM binary string to Go string. 
-func BstrToString(p *uint16) string { - if p == nil { - return "" - } - length := SysStringLen((*int16)(unsafe.Pointer(p))) - a := make([]uint16, length) - - ptr := unsafe.Pointer(p) - - for i := 0; i < int(length); i++ { - a[i] = *(*uint16)(ptr) - ptr = unsafe.Pointer(uintptr(ptr) + 2) - } - return string(utf16.Decode(a)) -} - -// lpOleStrLen returns the length of Unicode string. -func lpOleStrLen(p *uint16) (length int64) { - if p == nil { - return 0 - } - - ptr := unsafe.Pointer(p) - - for i := 0; ; i++ { - if 0 == *(*uint16)(ptr) { - length = int64(i) - break - } - ptr = unsafe.Pointer(uintptr(ptr) + 2) - } - return -} - -// convertHresultToError converts syscall to error, if call is unsuccessful. -func convertHresultToError(hr uintptr, r2 uintptr, ignore error) (err error) { - if hr != 0 { - err = NewError(hr) - } - return -} diff --git a/vendor/github.com/go-ole/go-ole/variables.go b/vendor/github.com/go-ole/go-ole/variables.go deleted file mode 100644 index ebe00f1..0000000 --- a/vendor/github.com/go-ole/go-ole/variables.go +++ /dev/null @@ -1,16 +0,0 @@ -// +build windows - -package ole - -import ( - "syscall" -) - -var ( - modcombase = syscall.NewLazyDLL("combase.dll") - modkernel32, _ = syscall.LoadDLL("kernel32.dll") - modole32, _ = syscall.LoadDLL("ole32.dll") - modoleaut32, _ = syscall.LoadDLL("oleaut32.dll") - modmsvcrt, _ = syscall.LoadDLL("msvcrt.dll") - moduser32, _ = syscall.LoadDLL("user32.dll") -) diff --git a/vendor/github.com/go-ole/go-ole/variant.go b/vendor/github.com/go-ole/go-ole/variant.go deleted file mode 100644 index 3696972..0000000 --- a/vendor/github.com/go-ole/go-ole/variant.go +++ /dev/null @@ -1,105 +0,0 @@ -package ole - -import "unsafe" - -// NewVariant returns new variant based on type and value. -func NewVariant(vt VT, val int64) VARIANT { - return VARIANT{VT: vt, Val: val} -} - -// ToIUnknown converts Variant to Unknown object. 
-func (v *VARIANT) ToIUnknown() *IUnknown { - if v.VT != VT_UNKNOWN { - return nil - } - return (*IUnknown)(unsafe.Pointer(uintptr(v.Val))) -} - -// ToIDispatch converts variant to dispatch object. -func (v *VARIANT) ToIDispatch() *IDispatch { - if v.VT != VT_DISPATCH { - return nil - } - return (*IDispatch)(unsafe.Pointer(uintptr(v.Val))) -} - -// ToArray converts variant to SafeArray helper. -func (v *VARIANT) ToArray() *SafeArrayConversion { - if v.VT != VT_SAFEARRAY { - if v.VT&VT_ARRAY == 0 { - return nil - } - } - var safeArray *SafeArray = (*SafeArray)(unsafe.Pointer(uintptr(v.Val))) - return &SafeArrayConversion{safeArray} -} - -// ToString converts variant to Go string. -func (v *VARIANT) ToString() string { - if v.VT != VT_BSTR { - return "" - } - return BstrToString(*(**uint16)(unsafe.Pointer(&v.Val))) -} - -// Clear the memory of variant object. -func (v *VARIANT) Clear() error { - return VariantClear(v) -} - -// Value returns variant value based on its type. -// -// Currently supported types: 2- and 4-byte integers, strings, bools. -// Note that 64-bit integers, datetimes, and other types are stored as strings -// and will be returned as strings. -// -// Needs to be further converted, because this returns an interface{}. 
-func (v *VARIANT) Value() interface{} { - switch v.VT { - case VT_I1: - return int8(v.Val) - case VT_UI1: - return uint8(v.Val) - case VT_I2: - return int16(v.Val) - case VT_UI2: - return uint16(v.Val) - case VT_I4: - return int32(v.Val) - case VT_UI4: - return uint32(v.Val) - case VT_I8: - return int64(v.Val) - case VT_UI8: - return uint64(v.Val) - case VT_INT: - return int(v.Val) - case VT_UINT: - return uint(v.Val) - case VT_INT_PTR: - return uintptr(v.Val) // TODO - case VT_UINT_PTR: - return uintptr(v.Val) - case VT_R4: - return *(*float32)(unsafe.Pointer(&v.Val)) - case VT_R8: - return *(*float64)(unsafe.Pointer(&v.Val)) - case VT_BSTR: - return v.ToString() - case VT_DATE: - // VT_DATE type will either return float64 or time.Time. - d := float64(v.Val) - date, err := GetVariantDate(d) - if err != nil { - return d - } - return date - case VT_UNKNOWN: - return v.ToIUnknown() - case VT_DISPATCH: - return v.ToIDispatch() - case VT_BOOL: - return v.Val != 0 - } - return nil -} diff --git a/vendor/github.com/go-ole/go-ole/variant_386.go b/vendor/github.com/go-ole/go-ole/variant_386.go deleted file mode 100644 index e73736b..0000000 --- a/vendor/github.com/go-ole/go-ole/variant_386.go +++ /dev/null @@ -1,11 +0,0 @@ -// +build 386 - -package ole - -type VARIANT struct { - VT VT // 2 - wReserved1 uint16 // 4 - wReserved2 uint16 // 6 - wReserved3 uint16 // 8 - Val int64 // 16 -} diff --git a/vendor/github.com/go-ole/go-ole/variant_amd64.go b/vendor/github.com/go-ole/go-ole/variant_amd64.go deleted file mode 100644 index dccdde1..0000000 --- a/vendor/github.com/go-ole/go-ole/variant_amd64.go +++ /dev/null @@ -1,12 +0,0 @@ -// +build amd64 - -package ole - -type VARIANT struct { - VT VT // 2 - wReserved1 uint16 // 4 - wReserved2 uint16 // 6 - wReserved3 uint16 // 8 - Val int64 // 16 - _ [8]byte // 24 -} diff --git a/vendor/github.com/go-ole/go-ole/variant_s390x.go b/vendor/github.com/go-ole/go-ole/variant_s390x.go deleted file mode 100644 index 9874ca6..0000000 --- 
a/vendor/github.com/go-ole/go-ole/variant_s390x.go +++ /dev/null @@ -1,12 +0,0 @@ -// +build s390x - -package ole - -type VARIANT struct { - VT VT // 2 - wReserved1 uint16 // 4 - wReserved2 uint16 // 6 - wReserved3 uint16 // 8 - Val int64 // 16 - _ [8]byte // 24 -} diff --git a/vendor/github.com/go-ole/go-ole/vt_string.go b/vendor/github.com/go-ole/go-ole/vt_string.go deleted file mode 100644 index 729b4a0..0000000 --- a/vendor/github.com/go-ole/go-ole/vt_string.go +++ /dev/null @@ -1,58 +0,0 @@ -// generated by stringer -output vt_string.go -type VT; DO NOT EDIT - -package ole - -import "fmt" - -const ( - _VT_name_0 = "VT_EMPTYVT_NULLVT_I2VT_I4VT_R4VT_R8VT_CYVT_DATEVT_BSTRVT_DISPATCHVT_ERRORVT_BOOLVT_VARIANTVT_UNKNOWNVT_DECIMAL" - _VT_name_1 = "VT_I1VT_UI1VT_UI2VT_UI4VT_I8VT_UI8VT_INTVT_UINTVT_VOIDVT_HRESULTVT_PTRVT_SAFEARRAYVT_CARRAYVT_USERDEFINEDVT_LPSTRVT_LPWSTR" - _VT_name_2 = "VT_RECORDVT_INT_PTRVT_UINT_PTR" - _VT_name_3 = "VT_FILETIMEVT_BLOBVT_STREAMVT_STORAGEVT_STREAMED_OBJECTVT_STORED_OBJECTVT_BLOB_OBJECTVT_CFVT_CLSID" - _VT_name_4 = "VT_BSTR_BLOBVT_VECTOR" - _VT_name_5 = "VT_ARRAY" - _VT_name_6 = "VT_BYREF" - _VT_name_7 = "VT_RESERVED" - _VT_name_8 = "VT_ILLEGAL" -) - -var ( - _VT_index_0 = [...]uint8{0, 8, 15, 20, 25, 30, 35, 40, 47, 54, 65, 73, 80, 90, 100, 110} - _VT_index_1 = [...]uint8{0, 5, 11, 17, 23, 28, 34, 40, 47, 54, 64, 70, 82, 91, 105, 113, 122} - _VT_index_2 = [...]uint8{0, 9, 19, 30} - _VT_index_3 = [...]uint8{0, 11, 18, 27, 37, 55, 71, 85, 90, 98} - _VT_index_4 = [...]uint8{0, 12, 21} - _VT_index_5 = [...]uint8{0, 8} - _VT_index_6 = [...]uint8{0, 8} - _VT_index_7 = [...]uint8{0, 11} - _VT_index_8 = [...]uint8{0, 10} -) - -func (i VT) String() string { - switch { - case 0 <= i && i <= 14: - return _VT_name_0[_VT_index_0[i]:_VT_index_0[i+1]] - case 16 <= i && i <= 31: - i -= 16 - return _VT_name_1[_VT_index_1[i]:_VT_index_1[i+1]] - case 36 <= i && i <= 38: - i -= 36 - return _VT_name_2[_VT_index_2[i]:_VT_index_2[i+1]] - case 64 <= i && i <= 
72: - i -= 64 - return _VT_name_3[_VT_index_3[i]:_VT_index_3[i+1]] - case 4095 <= i && i <= 4096: - i -= 4095 - return _VT_name_4[_VT_index_4[i]:_VT_index_4[i+1]] - case i == 8192: - return _VT_name_5 - case i == 16384: - return _VT_name_6 - case i == 32768: - return _VT_name_7 - case i == 65535: - return _VT_name_8 - default: - return fmt.Sprintf("VT(%d)", i) - } -} diff --git a/vendor/github.com/go-ole/go-ole/winrt.go b/vendor/github.com/go-ole/go-ole/winrt.go deleted file mode 100644 index 4e9eca7..0000000 --- a/vendor/github.com/go-ole/go-ole/winrt.go +++ /dev/null @@ -1,99 +0,0 @@ -// +build windows - -package ole - -import ( - "reflect" - "syscall" - "unicode/utf8" - "unsafe" -) - -var ( - procRoInitialize = modcombase.NewProc("RoInitialize") - procRoActivateInstance = modcombase.NewProc("RoActivateInstance") - procRoGetActivationFactory = modcombase.NewProc("RoGetActivationFactory") - procWindowsCreateString = modcombase.NewProc("WindowsCreateString") - procWindowsDeleteString = modcombase.NewProc("WindowsDeleteString") - procWindowsGetStringRawBuffer = modcombase.NewProc("WindowsGetStringRawBuffer") -) - -func RoInitialize(thread_type uint32) (err error) { - hr, _, _ := procRoInitialize.Call(uintptr(thread_type)) - if hr != 0 { - err = NewError(hr) - } - return -} - -func RoActivateInstance(clsid string) (ins *IInspectable, err error) { - hClsid, err := NewHString(clsid) - if err != nil { - return nil, err - } - defer DeleteHString(hClsid) - - hr, _, _ := procRoActivateInstance.Call( - uintptr(unsafe.Pointer(hClsid)), - uintptr(unsafe.Pointer(&ins))) - if hr != 0 { - err = NewError(hr) - } - return -} - -func RoGetActivationFactory(clsid string, iid *GUID) (ins *IInspectable, err error) { - hClsid, err := NewHString(clsid) - if err != nil { - return nil, err - } - defer DeleteHString(hClsid) - - hr, _, _ := procRoGetActivationFactory.Call( - uintptr(unsafe.Pointer(hClsid)), - uintptr(unsafe.Pointer(iid)), - uintptr(unsafe.Pointer(&ins))) - if hr != 0 { - 
err = NewError(hr) - } - return -} - -// HString is handle string for pointers. -type HString uintptr - -// NewHString returns a new HString for Go string. -func NewHString(s string) (hstring HString, err error) { - u16 := syscall.StringToUTF16Ptr(s) - len := uint32(utf8.RuneCountInString(s)) - hr, _, _ := procWindowsCreateString.Call( - uintptr(unsafe.Pointer(u16)), - uintptr(len), - uintptr(unsafe.Pointer(&hstring))) - if hr != 0 { - err = NewError(hr) - } - return -} - -// DeleteHString deletes HString. -func DeleteHString(hstring HString) (err error) { - hr, _, _ := procWindowsDeleteString.Call(uintptr(hstring)) - if hr != 0 { - err = NewError(hr) - } - return -} - -// String returns Go string value of HString. -func (h HString) String() string { - var u16buf uintptr - var u16len uint32 - u16buf, _, _ = procWindowsGetStringRawBuffer.Call( - uintptr(h), - uintptr(unsafe.Pointer(&u16len))) - - u16hdr := reflect.SliceHeader{Data: u16buf, Len: int(u16len), Cap: int(u16len)} - u16 := *(*[]uint16)(unsafe.Pointer(&u16hdr)) - return syscall.UTF16ToString(u16) -} diff --git a/vendor/github.com/go-ole/go-ole/winrt_doc.go b/vendor/github.com/go-ole/go-ole/winrt_doc.go deleted file mode 100644 index 52e6d74..0000000 --- a/vendor/github.com/go-ole/go-ole/winrt_doc.go +++ /dev/null @@ -1,36 +0,0 @@ -// +build !windows - -package ole - -// RoInitialize -func RoInitialize(thread_type uint32) (err error) { - return NewError(E_NOTIMPL) -} - -// RoActivateInstance -func RoActivateInstance(clsid string) (ins *IInspectable, err error) { - return nil, NewError(E_NOTIMPL) -} - -// RoGetActivationFactory -func RoGetActivationFactory(clsid string, iid *GUID) (ins *IInspectable, err error) { - return nil, NewError(E_NOTIMPL) -} - -// HString is handle string for pointers. -type HString uintptr - -// NewHString returns a new HString for Go string. 
-func NewHString(s string) (hstring HString, err error) { - return HString(uintptr(0)), NewError(E_NOTIMPL) -} - -// DeleteHString deletes HString. -func DeleteHString(hstring HString) (err error) { - return NewError(E_NOTIMPL) -} - -// String returns Go string value of HString. -func (h HString) String() string { - return "" -} diff --git a/vendor/github.com/go-stack/stack/.travis.yml b/vendor/github.com/go-stack/stack/.travis.yml deleted file mode 100644 index 5c5a2b5..0000000 --- a/vendor/github.com/go-stack/stack/.travis.yml +++ /dev/null @@ -1,15 +0,0 @@ -language: go -sudo: false -go: - - 1.7.x - - 1.8.x - - 1.9.x - - 1.10.x - - 1.11.x - - tip - -before_install: - - go get github.com/mattn/goveralls - -script: - - goveralls -service=travis-ci diff --git a/vendor/github.com/go-stack/stack/LICENSE.md b/vendor/github.com/go-stack/stack/LICENSE.md deleted file mode 100644 index 2abf98e..0000000 --- a/vendor/github.com/go-stack/stack/LICENSE.md +++ /dev/null @@ -1,21 +0,0 @@ -The MIT License (MIT) - -Copyright (c) 2014 Chris Hines - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. diff --git a/vendor/github.com/go-stack/stack/README.md b/vendor/github.com/go-stack/stack/README.md deleted file mode 100644 index f11cccc..0000000 --- a/vendor/github.com/go-stack/stack/README.md +++ /dev/null @@ -1,38 +0,0 @@ -[![GoDoc](https://godoc.org/github.com/go-stack/stack?status.svg)](https://godoc.org/github.com/go-stack/stack) -[![Go Report Card](https://goreportcard.com/badge/go-stack/stack)](https://goreportcard.com/report/go-stack/stack) -[![TravisCI](https://travis-ci.org/go-stack/stack.svg?branch=master)](https://travis-ci.org/go-stack/stack) -[![Coverage Status](https://coveralls.io/repos/github/go-stack/stack/badge.svg?branch=master)](https://coveralls.io/github/go-stack/stack?branch=master) - -# stack - -Package stack implements utilities to capture, manipulate, and format call -stacks. It provides a simpler API than package runtime. - -The implementation takes care of the minutia and special cases of interpreting -the program counter (pc) values returned by runtime.Callers. - -## Versioning - -Package stack publishes releases via [semver](http://semver.org/) compatible Git -tags prefixed with a single 'v'. The master branch always contains the latest -release. The develop branch contains unreleased commits. - -## Formatting - -Package stack's types implement fmt.Formatter, which provides a simple and -flexible way to declaratively configure formatting when used with logging or -error tracking packages. 
- -```go -func DoTheThing() { - c := stack.Caller(0) - log.Print(c) // "source.go:10" - log.Printf("%+v", c) // "pkg/path/source.go:10" - log.Printf("%n", c) // "DoTheThing" - - s := stack.Trace().TrimRuntime() - log.Print(s) // "[source.go:15 caller.go:42 main.go:14]" -} -``` - -See the docs for all of the supported formatting options. diff --git a/vendor/github.com/go-stack/stack/go.mod b/vendor/github.com/go-stack/stack/go.mod deleted file mode 100644 index 96a53a1..0000000 --- a/vendor/github.com/go-stack/stack/go.mod +++ /dev/null @@ -1 +0,0 @@ -module github.com/go-stack/stack diff --git a/vendor/github.com/go-stack/stack/stack.go b/vendor/github.com/go-stack/stack/stack.go deleted file mode 100644 index ac3b93b..0000000 --- a/vendor/github.com/go-stack/stack/stack.go +++ /dev/null @@ -1,400 +0,0 @@ -// +build go1.7 - -// Package stack implements utilities to capture, manipulate, and format call -// stacks. It provides a simpler API than package runtime. -// -// The implementation takes care of the minutia and special cases of -// interpreting the program counter (pc) values returned by runtime.Callers. -// -// Package stack's types implement fmt.Formatter, which provides a simple and -// flexible way to declaratively configure formatting when used with logging -// or error tracking packages. -package stack - -import ( - "bytes" - "errors" - "fmt" - "io" - "runtime" - "strconv" - "strings" -) - -// Call records a single function invocation from a goroutine stack. -type Call struct { - frame runtime.Frame -} - -// Caller returns a Call from the stack of the current goroutine. The argument -// skip is the number of stack frames to ascend, with 0 identifying the -// calling function. -func Caller(skip int) Call { - // As of Go 1.9 we need room for up to three PC entries. - // - // 0. An entry for the stack frame prior to the target to check for - // special handling needed if that prior entry is runtime.sigpanic. - // 1. 
A possible second entry to hold metadata about skipped inlined - // functions. If inline functions were not skipped the target frame - // PC will be here. - // 2. A third entry for the target frame PC when the second entry - // is used for skipped inline functions. - var pcs [3]uintptr - n := runtime.Callers(skip+1, pcs[:]) - frames := runtime.CallersFrames(pcs[:n]) - frame, _ := frames.Next() - frame, _ = frames.Next() - - return Call{ - frame: frame, - } -} - -// String implements fmt.Stinger. It is equivalent to fmt.Sprintf("%v", c). -func (c Call) String() string { - return fmt.Sprint(c) -} - -// MarshalText implements encoding.TextMarshaler. It formats the Call the same -// as fmt.Sprintf("%v", c). -func (c Call) MarshalText() ([]byte, error) { - if c.frame == (runtime.Frame{}) { - return nil, ErrNoFunc - } - - buf := bytes.Buffer{} - fmt.Fprint(&buf, c) - return buf.Bytes(), nil -} - -// ErrNoFunc means that the Call has a nil *runtime.Func. The most likely -// cause is a Call with the zero value. -var ErrNoFunc = errors.New("no call stack information") - -// Format implements fmt.Formatter with support for the following verbs. -// -// %s source file -// %d line number -// %n function name -// %k last segment of the package path -// %v equivalent to %s:%d -// -// It accepts the '+' and '#' flags for most of the verbs as follows. 
-// -// %+s path of source file relative to the compile time GOPATH, -// or the module path joined to the path of source file relative -// to module root -// %#s full path of source file -// %+n import path qualified function name -// %+k full package path -// %+v equivalent to %+s:%d -// %#v equivalent to %#s:%d -func (c Call) Format(s fmt.State, verb rune) { - if c.frame == (runtime.Frame{}) { - fmt.Fprintf(s, "%%!%c(NOFUNC)", verb) - return - } - - switch verb { - case 's', 'v': - file := c.frame.File - switch { - case s.Flag('#'): - // done - case s.Flag('+'): - file = pkgFilePath(&c.frame) - default: - const sep = "/" - if i := strings.LastIndex(file, sep); i != -1 { - file = file[i+len(sep):] - } - } - io.WriteString(s, file) - if verb == 'v' { - buf := [7]byte{':'} - s.Write(strconv.AppendInt(buf[:1], int64(c.frame.Line), 10)) - } - - case 'd': - buf := [6]byte{} - s.Write(strconv.AppendInt(buf[:0], int64(c.frame.Line), 10)) - - case 'k': - name := c.frame.Function - const pathSep = "/" - start, end := 0, len(name) - if i := strings.LastIndex(name, pathSep); i != -1 { - start = i + len(pathSep) - } - const pkgSep = "." - if i := strings.Index(name[start:], pkgSep); i != -1 { - end = start + i - } - if s.Flag('+') { - start = 0 - } - io.WriteString(s, name[start:end]) - - case 'n': - name := c.frame.Function - if !s.Flag('+') { - const pathSep = "/" - if i := strings.LastIndex(name, pathSep); i != -1 { - name = name[i+len(pathSep):] - } - const pkgSep = "." - if i := strings.Index(name, pkgSep); i != -1 { - name = name[i+len(pkgSep):] - } - } - io.WriteString(s, name) - } -} - -// Frame returns the call frame infomation for the Call. -func (c Call) Frame() runtime.Frame { - return c.frame -} - -// PC returns the program counter for this call frame; multiple frames may -// have the same PC value. -// -// Deprecated: Use Call.Frame instead. 
-func (c Call) PC() uintptr { - return c.frame.PC -} - -// CallStack records a sequence of function invocations from a goroutine -// stack. -type CallStack []Call - -// String implements fmt.Stinger. It is equivalent to fmt.Sprintf("%v", cs). -func (cs CallStack) String() string { - return fmt.Sprint(cs) -} - -var ( - openBracketBytes = []byte("[") - closeBracketBytes = []byte("]") - spaceBytes = []byte(" ") -) - -// MarshalText implements encoding.TextMarshaler. It formats the CallStack the -// same as fmt.Sprintf("%v", cs). -func (cs CallStack) MarshalText() ([]byte, error) { - buf := bytes.Buffer{} - buf.Write(openBracketBytes) - for i, pc := range cs { - if i > 0 { - buf.Write(spaceBytes) - } - fmt.Fprint(&buf, pc) - } - buf.Write(closeBracketBytes) - return buf.Bytes(), nil -} - -// Format implements fmt.Formatter by printing the CallStack as square brackets -// ([, ]) surrounding a space separated list of Calls each formatted with the -// supplied verb and options. -func (cs CallStack) Format(s fmt.State, verb rune) { - s.Write(openBracketBytes) - for i, pc := range cs { - if i > 0 { - s.Write(spaceBytes) - } - pc.Format(s, verb) - } - s.Write(closeBracketBytes) -} - -// Trace returns a CallStack for the current goroutine with element 0 -// identifying the calling function. -func Trace() CallStack { - var pcs [512]uintptr - n := runtime.Callers(1, pcs[:]) - - frames := runtime.CallersFrames(pcs[:n]) - cs := make(CallStack, 0, n) - - // Skip extra frame retrieved just to make sure the runtime.sigpanic - // special case is handled. - frame, more := frames.Next() - - for more { - frame, more = frames.Next() - cs = append(cs, Call{frame: frame}) - } - - return cs -} - -// TrimBelow returns a slice of the CallStack with all entries below c -// removed. -func (cs CallStack) TrimBelow(c Call) CallStack { - for len(cs) > 0 && cs[0] != c { - cs = cs[1:] - } - return cs -} - -// TrimAbove returns a slice of the CallStack with all entries above c -// removed. 
-func (cs CallStack) TrimAbove(c Call) CallStack { - for len(cs) > 0 && cs[len(cs)-1] != c { - cs = cs[:len(cs)-1] - } - return cs -} - -// pkgIndex returns the index that results in file[index:] being the path of -// file relative to the compile time GOPATH, and file[:index] being the -// $GOPATH/src/ portion of file. funcName must be the name of a function in -// file as returned by runtime.Func.Name. -func pkgIndex(file, funcName string) int { - // As of Go 1.6.2 there is no direct way to know the compile time GOPATH - // at runtime, but we can infer the number of path segments in the GOPATH. - // We note that runtime.Func.Name() returns the function name qualified by - // the import path, which does not include the GOPATH. Thus we can trim - // segments from the beginning of the file path until the number of path - // separators remaining is one more than the number of path separators in - // the function name. For example, given: - // - // GOPATH /home/user - // file /home/user/src/pkg/sub/file.go - // fn.Name() pkg/sub.Type.Method - // - // We want to produce: - // - // file[:idx] == /home/user/src/ - // file[idx:] == pkg/sub/file.go - // - // From this we can easily see that fn.Name() has one less path separator - // than our desired result for file[idx:]. We count separators from the - // end of the file path until it finds two more than in the function name - // and then move one character forward to preserve the initial path - // segment without a leading separator. - const sep = "/" - i := len(file) - for n := strings.Count(funcName, sep) + 2; n > 0; n-- { - i = strings.LastIndex(file[:i], sep) - if i == -1 { - i = -len(sep) - break - } - } - // get back to 0 or trim the leading separator - return i + len(sep) -} - -// pkgFilePath returns the frame's filepath relative to the compile-time GOPATH, -// or its module path joined to its path relative to the module root. 
-// -// As of Go 1.11 there is no direct way to know the compile time GOPATH or -// module paths at runtime, but we can piece together the desired information -// from available information. We note that runtime.Frame.Function contains the -// function name qualified by the package path, which includes the module path -// but not the GOPATH. We can extract the package path from that and append the -// last segments of the file path to arrive at the desired package qualified -// file path. For example, given: -// -// GOPATH /home/user -// import path pkg/sub -// frame.File /home/user/src/pkg/sub/file.go -// frame.Function pkg/sub.Type.Method -// Desired return pkg/sub/file.go -// -// It appears that we simply need to trim ".Type.Method" from frame.Function and -// append "/" + path.Base(file). -// -// But there are other wrinkles. Although it is idiomatic to do so, the internal -// name of a package is not required to match the last segment of its import -// path. In addition, the introduction of modules in Go 1.11 allows working -// without a GOPATH. So we also must make these work right: -// -// GOPATH /home/user -// import path pkg/go-sub -// package name sub -// frame.File /home/user/src/pkg/go-sub/file.go -// frame.Function pkg/sub.Type.Method -// Desired return pkg/go-sub/file.go -// -// Module path pkg/v2 -// import path pkg/v2/go-sub -// package name sub -// frame.File /home/user/cloned-pkg/go-sub/file.go -// frame.Function pkg/v2/sub.Type.Method -// Desired return pkg/v2/go-sub/file.go -// -// We can handle all of these situations by using the package path extracted -// from frame.Function up to, but not including, the last segment as the prefix -// and the last two segments of frame.File as the suffix of the returned path. -// This preserves the existing behavior when working in a GOPATH without modules -// and a semantically equivalent behavior when used in module aware project. 
-func pkgFilePath(frame *runtime.Frame) string { - pre := pkgPrefix(frame.Function) - post := pathSuffix(frame.File) - if pre == "" { - return post - } - return pre + "/" + post -} - -// pkgPrefix returns the import path of the function's package with the final -// segment removed. -func pkgPrefix(funcName string) string { - const pathSep = "/" - end := strings.LastIndex(funcName, pathSep) - if end == -1 { - return "" - } - return funcName[:end] -} - -// pathSuffix returns the last two segments of path. -func pathSuffix(path string) string { - const pathSep = "/" - lastSep := strings.LastIndex(path, pathSep) - if lastSep == -1 { - return path - } - return path[strings.LastIndex(path[:lastSep], pathSep)+1:] -} - -var runtimePath string - -func init() { - var pcs [3]uintptr - runtime.Callers(0, pcs[:]) - frames := runtime.CallersFrames(pcs[:]) - frame, _ := frames.Next() - file := frame.File - - idx := pkgIndex(frame.File, frame.Function) - - runtimePath = file[:idx] - if runtime.GOOS == "windows" { - runtimePath = strings.ToLower(runtimePath) - } -} - -func inGoroot(c Call) bool { - file := c.frame.File - if len(file) == 0 || file[0] == '?' { - return true - } - if runtime.GOOS == "windows" { - file = strings.ToLower(file) - } - return strings.HasPrefix(file, runtimePath) || strings.HasSuffix(file, "/_testmain.go") -} - -// TrimRuntime returns a slice of the CallStack with the topmost entries from -// the go runtime removed. It considers any calls originating from unknown -// files, files under GOROOT, or _testmain.go as part of the runtime. -func (cs CallStack) TrimRuntime() CallStack { - for len(cs) > 0 && inGoroot(cs[len(cs)-1]) { - cs = cs[:len(cs)-1] - } - return cs -} diff --git a/vendor/github.com/golang/protobuf/AUTHORS b/vendor/github.com/golang/protobuf/AUTHORS deleted file mode 100644 index 15167cd..0000000 --- a/vendor/github.com/golang/protobuf/AUTHORS +++ /dev/null @@ -1,3 +0,0 @@ -# This source code refers to The Go Authors for copyright purposes. 
-# The master list of authors is in the main Go distribution, -# visible at http://tip.golang.org/AUTHORS. diff --git a/vendor/github.com/golang/protobuf/CONTRIBUTORS b/vendor/github.com/golang/protobuf/CONTRIBUTORS deleted file mode 100644 index 1c4577e..0000000 --- a/vendor/github.com/golang/protobuf/CONTRIBUTORS +++ /dev/null @@ -1,3 +0,0 @@ -# This source code was written by the Go contributors. -# The master list of contributors is in the main Go distribution, -# visible at http://tip.golang.org/CONTRIBUTORS. diff --git a/vendor/github.com/golang/protobuf/LICENSE b/vendor/github.com/golang/protobuf/LICENSE deleted file mode 100644 index 0f64693..0000000 --- a/vendor/github.com/golang/protobuf/LICENSE +++ /dev/null @@ -1,28 +0,0 @@ -Copyright 2010 The Go Authors. All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - * Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the following disclaimer -in the documentation and/or other materials provided with the -distribution. - * Neither the name of Google Inc. nor the names of its -contributors may be used to endorse or promote products derived from -this software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - diff --git a/vendor/github.com/golang/protobuf/proto/clone.go b/vendor/github.com/golang/protobuf/proto/clone.go deleted file mode 100644 index 3cd3249..0000000 --- a/vendor/github.com/golang/protobuf/proto/clone.go +++ /dev/null @@ -1,253 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2011 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -// Protocol buffer deep copy and merge. -// TODO: RawMessage. - -package proto - -import ( - "fmt" - "log" - "reflect" - "strings" -) - -// Clone returns a deep copy of a protocol buffer. -func Clone(src Message) Message { - in := reflect.ValueOf(src) - if in.IsNil() { - return src - } - out := reflect.New(in.Type().Elem()) - dst := out.Interface().(Message) - Merge(dst, src) - return dst -} - -// Merger is the interface representing objects that can merge messages of the same type. -type Merger interface { - // Merge merges src into this message. - // Required and optional fields that are set in src will be set to that value in dst. - // Elements of repeated fields will be appended. - // - // Merge may panic if called with a different argument type than the receiver. - Merge(src Message) -} - -// generatedMerger is the custom merge method that generated protos will have. -// We must add this method since a generate Merge method will conflict with -// many existing protos that have a Merge data field already defined. -type generatedMerger interface { - XXX_Merge(src Message) -} - -// Merge merges src into dst. -// Required and optional fields that are set in src will be set to that value in dst. -// Elements of repeated fields will be appended. -// Merge panics if src and dst are not the same type, or if dst is nil. 
-func Merge(dst, src Message) { - if m, ok := dst.(Merger); ok { - m.Merge(src) - return - } - - in := reflect.ValueOf(src) - out := reflect.ValueOf(dst) - if out.IsNil() { - panic("proto: nil destination") - } - if in.Type() != out.Type() { - panic(fmt.Sprintf("proto.Merge(%T, %T) type mismatch", dst, src)) - } - if in.IsNil() { - return // Merge from nil src is a noop - } - if m, ok := dst.(generatedMerger); ok { - m.XXX_Merge(src) - return - } - mergeStruct(out.Elem(), in.Elem()) -} - -func mergeStruct(out, in reflect.Value) { - sprop := GetProperties(in.Type()) - for i := 0; i < in.NumField(); i++ { - f := in.Type().Field(i) - if strings.HasPrefix(f.Name, "XXX_") { - continue - } - mergeAny(out.Field(i), in.Field(i), false, sprop.Prop[i]) - } - - if emIn, err := extendable(in.Addr().Interface()); err == nil { - emOut, _ := extendable(out.Addr().Interface()) - mIn, muIn := emIn.extensionsRead() - if mIn != nil { - mOut := emOut.extensionsWrite() - muIn.Lock() - mergeExtension(mOut, mIn) - muIn.Unlock() - } - } - - uf := in.FieldByName("XXX_unrecognized") - if !uf.IsValid() { - return - } - uin := uf.Bytes() - if len(uin) > 0 { - out.FieldByName("XXX_unrecognized").SetBytes(append([]byte(nil), uin...)) - } -} - -// mergeAny performs a merge between two values of the same type. -// viaPtr indicates whether the values were indirected through a pointer (implying proto2). -// prop is set if this is a struct field (it may be nil). 
-func mergeAny(out, in reflect.Value, viaPtr bool, prop *Properties) { - if in.Type() == protoMessageType { - if !in.IsNil() { - if out.IsNil() { - out.Set(reflect.ValueOf(Clone(in.Interface().(Message)))) - } else { - Merge(out.Interface().(Message), in.Interface().(Message)) - } - } - return - } - switch in.Kind() { - case reflect.Bool, reflect.Float32, reflect.Float64, reflect.Int32, reflect.Int64, - reflect.String, reflect.Uint32, reflect.Uint64: - if !viaPtr && isProto3Zero(in) { - return - } - out.Set(in) - case reflect.Interface: - // Probably a oneof field; copy non-nil values. - if in.IsNil() { - return - } - // Allocate destination if it is not set, or set to a different type. - // Otherwise we will merge as normal. - if out.IsNil() || out.Elem().Type() != in.Elem().Type() { - out.Set(reflect.New(in.Elem().Elem().Type())) // interface -> *T -> T -> new(T) - } - mergeAny(out.Elem(), in.Elem(), false, nil) - case reflect.Map: - if in.Len() == 0 { - return - } - if out.IsNil() { - out.Set(reflect.MakeMap(in.Type())) - } - // For maps with value types of *T or []byte we need to deep copy each value. - elemKind := in.Type().Elem().Kind() - for _, key := range in.MapKeys() { - var val reflect.Value - switch elemKind { - case reflect.Ptr: - val = reflect.New(in.Type().Elem().Elem()) - mergeAny(val, in.MapIndex(key), false, nil) - case reflect.Slice: - val = in.MapIndex(key) - val = reflect.ValueOf(append([]byte{}, val.Bytes()...)) - default: - val = in.MapIndex(key) - } - out.SetMapIndex(key, val) - } - case reflect.Ptr: - if in.IsNil() { - return - } - if out.IsNil() { - out.Set(reflect.New(in.Elem().Type())) - } - mergeAny(out.Elem(), in.Elem(), true, nil) - case reflect.Slice: - if in.IsNil() { - return - } - if in.Type().Elem().Kind() == reflect.Uint8 { - // []byte is a scalar bytes field, not a repeated field. - - // Edge case: if this is in a proto3 message, a zero length - // bytes field is considered the zero value, and should not - // be merged. 
- if prop != nil && prop.proto3 && in.Len() == 0 { - return - } - - // Make a deep copy. - // Append to []byte{} instead of []byte(nil) so that we never end up - // with a nil result. - out.SetBytes(append([]byte{}, in.Bytes()...)) - return - } - n := in.Len() - if out.IsNil() { - out.Set(reflect.MakeSlice(in.Type(), 0, n)) - } - switch in.Type().Elem().Kind() { - case reflect.Bool, reflect.Float32, reflect.Float64, reflect.Int32, reflect.Int64, - reflect.String, reflect.Uint32, reflect.Uint64: - out.Set(reflect.AppendSlice(out, in)) - default: - for i := 0; i < n; i++ { - x := reflect.Indirect(reflect.New(in.Type().Elem())) - mergeAny(x, in.Index(i), false, nil) - out.Set(reflect.Append(out, x)) - } - } - case reflect.Struct: - mergeStruct(out, in) - default: - // unknown type, so not a protocol buffer - log.Printf("proto: don't know how to copy %v", in) - } -} - -func mergeExtension(out, in map[int32]Extension) { - for extNum, eIn := range in { - eOut := Extension{desc: eIn.desc} - if eIn.value != nil { - v := reflect.New(reflect.TypeOf(eIn.value)).Elem() - mergeAny(v, reflect.ValueOf(eIn.value), false, nil) - eOut.value = v.Interface() - } - if eIn.enc != nil { - eOut.enc = make([]byte, len(eIn.enc)) - copy(eOut.enc, eIn.enc) - } - - out[extNum] = eOut - } -} diff --git a/vendor/github.com/golang/protobuf/proto/decode.go b/vendor/github.com/golang/protobuf/proto/decode.go deleted file mode 100644 index 63b0f08..0000000 --- a/vendor/github.com/golang/protobuf/proto/decode.go +++ /dev/null @@ -1,427 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2010 The Go Authors. All rights reserved. 
-// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -package proto - -/* - * Routines for decoding protocol buffer data to construct in-memory representations. - */ - -import ( - "errors" - "fmt" - "io" -) - -// errOverflow is returned when an integer is too large to be represented. -var errOverflow = errors.New("proto: integer overflow") - -// ErrInternalBadWireType is returned by generated code when an incorrect -// wire type is encountered. It does not get returned to user code. 
-var ErrInternalBadWireType = errors.New("proto: internal error: bad wiretype for oneof") - -// DecodeVarint reads a varint-encoded integer from the slice. -// It returns the integer and the number of bytes consumed, or -// zero if there is not enough. -// This is the format for the -// int32, int64, uint32, uint64, bool, and enum -// protocol buffer types. -func DecodeVarint(buf []byte) (x uint64, n int) { - for shift := uint(0); shift < 64; shift += 7 { - if n >= len(buf) { - return 0, 0 - } - b := uint64(buf[n]) - n++ - x |= (b & 0x7F) << shift - if (b & 0x80) == 0 { - return x, n - } - } - - // The number is too large to represent in a 64-bit value. - return 0, 0 -} - -func (p *Buffer) decodeVarintSlow() (x uint64, err error) { - i := p.index - l := len(p.buf) - - for shift := uint(0); shift < 64; shift += 7 { - if i >= l { - err = io.ErrUnexpectedEOF - return - } - b := p.buf[i] - i++ - x |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - p.index = i - return - } - } - - // The number is too large to represent in a 64-bit value. - err = errOverflow - return -} - -// DecodeVarint reads a varint-encoded integer from the Buffer. -// This is the format for the -// int32, int64, uint32, uint64, bool, and enum -// protocol buffer types. 
-func (p *Buffer) DecodeVarint() (x uint64, err error) { - i := p.index - buf := p.buf - - if i >= len(buf) { - return 0, io.ErrUnexpectedEOF - } else if buf[i] < 0x80 { - p.index++ - return uint64(buf[i]), nil - } else if len(buf)-i < 10 { - return p.decodeVarintSlow() - } - - var b uint64 - // we already checked the first byte - x = uint64(buf[i]) - 0x80 - i++ - - b = uint64(buf[i]) - i++ - x += b << 7 - if b&0x80 == 0 { - goto done - } - x -= 0x80 << 7 - - b = uint64(buf[i]) - i++ - x += b << 14 - if b&0x80 == 0 { - goto done - } - x -= 0x80 << 14 - - b = uint64(buf[i]) - i++ - x += b << 21 - if b&0x80 == 0 { - goto done - } - x -= 0x80 << 21 - - b = uint64(buf[i]) - i++ - x += b << 28 - if b&0x80 == 0 { - goto done - } - x -= 0x80 << 28 - - b = uint64(buf[i]) - i++ - x += b << 35 - if b&0x80 == 0 { - goto done - } - x -= 0x80 << 35 - - b = uint64(buf[i]) - i++ - x += b << 42 - if b&0x80 == 0 { - goto done - } - x -= 0x80 << 42 - - b = uint64(buf[i]) - i++ - x += b << 49 - if b&0x80 == 0 { - goto done - } - x -= 0x80 << 49 - - b = uint64(buf[i]) - i++ - x += b << 56 - if b&0x80 == 0 { - goto done - } - x -= 0x80 << 56 - - b = uint64(buf[i]) - i++ - x += b << 63 - if b&0x80 == 0 { - goto done - } - - return 0, errOverflow - -done: - p.index = i - return x, nil -} - -// DecodeFixed64 reads a 64-bit integer from the Buffer. -// This is the format for the -// fixed64, sfixed64, and double protocol buffer types. -func (p *Buffer) DecodeFixed64() (x uint64, err error) { - // x, err already 0 - i := p.index + 8 - if i < 0 || i > len(p.buf) { - err = io.ErrUnexpectedEOF - return - } - p.index = i - - x = uint64(p.buf[i-8]) - x |= uint64(p.buf[i-7]) << 8 - x |= uint64(p.buf[i-6]) << 16 - x |= uint64(p.buf[i-5]) << 24 - x |= uint64(p.buf[i-4]) << 32 - x |= uint64(p.buf[i-3]) << 40 - x |= uint64(p.buf[i-2]) << 48 - x |= uint64(p.buf[i-1]) << 56 - return -} - -// DecodeFixed32 reads a 32-bit integer from the Buffer. 
-// This is the format for the -// fixed32, sfixed32, and float protocol buffer types. -func (p *Buffer) DecodeFixed32() (x uint64, err error) { - // x, err already 0 - i := p.index + 4 - if i < 0 || i > len(p.buf) { - err = io.ErrUnexpectedEOF - return - } - p.index = i - - x = uint64(p.buf[i-4]) - x |= uint64(p.buf[i-3]) << 8 - x |= uint64(p.buf[i-2]) << 16 - x |= uint64(p.buf[i-1]) << 24 - return -} - -// DecodeZigzag64 reads a zigzag-encoded 64-bit integer -// from the Buffer. -// This is the format used for the sint64 protocol buffer type. -func (p *Buffer) DecodeZigzag64() (x uint64, err error) { - x, err = p.DecodeVarint() - if err != nil { - return - } - x = (x >> 1) ^ uint64((int64(x&1)<<63)>>63) - return -} - -// DecodeZigzag32 reads a zigzag-encoded 32-bit integer -// from the Buffer. -// This is the format used for the sint32 protocol buffer type. -func (p *Buffer) DecodeZigzag32() (x uint64, err error) { - x, err = p.DecodeVarint() - if err != nil { - return - } - x = uint64((uint32(x) >> 1) ^ uint32((int32(x&1)<<31)>>31)) - return -} - -// DecodeRawBytes reads a count-delimited byte buffer from the Buffer. -// This is the format used for the bytes protocol buffer -// type and for embedded messages. -func (p *Buffer) DecodeRawBytes(alloc bool) (buf []byte, err error) { - n, err := p.DecodeVarint() - if err != nil { - return nil, err - } - - nb := int(n) - if nb < 0 { - return nil, fmt.Errorf("proto: bad byte length %d", nb) - } - end := p.index + nb - if end < p.index || end > len(p.buf) { - return nil, io.ErrUnexpectedEOF - } - - if !alloc { - // todo: check if can get more uses of alloc=false - buf = p.buf[p.index:end] - p.index += nb - return - } - - buf = make([]byte, nb) - copy(buf, p.buf[p.index:]) - p.index += nb - return -} - -// DecodeStringBytes reads an encoded string from the Buffer. -// This is the format used for the proto2 string type. 
-func (p *Buffer) DecodeStringBytes() (s string, err error) { - buf, err := p.DecodeRawBytes(false) - if err != nil { - return - } - return string(buf), nil -} - -// Unmarshaler is the interface representing objects that can -// unmarshal themselves. The argument points to data that may be -// overwritten, so implementations should not keep references to the -// buffer. -// Unmarshal implementations should not clear the receiver. -// Any unmarshaled data should be merged into the receiver. -// Callers of Unmarshal that do not want to retain existing data -// should Reset the receiver before calling Unmarshal. -type Unmarshaler interface { - Unmarshal([]byte) error -} - -// newUnmarshaler is the interface representing objects that can -// unmarshal themselves. The semantics are identical to Unmarshaler. -// -// This exists to support protoc-gen-go generated messages. -// The proto package will stop type-asserting to this interface in the future. -// -// DO NOT DEPEND ON THIS. -type newUnmarshaler interface { - XXX_Unmarshal([]byte) error -} - -// Unmarshal parses the protocol buffer representation in buf and places the -// decoded result in pb. If the struct underlying pb does not match -// the data in buf, the results can be unpredictable. -// -// Unmarshal resets pb before starting to unmarshal, so any -// existing data in pb is always removed. Use UnmarshalMerge -// to preserve and append to existing data. -func Unmarshal(buf []byte, pb Message) error { - pb.Reset() - if u, ok := pb.(newUnmarshaler); ok { - return u.XXX_Unmarshal(buf) - } - if u, ok := pb.(Unmarshaler); ok { - return u.Unmarshal(buf) - } - return NewBuffer(buf).Unmarshal(pb) -} - -// UnmarshalMerge parses the protocol buffer representation in buf and -// writes the decoded result to pb. If the struct underlying pb does not match -// the data in buf, the results can be unpredictable. -// -// UnmarshalMerge merges into existing data in pb. -// Most code should use Unmarshal instead. 
-func UnmarshalMerge(buf []byte, pb Message) error { - if u, ok := pb.(newUnmarshaler); ok { - return u.XXX_Unmarshal(buf) - } - if u, ok := pb.(Unmarshaler); ok { - // NOTE: The history of proto have unfortunately been inconsistent - // whether Unmarshaler should or should not implicitly clear itself. - // Some implementations do, most do not. - // Thus, calling this here may or may not do what people want. - // - // See https://github.com/golang/protobuf/issues/424 - return u.Unmarshal(buf) - } - return NewBuffer(buf).Unmarshal(pb) -} - -// DecodeMessage reads a count-delimited message from the Buffer. -func (p *Buffer) DecodeMessage(pb Message) error { - enc, err := p.DecodeRawBytes(false) - if err != nil { - return err - } - return NewBuffer(enc).Unmarshal(pb) -} - -// DecodeGroup reads a tag-delimited group from the Buffer. -// StartGroup tag is already consumed. This function consumes -// EndGroup tag. -func (p *Buffer) DecodeGroup(pb Message) error { - b := p.buf[p.index:] - x, y := findEndGroup(b) - if x < 0 { - return io.ErrUnexpectedEOF - } - err := Unmarshal(b[:x], pb) - p.index += y - return err -} - -// Unmarshal parses the protocol buffer representation in the -// Buffer and places the decoded result in pb. If the struct -// underlying pb does not match the data in the buffer, the results can be -// unpredictable. -// -// Unlike proto.Unmarshal, this does not reset pb before starting to unmarshal. -func (p *Buffer) Unmarshal(pb Message) error { - // If the object can unmarshal itself, let it. - if u, ok := pb.(newUnmarshaler); ok { - err := u.XXX_Unmarshal(p.buf[p.index:]) - p.index = len(p.buf) - return err - } - if u, ok := pb.(Unmarshaler); ok { - // NOTE: The history of proto have unfortunately been inconsistent - // whether Unmarshaler should or should not implicitly clear itself. - // Some implementations do, most do not. - // Thus, calling this here may or may not do what people want. 
- // - // See https://github.com/golang/protobuf/issues/424 - err := u.Unmarshal(p.buf[p.index:]) - p.index = len(p.buf) - return err - } - - // Slow workaround for messages that aren't Unmarshalers. - // This includes some hand-coded .pb.go files and - // bootstrap protos. - // TODO: fix all of those and then add Unmarshal to - // the Message interface. Then: - // The cast above and code below can be deleted. - // The old unmarshaler can be deleted. - // Clients can call Unmarshal directly (can already do that, actually). - var info InternalMessageInfo - err := info.Unmarshal(pb, p.buf[p.index:]) - p.index = len(p.buf) - return err -} diff --git a/vendor/github.com/golang/protobuf/proto/deprecated.go b/vendor/github.com/golang/protobuf/proto/deprecated.go deleted file mode 100644 index 35b882c..0000000 --- a/vendor/github.com/golang/protobuf/proto/deprecated.go +++ /dev/null @@ -1,63 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2018 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. 
-// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -package proto - -import "errors" - -// Deprecated: do not use. -type Stats struct{ Emalloc, Dmalloc, Encode, Decode, Chit, Cmiss, Size uint64 } - -// Deprecated: do not use. -func GetStats() Stats { return Stats{} } - -// Deprecated: do not use. -func MarshalMessageSet(interface{}) ([]byte, error) { - return nil, errors.New("proto: not implemented") -} - -// Deprecated: do not use. -func UnmarshalMessageSet([]byte, interface{}) error { - return errors.New("proto: not implemented") -} - -// Deprecated: do not use. -func MarshalMessageSetJSON(interface{}) ([]byte, error) { - return nil, errors.New("proto: not implemented") -} - -// Deprecated: do not use. -func UnmarshalMessageSetJSON([]byte, interface{}) error { - return errors.New("proto: not implemented") -} - -// Deprecated: do not use. 
-func RegisterMessageSetType(Message, int32, string) {} diff --git a/vendor/github.com/golang/protobuf/proto/discard.go b/vendor/github.com/golang/protobuf/proto/discard.go deleted file mode 100644 index dea2617..0000000 --- a/vendor/github.com/golang/protobuf/proto/discard.go +++ /dev/null @@ -1,350 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2017 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
- -package proto - -import ( - "fmt" - "reflect" - "strings" - "sync" - "sync/atomic" -) - -type generatedDiscarder interface { - XXX_DiscardUnknown() -} - -// DiscardUnknown recursively discards all unknown fields from this message -// and all embedded messages. -// -// When unmarshaling a message with unrecognized fields, the tags and values -// of such fields are preserved in the Message. This allows a later call to -// marshal to be able to produce a message that continues to have those -// unrecognized fields. To avoid this, DiscardUnknown is used to -// explicitly clear the unknown fields after unmarshaling. -// -// For proto2 messages, the unknown fields of message extensions are only -// discarded from messages that have been accessed via GetExtension. -func DiscardUnknown(m Message) { - if m, ok := m.(generatedDiscarder); ok { - m.XXX_DiscardUnknown() - return - } - // TODO: Dynamically populate a InternalMessageInfo for legacy messages, - // but the master branch has no implementation for InternalMessageInfo, - // so it would be more work to replicate that approach. - discardLegacy(m) -} - -// DiscardUnknown recursively discards all unknown fields. 
-func (a *InternalMessageInfo) DiscardUnknown(m Message) { - di := atomicLoadDiscardInfo(&a.discard) - if di == nil { - di = getDiscardInfo(reflect.TypeOf(m).Elem()) - atomicStoreDiscardInfo(&a.discard, di) - } - di.discard(toPointer(&m)) -} - -type discardInfo struct { - typ reflect.Type - - initialized int32 // 0: only typ is valid, 1: everything is valid - lock sync.Mutex - - fields []discardFieldInfo - unrecognized field -} - -type discardFieldInfo struct { - field field // Offset of field, guaranteed to be valid - discard func(src pointer) -} - -var ( - discardInfoMap = map[reflect.Type]*discardInfo{} - discardInfoLock sync.Mutex -) - -func getDiscardInfo(t reflect.Type) *discardInfo { - discardInfoLock.Lock() - defer discardInfoLock.Unlock() - di := discardInfoMap[t] - if di == nil { - di = &discardInfo{typ: t} - discardInfoMap[t] = di - } - return di -} - -func (di *discardInfo) discard(src pointer) { - if src.isNil() { - return // Nothing to do. - } - - if atomic.LoadInt32(&di.initialized) == 0 { - di.computeDiscardInfo() - } - - for _, fi := range di.fields { - sfp := src.offset(fi.field) - fi.discard(sfp) - } - - // For proto2 messages, only discard unknown fields in message extensions - // that have been accessed via GetExtension. - if em, err := extendable(src.asPointerTo(di.typ).Interface()); err == nil { - // Ignore lock since DiscardUnknown is not concurrency safe. - emm, _ := em.extensionsRead() - for _, mx := range emm { - if m, ok := mx.value.(Message); ok { - DiscardUnknown(m) - } - } - } - - if di.unrecognized.IsValid() { - *src.offset(di.unrecognized).toBytes() = nil - } -} - -func (di *discardInfo) computeDiscardInfo() { - di.lock.Lock() - defer di.lock.Unlock() - if di.initialized != 0 { - return - } - t := di.typ - n := t.NumField() - - for i := 0; i < n; i++ { - f := t.Field(i) - if strings.HasPrefix(f.Name, "XXX_") { - continue - } - - dfi := discardFieldInfo{field: toField(&f)} - tf := f.Type - - // Unwrap tf to get its most basic type. 
- var isPointer, isSlice bool - if tf.Kind() == reflect.Slice && tf.Elem().Kind() != reflect.Uint8 { - isSlice = true - tf = tf.Elem() - } - if tf.Kind() == reflect.Ptr { - isPointer = true - tf = tf.Elem() - } - if isPointer && isSlice && tf.Kind() != reflect.Struct { - panic(fmt.Sprintf("%v.%s cannot be a slice of pointers to primitive types", t, f.Name)) - } - - switch tf.Kind() { - case reflect.Struct: - switch { - case !isPointer: - panic(fmt.Sprintf("%v.%s cannot be a direct struct value", t, f.Name)) - case isSlice: // E.g., []*pb.T - di := getDiscardInfo(tf) - dfi.discard = func(src pointer) { - sps := src.getPointerSlice() - for _, sp := range sps { - if !sp.isNil() { - di.discard(sp) - } - } - } - default: // E.g., *pb.T - di := getDiscardInfo(tf) - dfi.discard = func(src pointer) { - sp := src.getPointer() - if !sp.isNil() { - di.discard(sp) - } - } - } - case reflect.Map: - switch { - case isPointer || isSlice: - panic(fmt.Sprintf("%v.%s cannot be a pointer to a map or a slice of map values", t, f.Name)) - default: // E.g., map[K]V - if tf.Elem().Kind() == reflect.Ptr { // Proto struct (e.g., *T) - dfi.discard = func(src pointer) { - sm := src.asPointerTo(tf).Elem() - if sm.Len() == 0 { - return - } - for _, key := range sm.MapKeys() { - val := sm.MapIndex(key) - DiscardUnknown(val.Interface().(Message)) - } - } - } else { - dfi.discard = func(pointer) {} // Noop - } - } - case reflect.Interface: - // Must be oneof field. - switch { - case isPointer || isSlice: - panic(fmt.Sprintf("%v.%s cannot be a pointer to a interface or a slice of interface values", t, f.Name)) - default: // E.g., interface{} - // TODO: Make this faster? 
- dfi.discard = func(src pointer) { - su := src.asPointerTo(tf).Elem() - if !su.IsNil() { - sv := su.Elem().Elem().Field(0) - if sv.Kind() == reflect.Ptr && sv.IsNil() { - return - } - switch sv.Type().Kind() { - case reflect.Ptr: // Proto struct (e.g., *T) - DiscardUnknown(sv.Interface().(Message)) - } - } - } - } - default: - continue - } - di.fields = append(di.fields, dfi) - } - - di.unrecognized = invalidField - if f, ok := t.FieldByName("XXX_unrecognized"); ok { - if f.Type != reflect.TypeOf([]byte{}) { - panic("expected XXX_unrecognized to be of type []byte") - } - di.unrecognized = toField(&f) - } - - atomic.StoreInt32(&di.initialized, 1) -} - -func discardLegacy(m Message) { - v := reflect.ValueOf(m) - if v.Kind() != reflect.Ptr || v.IsNil() { - return - } - v = v.Elem() - if v.Kind() != reflect.Struct { - return - } - t := v.Type() - - for i := 0; i < v.NumField(); i++ { - f := t.Field(i) - if strings.HasPrefix(f.Name, "XXX_") { - continue - } - vf := v.Field(i) - tf := f.Type - - // Unwrap tf to get its most basic type. 
- var isPointer, isSlice bool - if tf.Kind() == reflect.Slice && tf.Elem().Kind() != reflect.Uint8 { - isSlice = true - tf = tf.Elem() - } - if tf.Kind() == reflect.Ptr { - isPointer = true - tf = tf.Elem() - } - if isPointer && isSlice && tf.Kind() != reflect.Struct { - panic(fmt.Sprintf("%T.%s cannot be a slice of pointers to primitive types", m, f.Name)) - } - - switch tf.Kind() { - case reflect.Struct: - switch { - case !isPointer: - panic(fmt.Sprintf("%T.%s cannot be a direct struct value", m, f.Name)) - case isSlice: // E.g., []*pb.T - for j := 0; j < vf.Len(); j++ { - discardLegacy(vf.Index(j).Interface().(Message)) - } - default: // E.g., *pb.T - discardLegacy(vf.Interface().(Message)) - } - case reflect.Map: - switch { - case isPointer || isSlice: - panic(fmt.Sprintf("%T.%s cannot be a pointer to a map or a slice of map values", m, f.Name)) - default: // E.g., map[K]V - tv := vf.Type().Elem() - if tv.Kind() == reflect.Ptr && tv.Implements(protoMessageType) { // Proto struct (e.g., *T) - for _, key := range vf.MapKeys() { - val := vf.MapIndex(key) - discardLegacy(val.Interface().(Message)) - } - } - } - case reflect.Interface: - // Must be oneof field. 
- switch { - case isPointer || isSlice: - panic(fmt.Sprintf("%T.%s cannot be a pointer to a interface or a slice of interface values", m, f.Name)) - default: // E.g., test_proto.isCommunique_Union interface - if !vf.IsNil() && f.Tag.Get("protobuf_oneof") != "" { - vf = vf.Elem() // E.g., *test_proto.Communique_Msg - if !vf.IsNil() { - vf = vf.Elem() // E.g., test_proto.Communique_Msg - vf = vf.Field(0) // E.g., Proto struct (e.g., *T) or primitive value - if vf.Kind() == reflect.Ptr { - discardLegacy(vf.Interface().(Message)) - } - } - } - } - } - } - - if vf := v.FieldByName("XXX_unrecognized"); vf.IsValid() { - if vf.Type() != reflect.TypeOf([]byte{}) { - panic("expected XXX_unrecognized to be of type []byte") - } - vf.Set(reflect.ValueOf([]byte(nil))) - } - - // For proto2 messages, only discard unknown fields in message extensions - // that have been accessed via GetExtension. - if em, err := extendable(m); err == nil { - // Ignore lock since discardLegacy is not concurrency safe. - emm, _ := em.extensionsRead() - for _, mx := range emm { - if m, ok := mx.value.(Message); ok { - discardLegacy(m) - } - } - } -} diff --git a/vendor/github.com/golang/protobuf/proto/encode.go b/vendor/github.com/golang/protobuf/proto/encode.go deleted file mode 100644 index 3abfed2..0000000 --- a/vendor/github.com/golang/protobuf/proto/encode.go +++ /dev/null @@ -1,203 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2010 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. 
-// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -package proto - -/* - * Routines for encoding data into the wire format for protocol buffers. - */ - -import ( - "errors" - "reflect" -) - -var ( - // errRepeatedHasNil is the error returned if Marshal is called with - // a struct with a repeated field containing a nil element. - errRepeatedHasNil = errors.New("proto: repeated field has nil element") - - // errOneofHasNil is the error returned if Marshal is called with - // a struct with a oneof field containing a nil element. - errOneofHasNil = errors.New("proto: oneof field has nil value") - - // ErrNil is the error returned if Marshal is called with nil. - ErrNil = errors.New("proto: Marshal called with nil") - - // ErrTooLarge is the error returned if Marshal is called with a - // message that encodes to >2GB. 
- ErrTooLarge = errors.New("proto: message encodes to over 2 GB") -) - -// The fundamental encoders that put bytes on the wire. -// Those that take integer types all accept uint64 and are -// therefore of type valueEncoder. - -const maxVarintBytes = 10 // maximum length of a varint - -// EncodeVarint returns the varint encoding of x. -// This is the format for the -// int32, int64, uint32, uint64, bool, and enum -// protocol buffer types. -// Not used by the package itself, but helpful to clients -// wishing to use the same encoding. -func EncodeVarint(x uint64) []byte { - var buf [maxVarintBytes]byte - var n int - for n = 0; x > 127; n++ { - buf[n] = 0x80 | uint8(x&0x7F) - x >>= 7 - } - buf[n] = uint8(x) - n++ - return buf[0:n] -} - -// EncodeVarint writes a varint-encoded integer to the Buffer. -// This is the format for the -// int32, int64, uint32, uint64, bool, and enum -// protocol buffer types. -func (p *Buffer) EncodeVarint(x uint64) error { - for x >= 1<<7 { - p.buf = append(p.buf, uint8(x&0x7f|0x80)) - x >>= 7 - } - p.buf = append(p.buf, uint8(x)) - return nil -} - -// SizeVarint returns the varint encoding size of an integer. -func SizeVarint(x uint64) int { - switch { - case x < 1<<7: - return 1 - case x < 1<<14: - return 2 - case x < 1<<21: - return 3 - case x < 1<<28: - return 4 - case x < 1<<35: - return 5 - case x < 1<<42: - return 6 - case x < 1<<49: - return 7 - case x < 1<<56: - return 8 - case x < 1<<63: - return 9 - } - return 10 -} - -// EncodeFixed64 writes a 64-bit integer to the Buffer. -// This is the format for the -// fixed64, sfixed64, and double protocol buffer types. -func (p *Buffer) EncodeFixed64(x uint64) error { - p.buf = append(p.buf, - uint8(x), - uint8(x>>8), - uint8(x>>16), - uint8(x>>24), - uint8(x>>32), - uint8(x>>40), - uint8(x>>48), - uint8(x>>56)) - return nil -} - -// EncodeFixed32 writes a 32-bit integer to the Buffer. -// This is the format for the -// fixed32, sfixed32, and float protocol buffer types. 
-func (p *Buffer) EncodeFixed32(x uint64) error { - p.buf = append(p.buf, - uint8(x), - uint8(x>>8), - uint8(x>>16), - uint8(x>>24)) - return nil -} - -// EncodeZigzag64 writes a zigzag-encoded 64-bit integer -// to the Buffer. -// This is the format used for the sint64 protocol buffer type. -func (p *Buffer) EncodeZigzag64(x uint64) error { - // use signed number to get arithmetic right shift. - return p.EncodeVarint(uint64((x << 1) ^ uint64((int64(x) >> 63)))) -} - -// EncodeZigzag32 writes a zigzag-encoded 32-bit integer -// to the Buffer. -// This is the format used for the sint32 protocol buffer type. -func (p *Buffer) EncodeZigzag32(x uint64) error { - // use signed number to get arithmetic right shift. - return p.EncodeVarint(uint64((uint32(x) << 1) ^ uint32((int32(x) >> 31)))) -} - -// EncodeRawBytes writes a count-delimited byte buffer to the Buffer. -// This is the format used for the bytes protocol buffer -// type and for embedded messages. -func (p *Buffer) EncodeRawBytes(b []byte) error { - p.EncodeVarint(uint64(len(b))) - p.buf = append(p.buf, b...) - return nil -} - -// EncodeStringBytes writes an encoded string to the Buffer. -// This is the format used for the proto2 string type. -func (p *Buffer) EncodeStringBytes(s string) error { - p.EncodeVarint(uint64(len(s))) - p.buf = append(p.buf, s...) - return nil -} - -// Marshaler is the interface representing objects that can marshal themselves. -type Marshaler interface { - Marshal() ([]byte, error) -} - -// EncodeMessage writes the protocol buffer to the Buffer, -// prefixed by a varint-encoded length. -func (p *Buffer) EncodeMessage(pb Message) error { - siz := Size(pb) - p.EncodeVarint(uint64(siz)) - return p.Marshal(pb) -} - -// All protocol buffer fields are nillable, but be careful. 
-func isNil(v reflect.Value) bool { - switch v.Kind() { - case reflect.Interface, reflect.Map, reflect.Ptr, reflect.Slice: - return v.IsNil() - } - return false -} diff --git a/vendor/github.com/golang/protobuf/proto/equal.go b/vendor/github.com/golang/protobuf/proto/equal.go deleted file mode 100644 index f9b6e41..0000000 --- a/vendor/github.com/golang/protobuf/proto/equal.go +++ /dev/null @@ -1,301 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2011 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -// Protocol buffer comparison. - -package proto - -import ( - "bytes" - "log" - "reflect" - "strings" -) - -/* -Equal returns true iff protocol buffers a and b are equal. -The arguments must both be pointers to protocol buffer structs. - -Equality is defined in this way: - - Two messages are equal iff they are the same type, - corresponding fields are equal, unknown field sets - are equal, and extensions sets are equal. - - Two set scalar fields are equal iff their values are equal. - If the fields are of a floating-point type, remember that - NaN != x for all x, including NaN. If the message is defined - in a proto3 .proto file, fields are not "set"; specifically, - zero length proto3 "bytes" fields are equal (nil == {}). - - Two repeated fields are equal iff their lengths are the same, - and their corresponding elements are equal. Note a "bytes" field, - although represented by []byte, is not a repeated field and the - rule for the scalar fields described above applies. - - Two unset fields are equal. - - Two unknown field sets are equal if their current - encoded state is equal. - - Two extension sets are equal iff they have corresponding - elements that are pairwise equal. - - Two map fields are equal iff their lengths are the same, - and they contain the same set of elements. Zero-length map - fields are equal. - - Every other combination of things are not equal. 
- -The return value is undefined if a and b are not protocol buffers. -*/ -func Equal(a, b Message) bool { - if a == nil || b == nil { - return a == b - } - v1, v2 := reflect.ValueOf(a), reflect.ValueOf(b) - if v1.Type() != v2.Type() { - return false - } - if v1.Kind() == reflect.Ptr { - if v1.IsNil() { - return v2.IsNil() - } - if v2.IsNil() { - return false - } - v1, v2 = v1.Elem(), v2.Elem() - } - if v1.Kind() != reflect.Struct { - return false - } - return equalStruct(v1, v2) -} - -// v1 and v2 are known to have the same type. -func equalStruct(v1, v2 reflect.Value) bool { - sprop := GetProperties(v1.Type()) - for i := 0; i < v1.NumField(); i++ { - f := v1.Type().Field(i) - if strings.HasPrefix(f.Name, "XXX_") { - continue - } - f1, f2 := v1.Field(i), v2.Field(i) - if f.Type.Kind() == reflect.Ptr { - if n1, n2 := f1.IsNil(), f2.IsNil(); n1 && n2 { - // both unset - continue - } else if n1 != n2 { - // set/unset mismatch - return false - } - f1, f2 = f1.Elem(), f2.Elem() - } - if !equalAny(f1, f2, sprop.Prop[i]) { - return false - } - } - - if em1 := v1.FieldByName("XXX_InternalExtensions"); em1.IsValid() { - em2 := v2.FieldByName("XXX_InternalExtensions") - if !equalExtensions(v1.Type(), em1.Interface().(XXX_InternalExtensions), em2.Interface().(XXX_InternalExtensions)) { - return false - } - } - - if em1 := v1.FieldByName("XXX_extensions"); em1.IsValid() { - em2 := v2.FieldByName("XXX_extensions") - if !equalExtMap(v1.Type(), em1.Interface().(map[int32]Extension), em2.Interface().(map[int32]Extension)) { - return false - } - } - - uf := v1.FieldByName("XXX_unrecognized") - if !uf.IsValid() { - return true - } - - u1 := uf.Bytes() - u2 := v2.FieldByName("XXX_unrecognized").Bytes() - return bytes.Equal(u1, u2) -} - -// v1 and v2 are known to have the same type. -// prop may be nil. 
-func equalAny(v1, v2 reflect.Value, prop *Properties) bool { - if v1.Type() == protoMessageType { - m1, _ := v1.Interface().(Message) - m2, _ := v2.Interface().(Message) - return Equal(m1, m2) - } - switch v1.Kind() { - case reflect.Bool: - return v1.Bool() == v2.Bool() - case reflect.Float32, reflect.Float64: - return v1.Float() == v2.Float() - case reflect.Int32, reflect.Int64: - return v1.Int() == v2.Int() - case reflect.Interface: - // Probably a oneof field; compare the inner values. - n1, n2 := v1.IsNil(), v2.IsNil() - if n1 || n2 { - return n1 == n2 - } - e1, e2 := v1.Elem(), v2.Elem() - if e1.Type() != e2.Type() { - return false - } - return equalAny(e1, e2, nil) - case reflect.Map: - if v1.Len() != v2.Len() { - return false - } - for _, key := range v1.MapKeys() { - val2 := v2.MapIndex(key) - if !val2.IsValid() { - // This key was not found in the second map. - return false - } - if !equalAny(v1.MapIndex(key), val2, nil) { - return false - } - } - return true - case reflect.Ptr: - // Maps may have nil values in them, so check for nil. - if v1.IsNil() && v2.IsNil() { - return true - } - if v1.IsNil() != v2.IsNil() { - return false - } - return equalAny(v1.Elem(), v2.Elem(), prop) - case reflect.Slice: - if v1.Type().Elem().Kind() == reflect.Uint8 { - // short circuit: []byte - - // Edge case: if this is in a proto3 message, a zero length - // bytes field is considered the zero value. 
- if prop != nil && prop.proto3 && v1.Len() == 0 && v2.Len() == 0 { - return true - } - if v1.IsNil() != v2.IsNil() { - return false - } - return bytes.Equal(v1.Interface().([]byte), v2.Interface().([]byte)) - } - - if v1.Len() != v2.Len() { - return false - } - for i := 0; i < v1.Len(); i++ { - if !equalAny(v1.Index(i), v2.Index(i), prop) { - return false - } - } - return true - case reflect.String: - return v1.Interface().(string) == v2.Interface().(string) - case reflect.Struct: - return equalStruct(v1, v2) - case reflect.Uint32, reflect.Uint64: - return v1.Uint() == v2.Uint() - } - - // unknown type, so not a protocol buffer - log.Printf("proto: don't know how to compare %v", v1) - return false -} - -// base is the struct type that the extensions are based on. -// x1 and x2 are InternalExtensions. -func equalExtensions(base reflect.Type, x1, x2 XXX_InternalExtensions) bool { - em1, _ := x1.extensionsRead() - em2, _ := x2.extensionsRead() - return equalExtMap(base, em1, em2) -} - -func equalExtMap(base reflect.Type, em1, em2 map[int32]Extension) bool { - if len(em1) != len(em2) { - return false - } - - for extNum, e1 := range em1 { - e2, ok := em2[extNum] - if !ok { - return false - } - - m1 := extensionAsLegacyType(e1.value) - m2 := extensionAsLegacyType(e2.value) - - if m1 == nil && m2 == nil { - // Both have only encoded form. - if bytes.Equal(e1.enc, e2.enc) { - continue - } - // The bytes are different, but the extensions might still be - // equal. We need to decode them to compare. - } - - if m1 != nil && m2 != nil { - // Both are unencoded. - if !equalAny(reflect.ValueOf(m1), reflect.ValueOf(m2), nil) { - return false - } - continue - } - - // At least one is encoded. To do a semantically correct comparison - // we need to unmarshal them first. - var desc *ExtensionDesc - if m := extensionMaps[base]; m != nil { - desc = m[extNum] - } - if desc == nil { - // If both have only encoded form and the bytes are the same, - // it is handled above. 
We get here when the bytes are different. - // We don't know how to decode it, so just compare them as byte - // slices. - log.Printf("proto: don't know how to compare extension %d of %v", extNum, base) - return false - } - var err error - if m1 == nil { - m1, err = decodeExtension(e1.enc, desc) - } - if m2 == nil && err == nil { - m2, err = decodeExtension(e2.enc, desc) - } - if err != nil { - // The encoded form is invalid. - log.Printf("proto: badly encoded extension %d of %v: %v", extNum, base, err) - return false - } - if !equalAny(reflect.ValueOf(m1), reflect.ValueOf(m2), nil) { - return false - } - } - - return true -} diff --git a/vendor/github.com/golang/protobuf/proto/extensions.go b/vendor/github.com/golang/protobuf/proto/extensions.go deleted file mode 100644 index fa88add..0000000 --- a/vendor/github.com/golang/protobuf/proto/extensions.go +++ /dev/null @@ -1,607 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2010 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. 
-// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -package proto - -/* - * Types and routines for supporting protocol buffer extensions. - */ - -import ( - "errors" - "fmt" - "io" - "reflect" - "strconv" - "sync" -) - -// ErrMissingExtension is the error returned by GetExtension if the named extension is not in the message. -var ErrMissingExtension = errors.New("proto: missing extension") - -// ExtensionRange represents a range of message extensions for a protocol buffer. -// Used in code generated by the protocol compiler. -type ExtensionRange struct { - Start, End int32 // both inclusive -} - -// extendableProto is an interface implemented by any protocol buffer generated by the current -// proto compiler that may be extended. -type extendableProto interface { - Message - ExtensionRangeArray() []ExtensionRange - extensionsWrite() map[int32]Extension - extensionsRead() (map[int32]Extension, sync.Locker) -} - -// extendableProtoV1 is an interface implemented by a protocol buffer generated by the previous -// version of the proto compiler that may be extended. 
-type extendableProtoV1 interface { - Message - ExtensionRangeArray() []ExtensionRange - ExtensionMap() map[int32]Extension -} - -// extensionAdapter is a wrapper around extendableProtoV1 that implements extendableProto. -type extensionAdapter struct { - extendableProtoV1 -} - -func (e extensionAdapter) extensionsWrite() map[int32]Extension { - return e.ExtensionMap() -} - -func (e extensionAdapter) extensionsRead() (map[int32]Extension, sync.Locker) { - return e.ExtensionMap(), notLocker{} -} - -// notLocker is a sync.Locker whose Lock and Unlock methods are nops. -type notLocker struct{} - -func (n notLocker) Lock() {} -func (n notLocker) Unlock() {} - -// extendable returns the extendableProto interface for the given generated proto message. -// If the proto message has the old extension format, it returns a wrapper that implements -// the extendableProto interface. -func extendable(p interface{}) (extendableProto, error) { - switch p := p.(type) { - case extendableProto: - if isNilPtr(p) { - return nil, fmt.Errorf("proto: nil %T is not extendable", p) - } - return p, nil - case extendableProtoV1: - if isNilPtr(p) { - return nil, fmt.Errorf("proto: nil %T is not extendable", p) - } - return extensionAdapter{p}, nil - } - // Don't allocate a specific error containing %T: - // this is the hot path for Clone and MarshalText. - return nil, errNotExtendable -} - -var errNotExtendable = errors.New("proto: not an extendable proto.Message") - -func isNilPtr(x interface{}) bool { - v := reflect.ValueOf(x) - return v.Kind() == reflect.Ptr && v.IsNil() -} - -// XXX_InternalExtensions is an internal representation of proto extensions. -// -// Each generated message struct type embeds an anonymous XXX_InternalExtensions field, -// thus gaining the unexported 'extensions' method, which can be called only from the proto package. 
-// -// The methods of XXX_InternalExtensions are not concurrency safe in general, -// but calls to logically read-only methods such as has and get may be executed concurrently. -type XXX_InternalExtensions struct { - // The struct must be indirect so that if a user inadvertently copies a - // generated message and its embedded XXX_InternalExtensions, they - // avoid the mayhem of a copied mutex. - // - // The mutex serializes all logically read-only operations to p.extensionMap. - // It is up to the client to ensure that write operations to p.extensionMap are - // mutually exclusive with other accesses. - p *struct { - mu sync.Mutex - extensionMap map[int32]Extension - } -} - -// extensionsWrite returns the extension map, creating it on first use. -func (e *XXX_InternalExtensions) extensionsWrite() map[int32]Extension { - if e.p == nil { - e.p = new(struct { - mu sync.Mutex - extensionMap map[int32]Extension - }) - e.p.extensionMap = make(map[int32]Extension) - } - return e.p.extensionMap -} - -// extensionsRead returns the extensions map for read-only use. It may be nil. -// The caller must hold the returned mutex's lock when accessing Elements within the map. -func (e *XXX_InternalExtensions) extensionsRead() (map[int32]Extension, sync.Locker) { - if e.p == nil { - return nil, nil - } - return e.p.extensionMap, &e.p.mu -} - -// ExtensionDesc represents an extension specification. -// Used in generated code from the protocol compiler. 
-type ExtensionDesc struct { - ExtendedType Message // nil pointer to the type that is being extended - ExtensionType interface{} // nil pointer to the extension type - Field int32 // field number - Name string // fully-qualified name of extension, for text formatting - Tag string // protobuf tag style - Filename string // name of the file in which the extension is defined -} - -func (ed *ExtensionDesc) repeated() bool { - t := reflect.TypeOf(ed.ExtensionType) - return t.Kind() == reflect.Slice && t.Elem().Kind() != reflect.Uint8 -} - -// Extension represents an extension in a message. -type Extension struct { - // When an extension is stored in a message using SetExtension - // only desc and value are set. When the message is marshaled - // enc will be set to the encoded form of the message. - // - // When a message is unmarshaled and contains extensions, each - // extension will have only enc set. When such an extension is - // accessed using GetExtension (or GetExtensions) desc and value - // will be set. - desc *ExtensionDesc - - // value is a concrete value for the extension field. Let the type of - // desc.ExtensionType be the "API type" and the type of Extension.value - // be the "storage type". The API type and storage type are the same except: - // * For scalars (except []byte), the API type uses *T, - // while the storage type uses T. - // * For repeated fields, the API type uses []T, while the storage type - // uses *[]T. - // - // The reason for the divergence is so that the storage type more naturally - // matches what is expected of when retrieving the values through the - // protobuf reflection APIs. - // - // The value may only be populated if desc is also populated. - value interface{} - - // enc is the raw bytes for the extension field. - enc []byte -} - -// SetRawExtension is for testing only. 
-func SetRawExtension(base Message, id int32, b []byte) { - epb, err := extendable(base) - if err != nil { - return - } - extmap := epb.extensionsWrite() - extmap[id] = Extension{enc: b} -} - -// isExtensionField returns true iff the given field number is in an extension range. -func isExtensionField(pb extendableProto, field int32) bool { - for _, er := range pb.ExtensionRangeArray() { - if er.Start <= field && field <= er.End { - return true - } - } - return false -} - -// checkExtensionTypes checks that the given extension is valid for pb. -func checkExtensionTypes(pb extendableProto, extension *ExtensionDesc) error { - var pbi interface{} = pb - // Check the extended type. - if ea, ok := pbi.(extensionAdapter); ok { - pbi = ea.extendableProtoV1 - } - if a, b := reflect.TypeOf(pbi), reflect.TypeOf(extension.ExtendedType); a != b { - return fmt.Errorf("proto: bad extended type; %v does not extend %v", b, a) - } - // Check the range. - if !isExtensionField(pb, extension.Field) { - return errors.New("proto: bad extension number; not in declared ranges") - } - return nil -} - -// extPropKey is sufficient to uniquely identify an extension. -type extPropKey struct { - base reflect.Type - field int32 -} - -var extProp = struct { - sync.RWMutex - m map[extPropKey]*Properties -}{ - m: make(map[extPropKey]*Properties), -} - -func extensionProperties(ed *ExtensionDesc) *Properties { - key := extPropKey{base: reflect.TypeOf(ed.ExtendedType), field: ed.Field} - - extProp.RLock() - if prop, ok := extProp.m[key]; ok { - extProp.RUnlock() - return prop - } - extProp.RUnlock() - - extProp.Lock() - defer extProp.Unlock() - // Check again. - if prop, ok := extProp.m[key]; ok { - return prop - } - - prop := new(Properties) - prop.Init(reflect.TypeOf(ed.ExtensionType), "unknown_name", ed.Tag, nil) - extProp.m[key] = prop - return prop -} - -// HasExtension returns whether the given extension is present in pb. 
-func HasExtension(pb Message, extension *ExtensionDesc) bool { - // TODO: Check types, field numbers, etc.? - epb, err := extendable(pb) - if err != nil { - return false - } - extmap, mu := epb.extensionsRead() - if extmap == nil { - return false - } - mu.Lock() - _, ok := extmap[extension.Field] - mu.Unlock() - return ok -} - -// ClearExtension removes the given extension from pb. -func ClearExtension(pb Message, extension *ExtensionDesc) { - epb, err := extendable(pb) - if err != nil { - return - } - // TODO: Check types, field numbers, etc.? - extmap := epb.extensionsWrite() - delete(extmap, extension.Field) -} - -// GetExtension retrieves a proto2 extended field from pb. -// -// If the descriptor is type complete (i.e., ExtensionDesc.ExtensionType is non-nil), -// then GetExtension parses the encoded field and returns a Go value of the specified type. -// If the field is not present, then the default value is returned (if one is specified), -// otherwise ErrMissingExtension is reported. -// -// If the descriptor is not type complete (i.e., ExtensionDesc.ExtensionType is nil), -// then GetExtension returns the raw encoded bytes of the field extension. -func GetExtension(pb Message, extension *ExtensionDesc) (interface{}, error) { - epb, err := extendable(pb) - if err != nil { - return nil, err - } - - if extension.ExtendedType != nil { - // can only check type if this is a complete descriptor - if err := checkExtensionTypes(epb, extension); err != nil { - return nil, err - } - } - - emap, mu := epb.extensionsRead() - if emap == nil { - return defaultExtensionValue(extension) - } - mu.Lock() - defer mu.Unlock() - e, ok := emap[extension.Field] - if !ok { - // defaultExtensionValue returns the default value or - // ErrMissingExtension if there is no default. - return defaultExtensionValue(extension) - } - - if e.value != nil { - // Already decoded. Check the descriptor, though. - if e.desc != extension { - // This shouldn't happen. 
If it does, it means that - // GetExtension was called twice with two different - // descriptors with the same field number. - return nil, errors.New("proto: descriptor conflict") - } - return extensionAsLegacyType(e.value), nil - } - - if extension.ExtensionType == nil { - // incomplete descriptor - return e.enc, nil - } - - v, err := decodeExtension(e.enc, extension) - if err != nil { - return nil, err - } - - // Remember the decoded version and drop the encoded version. - // That way it is safe to mutate what we return. - e.value = extensionAsStorageType(v) - e.desc = extension - e.enc = nil - emap[extension.Field] = e - return extensionAsLegacyType(e.value), nil -} - -// defaultExtensionValue returns the default value for extension. -// If no default for an extension is defined ErrMissingExtension is returned. -func defaultExtensionValue(extension *ExtensionDesc) (interface{}, error) { - if extension.ExtensionType == nil { - // incomplete descriptor, so no default - return nil, ErrMissingExtension - } - - t := reflect.TypeOf(extension.ExtensionType) - props := extensionProperties(extension) - - sf, _, err := fieldDefault(t, props) - if err != nil { - return nil, err - } - - if sf == nil || sf.value == nil { - // There is no default value. - return nil, ErrMissingExtension - } - - if t.Kind() != reflect.Ptr { - // We do not need to return a Ptr, we can directly return sf.value. - return sf.value, nil - } - - // We need to return an interface{} that is a pointer to sf.value. - value := reflect.New(t).Elem() - value.Set(reflect.New(value.Type().Elem())) - if sf.kind == reflect.Int32 { - // We may have an int32 or an enum, but the underlying data is int32. - // Since we can't set an int32 into a non int32 reflect.value directly - // set it as a int32. - value.Elem().SetInt(int64(sf.value.(int32))) - } else { - value.Elem().Set(reflect.ValueOf(sf.value)) - } - return value.Interface(), nil -} - -// decodeExtension decodes an extension encoded in b. 
-func decodeExtension(b []byte, extension *ExtensionDesc) (interface{}, error) { - t := reflect.TypeOf(extension.ExtensionType) - unmarshal := typeUnmarshaler(t, extension.Tag) - - // t is a pointer to a struct, pointer to basic type or a slice. - // Allocate space to store the pointer/slice. - value := reflect.New(t).Elem() - - var err error - for { - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - wire := int(x) & 7 - - b, err = unmarshal(b, valToPointer(value.Addr()), wire) - if err != nil { - return nil, err - } - - if len(b) == 0 { - break - } - } - return value.Interface(), nil -} - -// GetExtensions returns a slice of the extensions present in pb that are also listed in es. -// The returned slice has the same length as es; missing extensions will appear as nil elements. -func GetExtensions(pb Message, es []*ExtensionDesc) (extensions []interface{}, err error) { - epb, err := extendable(pb) - if err != nil { - return nil, err - } - extensions = make([]interface{}, len(es)) - for i, e := range es { - extensions[i], err = GetExtension(epb, e) - if err == ErrMissingExtension { - err = nil - } - if err != nil { - return - } - } - return -} - -// ExtensionDescs returns a new slice containing pb's extension descriptors, in undefined order. -// For non-registered extensions, ExtensionDescs returns an incomplete descriptor containing -// just the Field field, which defines the extension's field number. 
-func ExtensionDescs(pb Message) ([]*ExtensionDesc, error) { - epb, err := extendable(pb) - if err != nil { - return nil, err - } - registeredExtensions := RegisteredExtensions(pb) - - emap, mu := epb.extensionsRead() - if emap == nil { - return nil, nil - } - mu.Lock() - defer mu.Unlock() - extensions := make([]*ExtensionDesc, 0, len(emap)) - for extid, e := range emap { - desc := e.desc - if desc == nil { - desc = registeredExtensions[extid] - if desc == nil { - desc = &ExtensionDesc{Field: extid} - } - } - - extensions = append(extensions, desc) - } - return extensions, nil -} - -// SetExtension sets the specified extension of pb to the specified value. -func SetExtension(pb Message, extension *ExtensionDesc, value interface{}) error { - epb, err := extendable(pb) - if err != nil { - return err - } - if err := checkExtensionTypes(epb, extension); err != nil { - return err - } - typ := reflect.TypeOf(extension.ExtensionType) - if typ != reflect.TypeOf(value) { - return fmt.Errorf("proto: bad extension value type. got: %T, want: %T", value, extension.ExtensionType) - } - // nil extension values need to be caught early, because the - // encoder can't distinguish an ErrNil due to a nil extension - // from an ErrNil due to a missing field. Extensions are - // always optional, so the encoder would just swallow the error - // and drop all the extensions from the encoded message. - if reflect.ValueOf(value).IsNil() { - return fmt.Errorf("proto: SetExtension called with nil value of type %T", value) - } - - extmap := epb.extensionsWrite() - extmap[extension.Field] = Extension{desc: extension, value: extensionAsStorageType(value)} - return nil -} - -// ClearAllExtensions clears all extensions from pb. -func ClearAllExtensions(pb Message) { - epb, err := extendable(pb) - if err != nil { - return - } - m := epb.extensionsWrite() - for k := range m { - delete(m, k) - } -} - -// A global registry of extensions. 
-// The generated code will register the generated descriptors by calling RegisterExtension. - -var extensionMaps = make(map[reflect.Type]map[int32]*ExtensionDesc) - -// RegisterExtension is called from the generated code. -func RegisterExtension(desc *ExtensionDesc) { - st := reflect.TypeOf(desc.ExtendedType).Elem() - m := extensionMaps[st] - if m == nil { - m = make(map[int32]*ExtensionDesc) - extensionMaps[st] = m - } - if _, ok := m[desc.Field]; ok { - panic("proto: duplicate extension registered: " + st.String() + " " + strconv.Itoa(int(desc.Field))) - } - m[desc.Field] = desc -} - -// RegisteredExtensions returns a map of the registered extensions of a -// protocol buffer struct, indexed by the extension number. -// The argument pb should be a nil pointer to the struct type. -func RegisteredExtensions(pb Message) map[int32]*ExtensionDesc { - return extensionMaps[reflect.TypeOf(pb).Elem()] -} - -// extensionAsLegacyType converts an value in the storage type as the API type. -// See Extension.value. -func extensionAsLegacyType(v interface{}) interface{} { - switch rv := reflect.ValueOf(v); rv.Kind() { - case reflect.Bool, reflect.Int32, reflect.Int64, reflect.Uint32, reflect.Uint64, reflect.Float32, reflect.Float64, reflect.String: - // Represent primitive types as a pointer to the value. - rv2 := reflect.New(rv.Type()) - rv2.Elem().Set(rv) - v = rv2.Interface() - case reflect.Ptr: - // Represent slice types as the value itself. - switch rv.Type().Elem().Kind() { - case reflect.Slice: - if rv.IsNil() { - v = reflect.Zero(rv.Type().Elem()).Interface() - } else { - v = rv.Elem().Interface() - } - } - } - return v -} - -// extensionAsStorageType converts an value in the API type as the storage type. -// See Extension.value. -func extensionAsStorageType(v interface{}) interface{} { - switch rv := reflect.ValueOf(v); rv.Kind() { - case reflect.Ptr: - // Represent slice types as the value itself. 
- switch rv.Type().Elem().Kind() { - case reflect.Bool, reflect.Int32, reflect.Int64, reflect.Uint32, reflect.Uint64, reflect.Float32, reflect.Float64, reflect.String: - if rv.IsNil() { - v = reflect.Zero(rv.Type().Elem()).Interface() - } else { - v = rv.Elem().Interface() - } - } - case reflect.Slice: - // Represent slice types as a pointer to the value. - if rv.Type().Elem().Kind() != reflect.Uint8 { - rv2 := reflect.New(rv.Type()) - rv2.Elem().Set(rv) - v = rv2.Interface() - } - } - return v -} diff --git a/vendor/github.com/golang/protobuf/proto/lib.go b/vendor/github.com/golang/protobuf/proto/lib.go deleted file mode 100644 index fdd328b..0000000 --- a/vendor/github.com/golang/protobuf/proto/lib.go +++ /dev/null @@ -1,965 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2010 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -/* -Package proto converts data structures to and from the wire format of -protocol buffers. It works in concert with the Go source code generated -for .proto files by the protocol compiler. - -A summary of the properties of the protocol buffer interface -for a protocol buffer variable v: - - - Names are turned from camel_case to CamelCase for export. - - There are no methods on v to set fields; just treat - them as structure fields. - - There are getters that return a field's value if set, - and return the field's default value if unset. - The getters work even if the receiver is a nil message. - - The zero value for a struct is its correct initialization state. - All desired fields must be set before marshaling. - - A Reset() method will restore a protobuf struct to its zero state. - - Non-repeated fields are pointers to the values; nil means unset. - That is, optional or required field int32 f becomes F *int32. - - Repeated fields are slices. - - Helper functions are available to aid the setting of fields. - msg.Foo = proto.String("hello") // set field - - Constants are defined to hold the default values of all fields that - have them. They have the form Default_StructName_FieldName. - Because the getter methods handle defaulted values, - direct use of these constants should be rare. - - Enums are given type names and maps from names to values. 
- Enum values are prefixed by the enclosing message's name, or by the - enum's type name if it is a top-level enum. Enum types have a String - method, and a Enum method to assist in message construction. - - Nested messages, groups and enums have type names prefixed with the name of - the surrounding message type. - - Extensions are given descriptor names that start with E_, - followed by an underscore-delimited list of the nested messages - that contain it (if any) followed by the CamelCased name of the - extension field itself. HasExtension, ClearExtension, GetExtension - and SetExtension are functions for manipulating extensions. - - Oneof field sets are given a single field in their message, - with distinguished wrapper types for each possible field value. - - Marshal and Unmarshal are functions to encode and decode the wire format. - -When the .proto file specifies `syntax="proto3"`, there are some differences: - - - Non-repeated fields of non-message type are values instead of pointers. - - Enum types do not get an Enum method. - -The simplest way to describe this is to see an example. 
-Given file test.proto, containing - - package example; - - enum FOO { X = 17; } - - message Test { - required string label = 1; - optional int32 type = 2 [default=77]; - repeated int64 reps = 3; - optional group OptionalGroup = 4 { - required string RequiredField = 5; - } - oneof union { - int32 number = 6; - string name = 7; - } - } - -The resulting file, test.pb.go, is: - - package example - - import proto "github.com/golang/protobuf/proto" - import math "math" - - type FOO int32 - const ( - FOO_X FOO = 17 - ) - var FOO_name = map[int32]string{ - 17: "X", - } - var FOO_value = map[string]int32{ - "X": 17, - } - - func (x FOO) Enum() *FOO { - p := new(FOO) - *p = x - return p - } - func (x FOO) String() string { - return proto.EnumName(FOO_name, int32(x)) - } - func (x *FOO) UnmarshalJSON(data []byte) error { - value, err := proto.UnmarshalJSONEnum(FOO_value, data) - if err != nil { - return err - } - *x = FOO(value) - return nil - } - - type Test struct { - Label *string `protobuf:"bytes,1,req,name=label" json:"label,omitempty"` - Type *int32 `protobuf:"varint,2,opt,name=type,def=77" json:"type,omitempty"` - Reps []int64 `protobuf:"varint,3,rep,name=reps" json:"reps,omitempty"` - Optionalgroup *Test_OptionalGroup `protobuf:"group,4,opt,name=OptionalGroup" json:"optionalgroup,omitempty"` - // Types that are valid to be assigned to Union: - // *Test_Number - // *Test_Name - Union isTest_Union `protobuf_oneof:"union"` - XXX_unrecognized []byte `json:"-"` - } - func (m *Test) Reset() { *m = Test{} } - func (m *Test) String() string { return proto.CompactTextString(m) } - func (*Test) ProtoMessage() {} - - type isTest_Union interface { - isTest_Union() - } - - type Test_Number struct { - Number int32 `protobuf:"varint,6,opt,name=number"` - } - type Test_Name struct { - Name string `protobuf:"bytes,7,opt,name=name"` - } - - func (*Test_Number) isTest_Union() {} - func (*Test_Name) isTest_Union() {} - - func (m *Test) GetUnion() isTest_Union { - if m != nil { - return 
m.Union - } - return nil - } - const Default_Test_Type int32 = 77 - - func (m *Test) GetLabel() string { - if m != nil && m.Label != nil { - return *m.Label - } - return "" - } - - func (m *Test) GetType() int32 { - if m != nil && m.Type != nil { - return *m.Type - } - return Default_Test_Type - } - - func (m *Test) GetOptionalgroup() *Test_OptionalGroup { - if m != nil { - return m.Optionalgroup - } - return nil - } - - type Test_OptionalGroup struct { - RequiredField *string `protobuf:"bytes,5,req" json:"RequiredField,omitempty"` - } - func (m *Test_OptionalGroup) Reset() { *m = Test_OptionalGroup{} } - func (m *Test_OptionalGroup) String() string { return proto.CompactTextString(m) } - - func (m *Test_OptionalGroup) GetRequiredField() string { - if m != nil && m.RequiredField != nil { - return *m.RequiredField - } - return "" - } - - func (m *Test) GetNumber() int32 { - if x, ok := m.GetUnion().(*Test_Number); ok { - return x.Number - } - return 0 - } - - func (m *Test) GetName() string { - if x, ok := m.GetUnion().(*Test_Name); ok { - return x.Name - } - return "" - } - - func init() { - proto.RegisterEnum("example.FOO", FOO_name, FOO_value) - } - -To create and play with a Test object: - - package main - - import ( - "log" - - "github.com/golang/protobuf/proto" - pb "./example.pb" - ) - - func main() { - test := &pb.Test{ - Label: proto.String("hello"), - Type: proto.Int32(17), - Reps: []int64{1, 2, 3}, - Optionalgroup: &pb.Test_OptionalGroup{ - RequiredField: proto.String("good bye"), - }, - Union: &pb.Test_Name{"fred"}, - } - data, err := proto.Marshal(test) - if err != nil { - log.Fatal("marshaling error: ", err) - } - newTest := &pb.Test{} - err = proto.Unmarshal(data, newTest) - if err != nil { - log.Fatal("unmarshaling error: ", err) - } - // Now test and newTest contain the same data. 
- if test.GetLabel() != newTest.GetLabel() { - log.Fatalf("data mismatch %q != %q", test.GetLabel(), newTest.GetLabel()) - } - // Use a type switch to determine which oneof was set. - switch u := test.Union.(type) { - case *pb.Test_Number: // u.Number contains the number. - case *pb.Test_Name: // u.Name contains the string. - } - // etc. - } -*/ -package proto - -import ( - "encoding/json" - "fmt" - "log" - "reflect" - "sort" - "strconv" - "sync" -) - -// RequiredNotSetError is an error type returned by either Marshal or Unmarshal. -// Marshal reports this when a required field is not initialized. -// Unmarshal reports this when a required field is missing from the wire data. -type RequiredNotSetError struct{ field string } - -func (e *RequiredNotSetError) Error() string { - if e.field == "" { - return fmt.Sprintf("proto: required field not set") - } - return fmt.Sprintf("proto: required field %q not set", e.field) -} -func (e *RequiredNotSetError) RequiredNotSet() bool { - return true -} - -type invalidUTF8Error struct{ field string } - -func (e *invalidUTF8Error) Error() string { - if e.field == "" { - return "proto: invalid UTF-8 detected" - } - return fmt.Sprintf("proto: field %q contains invalid UTF-8", e.field) -} -func (e *invalidUTF8Error) InvalidUTF8() bool { - return true -} - -// errInvalidUTF8 is a sentinel error to identify fields with invalid UTF-8. -// This error should not be exposed to the external API as such errors should -// be recreated with the field information. -var errInvalidUTF8 = &invalidUTF8Error{} - -// isNonFatal reports whether the error is either a RequiredNotSet error -// or a InvalidUTF8 error. 
-func isNonFatal(err error) bool { - if re, ok := err.(interface{ RequiredNotSet() bool }); ok && re.RequiredNotSet() { - return true - } - if re, ok := err.(interface{ InvalidUTF8() bool }); ok && re.InvalidUTF8() { - return true - } - return false -} - -type nonFatal struct{ E error } - -// Merge merges err into nf and reports whether it was successful. -// Otherwise it returns false for any fatal non-nil errors. -func (nf *nonFatal) Merge(err error) (ok bool) { - if err == nil { - return true // not an error - } - if !isNonFatal(err) { - return false // fatal error - } - if nf.E == nil { - nf.E = err // store first instance of non-fatal error - } - return true -} - -// Message is implemented by generated protocol buffer messages. -type Message interface { - Reset() - String() string - ProtoMessage() -} - -// A Buffer is a buffer manager for marshaling and unmarshaling -// protocol buffers. It may be reused between invocations to -// reduce memory usage. It is not necessary to use a Buffer; -// the global functions Marshal and Unmarshal create a -// temporary Buffer and are fine for most applications. -type Buffer struct { - buf []byte // encode/decode byte stream - index int // read point - - deterministic bool -} - -// NewBuffer allocates a new Buffer and initializes its internal data to -// the contents of the argument slice. -func NewBuffer(e []byte) *Buffer { - return &Buffer{buf: e} -} - -// Reset resets the Buffer, ready for marshaling a new protocol buffer. -func (p *Buffer) Reset() { - p.buf = p.buf[0:0] // for reading/writing - p.index = 0 // for reading -} - -// SetBuf replaces the internal buffer with the slice, -// ready for unmarshaling the contents of the slice. -func (p *Buffer) SetBuf(s []byte) { - p.buf = s - p.index = 0 -} - -// Bytes returns the contents of the Buffer. -func (p *Buffer) Bytes() []byte { return p.buf } - -// SetDeterministic sets whether to use deterministic serialization. 
-// -// Deterministic serialization guarantees that for a given binary, equal -// messages will always be serialized to the same bytes. This implies: -// -// - Repeated serialization of a message will return the same bytes. -// - Different processes of the same binary (which may be executing on -// different machines) will serialize equal messages to the same bytes. -// -// Note that the deterministic serialization is NOT canonical across -// languages. It is not guaranteed to remain stable over time. It is unstable -// across different builds with schema changes due to unknown fields. -// Users who need canonical serialization (e.g., persistent storage in a -// canonical form, fingerprinting, etc.) should define their own -// canonicalization specification and implement their own serializer rather -// than relying on this API. -// -// If deterministic serialization is requested, map entries will be sorted -// by keys in lexographical order. This is an implementation detail and -// subject to change. -func (p *Buffer) SetDeterministic(deterministic bool) { - p.deterministic = deterministic -} - -/* - * Helper routines for simplifying the creation of optional fields of basic type. - */ - -// Bool is a helper routine that allocates a new bool value -// to store v and returns a pointer to it. -func Bool(v bool) *bool { - return &v -} - -// Int32 is a helper routine that allocates a new int32 value -// to store v and returns a pointer to it. -func Int32(v int32) *int32 { - return &v -} - -// Int is a helper routine that allocates a new int32 value -// to store v and returns a pointer to it, but unlike Int32 -// its argument value is an int. -func Int(v int) *int32 { - p := new(int32) - *p = int32(v) - return p -} - -// Int64 is a helper routine that allocates a new int64 value -// to store v and returns a pointer to it. 
-func Int64(v int64) *int64 { - return &v -} - -// Float32 is a helper routine that allocates a new float32 value -// to store v and returns a pointer to it. -func Float32(v float32) *float32 { - return &v -} - -// Float64 is a helper routine that allocates a new float64 value -// to store v and returns a pointer to it. -func Float64(v float64) *float64 { - return &v -} - -// Uint32 is a helper routine that allocates a new uint32 value -// to store v and returns a pointer to it. -func Uint32(v uint32) *uint32 { - return &v -} - -// Uint64 is a helper routine that allocates a new uint64 value -// to store v and returns a pointer to it. -func Uint64(v uint64) *uint64 { - return &v -} - -// String is a helper routine that allocates a new string value -// to store v and returns a pointer to it. -func String(v string) *string { - return &v -} - -// EnumName is a helper function to simplify printing protocol buffer enums -// by name. Given an enum map and a value, it returns a useful string. -func EnumName(m map[int32]string, v int32) string { - s, ok := m[v] - if ok { - return s - } - return strconv.Itoa(int(v)) -} - -// UnmarshalJSONEnum is a helper function to simplify recovering enum int values -// from their JSON-encoded representation. Given a map from the enum's symbolic -// names to its int values, and a byte buffer containing the JSON-encoded -// value, it returns an int32 that can be cast to the enum type by the caller. -// -// The function can deal with both JSON representations, numeric and symbolic. -func UnmarshalJSONEnum(m map[string]int32, data []byte, enumName string) (int32, error) { - if data[0] == '"' { - // New style: enums are strings. - var repr string - if err := json.Unmarshal(data, &repr); err != nil { - return -1, err - } - val, ok := m[repr] - if !ok { - return 0, fmt.Errorf("unrecognized enum %s value %q", enumName, repr) - } - return val, nil - } - // Old style: enums are ints. 
- var val int32 - if err := json.Unmarshal(data, &val); err != nil { - return 0, fmt.Errorf("cannot unmarshal %#q into enum %s", data, enumName) - } - return val, nil -} - -// DebugPrint dumps the encoded data in b in a debugging format with a header -// including the string s. Used in testing but made available for general debugging. -func (p *Buffer) DebugPrint(s string, b []byte) { - var u uint64 - - obuf := p.buf - index := p.index - p.buf = b - p.index = 0 - depth := 0 - - fmt.Printf("\n--- %s ---\n", s) - -out: - for { - for i := 0; i < depth; i++ { - fmt.Print(" ") - } - - index := p.index - if index == len(p.buf) { - break - } - - op, err := p.DecodeVarint() - if err != nil { - fmt.Printf("%3d: fetching op err %v\n", index, err) - break out - } - tag := op >> 3 - wire := op & 7 - - switch wire { - default: - fmt.Printf("%3d: t=%3d unknown wire=%d\n", - index, tag, wire) - break out - - case WireBytes: - var r []byte - - r, err = p.DecodeRawBytes(false) - if err != nil { - break out - } - fmt.Printf("%3d: t=%3d bytes [%d]", index, tag, len(r)) - if len(r) <= 6 { - for i := 0; i < len(r); i++ { - fmt.Printf(" %.2x", r[i]) - } - } else { - for i := 0; i < 3; i++ { - fmt.Printf(" %.2x", r[i]) - } - fmt.Printf(" ..") - for i := len(r) - 3; i < len(r); i++ { - fmt.Printf(" %.2x", r[i]) - } - } - fmt.Printf("\n") - - case WireFixed32: - u, err = p.DecodeFixed32() - if err != nil { - fmt.Printf("%3d: t=%3d fix32 err %v\n", index, tag, err) - break out - } - fmt.Printf("%3d: t=%3d fix32 %d\n", index, tag, u) - - case WireFixed64: - u, err = p.DecodeFixed64() - if err != nil { - fmt.Printf("%3d: t=%3d fix64 err %v\n", index, tag, err) - break out - } - fmt.Printf("%3d: t=%3d fix64 %d\n", index, tag, u) - - case WireVarint: - u, err = p.DecodeVarint() - if err != nil { - fmt.Printf("%3d: t=%3d varint err %v\n", index, tag, err) - break out - } - fmt.Printf("%3d: t=%3d varint %d\n", index, tag, u) - - case WireStartGroup: - fmt.Printf("%3d: t=%3d start\n", index, tag) 
- depth++ - - case WireEndGroup: - depth-- - fmt.Printf("%3d: t=%3d end\n", index, tag) - } - } - - if depth != 0 { - fmt.Printf("%3d: start-end not balanced %d\n", p.index, depth) - } - fmt.Printf("\n") - - p.buf = obuf - p.index = index -} - -// SetDefaults sets unset protocol buffer fields to their default values. -// It only modifies fields that are both unset and have defined defaults. -// It recursively sets default values in any non-nil sub-messages. -func SetDefaults(pb Message) { - setDefaults(reflect.ValueOf(pb), true, false) -} - -// v is a pointer to a struct. -func setDefaults(v reflect.Value, recur, zeros bool) { - v = v.Elem() - - defaultMu.RLock() - dm, ok := defaults[v.Type()] - defaultMu.RUnlock() - if !ok { - dm = buildDefaultMessage(v.Type()) - defaultMu.Lock() - defaults[v.Type()] = dm - defaultMu.Unlock() - } - - for _, sf := range dm.scalars { - f := v.Field(sf.index) - if !f.IsNil() { - // field already set - continue - } - dv := sf.value - if dv == nil && !zeros { - // no explicit default, and don't want to set zeros - continue - } - fptr := f.Addr().Interface() // **T - // TODO: Consider batching the allocations we do here. 
- switch sf.kind { - case reflect.Bool: - b := new(bool) - if dv != nil { - *b = dv.(bool) - } - *(fptr.(**bool)) = b - case reflect.Float32: - f := new(float32) - if dv != nil { - *f = dv.(float32) - } - *(fptr.(**float32)) = f - case reflect.Float64: - f := new(float64) - if dv != nil { - *f = dv.(float64) - } - *(fptr.(**float64)) = f - case reflect.Int32: - // might be an enum - if ft := f.Type(); ft != int32PtrType { - // enum - f.Set(reflect.New(ft.Elem())) - if dv != nil { - f.Elem().SetInt(int64(dv.(int32))) - } - } else { - // int32 field - i := new(int32) - if dv != nil { - *i = dv.(int32) - } - *(fptr.(**int32)) = i - } - case reflect.Int64: - i := new(int64) - if dv != nil { - *i = dv.(int64) - } - *(fptr.(**int64)) = i - case reflect.String: - s := new(string) - if dv != nil { - *s = dv.(string) - } - *(fptr.(**string)) = s - case reflect.Uint8: - // exceptional case: []byte - var b []byte - if dv != nil { - db := dv.([]byte) - b = make([]byte, len(db)) - copy(b, db) - } else { - b = []byte{} - } - *(fptr.(*[]byte)) = b - case reflect.Uint32: - u := new(uint32) - if dv != nil { - *u = dv.(uint32) - } - *(fptr.(**uint32)) = u - case reflect.Uint64: - u := new(uint64) - if dv != nil { - *u = dv.(uint64) - } - *(fptr.(**uint64)) = u - default: - log.Printf("proto: can't set default for field %v (sf.kind=%v)", f, sf.kind) - } - } - - for _, ni := range dm.nested { - f := v.Field(ni) - // f is *T or []*T or map[T]*T - switch f.Kind() { - case reflect.Ptr: - if f.IsNil() { - continue - } - setDefaults(f, recur, zeros) - - case reflect.Slice: - for i := 0; i < f.Len(); i++ { - e := f.Index(i) - if e.IsNil() { - continue - } - setDefaults(e, recur, zeros) - } - - case reflect.Map: - for _, k := range f.MapKeys() { - e := f.MapIndex(k) - if e.IsNil() { - continue - } - setDefaults(e, recur, zeros) - } - } - } -} - -var ( - // defaults maps a protocol buffer struct type to a slice of the fields, - // with its scalar fields set to their proto-declared non-zero 
default values. - defaultMu sync.RWMutex - defaults = make(map[reflect.Type]defaultMessage) - - int32PtrType = reflect.TypeOf((*int32)(nil)) -) - -// defaultMessage represents information about the default values of a message. -type defaultMessage struct { - scalars []scalarField - nested []int // struct field index of nested messages -} - -type scalarField struct { - index int // struct field index - kind reflect.Kind // element type (the T in *T or []T) - value interface{} // the proto-declared default value, or nil -} - -// t is a struct type. -func buildDefaultMessage(t reflect.Type) (dm defaultMessage) { - sprop := GetProperties(t) - for _, prop := range sprop.Prop { - fi, ok := sprop.decoderTags.get(prop.Tag) - if !ok { - // XXX_unrecognized - continue - } - ft := t.Field(fi).Type - - sf, nested, err := fieldDefault(ft, prop) - switch { - case err != nil: - log.Print(err) - case nested: - dm.nested = append(dm.nested, fi) - case sf != nil: - sf.index = fi - dm.scalars = append(dm.scalars, *sf) - } - } - - return dm -} - -// fieldDefault returns the scalarField for field type ft. -// sf will be nil if the field can not have a default. -// nestedMessage will be true if this is a nested message. -// Note that sf.index is not set on return. 
-func fieldDefault(ft reflect.Type, prop *Properties) (sf *scalarField, nestedMessage bool, err error) { - var canHaveDefault bool - switch ft.Kind() { - case reflect.Ptr: - if ft.Elem().Kind() == reflect.Struct { - nestedMessage = true - } else { - canHaveDefault = true // proto2 scalar field - } - - case reflect.Slice: - switch ft.Elem().Kind() { - case reflect.Ptr: - nestedMessage = true // repeated message - case reflect.Uint8: - canHaveDefault = true // bytes field - } - - case reflect.Map: - if ft.Elem().Kind() == reflect.Ptr { - nestedMessage = true // map with message values - } - } - - if !canHaveDefault { - if nestedMessage { - return nil, true, nil - } - return nil, false, nil - } - - // We now know that ft is a pointer or slice. - sf = &scalarField{kind: ft.Elem().Kind()} - - // scalar fields without defaults - if !prop.HasDefault { - return sf, false, nil - } - - // a scalar field: either *T or []byte - switch ft.Elem().Kind() { - case reflect.Bool: - x, err := strconv.ParseBool(prop.Default) - if err != nil { - return nil, false, fmt.Errorf("proto: bad default bool %q: %v", prop.Default, err) - } - sf.value = x - case reflect.Float32: - x, err := strconv.ParseFloat(prop.Default, 32) - if err != nil { - return nil, false, fmt.Errorf("proto: bad default float32 %q: %v", prop.Default, err) - } - sf.value = float32(x) - case reflect.Float64: - x, err := strconv.ParseFloat(prop.Default, 64) - if err != nil { - return nil, false, fmt.Errorf("proto: bad default float64 %q: %v", prop.Default, err) - } - sf.value = x - case reflect.Int32: - x, err := strconv.ParseInt(prop.Default, 10, 32) - if err != nil { - return nil, false, fmt.Errorf("proto: bad default int32 %q: %v", prop.Default, err) - } - sf.value = int32(x) - case reflect.Int64: - x, err := strconv.ParseInt(prop.Default, 10, 64) - if err != nil { - return nil, false, fmt.Errorf("proto: bad default int64 %q: %v", prop.Default, err) - } - sf.value = x - case reflect.String: - sf.value = prop.Default - 
case reflect.Uint8: - // []byte (not *uint8) - sf.value = []byte(prop.Default) - case reflect.Uint32: - x, err := strconv.ParseUint(prop.Default, 10, 32) - if err != nil { - return nil, false, fmt.Errorf("proto: bad default uint32 %q: %v", prop.Default, err) - } - sf.value = uint32(x) - case reflect.Uint64: - x, err := strconv.ParseUint(prop.Default, 10, 64) - if err != nil { - return nil, false, fmt.Errorf("proto: bad default uint64 %q: %v", prop.Default, err) - } - sf.value = x - default: - return nil, false, fmt.Errorf("proto: unhandled def kind %v", ft.Elem().Kind()) - } - - return sf, false, nil -} - -// mapKeys returns a sort.Interface to be used for sorting the map keys. -// Map fields may have key types of non-float scalars, strings and enums. -func mapKeys(vs []reflect.Value) sort.Interface { - s := mapKeySorter{vs: vs} - - // Type specialization per https://developers.google.com/protocol-buffers/docs/proto#maps. - if len(vs) == 0 { - return s - } - switch vs[0].Kind() { - case reflect.Int32, reflect.Int64: - s.less = func(a, b reflect.Value) bool { return a.Int() < b.Int() } - case reflect.Uint32, reflect.Uint64: - s.less = func(a, b reflect.Value) bool { return a.Uint() < b.Uint() } - case reflect.Bool: - s.less = func(a, b reflect.Value) bool { return !a.Bool() && b.Bool() } // false < true - case reflect.String: - s.less = func(a, b reflect.Value) bool { return a.String() < b.String() } - default: - panic(fmt.Sprintf("unsupported map key type: %v", vs[0].Kind())) - } - - return s -} - -type mapKeySorter struct { - vs []reflect.Value - less func(a, b reflect.Value) bool -} - -func (s mapKeySorter) Len() int { return len(s.vs) } -func (s mapKeySorter) Swap(i, j int) { s.vs[i], s.vs[j] = s.vs[j], s.vs[i] } -func (s mapKeySorter) Less(i, j int) bool { - return s.less(s.vs[i], s.vs[j]) -} - -// isProto3Zero reports whether v is a zero proto3 value. 
-func isProto3Zero(v reflect.Value) bool { - switch v.Kind() { - case reflect.Bool: - return !v.Bool() - case reflect.Int32, reflect.Int64: - return v.Int() == 0 - case reflect.Uint32, reflect.Uint64: - return v.Uint() == 0 - case reflect.Float32, reflect.Float64: - return v.Float() == 0 - case reflect.String: - return v.String() == "" - } - return false -} - -const ( - // ProtoPackageIsVersion3 is referenced from generated protocol buffer files - // to assert that that code is compatible with this version of the proto package. - ProtoPackageIsVersion3 = true - - // ProtoPackageIsVersion2 is referenced from generated protocol buffer files - // to assert that that code is compatible with this version of the proto package. - ProtoPackageIsVersion2 = true - - // ProtoPackageIsVersion1 is referenced from generated protocol buffer files - // to assert that that code is compatible with this version of the proto package. - ProtoPackageIsVersion1 = true -) - -// InternalMessageInfo is a type used internally by generated .pb.go files. -// This type is not intended to be used by non-generated code. -// This type is not subject to any compatibility guarantee. -type InternalMessageInfo struct { - marshal *marshalInfo - unmarshal *unmarshalInfo - merge *mergeInfo - discard *discardInfo -} diff --git a/vendor/github.com/golang/protobuf/proto/message_set.go b/vendor/github.com/golang/protobuf/proto/message_set.go deleted file mode 100644 index f48a756..0000000 --- a/vendor/github.com/golang/protobuf/proto/message_set.go +++ /dev/null @@ -1,181 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2010 The Go Authors. All rights reserved. 
-// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -package proto - -/* - * Support for message sets. - */ - -import ( - "errors" -) - -// errNoMessageTypeID occurs when a protocol buffer does not have a message type ID. -// A message type ID is required for storing a protocol buffer in a message set. 
-var errNoMessageTypeID = errors.New("proto does not have a message type ID") - -// The first two types (_MessageSet_Item and messageSet) -// model what the protocol compiler produces for the following protocol message: -// message MessageSet { -// repeated group Item = 1 { -// required int32 type_id = 2; -// required string message = 3; -// }; -// } -// That is the MessageSet wire format. We can't use a proto to generate these -// because that would introduce a circular dependency between it and this package. - -type _MessageSet_Item struct { - TypeId *int32 `protobuf:"varint,2,req,name=type_id"` - Message []byte `protobuf:"bytes,3,req,name=message"` -} - -type messageSet struct { - Item []*_MessageSet_Item `protobuf:"group,1,rep"` - XXX_unrecognized []byte - // TODO: caching? -} - -// Make sure messageSet is a Message. -var _ Message = (*messageSet)(nil) - -// messageTypeIder is an interface satisfied by a protocol buffer type -// that may be stored in a MessageSet. -type messageTypeIder interface { - MessageTypeId() int32 -} - -func (ms *messageSet) find(pb Message) *_MessageSet_Item { - mti, ok := pb.(messageTypeIder) - if !ok { - return nil - } - id := mti.MessageTypeId() - for _, item := range ms.Item { - if *item.TypeId == id { - return item - } - } - return nil -} - -func (ms *messageSet) Has(pb Message) bool { - return ms.find(pb) != nil -} - -func (ms *messageSet) Unmarshal(pb Message) error { - if item := ms.find(pb); item != nil { - return Unmarshal(item.Message, pb) - } - if _, ok := pb.(messageTypeIder); !ok { - return errNoMessageTypeID - } - return nil // TODO: return error instead? 
-} - -func (ms *messageSet) Marshal(pb Message) error { - msg, err := Marshal(pb) - if err != nil { - return err - } - if item := ms.find(pb); item != nil { - // reuse existing item - item.Message = msg - return nil - } - - mti, ok := pb.(messageTypeIder) - if !ok { - return errNoMessageTypeID - } - - mtid := mti.MessageTypeId() - ms.Item = append(ms.Item, &_MessageSet_Item{ - TypeId: &mtid, - Message: msg, - }) - return nil -} - -func (ms *messageSet) Reset() { *ms = messageSet{} } -func (ms *messageSet) String() string { return CompactTextString(ms) } -func (*messageSet) ProtoMessage() {} - -// Support for the message_set_wire_format message option. - -func skipVarint(buf []byte) []byte { - i := 0 - for ; buf[i]&0x80 != 0; i++ { - } - return buf[i+1:] -} - -// unmarshalMessageSet decodes the extension map encoded in buf in the message set wire format. -// It is called by Unmarshal methods on protocol buffer messages with the message_set_wire_format option. -func unmarshalMessageSet(buf []byte, exts interface{}) error { - var m map[int32]Extension - switch exts := exts.(type) { - case *XXX_InternalExtensions: - m = exts.extensionsWrite() - case map[int32]Extension: - m = exts - default: - return errors.New("proto: not an extension map") - } - - ms := new(messageSet) - if err := Unmarshal(buf, ms); err != nil { - return err - } - for _, item := range ms.Item { - id := *item.TypeId - msg := item.Message - - // Restore wire type and field number varint, plus length varint. - // Be careful to preserve duplicate items. - b := EncodeVarint(uint64(id)<<3 | WireBytes) - if ext, ok := m[id]; ok { - // Existing data; rip off the tag and length varint - // so we join the new data correctly. - // We can assume that ext.enc is set because we are unmarshaling. - o := ext.enc[len(b):] // skip wire type and field number - _, n := DecodeVarint(o) // calculate length of length varint - o = o[n:] // skip length varint - msg = append(o, msg...) 
// join old data and new data - } - b = append(b, EncodeVarint(uint64(len(msg)))...) - b = append(b, msg...) - - m[id] = Extension{enc: b} - } - return nil -} diff --git a/vendor/github.com/golang/protobuf/proto/pointer_reflect.go b/vendor/github.com/golang/protobuf/proto/pointer_reflect.go deleted file mode 100644 index 94fa919..0000000 --- a/vendor/github.com/golang/protobuf/proto/pointer_reflect.go +++ /dev/null @@ -1,360 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2012 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -// +build purego appengine js - -// This file contains an implementation of proto field accesses using package reflect. -// It is slower than the code in pointer_unsafe.go but it avoids package unsafe and can -// be used on App Engine. - -package proto - -import ( - "reflect" - "sync" -) - -const unsafeAllowed = false - -// A field identifies a field in a struct, accessible from a pointer. -// In this implementation, a field is identified by the sequence of field indices -// passed to reflect's FieldByIndex. -type field []int - -// toField returns a field equivalent to the given reflect field. -func toField(f *reflect.StructField) field { - return f.Index -} - -// invalidField is an invalid field identifier. -var invalidField = field(nil) - -// zeroField is a noop when calling pointer.offset. -var zeroField = field([]int{}) - -// IsValid reports whether the field identifier is valid. -func (f field) IsValid() bool { return f != nil } - -// The pointer type is for the table-driven decoder. -// The implementation here uses a reflect.Value of pointer type to -// create a generic pointer. In pointer_unsafe.go we use unsafe -// instead of reflect to implement the same (but faster) interface. -type pointer struct { - v reflect.Value -} - -// toPointer converts an interface of pointer type to a pointer -// that points to the same target. 
-func toPointer(i *Message) pointer { - return pointer{v: reflect.ValueOf(*i)} -} - -// toAddrPointer converts an interface to a pointer that points to -// the interface data. -func toAddrPointer(i *interface{}, isptr, deref bool) pointer { - v := reflect.ValueOf(*i) - u := reflect.New(v.Type()) - u.Elem().Set(v) - if deref { - u = u.Elem() - } - return pointer{v: u} -} - -// valToPointer converts v to a pointer. v must be of pointer type. -func valToPointer(v reflect.Value) pointer { - return pointer{v: v} -} - -// offset converts from a pointer to a structure to a pointer to -// one of its fields. -func (p pointer) offset(f field) pointer { - return pointer{v: p.v.Elem().FieldByIndex(f).Addr()} -} - -func (p pointer) isNil() bool { - return p.v.IsNil() -} - -// grow updates the slice s in place to make it one element longer. -// s must be addressable. -// Returns the (addressable) new element. -func grow(s reflect.Value) reflect.Value { - n, m := s.Len(), s.Cap() - if n < m { - s.SetLen(n + 1) - } else { - s.Set(reflect.Append(s, reflect.Zero(s.Type().Elem()))) - } - return s.Index(n) -} - -func (p pointer) toInt64() *int64 { - return p.v.Interface().(*int64) -} -func (p pointer) toInt64Ptr() **int64 { - return p.v.Interface().(**int64) -} -func (p pointer) toInt64Slice() *[]int64 { - return p.v.Interface().(*[]int64) -} - -var int32ptr = reflect.TypeOf((*int32)(nil)) - -func (p pointer) toInt32() *int32 { - return p.v.Convert(int32ptr).Interface().(*int32) -} - -// The toInt32Ptr/Slice methods don't work because of enums. -// Instead, we must use set/get methods for the int32ptr/slice case. 
-/* - func (p pointer) toInt32Ptr() **int32 { - return p.v.Interface().(**int32) -} - func (p pointer) toInt32Slice() *[]int32 { - return p.v.Interface().(*[]int32) -} -*/ -func (p pointer) getInt32Ptr() *int32 { - if p.v.Type().Elem().Elem() == reflect.TypeOf(int32(0)) { - // raw int32 type - return p.v.Elem().Interface().(*int32) - } - // an enum - return p.v.Elem().Convert(int32PtrType).Interface().(*int32) -} -func (p pointer) setInt32Ptr(v int32) { - // Allocate value in a *int32. Possibly convert that to a *enum. - // Then assign it to a **int32 or **enum. - // Note: we can convert *int32 to *enum, but we can't convert - // **int32 to **enum! - p.v.Elem().Set(reflect.ValueOf(&v).Convert(p.v.Type().Elem())) -} - -// getInt32Slice copies []int32 from p as a new slice. -// This behavior differs from the implementation in pointer_unsafe.go. -func (p pointer) getInt32Slice() []int32 { - if p.v.Type().Elem().Elem() == reflect.TypeOf(int32(0)) { - // raw int32 type - return p.v.Elem().Interface().([]int32) - } - // an enum - // Allocate a []int32, then assign []enum's values into it. - // Note: we can't convert []enum to []int32. - slice := p.v.Elem() - s := make([]int32, slice.Len()) - for i := 0; i < slice.Len(); i++ { - s[i] = int32(slice.Index(i).Int()) - } - return s -} - -// setInt32Slice copies []int32 into p as a new slice. -// This behavior differs from the implementation in pointer_unsafe.go. -func (p pointer) setInt32Slice(v []int32) { - if p.v.Type().Elem().Elem() == reflect.TypeOf(int32(0)) { - // raw int32 type - p.v.Elem().Set(reflect.ValueOf(v)) - return - } - // an enum - // Allocate a []enum, then assign []int32's values into it. - // Note: we can't convert []enum to []int32. 
- slice := reflect.MakeSlice(p.v.Type().Elem(), len(v), cap(v)) - for i, x := range v { - slice.Index(i).SetInt(int64(x)) - } - p.v.Elem().Set(slice) -} -func (p pointer) appendInt32Slice(v int32) { - grow(p.v.Elem()).SetInt(int64(v)) -} - -func (p pointer) toUint64() *uint64 { - return p.v.Interface().(*uint64) -} -func (p pointer) toUint64Ptr() **uint64 { - return p.v.Interface().(**uint64) -} -func (p pointer) toUint64Slice() *[]uint64 { - return p.v.Interface().(*[]uint64) -} -func (p pointer) toUint32() *uint32 { - return p.v.Interface().(*uint32) -} -func (p pointer) toUint32Ptr() **uint32 { - return p.v.Interface().(**uint32) -} -func (p pointer) toUint32Slice() *[]uint32 { - return p.v.Interface().(*[]uint32) -} -func (p pointer) toBool() *bool { - return p.v.Interface().(*bool) -} -func (p pointer) toBoolPtr() **bool { - return p.v.Interface().(**bool) -} -func (p pointer) toBoolSlice() *[]bool { - return p.v.Interface().(*[]bool) -} -func (p pointer) toFloat64() *float64 { - return p.v.Interface().(*float64) -} -func (p pointer) toFloat64Ptr() **float64 { - return p.v.Interface().(**float64) -} -func (p pointer) toFloat64Slice() *[]float64 { - return p.v.Interface().(*[]float64) -} -func (p pointer) toFloat32() *float32 { - return p.v.Interface().(*float32) -} -func (p pointer) toFloat32Ptr() **float32 { - return p.v.Interface().(**float32) -} -func (p pointer) toFloat32Slice() *[]float32 { - return p.v.Interface().(*[]float32) -} -func (p pointer) toString() *string { - return p.v.Interface().(*string) -} -func (p pointer) toStringPtr() **string { - return p.v.Interface().(**string) -} -func (p pointer) toStringSlice() *[]string { - return p.v.Interface().(*[]string) -} -func (p pointer) toBytes() *[]byte { - return p.v.Interface().(*[]byte) -} -func (p pointer) toBytesSlice() *[][]byte { - return p.v.Interface().(*[][]byte) -} -func (p pointer) toExtensions() *XXX_InternalExtensions { - return p.v.Interface().(*XXX_InternalExtensions) -} -func (p 
pointer) toOldExtensions() *map[int32]Extension { - return p.v.Interface().(*map[int32]Extension) -} -func (p pointer) getPointer() pointer { - return pointer{v: p.v.Elem()} -} -func (p pointer) setPointer(q pointer) { - p.v.Elem().Set(q.v) -} -func (p pointer) appendPointer(q pointer) { - grow(p.v.Elem()).Set(q.v) -} - -// getPointerSlice copies []*T from p as a new []pointer. -// This behavior differs from the implementation in pointer_unsafe.go. -func (p pointer) getPointerSlice() []pointer { - if p.v.IsNil() { - return nil - } - n := p.v.Elem().Len() - s := make([]pointer, n) - for i := 0; i < n; i++ { - s[i] = pointer{v: p.v.Elem().Index(i)} - } - return s -} - -// setPointerSlice copies []pointer into p as a new []*T. -// This behavior differs from the implementation in pointer_unsafe.go. -func (p pointer) setPointerSlice(v []pointer) { - if v == nil { - p.v.Elem().Set(reflect.New(p.v.Elem().Type()).Elem()) - return - } - s := reflect.MakeSlice(p.v.Elem().Type(), 0, len(v)) - for _, p := range v { - s = reflect.Append(s, p.v) - } - p.v.Elem().Set(s) -} - -// getInterfacePointer returns a pointer that points to the -// interface data of the interface pointed by p. -func (p pointer) getInterfacePointer() pointer { - if p.v.Elem().IsNil() { - return pointer{v: p.v.Elem()} - } - return pointer{v: p.v.Elem().Elem().Elem().Field(0).Addr()} // *interface -> interface -> *struct -> struct -} - -func (p pointer) asPointerTo(t reflect.Type) reflect.Value { - // TODO: check that p.v.Type().Elem() == t? 
- return p.v -} - -func atomicLoadUnmarshalInfo(p **unmarshalInfo) *unmarshalInfo { - atomicLock.Lock() - defer atomicLock.Unlock() - return *p -} -func atomicStoreUnmarshalInfo(p **unmarshalInfo, v *unmarshalInfo) { - atomicLock.Lock() - defer atomicLock.Unlock() - *p = v -} -func atomicLoadMarshalInfo(p **marshalInfo) *marshalInfo { - atomicLock.Lock() - defer atomicLock.Unlock() - return *p -} -func atomicStoreMarshalInfo(p **marshalInfo, v *marshalInfo) { - atomicLock.Lock() - defer atomicLock.Unlock() - *p = v -} -func atomicLoadMergeInfo(p **mergeInfo) *mergeInfo { - atomicLock.Lock() - defer atomicLock.Unlock() - return *p -} -func atomicStoreMergeInfo(p **mergeInfo, v *mergeInfo) { - atomicLock.Lock() - defer atomicLock.Unlock() - *p = v -} -func atomicLoadDiscardInfo(p **discardInfo) *discardInfo { - atomicLock.Lock() - defer atomicLock.Unlock() - return *p -} -func atomicStoreDiscardInfo(p **discardInfo, v *discardInfo) { - atomicLock.Lock() - defer atomicLock.Unlock() - *p = v -} - -var atomicLock sync.Mutex diff --git a/vendor/github.com/golang/protobuf/proto/pointer_unsafe.go b/vendor/github.com/golang/protobuf/proto/pointer_unsafe.go deleted file mode 100644 index dbfffe0..0000000 --- a/vendor/github.com/golang/protobuf/proto/pointer_unsafe.go +++ /dev/null @@ -1,313 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2012 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. 
-// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -// +build !purego,!appengine,!js - -// This file contains the implementation of the proto field accesses using package unsafe. - -package proto - -import ( - "reflect" - "sync/atomic" - "unsafe" -) - -const unsafeAllowed = true - -// A field identifies a field in a struct, accessible from a pointer. -// In this implementation, a field is identified by its byte offset from the start of the struct. -type field uintptr - -// toField returns a field equivalent to the given reflect field. -func toField(f *reflect.StructField) field { - return field(f.Offset) -} - -// invalidField is an invalid field identifier. -const invalidField = ^field(0) - -// zeroField is a noop when calling pointer.offset. -const zeroField = field(0) - -// IsValid reports whether the field identifier is valid. -func (f field) IsValid() bool { - return f != invalidField -} - -// The pointer type below is for the new table-driven encoder/decoder. 
-// The implementation here uses unsafe.Pointer to create a generic pointer. -// In pointer_reflect.go we use reflect instead of unsafe to implement -// the same (but slower) interface. -type pointer struct { - p unsafe.Pointer -} - -// size of pointer -var ptrSize = unsafe.Sizeof(uintptr(0)) - -// toPointer converts an interface of pointer type to a pointer -// that points to the same target. -func toPointer(i *Message) pointer { - // Super-tricky - read pointer out of data word of interface value. - // Saves ~25ns over the equivalent: - // return valToPointer(reflect.ValueOf(*i)) - return pointer{p: (*[2]unsafe.Pointer)(unsafe.Pointer(i))[1]} -} - -// toAddrPointer converts an interface to a pointer that points to -// the interface data. -func toAddrPointer(i *interface{}, isptr, deref bool) (p pointer) { - // Super-tricky - read or get the address of data word of interface value. - if isptr { - // The interface is of pointer type, thus it is a direct interface. - // The data word is the pointer data itself. We take its address. - p = pointer{p: unsafe.Pointer(uintptr(unsafe.Pointer(i)) + ptrSize)} - } else { - // The interface is not of pointer type. The data word is the pointer - // to the data. - p = pointer{p: (*[2]unsafe.Pointer)(unsafe.Pointer(i))[1]} - } - if deref { - p.p = *(*unsafe.Pointer)(p.p) - } - return p -} - -// valToPointer converts v to a pointer. v must be of pointer type. -func valToPointer(v reflect.Value) pointer { - return pointer{p: unsafe.Pointer(v.Pointer())} -} - -// offset converts from a pointer to a structure to a pointer to -// one of its fields. -func (p pointer) offset(f field) pointer { - // For safety, we should panic if !f.IsValid, however calling panic causes - // this to no longer be inlineable, which is a serious performance cost. 
- /* - if !f.IsValid() { - panic("invalid field") - } - */ - return pointer{p: unsafe.Pointer(uintptr(p.p) + uintptr(f))} -} - -func (p pointer) isNil() bool { - return p.p == nil -} - -func (p pointer) toInt64() *int64 { - return (*int64)(p.p) -} -func (p pointer) toInt64Ptr() **int64 { - return (**int64)(p.p) -} -func (p pointer) toInt64Slice() *[]int64 { - return (*[]int64)(p.p) -} -func (p pointer) toInt32() *int32 { - return (*int32)(p.p) -} - -// See pointer_reflect.go for why toInt32Ptr/Slice doesn't exist. -/* - func (p pointer) toInt32Ptr() **int32 { - return (**int32)(p.p) - } - func (p pointer) toInt32Slice() *[]int32 { - return (*[]int32)(p.p) - } -*/ -func (p pointer) getInt32Ptr() *int32 { - return *(**int32)(p.p) -} -func (p pointer) setInt32Ptr(v int32) { - *(**int32)(p.p) = &v -} - -// getInt32Slice loads a []int32 from p. -// The value returned is aliased with the original slice. -// This behavior differs from the implementation in pointer_reflect.go. -func (p pointer) getInt32Slice() []int32 { - return *(*[]int32)(p.p) -} - -// setInt32Slice stores a []int32 to p. -// The value set is aliased with the input slice. -// This behavior differs from the implementation in pointer_reflect.go. -func (p pointer) setInt32Slice(v []int32) { - *(*[]int32)(p.p) = v -} - -// TODO: Can we get rid of appendInt32Slice and use setInt32Slice instead? 
-func (p pointer) appendInt32Slice(v int32) { - s := (*[]int32)(p.p) - *s = append(*s, v) -} - -func (p pointer) toUint64() *uint64 { - return (*uint64)(p.p) -} -func (p pointer) toUint64Ptr() **uint64 { - return (**uint64)(p.p) -} -func (p pointer) toUint64Slice() *[]uint64 { - return (*[]uint64)(p.p) -} -func (p pointer) toUint32() *uint32 { - return (*uint32)(p.p) -} -func (p pointer) toUint32Ptr() **uint32 { - return (**uint32)(p.p) -} -func (p pointer) toUint32Slice() *[]uint32 { - return (*[]uint32)(p.p) -} -func (p pointer) toBool() *bool { - return (*bool)(p.p) -} -func (p pointer) toBoolPtr() **bool { - return (**bool)(p.p) -} -func (p pointer) toBoolSlice() *[]bool { - return (*[]bool)(p.p) -} -func (p pointer) toFloat64() *float64 { - return (*float64)(p.p) -} -func (p pointer) toFloat64Ptr() **float64 { - return (**float64)(p.p) -} -func (p pointer) toFloat64Slice() *[]float64 { - return (*[]float64)(p.p) -} -func (p pointer) toFloat32() *float32 { - return (*float32)(p.p) -} -func (p pointer) toFloat32Ptr() **float32 { - return (**float32)(p.p) -} -func (p pointer) toFloat32Slice() *[]float32 { - return (*[]float32)(p.p) -} -func (p pointer) toString() *string { - return (*string)(p.p) -} -func (p pointer) toStringPtr() **string { - return (**string)(p.p) -} -func (p pointer) toStringSlice() *[]string { - return (*[]string)(p.p) -} -func (p pointer) toBytes() *[]byte { - return (*[]byte)(p.p) -} -func (p pointer) toBytesSlice() *[][]byte { - return (*[][]byte)(p.p) -} -func (p pointer) toExtensions() *XXX_InternalExtensions { - return (*XXX_InternalExtensions)(p.p) -} -func (p pointer) toOldExtensions() *map[int32]Extension { - return (*map[int32]Extension)(p.p) -} - -// getPointerSlice loads []*T from p as a []pointer. -// The value returned is aliased with the original slice. -// This behavior differs from the implementation in pointer_reflect.go. 
-func (p pointer) getPointerSlice() []pointer { - // Super-tricky - p should point to a []*T where T is a - // message type. We load it as []pointer. - return *(*[]pointer)(p.p) -} - -// setPointerSlice stores []pointer into p as a []*T. -// The value set is aliased with the input slice. -// This behavior differs from the implementation in pointer_reflect.go. -func (p pointer) setPointerSlice(v []pointer) { - // Super-tricky - p should point to a []*T where T is a - // message type. We store it as []pointer. - *(*[]pointer)(p.p) = v -} - -// getPointer loads the pointer at p and returns it. -func (p pointer) getPointer() pointer { - return pointer{p: *(*unsafe.Pointer)(p.p)} -} - -// setPointer stores the pointer q at p. -func (p pointer) setPointer(q pointer) { - *(*unsafe.Pointer)(p.p) = q.p -} - -// append q to the slice pointed to by p. -func (p pointer) appendPointer(q pointer) { - s := (*[]unsafe.Pointer)(p.p) - *s = append(*s, q.p) -} - -// getInterfacePointer returns a pointer that points to the -// interface data of the interface pointed by p. -func (p pointer) getInterfacePointer() pointer { - // Super-tricky - read pointer out of data word of interface value. - return pointer{p: (*(*[2]unsafe.Pointer)(p.p))[1]} -} - -// asPointerTo returns a reflect.Value that is a pointer to an -// object of type t stored at p. 
-func (p pointer) asPointerTo(t reflect.Type) reflect.Value { - return reflect.NewAt(t, p.p) -} - -func atomicLoadUnmarshalInfo(p **unmarshalInfo) *unmarshalInfo { - return (*unmarshalInfo)(atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(p)))) -} -func atomicStoreUnmarshalInfo(p **unmarshalInfo, v *unmarshalInfo) { - atomic.StorePointer((*unsafe.Pointer)(unsafe.Pointer(p)), unsafe.Pointer(v)) -} -func atomicLoadMarshalInfo(p **marshalInfo) *marshalInfo { - return (*marshalInfo)(atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(p)))) -} -func atomicStoreMarshalInfo(p **marshalInfo, v *marshalInfo) { - atomic.StorePointer((*unsafe.Pointer)(unsafe.Pointer(p)), unsafe.Pointer(v)) -} -func atomicLoadMergeInfo(p **mergeInfo) *mergeInfo { - return (*mergeInfo)(atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(p)))) -} -func atomicStoreMergeInfo(p **mergeInfo, v *mergeInfo) { - atomic.StorePointer((*unsafe.Pointer)(unsafe.Pointer(p)), unsafe.Pointer(v)) -} -func atomicLoadDiscardInfo(p **discardInfo) *discardInfo { - return (*discardInfo)(atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(p)))) -} -func atomicStoreDiscardInfo(p **discardInfo, v *discardInfo) { - atomic.StorePointer((*unsafe.Pointer)(unsafe.Pointer(p)), unsafe.Pointer(v)) -} diff --git a/vendor/github.com/golang/protobuf/proto/properties.go b/vendor/github.com/golang/protobuf/proto/properties.go deleted file mode 100644 index a4b8c0c..0000000 --- a/vendor/github.com/golang/protobuf/proto/properties.go +++ /dev/null @@ -1,544 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2010 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. 
-// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -package proto - -/* - * Routines for encoding data into the wire format for protocol buffers. - */ - -import ( - "fmt" - "log" - "reflect" - "sort" - "strconv" - "strings" - "sync" -) - -const debug bool = false - -// Constants that identify the encoding of a value on the wire. -const ( - WireVarint = 0 - WireFixed64 = 1 - WireBytes = 2 - WireStartGroup = 3 - WireEndGroup = 4 - WireFixed32 = 5 -) - -// tagMap is an optimization over map[int]int for typical protocol buffer -// use-cases. Encoded protocol buffers are often in tag order with small tag -// numbers. -type tagMap struct { - fastTags []int - slowTags map[int]int -} - -// tagMapFastLimit is the upper bound on the tag number that will be stored in -// the tagMap slice rather than its map. 
-const tagMapFastLimit = 1024 - -func (p *tagMap) get(t int) (int, bool) { - if t > 0 && t < tagMapFastLimit { - if t >= len(p.fastTags) { - return 0, false - } - fi := p.fastTags[t] - return fi, fi >= 0 - } - fi, ok := p.slowTags[t] - return fi, ok -} - -func (p *tagMap) put(t int, fi int) { - if t > 0 && t < tagMapFastLimit { - for len(p.fastTags) < t+1 { - p.fastTags = append(p.fastTags, -1) - } - p.fastTags[t] = fi - return - } - if p.slowTags == nil { - p.slowTags = make(map[int]int) - } - p.slowTags[t] = fi -} - -// StructProperties represents properties for all the fields of a struct. -// decoderTags and decoderOrigNames should only be used by the decoder. -type StructProperties struct { - Prop []*Properties // properties for each field - reqCount int // required count - decoderTags tagMap // map from proto tag to struct field number - decoderOrigNames map[string]int // map from original name to struct field number - order []int // list of struct field numbers in tag order - - // OneofTypes contains information about the oneof fields in this message. - // It is keyed by the original name of a field. - OneofTypes map[string]*OneofProperties -} - -// OneofProperties represents information about a specific field in a oneof. -type OneofProperties struct { - Type reflect.Type // pointer to generated struct type for this oneof field - Field int // struct field number of the containing oneof in the message - Prop *Properties -} - -// Implement the sorting interface so we can sort the fields in tag order, as recommended by the spec. -// See encode.go, (*Buffer).enc_struct. - -func (sp *StructProperties) Len() int { return len(sp.order) } -func (sp *StructProperties) Less(i, j int) bool { - return sp.Prop[sp.order[i]].Tag < sp.Prop[sp.order[j]].Tag -} -func (sp *StructProperties) Swap(i, j int) { sp.order[i], sp.order[j] = sp.order[j], sp.order[i] } - -// Properties represents the protocol-specific behavior of a single struct field. 
-type Properties struct { - Name string // name of the field, for error messages - OrigName string // original name before protocol compiler (always set) - JSONName string // name to use for JSON; determined by protoc - Wire string - WireType int - Tag int - Required bool - Optional bool - Repeated bool - Packed bool // relevant for repeated primitives only - Enum string // set for enum types only - proto3 bool // whether this is known to be a proto3 field - oneof bool // whether this is a oneof field - - Default string // default value - HasDefault bool // whether an explicit default was provided - - stype reflect.Type // set for struct types only - sprop *StructProperties // set for struct types only - - mtype reflect.Type // set for map types only - MapKeyProp *Properties // set for map types only - MapValProp *Properties // set for map types only -} - -// String formats the properties in the protobuf struct field tag style. -func (p *Properties) String() string { - s := p.Wire - s += "," - s += strconv.Itoa(p.Tag) - if p.Required { - s += ",req" - } - if p.Optional { - s += ",opt" - } - if p.Repeated { - s += ",rep" - } - if p.Packed { - s += ",packed" - } - s += ",name=" + p.OrigName - if p.JSONName != p.OrigName { - s += ",json=" + p.JSONName - } - if p.proto3 { - s += ",proto3" - } - if p.oneof { - s += ",oneof" - } - if len(p.Enum) > 0 { - s += ",enum=" + p.Enum - } - if p.HasDefault { - s += ",def=" + p.Default - } - return s -} - -// Parse populates p by parsing a string in the protobuf struct field tag style. -func (p *Properties) Parse(s string) { - // "bytes,49,opt,name=foo,def=hello!" - fields := strings.Split(s, ",") // breaks def=, but handled below. 
- if len(fields) < 2 { - log.Printf("proto: tag has too few fields: %q", s) - return - } - - p.Wire = fields[0] - switch p.Wire { - case "varint": - p.WireType = WireVarint - case "fixed32": - p.WireType = WireFixed32 - case "fixed64": - p.WireType = WireFixed64 - case "zigzag32": - p.WireType = WireVarint - case "zigzag64": - p.WireType = WireVarint - case "bytes", "group": - p.WireType = WireBytes - // no numeric converter for non-numeric types - default: - log.Printf("proto: tag has unknown wire type: %q", s) - return - } - - var err error - p.Tag, err = strconv.Atoi(fields[1]) - if err != nil { - return - } - -outer: - for i := 2; i < len(fields); i++ { - f := fields[i] - switch { - case f == "req": - p.Required = true - case f == "opt": - p.Optional = true - case f == "rep": - p.Repeated = true - case f == "packed": - p.Packed = true - case strings.HasPrefix(f, "name="): - p.OrigName = f[5:] - case strings.HasPrefix(f, "json="): - p.JSONName = f[5:] - case strings.HasPrefix(f, "enum="): - p.Enum = f[5:] - case f == "proto3": - p.proto3 = true - case f == "oneof": - p.oneof = true - case strings.HasPrefix(f, "def="): - p.HasDefault = true - p.Default = f[4:] // rest of string - if i+1 < len(fields) { - // Commas aren't escaped, and def is always last. - p.Default += "," + strings.Join(fields[i+1:], ",") - break outer - } - } - } -} - -var protoMessageType = reflect.TypeOf((*Message)(nil)).Elem() - -// setFieldProps initializes the field properties for submessages and maps. 
-func (p *Properties) setFieldProps(typ reflect.Type, f *reflect.StructField, lockGetProp bool) { - switch t1 := typ; t1.Kind() { - case reflect.Ptr: - if t1.Elem().Kind() == reflect.Struct { - p.stype = t1.Elem() - } - - case reflect.Slice: - if t2 := t1.Elem(); t2.Kind() == reflect.Ptr && t2.Elem().Kind() == reflect.Struct { - p.stype = t2.Elem() - } - - case reflect.Map: - p.mtype = t1 - p.MapKeyProp = &Properties{} - p.MapKeyProp.init(reflect.PtrTo(p.mtype.Key()), "Key", f.Tag.Get("protobuf_key"), nil, lockGetProp) - p.MapValProp = &Properties{} - vtype := p.mtype.Elem() - if vtype.Kind() != reflect.Ptr && vtype.Kind() != reflect.Slice { - // The value type is not a message (*T) or bytes ([]byte), - // so we need encoders for the pointer to this type. - vtype = reflect.PtrTo(vtype) - } - p.MapValProp.init(vtype, "Value", f.Tag.Get("protobuf_val"), nil, lockGetProp) - } - - if p.stype != nil { - if lockGetProp { - p.sprop = GetProperties(p.stype) - } else { - p.sprop = getPropertiesLocked(p.stype) - } - } -} - -var ( - marshalerType = reflect.TypeOf((*Marshaler)(nil)).Elem() -) - -// Init populates the properties from a protocol buffer struct tag. -func (p *Properties) Init(typ reflect.Type, name, tag string, f *reflect.StructField) { - p.init(typ, name, tag, f, true) -} - -func (p *Properties) init(typ reflect.Type, name, tag string, f *reflect.StructField, lockGetProp bool) { - // "bytes,49,opt,def=hello!" - p.Name = name - p.OrigName = name - if tag == "" { - return - } - p.Parse(tag) - p.setFieldProps(typ, f, lockGetProp) -} - -var ( - propertiesMu sync.RWMutex - propertiesMap = make(map[reflect.Type]*StructProperties) -) - -// GetProperties returns the list of properties for the type represented by t. -// t must represent a generated struct type of a protocol message. 
-func GetProperties(t reflect.Type) *StructProperties { - if t.Kind() != reflect.Struct { - panic("proto: type must have kind struct") - } - - // Most calls to GetProperties in a long-running program will be - // retrieving details for types we have seen before. - propertiesMu.RLock() - sprop, ok := propertiesMap[t] - propertiesMu.RUnlock() - if ok { - return sprop - } - - propertiesMu.Lock() - sprop = getPropertiesLocked(t) - propertiesMu.Unlock() - return sprop -} - -type ( - oneofFuncsIface interface { - XXX_OneofFuncs() (func(Message, *Buffer) error, func(Message, int, int, *Buffer) (bool, error), func(Message) int, []interface{}) - } - oneofWrappersIface interface { - XXX_OneofWrappers() []interface{} - } -) - -// getPropertiesLocked requires that propertiesMu is held. -func getPropertiesLocked(t reflect.Type) *StructProperties { - if prop, ok := propertiesMap[t]; ok { - return prop - } - - prop := new(StructProperties) - // in case of recursive protos, fill this in now. - propertiesMap[t] = prop - - // build properties - prop.Prop = make([]*Properties, t.NumField()) - prop.order = make([]int, t.NumField()) - - for i := 0; i < t.NumField(); i++ { - f := t.Field(i) - p := new(Properties) - name := f.Name - p.init(f.Type, name, f.Tag.Get("protobuf"), &f, false) - - oneof := f.Tag.Get("protobuf_oneof") // special case - if oneof != "" { - // Oneof fields don't use the traditional protobuf tag. - p.OrigName = oneof - } - prop.Prop[i] = p - prop.order[i] = i - if debug { - print(i, " ", f.Name, " ", t.String(), " ") - if p.Tag > 0 { - print(p.String()) - } - print("\n") - } - } - - // Re-order prop.order. - sort.Sort(prop) - - var oots []interface{} - switch m := reflect.Zero(reflect.PtrTo(t)).Interface().(type) { - case oneofFuncsIface: - _, _, _, oots = m.XXX_OneofFuncs() - case oneofWrappersIface: - oots = m.XXX_OneofWrappers() - } - if len(oots) > 0 { - // Interpret oneof metadata. 
- prop.OneofTypes = make(map[string]*OneofProperties) - for _, oot := range oots { - oop := &OneofProperties{ - Type: reflect.ValueOf(oot).Type(), // *T - Prop: new(Properties), - } - sft := oop.Type.Elem().Field(0) - oop.Prop.Name = sft.Name - oop.Prop.Parse(sft.Tag.Get("protobuf")) - // There will be exactly one interface field that - // this new value is assignable to. - for i := 0; i < t.NumField(); i++ { - f := t.Field(i) - if f.Type.Kind() != reflect.Interface { - continue - } - if !oop.Type.AssignableTo(f.Type) { - continue - } - oop.Field = i - break - } - prop.OneofTypes[oop.Prop.OrigName] = oop - } - } - - // build required counts - // build tags - reqCount := 0 - prop.decoderOrigNames = make(map[string]int) - for i, p := range prop.Prop { - if strings.HasPrefix(p.Name, "XXX_") { - // Internal fields should not appear in tags/origNames maps. - // They are handled specially when encoding and decoding. - continue - } - if p.Required { - reqCount++ - } - prop.decoderTags.put(p.Tag, i) - prop.decoderOrigNames[p.OrigName] = i - } - prop.reqCount = reqCount - - return prop -} - -// A global registry of enum types. -// The generated code will register the generated maps by calling RegisterEnum. - -var enumValueMaps = make(map[string]map[string]int32) - -// RegisterEnum is called from the generated code to install the enum descriptor -// maps into the global table to aid parsing text format protocol buffers. -func RegisterEnum(typeName string, unusedNameMap map[int32]string, valueMap map[string]int32) { - if _, ok := enumValueMaps[typeName]; ok { - panic("proto: duplicate enum registered: " + typeName) - } - enumValueMaps[typeName] = valueMap -} - -// EnumValueMap returns the mapping from names to integers of the -// enum type enumType, or a nil if not found. -func EnumValueMap(enumType string) map[string]int32 { - return enumValueMaps[enumType] -} - -// A registry of all linked message types. -// The string is a fully-qualified proto name ("pkg.Message"). 
-var ( - protoTypedNils = make(map[string]Message) // a map from proto names to typed nil pointers - protoMapTypes = make(map[string]reflect.Type) // a map from proto names to map types - revProtoTypes = make(map[reflect.Type]string) -) - -// RegisterType is called from generated code and maps from the fully qualified -// proto name to the type (pointer to struct) of the protocol buffer. -func RegisterType(x Message, name string) { - if _, ok := protoTypedNils[name]; ok { - // TODO: Some day, make this a panic. - log.Printf("proto: duplicate proto type registered: %s", name) - return - } - t := reflect.TypeOf(x) - if v := reflect.ValueOf(x); v.Kind() == reflect.Ptr && v.Pointer() == 0 { - // Generated code always calls RegisterType with nil x. - // This check is just for extra safety. - protoTypedNils[name] = x - } else { - protoTypedNils[name] = reflect.Zero(t).Interface().(Message) - } - revProtoTypes[t] = name -} - -// RegisterMapType is called from generated code and maps from the fully qualified -// proto name to the native map type of the proto map definition. -func RegisterMapType(x interface{}, name string) { - if reflect.TypeOf(x).Kind() != reflect.Map { - panic(fmt.Sprintf("RegisterMapType(%T, %q); want map", x, name)) - } - if _, ok := protoMapTypes[name]; ok { - log.Printf("proto: duplicate proto type registered: %s", name) - return - } - t := reflect.TypeOf(x) - protoMapTypes[name] = t - revProtoTypes[t] = name -} - -// MessageName returns the fully-qualified proto name for the given message type. -func MessageName(x Message) string { - type xname interface { - XXX_MessageName() string - } - if m, ok := x.(xname); ok { - return m.XXX_MessageName() - } - return revProtoTypes[reflect.TypeOf(x)] -} - -// MessageType returns the message type (pointer to struct) for a named message. -// The type is not guaranteed to implement proto.Message if the name refers to a -// map entry. 
-func MessageType(name string) reflect.Type { - if t, ok := protoTypedNils[name]; ok { - return reflect.TypeOf(t) - } - return protoMapTypes[name] -} - -// A registry of all linked proto files. -var ( - protoFiles = make(map[string][]byte) // file name => fileDescriptor -) - -// RegisterFile is called from generated code and maps from the -// full file name of a .proto file to its compressed FileDescriptorProto. -func RegisterFile(filename string, fileDescriptor []byte) { - protoFiles[filename] = fileDescriptor -} - -// FileDescriptor returns the compressed FileDescriptorProto for a .proto file. -func FileDescriptor(filename string) []byte { return protoFiles[filename] } diff --git a/vendor/github.com/golang/protobuf/proto/table_marshal.go b/vendor/github.com/golang/protobuf/proto/table_marshal.go deleted file mode 100644 index 5cb11fa..0000000 --- a/vendor/github.com/golang/protobuf/proto/table_marshal.go +++ /dev/null @@ -1,2776 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2016 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. 
-// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -package proto - -import ( - "errors" - "fmt" - "math" - "reflect" - "sort" - "strconv" - "strings" - "sync" - "sync/atomic" - "unicode/utf8" -) - -// a sizer takes a pointer to a field and the size of its tag, computes the size of -// the encoded data. -type sizer func(pointer, int) int - -// a marshaler takes a byte slice, a pointer to a field, and its tag (in wire format), -// marshals the field to the end of the slice, returns the slice and error (if any). -type marshaler func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) - -// marshalInfo is the information used for marshaling a message. 
-type marshalInfo struct { - typ reflect.Type - fields []*marshalFieldInfo - unrecognized field // offset of XXX_unrecognized - extensions field // offset of XXX_InternalExtensions - v1extensions field // offset of XXX_extensions - sizecache field // offset of XXX_sizecache - initialized int32 // 0 -- only typ is set, 1 -- fully initialized - messageset bool // uses message set wire format - hasmarshaler bool // has custom marshaler - sync.RWMutex // protect extElems map, also for initialization - extElems map[int32]*marshalElemInfo // info of extension elements -} - -// marshalFieldInfo is the information used for marshaling a field of a message. -type marshalFieldInfo struct { - field field - wiretag uint64 // tag in wire format - tagsize int // size of tag in wire format - sizer sizer - marshaler marshaler - isPointer bool - required bool // field is required - name string // name of the field, for error reporting - oneofElems map[reflect.Type]*marshalElemInfo // info of oneof elements -} - -// marshalElemInfo is the information used for marshaling an extension or oneof element. -type marshalElemInfo struct { - wiretag uint64 // tag in wire format - tagsize int // size of tag in wire format - sizer sizer - marshaler marshaler - isptr bool // elem is pointer typed, thus interface of this type is a direct interface (extension only) - deref bool // dereference the pointer before operating on it; implies isptr -} - -var ( - marshalInfoMap = map[reflect.Type]*marshalInfo{} - marshalInfoLock sync.Mutex -) - -// getMarshalInfo returns the information to marshal a given type of message. -// The info it returns may not necessarily initialized. -// t is the type of the message (NOT the pointer to it). 
-func getMarshalInfo(t reflect.Type) *marshalInfo { - marshalInfoLock.Lock() - u, ok := marshalInfoMap[t] - if !ok { - u = &marshalInfo{typ: t} - marshalInfoMap[t] = u - } - marshalInfoLock.Unlock() - return u -} - -// Size is the entry point from generated code, -// and should be ONLY called by generated code. -// It computes the size of encoded data of msg. -// a is a pointer to a place to store cached marshal info. -func (a *InternalMessageInfo) Size(msg Message) int { - u := getMessageMarshalInfo(msg, a) - ptr := toPointer(&msg) - if ptr.isNil() { - // We get here if msg is a typed nil ((*SomeMessage)(nil)), - // so it satisfies the interface, and msg == nil wouldn't - // catch it. We don't want crash in this case. - return 0 - } - return u.size(ptr) -} - -// Marshal is the entry point from generated code, -// and should be ONLY called by generated code. -// It marshals msg to the end of b. -// a is a pointer to a place to store cached marshal info. -func (a *InternalMessageInfo) Marshal(b []byte, msg Message, deterministic bool) ([]byte, error) { - u := getMessageMarshalInfo(msg, a) - ptr := toPointer(&msg) - if ptr.isNil() { - // We get here if msg is a typed nil ((*SomeMessage)(nil)), - // so it satisfies the interface, and msg == nil wouldn't - // catch it. We don't want crash in this case. - return b, ErrNil - } - return u.marshal(b, ptr, deterministic) -} - -func getMessageMarshalInfo(msg interface{}, a *InternalMessageInfo) *marshalInfo { - // u := a.marshal, but atomically. - // We use an atomic here to ensure memory consistency. - u := atomicLoadMarshalInfo(&a.marshal) - if u == nil { - // Get marshal information from type of message. - t := reflect.ValueOf(msg).Type() - if t.Kind() != reflect.Ptr { - panic(fmt.Sprintf("cannot handle non-pointer message type %v", t)) - } - u = getMarshalInfo(t.Elem()) - // Store it in the cache for later users. - // a.marshal = u, but atomically. 
- atomicStoreMarshalInfo(&a.marshal, u) - } - return u -} - -// size is the main function to compute the size of the encoded data of a message. -// ptr is the pointer to the message. -func (u *marshalInfo) size(ptr pointer) int { - if atomic.LoadInt32(&u.initialized) == 0 { - u.computeMarshalInfo() - } - - // If the message can marshal itself, let it do it, for compatibility. - // NOTE: This is not efficient. - if u.hasmarshaler { - m := ptr.asPointerTo(u.typ).Interface().(Marshaler) - b, _ := m.Marshal() - return len(b) - } - - n := 0 - for _, f := range u.fields { - if f.isPointer && ptr.offset(f.field).getPointer().isNil() { - // nil pointer always marshals to nothing - continue - } - n += f.sizer(ptr.offset(f.field), f.tagsize) - } - if u.extensions.IsValid() { - e := ptr.offset(u.extensions).toExtensions() - if u.messageset { - n += u.sizeMessageSet(e) - } else { - n += u.sizeExtensions(e) - } - } - if u.v1extensions.IsValid() { - m := *ptr.offset(u.v1extensions).toOldExtensions() - n += u.sizeV1Extensions(m) - } - if u.unrecognized.IsValid() { - s := *ptr.offset(u.unrecognized).toBytes() - n += len(s) - } - // cache the result for use in marshal - if u.sizecache.IsValid() { - atomic.StoreInt32(ptr.offset(u.sizecache).toInt32(), int32(n)) - } - return n -} - -// cachedsize gets the size from cache. If there is no cache (i.e. message is not generated), -// fall back to compute the size. -func (u *marshalInfo) cachedsize(ptr pointer) int { - if u.sizecache.IsValid() { - return int(atomic.LoadInt32(ptr.offset(u.sizecache).toInt32())) - } - return u.size(ptr) -} - -// marshal is the main function to marshal a message. It takes a byte slice and appends -// the encoded data to the end of the slice, returns the slice and error (if any). -// ptr is the pointer to the message. -// If deterministic is true, map is marshaled in deterministic order. 
-func (u *marshalInfo) marshal(b []byte, ptr pointer, deterministic bool) ([]byte, error) { - if atomic.LoadInt32(&u.initialized) == 0 { - u.computeMarshalInfo() - } - - // If the message can marshal itself, let it do it, for compatibility. - // NOTE: This is not efficient. - if u.hasmarshaler { - m := ptr.asPointerTo(u.typ).Interface().(Marshaler) - b1, err := m.Marshal() - b = append(b, b1...) - return b, err - } - - var err, errLater error - // The old marshaler encodes extensions at beginning. - if u.extensions.IsValid() { - e := ptr.offset(u.extensions).toExtensions() - if u.messageset { - b, err = u.appendMessageSet(b, e, deterministic) - } else { - b, err = u.appendExtensions(b, e, deterministic) - } - if err != nil { - return b, err - } - } - if u.v1extensions.IsValid() { - m := *ptr.offset(u.v1extensions).toOldExtensions() - b, err = u.appendV1Extensions(b, m, deterministic) - if err != nil { - return b, err - } - } - for _, f := range u.fields { - if f.required { - if ptr.offset(f.field).getPointer().isNil() { - // Required field is not set. - // We record the error but keep going, to give a complete marshaling. - if errLater == nil { - errLater = &RequiredNotSetError{f.name} - } - continue - } - } - if f.isPointer && ptr.offset(f.field).getPointer().isNil() { - // nil pointer always marshals to nothing - continue - } - b, err = f.marshaler(b, ptr.offset(f.field), f.wiretag, deterministic) - if err != nil { - if err1, ok := err.(*RequiredNotSetError); ok { - // Required field in submessage is not set. - // We record the error but keep going, to give a complete marshaling. - if errLater == nil { - errLater = &RequiredNotSetError{f.name + "." + err1.field} - } - continue - } - if err == errRepeatedHasNil { - err = errors.New("proto: repeated field " + f.name + " has nil element") - } - if err == errInvalidUTF8 { - if errLater == nil { - fullName := revProtoTypes[reflect.PtrTo(u.typ)] + "." 
+ f.name - errLater = &invalidUTF8Error{fullName} - } - continue - } - return b, err - } - } - if u.unrecognized.IsValid() { - s := *ptr.offset(u.unrecognized).toBytes() - b = append(b, s...) - } - return b, errLater -} - -// computeMarshalInfo initializes the marshal info. -func (u *marshalInfo) computeMarshalInfo() { - u.Lock() - defer u.Unlock() - if u.initialized != 0 { // non-atomic read is ok as it is protected by the lock - return - } - - t := u.typ - u.unrecognized = invalidField - u.extensions = invalidField - u.v1extensions = invalidField - u.sizecache = invalidField - - // If the message can marshal itself, let it do it, for compatibility. - // NOTE: This is not efficient. - if reflect.PtrTo(t).Implements(marshalerType) { - u.hasmarshaler = true - atomic.StoreInt32(&u.initialized, 1) - return - } - - // get oneof implementers - var oneofImplementers []interface{} - switch m := reflect.Zero(reflect.PtrTo(t)).Interface().(type) { - case oneofFuncsIface: - _, _, _, oneofImplementers = m.XXX_OneofFuncs() - case oneofWrappersIface: - oneofImplementers = m.XXX_OneofWrappers() - } - - n := t.NumField() - - // deal with XXX fields first - for i := 0; i < t.NumField(); i++ { - f := t.Field(i) - if !strings.HasPrefix(f.Name, "XXX_") { - continue - } - switch f.Name { - case "XXX_sizecache": - u.sizecache = toField(&f) - case "XXX_unrecognized": - u.unrecognized = toField(&f) - case "XXX_InternalExtensions": - u.extensions = toField(&f) - u.messageset = f.Tag.Get("protobuf_messageset") == "1" - case "XXX_extensions": - u.v1extensions = toField(&f) - case "XXX_NoUnkeyedLiteral": - // nothing to do - default: - panic("unknown XXX field: " + f.Name) - } - n-- - } - - // normal fields - fields := make([]marshalFieldInfo, n) // batch allocation - u.fields = make([]*marshalFieldInfo, 0, n) - for i, j := 0, 0; i < t.NumField(); i++ { - f := t.Field(i) - - if strings.HasPrefix(f.Name, "XXX_") { - continue - } - field := &fields[j] - j++ - field.name = f.Name - u.fields = 
append(u.fields, field) - if f.Tag.Get("protobuf_oneof") != "" { - field.computeOneofFieldInfo(&f, oneofImplementers) - continue - } - if f.Tag.Get("protobuf") == "" { - // field has no tag (not in generated message), ignore it - u.fields = u.fields[:len(u.fields)-1] - j-- - continue - } - field.computeMarshalFieldInfo(&f) - } - - // fields are marshaled in tag order on the wire. - sort.Sort(byTag(u.fields)) - - atomic.StoreInt32(&u.initialized, 1) -} - -// helper for sorting fields by tag -type byTag []*marshalFieldInfo - -func (a byTag) Len() int { return len(a) } -func (a byTag) Swap(i, j int) { a[i], a[j] = a[j], a[i] } -func (a byTag) Less(i, j int) bool { return a[i].wiretag < a[j].wiretag } - -// getExtElemInfo returns the information to marshal an extension element. -// The info it returns is initialized. -func (u *marshalInfo) getExtElemInfo(desc *ExtensionDesc) *marshalElemInfo { - // get from cache first - u.RLock() - e, ok := u.extElems[desc.Field] - u.RUnlock() - if ok { - return e - } - - t := reflect.TypeOf(desc.ExtensionType) // pointer or slice to basic type or struct - tags := strings.Split(desc.Tag, ",") - tag, err := strconv.Atoi(tags[1]) - if err != nil { - panic("tag is not an integer") - } - wt := wiretype(tags[0]) - if t.Kind() == reflect.Ptr && t.Elem().Kind() != reflect.Struct { - t = t.Elem() - } - sizer, marshaler := typeMarshaler(t, tags, false, false) - var deref bool - if t.Kind() == reflect.Slice && t.Elem().Kind() != reflect.Uint8 { - t = reflect.PtrTo(t) - deref = true - } - e = &marshalElemInfo{ - wiretag: uint64(tag)<<3 | wt, - tagsize: SizeVarint(uint64(tag) << 3), - sizer: sizer, - marshaler: marshaler, - isptr: t.Kind() == reflect.Ptr, - deref: deref, - } - - // update cache - u.Lock() - if u.extElems == nil { - u.extElems = make(map[int32]*marshalElemInfo) - } - u.extElems[desc.Field] = e - u.Unlock() - return e -} - -// computeMarshalFieldInfo fills up the information to marshal a field. 
-func (fi *marshalFieldInfo) computeMarshalFieldInfo(f *reflect.StructField) { - // parse protobuf tag of the field. - // tag has format of "bytes,49,opt,name=foo,def=hello!" - tags := strings.Split(f.Tag.Get("protobuf"), ",") - if tags[0] == "" { - return - } - tag, err := strconv.Atoi(tags[1]) - if err != nil { - panic("tag is not an integer") - } - wt := wiretype(tags[0]) - if tags[2] == "req" { - fi.required = true - } - fi.setTag(f, tag, wt) - fi.setMarshaler(f, tags) -} - -func (fi *marshalFieldInfo) computeOneofFieldInfo(f *reflect.StructField, oneofImplementers []interface{}) { - fi.field = toField(f) - fi.wiretag = math.MaxInt32 // Use a large tag number, make oneofs sorted at the end. This tag will not appear on the wire. - fi.isPointer = true - fi.sizer, fi.marshaler = makeOneOfMarshaler(fi, f) - fi.oneofElems = make(map[reflect.Type]*marshalElemInfo) - - ityp := f.Type // interface type - for _, o := range oneofImplementers { - t := reflect.TypeOf(o) - if !t.Implements(ityp) { - continue - } - sf := t.Elem().Field(0) // oneof implementer is a struct with a single field - tags := strings.Split(sf.Tag.Get("protobuf"), ",") - tag, err := strconv.Atoi(tags[1]) - if err != nil { - panic("tag is not an integer") - } - wt := wiretype(tags[0]) - sizer, marshaler := typeMarshaler(sf.Type, tags, false, true) // oneof should not omit any zero value - fi.oneofElems[t.Elem()] = &marshalElemInfo{ - wiretag: uint64(tag)<<3 | wt, - tagsize: SizeVarint(uint64(tag) << 3), - sizer: sizer, - marshaler: marshaler, - } - } -} - -// wiretype returns the wire encoding of the type. 
-func wiretype(encoding string) uint64 { - switch encoding { - case "fixed32": - return WireFixed32 - case "fixed64": - return WireFixed64 - case "varint", "zigzag32", "zigzag64": - return WireVarint - case "bytes": - return WireBytes - case "group": - return WireStartGroup - } - panic("unknown wire type " + encoding) -} - -// setTag fills up the tag (in wire format) and its size in the info of a field. -func (fi *marshalFieldInfo) setTag(f *reflect.StructField, tag int, wt uint64) { - fi.field = toField(f) - fi.wiretag = uint64(tag)<<3 | wt - fi.tagsize = SizeVarint(uint64(tag) << 3) -} - -// setMarshaler fills up the sizer and marshaler in the info of a field. -func (fi *marshalFieldInfo) setMarshaler(f *reflect.StructField, tags []string) { - switch f.Type.Kind() { - case reflect.Map: - // map field - fi.isPointer = true - fi.sizer, fi.marshaler = makeMapMarshaler(f) - return - case reflect.Ptr, reflect.Slice: - fi.isPointer = true - } - fi.sizer, fi.marshaler = typeMarshaler(f.Type, tags, true, false) -} - -// typeMarshaler returns the sizer and marshaler of a given field. -// t is the type of the field. -// tags is the generated "protobuf" tag of the field. -// If nozero is true, zero value is not marshaled to the wire. -// If oneof is true, it is a oneof field. 
-func typeMarshaler(t reflect.Type, tags []string, nozero, oneof bool) (sizer, marshaler) { - encoding := tags[0] - - pointer := false - slice := false - if t.Kind() == reflect.Slice && t.Elem().Kind() != reflect.Uint8 { - slice = true - t = t.Elem() - } - if t.Kind() == reflect.Ptr { - pointer = true - t = t.Elem() - } - - packed := false - proto3 := false - validateUTF8 := true - for i := 2; i < len(tags); i++ { - if tags[i] == "packed" { - packed = true - } - if tags[i] == "proto3" { - proto3 = true - } - } - validateUTF8 = validateUTF8 && proto3 - - switch t.Kind() { - case reflect.Bool: - if pointer { - return sizeBoolPtr, appendBoolPtr - } - if slice { - if packed { - return sizeBoolPackedSlice, appendBoolPackedSlice - } - return sizeBoolSlice, appendBoolSlice - } - if nozero { - return sizeBoolValueNoZero, appendBoolValueNoZero - } - return sizeBoolValue, appendBoolValue - case reflect.Uint32: - switch encoding { - case "fixed32": - if pointer { - return sizeFixed32Ptr, appendFixed32Ptr - } - if slice { - if packed { - return sizeFixed32PackedSlice, appendFixed32PackedSlice - } - return sizeFixed32Slice, appendFixed32Slice - } - if nozero { - return sizeFixed32ValueNoZero, appendFixed32ValueNoZero - } - return sizeFixed32Value, appendFixed32Value - case "varint": - if pointer { - return sizeVarint32Ptr, appendVarint32Ptr - } - if slice { - if packed { - return sizeVarint32PackedSlice, appendVarint32PackedSlice - } - return sizeVarint32Slice, appendVarint32Slice - } - if nozero { - return sizeVarint32ValueNoZero, appendVarint32ValueNoZero - } - return sizeVarint32Value, appendVarint32Value - } - case reflect.Int32: - switch encoding { - case "fixed32": - if pointer { - return sizeFixedS32Ptr, appendFixedS32Ptr - } - if slice { - if packed { - return sizeFixedS32PackedSlice, appendFixedS32PackedSlice - } - return sizeFixedS32Slice, appendFixedS32Slice - } - if nozero { - return sizeFixedS32ValueNoZero, appendFixedS32ValueNoZero - } - return sizeFixedS32Value, 
appendFixedS32Value - case "varint": - if pointer { - return sizeVarintS32Ptr, appendVarintS32Ptr - } - if slice { - if packed { - return sizeVarintS32PackedSlice, appendVarintS32PackedSlice - } - return sizeVarintS32Slice, appendVarintS32Slice - } - if nozero { - return sizeVarintS32ValueNoZero, appendVarintS32ValueNoZero - } - return sizeVarintS32Value, appendVarintS32Value - case "zigzag32": - if pointer { - return sizeZigzag32Ptr, appendZigzag32Ptr - } - if slice { - if packed { - return sizeZigzag32PackedSlice, appendZigzag32PackedSlice - } - return sizeZigzag32Slice, appendZigzag32Slice - } - if nozero { - return sizeZigzag32ValueNoZero, appendZigzag32ValueNoZero - } - return sizeZigzag32Value, appendZigzag32Value - } - case reflect.Uint64: - switch encoding { - case "fixed64": - if pointer { - return sizeFixed64Ptr, appendFixed64Ptr - } - if slice { - if packed { - return sizeFixed64PackedSlice, appendFixed64PackedSlice - } - return sizeFixed64Slice, appendFixed64Slice - } - if nozero { - return sizeFixed64ValueNoZero, appendFixed64ValueNoZero - } - return sizeFixed64Value, appendFixed64Value - case "varint": - if pointer { - return sizeVarint64Ptr, appendVarint64Ptr - } - if slice { - if packed { - return sizeVarint64PackedSlice, appendVarint64PackedSlice - } - return sizeVarint64Slice, appendVarint64Slice - } - if nozero { - return sizeVarint64ValueNoZero, appendVarint64ValueNoZero - } - return sizeVarint64Value, appendVarint64Value - } - case reflect.Int64: - switch encoding { - case "fixed64": - if pointer { - return sizeFixedS64Ptr, appendFixedS64Ptr - } - if slice { - if packed { - return sizeFixedS64PackedSlice, appendFixedS64PackedSlice - } - return sizeFixedS64Slice, appendFixedS64Slice - } - if nozero { - return sizeFixedS64ValueNoZero, appendFixedS64ValueNoZero - } - return sizeFixedS64Value, appendFixedS64Value - case "varint": - if pointer { - return sizeVarintS64Ptr, appendVarintS64Ptr - } - if slice { - if packed { - return 
sizeVarintS64PackedSlice, appendVarintS64PackedSlice - } - return sizeVarintS64Slice, appendVarintS64Slice - } - if nozero { - return sizeVarintS64ValueNoZero, appendVarintS64ValueNoZero - } - return sizeVarintS64Value, appendVarintS64Value - case "zigzag64": - if pointer { - return sizeZigzag64Ptr, appendZigzag64Ptr - } - if slice { - if packed { - return sizeZigzag64PackedSlice, appendZigzag64PackedSlice - } - return sizeZigzag64Slice, appendZigzag64Slice - } - if nozero { - return sizeZigzag64ValueNoZero, appendZigzag64ValueNoZero - } - return sizeZigzag64Value, appendZigzag64Value - } - case reflect.Float32: - if pointer { - return sizeFloat32Ptr, appendFloat32Ptr - } - if slice { - if packed { - return sizeFloat32PackedSlice, appendFloat32PackedSlice - } - return sizeFloat32Slice, appendFloat32Slice - } - if nozero { - return sizeFloat32ValueNoZero, appendFloat32ValueNoZero - } - return sizeFloat32Value, appendFloat32Value - case reflect.Float64: - if pointer { - return sizeFloat64Ptr, appendFloat64Ptr - } - if slice { - if packed { - return sizeFloat64PackedSlice, appendFloat64PackedSlice - } - return sizeFloat64Slice, appendFloat64Slice - } - if nozero { - return sizeFloat64ValueNoZero, appendFloat64ValueNoZero - } - return sizeFloat64Value, appendFloat64Value - case reflect.String: - if validateUTF8 { - if pointer { - return sizeStringPtr, appendUTF8StringPtr - } - if slice { - return sizeStringSlice, appendUTF8StringSlice - } - if nozero { - return sizeStringValueNoZero, appendUTF8StringValueNoZero - } - return sizeStringValue, appendUTF8StringValue - } - if pointer { - return sizeStringPtr, appendStringPtr - } - if slice { - return sizeStringSlice, appendStringSlice - } - if nozero { - return sizeStringValueNoZero, appendStringValueNoZero - } - return sizeStringValue, appendStringValue - case reflect.Slice: - if slice { - return sizeBytesSlice, appendBytesSlice - } - if oneof { - // Oneof bytes field may also have "proto3" tag. 
- // We want to marshal it as a oneof field. Do this - // check before the proto3 check. - return sizeBytesOneof, appendBytesOneof - } - if proto3 { - return sizeBytes3, appendBytes3 - } - return sizeBytes, appendBytes - case reflect.Struct: - switch encoding { - case "group": - if slice { - return makeGroupSliceMarshaler(getMarshalInfo(t)) - } - return makeGroupMarshaler(getMarshalInfo(t)) - case "bytes": - if slice { - return makeMessageSliceMarshaler(getMarshalInfo(t)) - } - return makeMessageMarshaler(getMarshalInfo(t)) - } - } - panic(fmt.Sprintf("unknown or mismatched type: type: %v, wire type: %v", t, encoding)) -} - -// Below are functions to size/marshal a specific type of a field. -// They are stored in the field's info, and called by function pointers. -// They have type sizer or marshaler. - -func sizeFixed32Value(_ pointer, tagsize int) int { - return 4 + tagsize -} -func sizeFixed32ValueNoZero(ptr pointer, tagsize int) int { - v := *ptr.toUint32() - if v == 0 { - return 0 - } - return 4 + tagsize -} -func sizeFixed32Ptr(ptr pointer, tagsize int) int { - p := *ptr.toUint32Ptr() - if p == nil { - return 0 - } - return 4 + tagsize -} -func sizeFixed32Slice(ptr pointer, tagsize int) int { - s := *ptr.toUint32Slice() - return (4 + tagsize) * len(s) -} -func sizeFixed32PackedSlice(ptr pointer, tagsize int) int { - s := *ptr.toUint32Slice() - if len(s) == 0 { - return 0 - } - return 4*len(s) + SizeVarint(uint64(4*len(s))) + tagsize -} -func sizeFixedS32Value(_ pointer, tagsize int) int { - return 4 + tagsize -} -func sizeFixedS32ValueNoZero(ptr pointer, tagsize int) int { - v := *ptr.toInt32() - if v == 0 { - return 0 - } - return 4 + tagsize -} -func sizeFixedS32Ptr(ptr pointer, tagsize int) int { - p := ptr.getInt32Ptr() - if p == nil { - return 0 - } - return 4 + tagsize -} -func sizeFixedS32Slice(ptr pointer, tagsize int) int { - s := ptr.getInt32Slice() - return (4 + tagsize) * len(s) -} -func sizeFixedS32PackedSlice(ptr pointer, tagsize int) int { - s 
:= ptr.getInt32Slice() - if len(s) == 0 { - return 0 - } - return 4*len(s) + SizeVarint(uint64(4*len(s))) + tagsize -} -func sizeFloat32Value(_ pointer, tagsize int) int { - return 4 + tagsize -} -func sizeFloat32ValueNoZero(ptr pointer, tagsize int) int { - v := math.Float32bits(*ptr.toFloat32()) - if v == 0 { - return 0 - } - return 4 + tagsize -} -func sizeFloat32Ptr(ptr pointer, tagsize int) int { - p := *ptr.toFloat32Ptr() - if p == nil { - return 0 - } - return 4 + tagsize -} -func sizeFloat32Slice(ptr pointer, tagsize int) int { - s := *ptr.toFloat32Slice() - return (4 + tagsize) * len(s) -} -func sizeFloat32PackedSlice(ptr pointer, tagsize int) int { - s := *ptr.toFloat32Slice() - if len(s) == 0 { - return 0 - } - return 4*len(s) + SizeVarint(uint64(4*len(s))) + tagsize -} -func sizeFixed64Value(_ pointer, tagsize int) int { - return 8 + tagsize -} -func sizeFixed64ValueNoZero(ptr pointer, tagsize int) int { - v := *ptr.toUint64() - if v == 0 { - return 0 - } - return 8 + tagsize -} -func sizeFixed64Ptr(ptr pointer, tagsize int) int { - p := *ptr.toUint64Ptr() - if p == nil { - return 0 - } - return 8 + tagsize -} -func sizeFixed64Slice(ptr pointer, tagsize int) int { - s := *ptr.toUint64Slice() - return (8 + tagsize) * len(s) -} -func sizeFixed64PackedSlice(ptr pointer, tagsize int) int { - s := *ptr.toUint64Slice() - if len(s) == 0 { - return 0 - } - return 8*len(s) + SizeVarint(uint64(8*len(s))) + tagsize -} -func sizeFixedS64Value(_ pointer, tagsize int) int { - return 8 + tagsize -} -func sizeFixedS64ValueNoZero(ptr pointer, tagsize int) int { - v := *ptr.toInt64() - if v == 0 { - return 0 - } - return 8 + tagsize -} -func sizeFixedS64Ptr(ptr pointer, tagsize int) int { - p := *ptr.toInt64Ptr() - if p == nil { - return 0 - } - return 8 + tagsize -} -func sizeFixedS64Slice(ptr pointer, tagsize int) int { - s := *ptr.toInt64Slice() - return (8 + tagsize) * len(s) -} -func sizeFixedS64PackedSlice(ptr pointer, tagsize int) int { - s := *ptr.toInt64Slice() 
- if len(s) == 0 { - return 0 - } - return 8*len(s) + SizeVarint(uint64(8*len(s))) + tagsize -} -func sizeFloat64Value(_ pointer, tagsize int) int { - return 8 + tagsize -} -func sizeFloat64ValueNoZero(ptr pointer, tagsize int) int { - v := math.Float64bits(*ptr.toFloat64()) - if v == 0 { - return 0 - } - return 8 + tagsize -} -func sizeFloat64Ptr(ptr pointer, tagsize int) int { - p := *ptr.toFloat64Ptr() - if p == nil { - return 0 - } - return 8 + tagsize -} -func sizeFloat64Slice(ptr pointer, tagsize int) int { - s := *ptr.toFloat64Slice() - return (8 + tagsize) * len(s) -} -func sizeFloat64PackedSlice(ptr pointer, tagsize int) int { - s := *ptr.toFloat64Slice() - if len(s) == 0 { - return 0 - } - return 8*len(s) + SizeVarint(uint64(8*len(s))) + tagsize -} -func sizeVarint32Value(ptr pointer, tagsize int) int { - v := *ptr.toUint32() - return SizeVarint(uint64(v)) + tagsize -} -func sizeVarint32ValueNoZero(ptr pointer, tagsize int) int { - v := *ptr.toUint32() - if v == 0 { - return 0 - } - return SizeVarint(uint64(v)) + tagsize -} -func sizeVarint32Ptr(ptr pointer, tagsize int) int { - p := *ptr.toUint32Ptr() - if p == nil { - return 0 - } - return SizeVarint(uint64(*p)) + tagsize -} -func sizeVarint32Slice(ptr pointer, tagsize int) int { - s := *ptr.toUint32Slice() - n := 0 - for _, v := range s { - n += SizeVarint(uint64(v)) + tagsize - } - return n -} -func sizeVarint32PackedSlice(ptr pointer, tagsize int) int { - s := *ptr.toUint32Slice() - if len(s) == 0 { - return 0 - } - n := 0 - for _, v := range s { - n += SizeVarint(uint64(v)) - } - return n + SizeVarint(uint64(n)) + tagsize -} -func sizeVarintS32Value(ptr pointer, tagsize int) int { - v := *ptr.toInt32() - return SizeVarint(uint64(v)) + tagsize -} -func sizeVarintS32ValueNoZero(ptr pointer, tagsize int) int { - v := *ptr.toInt32() - if v == 0 { - return 0 - } - return SizeVarint(uint64(v)) + tagsize -} -func sizeVarintS32Ptr(ptr pointer, tagsize int) int { - p := ptr.getInt32Ptr() - if p == nil { - 
return 0 - } - return SizeVarint(uint64(*p)) + tagsize -} -func sizeVarintS32Slice(ptr pointer, tagsize int) int { - s := ptr.getInt32Slice() - n := 0 - for _, v := range s { - n += SizeVarint(uint64(v)) + tagsize - } - return n -} -func sizeVarintS32PackedSlice(ptr pointer, tagsize int) int { - s := ptr.getInt32Slice() - if len(s) == 0 { - return 0 - } - n := 0 - for _, v := range s { - n += SizeVarint(uint64(v)) - } - return n + SizeVarint(uint64(n)) + tagsize -} -func sizeVarint64Value(ptr pointer, tagsize int) int { - v := *ptr.toUint64() - return SizeVarint(v) + tagsize -} -func sizeVarint64ValueNoZero(ptr pointer, tagsize int) int { - v := *ptr.toUint64() - if v == 0 { - return 0 - } - return SizeVarint(v) + tagsize -} -func sizeVarint64Ptr(ptr pointer, tagsize int) int { - p := *ptr.toUint64Ptr() - if p == nil { - return 0 - } - return SizeVarint(*p) + tagsize -} -func sizeVarint64Slice(ptr pointer, tagsize int) int { - s := *ptr.toUint64Slice() - n := 0 - for _, v := range s { - n += SizeVarint(v) + tagsize - } - return n -} -func sizeVarint64PackedSlice(ptr pointer, tagsize int) int { - s := *ptr.toUint64Slice() - if len(s) == 0 { - return 0 - } - n := 0 - for _, v := range s { - n += SizeVarint(v) - } - return n + SizeVarint(uint64(n)) + tagsize -} -func sizeVarintS64Value(ptr pointer, tagsize int) int { - v := *ptr.toInt64() - return SizeVarint(uint64(v)) + tagsize -} -func sizeVarintS64ValueNoZero(ptr pointer, tagsize int) int { - v := *ptr.toInt64() - if v == 0 { - return 0 - } - return SizeVarint(uint64(v)) + tagsize -} -func sizeVarintS64Ptr(ptr pointer, tagsize int) int { - p := *ptr.toInt64Ptr() - if p == nil { - return 0 - } - return SizeVarint(uint64(*p)) + tagsize -} -func sizeVarintS64Slice(ptr pointer, tagsize int) int { - s := *ptr.toInt64Slice() - n := 0 - for _, v := range s { - n += SizeVarint(uint64(v)) + tagsize - } - return n -} -func sizeVarintS64PackedSlice(ptr pointer, tagsize int) int { - s := *ptr.toInt64Slice() - if len(s) == 0 { 
- return 0 - } - n := 0 - for _, v := range s { - n += SizeVarint(uint64(v)) - } - return n + SizeVarint(uint64(n)) + tagsize -} -func sizeZigzag32Value(ptr pointer, tagsize int) int { - v := *ptr.toInt32() - return SizeVarint(uint64((uint32(v)<<1)^uint32((int32(v)>>31)))) + tagsize -} -func sizeZigzag32ValueNoZero(ptr pointer, tagsize int) int { - v := *ptr.toInt32() - if v == 0 { - return 0 - } - return SizeVarint(uint64((uint32(v)<<1)^uint32((int32(v)>>31)))) + tagsize -} -func sizeZigzag32Ptr(ptr pointer, tagsize int) int { - p := ptr.getInt32Ptr() - if p == nil { - return 0 - } - v := *p - return SizeVarint(uint64((uint32(v)<<1)^uint32((int32(v)>>31)))) + tagsize -} -func sizeZigzag32Slice(ptr pointer, tagsize int) int { - s := ptr.getInt32Slice() - n := 0 - for _, v := range s { - n += SizeVarint(uint64((uint32(v)<<1)^uint32((int32(v)>>31)))) + tagsize - } - return n -} -func sizeZigzag32PackedSlice(ptr pointer, tagsize int) int { - s := ptr.getInt32Slice() - if len(s) == 0 { - return 0 - } - n := 0 - for _, v := range s { - n += SizeVarint(uint64((uint32(v) << 1) ^ uint32((int32(v) >> 31)))) - } - return n + SizeVarint(uint64(n)) + tagsize -} -func sizeZigzag64Value(ptr pointer, tagsize int) int { - v := *ptr.toInt64() - return SizeVarint(uint64(v<<1)^uint64((int64(v)>>63))) + tagsize -} -func sizeZigzag64ValueNoZero(ptr pointer, tagsize int) int { - v := *ptr.toInt64() - if v == 0 { - return 0 - } - return SizeVarint(uint64(v<<1)^uint64((int64(v)>>63))) + tagsize -} -func sizeZigzag64Ptr(ptr pointer, tagsize int) int { - p := *ptr.toInt64Ptr() - if p == nil { - return 0 - } - v := *p - return SizeVarint(uint64(v<<1)^uint64((int64(v)>>63))) + tagsize -} -func sizeZigzag64Slice(ptr pointer, tagsize int) int { - s := *ptr.toInt64Slice() - n := 0 - for _, v := range s { - n += SizeVarint(uint64(v<<1)^uint64((int64(v)>>63))) + tagsize - } - return n -} -func sizeZigzag64PackedSlice(ptr pointer, tagsize int) int { - s := *ptr.toInt64Slice() - if len(s) == 0 { - 
return 0 - } - n := 0 - for _, v := range s { - n += SizeVarint(uint64(v<<1) ^ uint64((int64(v) >> 63))) - } - return n + SizeVarint(uint64(n)) + tagsize -} -func sizeBoolValue(_ pointer, tagsize int) int { - return 1 + tagsize -} -func sizeBoolValueNoZero(ptr pointer, tagsize int) int { - v := *ptr.toBool() - if !v { - return 0 - } - return 1 + tagsize -} -func sizeBoolPtr(ptr pointer, tagsize int) int { - p := *ptr.toBoolPtr() - if p == nil { - return 0 - } - return 1 + tagsize -} -func sizeBoolSlice(ptr pointer, tagsize int) int { - s := *ptr.toBoolSlice() - return (1 + tagsize) * len(s) -} -func sizeBoolPackedSlice(ptr pointer, tagsize int) int { - s := *ptr.toBoolSlice() - if len(s) == 0 { - return 0 - } - return len(s) + SizeVarint(uint64(len(s))) + tagsize -} -func sizeStringValue(ptr pointer, tagsize int) int { - v := *ptr.toString() - return len(v) + SizeVarint(uint64(len(v))) + tagsize -} -func sizeStringValueNoZero(ptr pointer, tagsize int) int { - v := *ptr.toString() - if v == "" { - return 0 - } - return len(v) + SizeVarint(uint64(len(v))) + tagsize -} -func sizeStringPtr(ptr pointer, tagsize int) int { - p := *ptr.toStringPtr() - if p == nil { - return 0 - } - v := *p - return len(v) + SizeVarint(uint64(len(v))) + tagsize -} -func sizeStringSlice(ptr pointer, tagsize int) int { - s := *ptr.toStringSlice() - n := 0 - for _, v := range s { - n += len(v) + SizeVarint(uint64(len(v))) + tagsize - } - return n -} -func sizeBytes(ptr pointer, tagsize int) int { - v := *ptr.toBytes() - if v == nil { - return 0 - } - return len(v) + SizeVarint(uint64(len(v))) + tagsize -} -func sizeBytes3(ptr pointer, tagsize int) int { - v := *ptr.toBytes() - if len(v) == 0 { - return 0 - } - return len(v) + SizeVarint(uint64(len(v))) + tagsize -} -func sizeBytesOneof(ptr pointer, tagsize int) int { - v := *ptr.toBytes() - return len(v) + SizeVarint(uint64(len(v))) + tagsize -} -func sizeBytesSlice(ptr pointer, tagsize int) int { - s := *ptr.toBytesSlice() - n := 0 - for _, 
v := range s { - n += len(v) + SizeVarint(uint64(len(v))) + tagsize - } - return n -} - -// appendFixed32 appends an encoded fixed32 to b. -func appendFixed32(b []byte, v uint32) []byte { - b = append(b, - byte(v), - byte(v>>8), - byte(v>>16), - byte(v>>24)) - return b -} - -// appendFixed64 appends an encoded fixed64 to b. -func appendFixed64(b []byte, v uint64) []byte { - b = append(b, - byte(v), - byte(v>>8), - byte(v>>16), - byte(v>>24), - byte(v>>32), - byte(v>>40), - byte(v>>48), - byte(v>>56)) - return b -} - -// appendVarint appends an encoded varint to b. -func appendVarint(b []byte, v uint64) []byte { - // TODO: make 1-byte (maybe 2-byte) case inline-able, once we - // have non-leaf inliner. - switch { - case v < 1<<7: - b = append(b, byte(v)) - case v < 1<<14: - b = append(b, - byte(v&0x7f|0x80), - byte(v>>7)) - case v < 1<<21: - b = append(b, - byte(v&0x7f|0x80), - byte((v>>7)&0x7f|0x80), - byte(v>>14)) - case v < 1<<28: - b = append(b, - byte(v&0x7f|0x80), - byte((v>>7)&0x7f|0x80), - byte((v>>14)&0x7f|0x80), - byte(v>>21)) - case v < 1<<35: - b = append(b, - byte(v&0x7f|0x80), - byte((v>>7)&0x7f|0x80), - byte((v>>14)&0x7f|0x80), - byte((v>>21)&0x7f|0x80), - byte(v>>28)) - case v < 1<<42: - b = append(b, - byte(v&0x7f|0x80), - byte((v>>7)&0x7f|0x80), - byte((v>>14)&0x7f|0x80), - byte((v>>21)&0x7f|0x80), - byte((v>>28)&0x7f|0x80), - byte(v>>35)) - case v < 1<<49: - b = append(b, - byte(v&0x7f|0x80), - byte((v>>7)&0x7f|0x80), - byte((v>>14)&0x7f|0x80), - byte((v>>21)&0x7f|0x80), - byte((v>>28)&0x7f|0x80), - byte((v>>35)&0x7f|0x80), - byte(v>>42)) - case v < 1<<56: - b = append(b, - byte(v&0x7f|0x80), - byte((v>>7)&0x7f|0x80), - byte((v>>14)&0x7f|0x80), - byte((v>>21)&0x7f|0x80), - byte((v>>28)&0x7f|0x80), - byte((v>>35)&0x7f|0x80), - byte((v>>42)&0x7f|0x80), - byte(v>>49)) - case v < 1<<63: - b = append(b, - byte(v&0x7f|0x80), - byte((v>>7)&0x7f|0x80), - byte((v>>14)&0x7f|0x80), - byte((v>>21)&0x7f|0x80), - byte((v>>28)&0x7f|0x80), - 
byte((v>>35)&0x7f|0x80), - byte((v>>42)&0x7f|0x80), - byte((v>>49)&0x7f|0x80), - byte(v>>56)) - default: - b = append(b, - byte(v&0x7f|0x80), - byte((v>>7)&0x7f|0x80), - byte((v>>14)&0x7f|0x80), - byte((v>>21)&0x7f|0x80), - byte((v>>28)&0x7f|0x80), - byte((v>>35)&0x7f|0x80), - byte((v>>42)&0x7f|0x80), - byte((v>>49)&0x7f|0x80), - byte((v>>56)&0x7f|0x80), - 1) - } - return b -} - -func appendFixed32Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := *ptr.toUint32() - b = appendVarint(b, wiretag) - b = appendFixed32(b, v) - return b, nil -} -func appendFixed32ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := *ptr.toUint32() - if v == 0 { - return b, nil - } - b = appendVarint(b, wiretag) - b = appendFixed32(b, v) - return b, nil -} -func appendFixed32Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - p := *ptr.toUint32Ptr() - if p == nil { - return b, nil - } - b = appendVarint(b, wiretag) - b = appendFixed32(b, *p) - return b, nil -} -func appendFixed32Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - s := *ptr.toUint32Slice() - for _, v := range s { - b = appendVarint(b, wiretag) - b = appendFixed32(b, v) - } - return b, nil -} -func appendFixed32PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - s := *ptr.toUint32Slice() - if len(s) == 0 { - return b, nil - } - b = appendVarint(b, wiretag&^7|WireBytes) - b = appendVarint(b, uint64(4*len(s))) - for _, v := range s { - b = appendFixed32(b, v) - } - return b, nil -} -func appendFixedS32Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := *ptr.toInt32() - b = appendVarint(b, wiretag) - b = appendFixed32(b, uint32(v)) - return b, nil -} -func appendFixedS32ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := *ptr.toInt32() - if v == 0 { - return b, nil - } - b = appendVarint(b, wiretag) - b = appendFixed32(b, uint32(v)) - return b, nil 
-} -func appendFixedS32Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - p := ptr.getInt32Ptr() - if p == nil { - return b, nil - } - b = appendVarint(b, wiretag) - b = appendFixed32(b, uint32(*p)) - return b, nil -} -func appendFixedS32Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - s := ptr.getInt32Slice() - for _, v := range s { - b = appendVarint(b, wiretag) - b = appendFixed32(b, uint32(v)) - } - return b, nil -} -func appendFixedS32PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - s := ptr.getInt32Slice() - if len(s) == 0 { - return b, nil - } - b = appendVarint(b, wiretag&^7|WireBytes) - b = appendVarint(b, uint64(4*len(s))) - for _, v := range s { - b = appendFixed32(b, uint32(v)) - } - return b, nil -} -func appendFloat32Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := math.Float32bits(*ptr.toFloat32()) - b = appendVarint(b, wiretag) - b = appendFixed32(b, v) - return b, nil -} -func appendFloat32ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := math.Float32bits(*ptr.toFloat32()) - if v == 0 { - return b, nil - } - b = appendVarint(b, wiretag) - b = appendFixed32(b, v) - return b, nil -} -func appendFloat32Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - p := *ptr.toFloat32Ptr() - if p == nil { - return b, nil - } - b = appendVarint(b, wiretag) - b = appendFixed32(b, math.Float32bits(*p)) - return b, nil -} -func appendFloat32Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - s := *ptr.toFloat32Slice() - for _, v := range s { - b = appendVarint(b, wiretag) - b = appendFixed32(b, math.Float32bits(v)) - } - return b, nil -} -func appendFloat32PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - s := *ptr.toFloat32Slice() - if len(s) == 0 { - return b, nil - } - b = appendVarint(b, wiretag&^7|WireBytes) - b = appendVarint(b, uint64(4*len(s))) - for _, v 
:= range s { - b = appendFixed32(b, math.Float32bits(v)) - } - return b, nil -} -func appendFixed64Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := *ptr.toUint64() - b = appendVarint(b, wiretag) - b = appendFixed64(b, v) - return b, nil -} -func appendFixed64ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := *ptr.toUint64() - if v == 0 { - return b, nil - } - b = appendVarint(b, wiretag) - b = appendFixed64(b, v) - return b, nil -} -func appendFixed64Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - p := *ptr.toUint64Ptr() - if p == nil { - return b, nil - } - b = appendVarint(b, wiretag) - b = appendFixed64(b, *p) - return b, nil -} -func appendFixed64Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - s := *ptr.toUint64Slice() - for _, v := range s { - b = appendVarint(b, wiretag) - b = appendFixed64(b, v) - } - return b, nil -} -func appendFixed64PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - s := *ptr.toUint64Slice() - if len(s) == 0 { - return b, nil - } - b = appendVarint(b, wiretag&^7|WireBytes) - b = appendVarint(b, uint64(8*len(s))) - for _, v := range s { - b = appendFixed64(b, v) - } - return b, nil -} -func appendFixedS64Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := *ptr.toInt64() - b = appendVarint(b, wiretag) - b = appendFixed64(b, uint64(v)) - return b, nil -} -func appendFixedS64ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := *ptr.toInt64() - if v == 0 { - return b, nil - } - b = appendVarint(b, wiretag) - b = appendFixed64(b, uint64(v)) - return b, nil -} -func appendFixedS64Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - p := *ptr.toInt64Ptr() - if p == nil { - return b, nil - } - b = appendVarint(b, wiretag) - b = appendFixed64(b, uint64(*p)) - return b, nil -} -func appendFixedS64Slice(b []byte, ptr pointer, wiretag 
uint64, _ bool) ([]byte, error) { - s := *ptr.toInt64Slice() - for _, v := range s { - b = appendVarint(b, wiretag) - b = appendFixed64(b, uint64(v)) - } - return b, nil -} -func appendFixedS64PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - s := *ptr.toInt64Slice() - if len(s) == 0 { - return b, nil - } - b = appendVarint(b, wiretag&^7|WireBytes) - b = appendVarint(b, uint64(8*len(s))) - for _, v := range s { - b = appendFixed64(b, uint64(v)) - } - return b, nil -} -func appendFloat64Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := math.Float64bits(*ptr.toFloat64()) - b = appendVarint(b, wiretag) - b = appendFixed64(b, v) - return b, nil -} -func appendFloat64ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := math.Float64bits(*ptr.toFloat64()) - if v == 0 { - return b, nil - } - b = appendVarint(b, wiretag) - b = appendFixed64(b, v) - return b, nil -} -func appendFloat64Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - p := *ptr.toFloat64Ptr() - if p == nil { - return b, nil - } - b = appendVarint(b, wiretag) - b = appendFixed64(b, math.Float64bits(*p)) - return b, nil -} -func appendFloat64Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - s := *ptr.toFloat64Slice() - for _, v := range s { - b = appendVarint(b, wiretag) - b = appendFixed64(b, math.Float64bits(v)) - } - return b, nil -} -func appendFloat64PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - s := *ptr.toFloat64Slice() - if len(s) == 0 { - return b, nil - } - b = appendVarint(b, wiretag&^7|WireBytes) - b = appendVarint(b, uint64(8*len(s))) - for _, v := range s { - b = appendFixed64(b, math.Float64bits(v)) - } - return b, nil -} -func appendVarint32Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := *ptr.toUint32() - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(v)) - return b, nil -} -func 
appendVarint32ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := *ptr.toUint32() - if v == 0 { - return b, nil - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(v)) - return b, nil -} -func appendVarint32Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - p := *ptr.toUint32Ptr() - if p == nil { - return b, nil - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(*p)) - return b, nil -} -func appendVarint32Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - s := *ptr.toUint32Slice() - for _, v := range s { - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(v)) - } - return b, nil -} -func appendVarint32PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - s := *ptr.toUint32Slice() - if len(s) == 0 { - return b, nil - } - b = appendVarint(b, wiretag&^7|WireBytes) - // compute size - n := 0 - for _, v := range s { - n += SizeVarint(uint64(v)) - } - b = appendVarint(b, uint64(n)) - for _, v := range s { - b = appendVarint(b, uint64(v)) - } - return b, nil -} -func appendVarintS32Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := *ptr.toInt32() - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(v)) - return b, nil -} -func appendVarintS32ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := *ptr.toInt32() - if v == 0 { - return b, nil - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(v)) - return b, nil -} -func appendVarintS32Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - p := ptr.getInt32Ptr() - if p == nil { - return b, nil - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(*p)) - return b, nil -} -func appendVarintS32Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - s := ptr.getInt32Slice() - for _, v := range s { - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(v)) - } - return b, nil -} 
-func appendVarintS32PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - s := ptr.getInt32Slice() - if len(s) == 0 { - return b, nil - } - b = appendVarint(b, wiretag&^7|WireBytes) - // compute size - n := 0 - for _, v := range s { - n += SizeVarint(uint64(v)) - } - b = appendVarint(b, uint64(n)) - for _, v := range s { - b = appendVarint(b, uint64(v)) - } - return b, nil -} -func appendVarint64Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := *ptr.toUint64() - b = appendVarint(b, wiretag) - b = appendVarint(b, v) - return b, nil -} -func appendVarint64ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := *ptr.toUint64() - if v == 0 { - return b, nil - } - b = appendVarint(b, wiretag) - b = appendVarint(b, v) - return b, nil -} -func appendVarint64Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - p := *ptr.toUint64Ptr() - if p == nil { - return b, nil - } - b = appendVarint(b, wiretag) - b = appendVarint(b, *p) - return b, nil -} -func appendVarint64Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - s := *ptr.toUint64Slice() - for _, v := range s { - b = appendVarint(b, wiretag) - b = appendVarint(b, v) - } - return b, nil -} -func appendVarint64PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - s := *ptr.toUint64Slice() - if len(s) == 0 { - return b, nil - } - b = appendVarint(b, wiretag&^7|WireBytes) - // compute size - n := 0 - for _, v := range s { - n += SizeVarint(v) - } - b = appendVarint(b, uint64(n)) - for _, v := range s { - b = appendVarint(b, v) - } - return b, nil -} -func appendVarintS64Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := *ptr.toInt64() - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(v)) - return b, nil -} -func appendVarintS64ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := *ptr.toInt64() - if v == 0 { - 
return b, nil - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(v)) - return b, nil -} -func appendVarintS64Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - p := *ptr.toInt64Ptr() - if p == nil { - return b, nil - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(*p)) - return b, nil -} -func appendVarintS64Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - s := *ptr.toInt64Slice() - for _, v := range s { - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(v)) - } - return b, nil -} -func appendVarintS64PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - s := *ptr.toInt64Slice() - if len(s) == 0 { - return b, nil - } - b = appendVarint(b, wiretag&^7|WireBytes) - // compute size - n := 0 - for _, v := range s { - n += SizeVarint(uint64(v)) - } - b = appendVarint(b, uint64(n)) - for _, v := range s { - b = appendVarint(b, uint64(v)) - } - return b, nil -} -func appendZigzag32Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := *ptr.toInt32() - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64((uint32(v)<<1)^uint32((int32(v)>>31)))) - return b, nil -} -func appendZigzag32ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := *ptr.toInt32() - if v == 0 { - return b, nil - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64((uint32(v)<<1)^uint32((int32(v)>>31)))) - return b, nil -} -func appendZigzag32Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - p := ptr.getInt32Ptr() - if p == nil { - return b, nil - } - b = appendVarint(b, wiretag) - v := *p - b = appendVarint(b, uint64((uint32(v)<<1)^uint32((int32(v)>>31)))) - return b, nil -} -func appendZigzag32Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - s := ptr.getInt32Slice() - for _, v := range s { - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64((uint32(v)<<1)^uint32((int32(v)>>31)))) - } 
- return b, nil -} -func appendZigzag32PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - s := ptr.getInt32Slice() - if len(s) == 0 { - return b, nil - } - b = appendVarint(b, wiretag&^7|WireBytes) - // compute size - n := 0 - for _, v := range s { - n += SizeVarint(uint64((uint32(v) << 1) ^ uint32((int32(v) >> 31)))) - } - b = appendVarint(b, uint64(n)) - for _, v := range s { - b = appendVarint(b, uint64((uint32(v)<<1)^uint32((int32(v)>>31)))) - } - return b, nil -} -func appendZigzag64Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := *ptr.toInt64() - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(v<<1)^uint64((int64(v)>>63))) - return b, nil -} -func appendZigzag64ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := *ptr.toInt64() - if v == 0 { - return b, nil - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(v<<1)^uint64((int64(v)>>63))) - return b, nil -} -func appendZigzag64Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - p := *ptr.toInt64Ptr() - if p == nil { - return b, nil - } - b = appendVarint(b, wiretag) - v := *p - b = appendVarint(b, uint64(v<<1)^uint64((int64(v)>>63))) - return b, nil -} -func appendZigzag64Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - s := *ptr.toInt64Slice() - for _, v := range s { - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(v<<1)^uint64((int64(v)>>63))) - } - return b, nil -} -func appendZigzag64PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - s := *ptr.toInt64Slice() - if len(s) == 0 { - return b, nil - } - b = appendVarint(b, wiretag&^7|WireBytes) - // compute size - n := 0 - for _, v := range s { - n += SizeVarint(uint64(v<<1) ^ uint64((int64(v) >> 63))) - } - b = appendVarint(b, uint64(n)) - for _, v := range s { - b = appendVarint(b, uint64(v<<1)^uint64((int64(v)>>63))) - } - return b, nil -} -func appendBoolValue(b 
[]byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := *ptr.toBool() - b = appendVarint(b, wiretag) - if v { - b = append(b, 1) - } else { - b = append(b, 0) - } - return b, nil -} -func appendBoolValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := *ptr.toBool() - if !v { - return b, nil - } - b = appendVarint(b, wiretag) - b = append(b, 1) - return b, nil -} - -func appendBoolPtr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - p := *ptr.toBoolPtr() - if p == nil { - return b, nil - } - b = appendVarint(b, wiretag) - if *p { - b = append(b, 1) - } else { - b = append(b, 0) - } - return b, nil -} -func appendBoolSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - s := *ptr.toBoolSlice() - for _, v := range s { - b = appendVarint(b, wiretag) - if v { - b = append(b, 1) - } else { - b = append(b, 0) - } - } - return b, nil -} -func appendBoolPackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - s := *ptr.toBoolSlice() - if len(s) == 0 { - return b, nil - } - b = appendVarint(b, wiretag&^7|WireBytes) - b = appendVarint(b, uint64(len(s))) - for _, v := range s { - if v { - b = append(b, 1) - } else { - b = append(b, 0) - } - } - return b, nil -} -func appendStringValue(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := *ptr.toString() - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(len(v))) - b = append(b, v...) - return b, nil -} -func appendStringValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := *ptr.toString() - if v == "" { - return b, nil - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(len(v))) - b = append(b, v...) 
- return b, nil -} -func appendStringPtr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - p := *ptr.toStringPtr() - if p == nil { - return b, nil - } - v := *p - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(len(v))) - b = append(b, v...) - return b, nil -} -func appendStringSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - s := *ptr.toStringSlice() - for _, v := range s { - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(len(v))) - b = append(b, v...) - } - return b, nil -} -func appendUTF8StringValue(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - var invalidUTF8 bool - v := *ptr.toString() - if !utf8.ValidString(v) { - invalidUTF8 = true - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(len(v))) - b = append(b, v...) - if invalidUTF8 { - return b, errInvalidUTF8 - } - return b, nil -} -func appendUTF8StringValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - var invalidUTF8 bool - v := *ptr.toString() - if v == "" { - return b, nil - } - if !utf8.ValidString(v) { - invalidUTF8 = true - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(len(v))) - b = append(b, v...) - if invalidUTF8 { - return b, errInvalidUTF8 - } - return b, nil -} -func appendUTF8StringPtr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - var invalidUTF8 bool - p := *ptr.toStringPtr() - if p == nil { - return b, nil - } - v := *p - if !utf8.ValidString(v) { - invalidUTF8 = true - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(len(v))) - b = append(b, v...) - if invalidUTF8 { - return b, errInvalidUTF8 - } - return b, nil -} -func appendUTF8StringSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - var invalidUTF8 bool - s := *ptr.toStringSlice() - for _, v := range s { - if !utf8.ValidString(v) { - invalidUTF8 = true - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(len(v))) - b = append(b, v...) 
- } - if invalidUTF8 { - return b, errInvalidUTF8 - } - return b, nil -} -func appendBytes(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := *ptr.toBytes() - if v == nil { - return b, nil - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(len(v))) - b = append(b, v...) - return b, nil -} -func appendBytes3(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := *ptr.toBytes() - if len(v) == 0 { - return b, nil - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(len(v))) - b = append(b, v...) - return b, nil -} -func appendBytesOneof(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := *ptr.toBytes() - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(len(v))) - b = append(b, v...) - return b, nil -} -func appendBytesSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - s := *ptr.toBytesSlice() - for _, v := range s { - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(len(v))) - b = append(b, v...) - } - return b, nil -} - -// makeGroupMarshaler returns the sizer and marshaler for a group. -// u is the marshal info of the underlying message. -func makeGroupMarshaler(u *marshalInfo) (sizer, marshaler) { - return func(ptr pointer, tagsize int) int { - p := ptr.getPointer() - if p.isNil() { - return 0 - } - return u.size(p) + 2*tagsize - }, - func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { - p := ptr.getPointer() - if p.isNil() { - return b, nil - } - var err error - b = appendVarint(b, wiretag) // start group - b, err = u.marshal(b, p, deterministic) - b = appendVarint(b, wiretag+(WireEndGroup-WireStartGroup)) // end group - return b, err - } -} - -// makeGroupSliceMarshaler returns the sizer and marshaler for a group slice. -// u is the marshal info of the underlying message. 
-func makeGroupSliceMarshaler(u *marshalInfo) (sizer, marshaler) { - return func(ptr pointer, tagsize int) int { - s := ptr.getPointerSlice() - n := 0 - for _, v := range s { - if v.isNil() { - continue - } - n += u.size(v) + 2*tagsize - } - return n - }, - func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { - s := ptr.getPointerSlice() - var err error - var nerr nonFatal - for _, v := range s { - if v.isNil() { - return b, errRepeatedHasNil - } - b = appendVarint(b, wiretag) // start group - b, err = u.marshal(b, v, deterministic) - b = appendVarint(b, wiretag+(WireEndGroup-WireStartGroup)) // end group - if !nerr.Merge(err) { - if err == ErrNil { - err = errRepeatedHasNil - } - return b, err - } - } - return b, nerr.E - } -} - -// makeMessageMarshaler returns the sizer and marshaler for a message field. -// u is the marshal info of the message. -func makeMessageMarshaler(u *marshalInfo) (sizer, marshaler) { - return func(ptr pointer, tagsize int) int { - p := ptr.getPointer() - if p.isNil() { - return 0 - } - siz := u.size(p) - return siz + SizeVarint(uint64(siz)) + tagsize - }, - func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { - p := ptr.getPointer() - if p.isNil() { - return b, nil - } - b = appendVarint(b, wiretag) - siz := u.cachedsize(p) - b = appendVarint(b, uint64(siz)) - return u.marshal(b, p, deterministic) - } -} - -// makeMessageSliceMarshaler returns the sizer and marshaler for a message slice. -// u is the marshal info of the message. 
-func makeMessageSliceMarshaler(u *marshalInfo) (sizer, marshaler) { - return func(ptr pointer, tagsize int) int { - s := ptr.getPointerSlice() - n := 0 - for _, v := range s { - if v.isNil() { - continue - } - siz := u.size(v) - n += siz + SizeVarint(uint64(siz)) + tagsize - } - return n - }, - func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { - s := ptr.getPointerSlice() - var err error - var nerr nonFatal - for _, v := range s { - if v.isNil() { - return b, errRepeatedHasNil - } - b = appendVarint(b, wiretag) - siz := u.cachedsize(v) - b = appendVarint(b, uint64(siz)) - b, err = u.marshal(b, v, deterministic) - - if !nerr.Merge(err) { - if err == ErrNil { - err = errRepeatedHasNil - } - return b, err - } - } - return b, nerr.E - } -} - -// makeMapMarshaler returns the sizer and marshaler for a map field. -// f is the pointer to the reflect data structure of the field. -func makeMapMarshaler(f *reflect.StructField) (sizer, marshaler) { - // figure out key and value type - t := f.Type - keyType := t.Key() - valType := t.Elem() - keyTags := strings.Split(f.Tag.Get("protobuf_key"), ",") - valTags := strings.Split(f.Tag.Get("protobuf_val"), ",") - keySizer, keyMarshaler := typeMarshaler(keyType, keyTags, false, false) // don't omit zero value in map - valSizer, valMarshaler := typeMarshaler(valType, valTags, false, false) // don't omit zero value in map - keyWireTag := 1<<3 | wiretype(keyTags[0]) - valWireTag := 2<<3 | wiretype(valTags[0]) - - // We create an interface to get the addresses of the map key and value. - // If value is pointer-typed, the interface is a direct interface, the - // idata itself is the value. Otherwise, the idata is the pointer to the - // value. - // Key cannot be pointer-typed. - valIsPtr := valType.Kind() == reflect.Ptr - - // If value is a message with nested maps, calling - // valSizer in marshal may be quadratic. We should use - // cached version in marshal (but not in size). 
- // If value is not message type, we don't have size cache, - // but it cannot be nested either. Just use valSizer. - valCachedSizer := valSizer - if valIsPtr && valType.Elem().Kind() == reflect.Struct { - u := getMarshalInfo(valType.Elem()) - valCachedSizer = func(ptr pointer, tagsize int) int { - // Same as message sizer, but use cache. - p := ptr.getPointer() - if p.isNil() { - return 0 - } - siz := u.cachedsize(p) - return siz + SizeVarint(uint64(siz)) + tagsize - } - } - return func(ptr pointer, tagsize int) int { - m := ptr.asPointerTo(t).Elem() // the map - n := 0 - for _, k := range m.MapKeys() { - ki := k.Interface() - vi := m.MapIndex(k).Interface() - kaddr := toAddrPointer(&ki, false, false) // pointer to key - vaddr := toAddrPointer(&vi, valIsPtr, false) // pointer to value - siz := keySizer(kaddr, 1) + valSizer(vaddr, 1) // tag of key = 1 (size=1), tag of val = 2 (size=1) - n += siz + SizeVarint(uint64(siz)) + tagsize - } - return n - }, - func(b []byte, ptr pointer, tag uint64, deterministic bool) ([]byte, error) { - m := ptr.asPointerTo(t).Elem() // the map - var err error - keys := m.MapKeys() - if len(keys) > 1 && deterministic { - sort.Sort(mapKeys(keys)) - } - - var nerr nonFatal - for _, k := range keys { - ki := k.Interface() - vi := m.MapIndex(k).Interface() - kaddr := toAddrPointer(&ki, false, false) // pointer to key - vaddr := toAddrPointer(&vi, valIsPtr, false) // pointer to value - b = appendVarint(b, tag) - siz := keySizer(kaddr, 1) + valCachedSizer(vaddr, 1) // tag of key = 1 (size=1), tag of val = 2 (size=1) - b = appendVarint(b, uint64(siz)) - b, err = keyMarshaler(b, kaddr, keyWireTag, deterministic) - if !nerr.Merge(err) { - return b, err - } - b, err = valMarshaler(b, vaddr, valWireTag, deterministic) - if err != ErrNil && !nerr.Merge(err) { // allow nil value in map - return b, err - } - } - return b, nerr.E - } -} - -// makeOneOfMarshaler returns the sizer and marshaler for a oneof field. -// fi is the marshal info of the field. 
-// f is the pointer to the reflect data structure of the field. -func makeOneOfMarshaler(fi *marshalFieldInfo, f *reflect.StructField) (sizer, marshaler) { - // Oneof field is an interface. We need to get the actual data type on the fly. - t := f.Type - return func(ptr pointer, _ int) int { - p := ptr.getInterfacePointer() - if p.isNil() { - return 0 - } - v := ptr.asPointerTo(t).Elem().Elem().Elem() // *interface -> interface -> *struct -> struct - telem := v.Type() - e := fi.oneofElems[telem] - return e.sizer(p, e.tagsize) - }, - func(b []byte, ptr pointer, _ uint64, deterministic bool) ([]byte, error) { - p := ptr.getInterfacePointer() - if p.isNil() { - return b, nil - } - v := ptr.asPointerTo(t).Elem().Elem().Elem() // *interface -> interface -> *struct -> struct - telem := v.Type() - if telem.Field(0).Type.Kind() == reflect.Ptr && p.getPointer().isNil() { - return b, errOneofHasNil - } - e := fi.oneofElems[telem] - return e.marshaler(b, p, e.wiretag, deterministic) - } -} - -// sizeExtensions computes the size of encoded data for a XXX_InternalExtensions field. -func (u *marshalInfo) sizeExtensions(ext *XXX_InternalExtensions) int { - m, mu := ext.extensionsRead() - if m == nil { - return 0 - } - mu.Lock() - - n := 0 - for _, e := range m { - if e.value == nil || e.desc == nil { - // Extension is only in its encoded form. - n += len(e.enc) - continue - } - - // We don't skip extensions that have an encoded form set, - // because the extension value may have been mutated after - // the last time this function was called. - ei := u.getExtElemInfo(e.desc) - v := e.value - p := toAddrPointer(&v, ei.isptr, ei.deref) - n += ei.sizer(p, ei.tagsize) - } - mu.Unlock() - return n -} - -// appendExtensions marshals a XXX_InternalExtensions field to the end of byte slice b. 
-func (u *marshalInfo) appendExtensions(b []byte, ext *XXX_InternalExtensions, deterministic bool) ([]byte, error) { - m, mu := ext.extensionsRead() - if m == nil { - return b, nil - } - mu.Lock() - defer mu.Unlock() - - var err error - var nerr nonFatal - - // Fast-path for common cases: zero or one extensions. - // Don't bother sorting the keys. - if len(m) <= 1 { - for _, e := range m { - if e.value == nil || e.desc == nil { - // Extension is only in its encoded form. - b = append(b, e.enc...) - continue - } - - // We don't skip extensions that have an encoded form set, - // because the extension value may have been mutated after - // the last time this function was called. - - ei := u.getExtElemInfo(e.desc) - v := e.value - p := toAddrPointer(&v, ei.isptr, ei.deref) - b, err = ei.marshaler(b, p, ei.wiretag, deterministic) - if !nerr.Merge(err) { - return b, err - } - } - return b, nerr.E - } - - // Sort the keys to provide a deterministic encoding. - // Not sure this is required, but the old code does it. - keys := make([]int, 0, len(m)) - for k := range m { - keys = append(keys, int(k)) - } - sort.Ints(keys) - - for _, k := range keys { - e := m[int32(k)] - if e.value == nil || e.desc == nil { - // Extension is only in its encoded form. - b = append(b, e.enc...) - continue - } - - // We don't skip extensions that have an encoded form set, - // because the extension value may have been mutated after - // the last time this function was called. - - ei := u.getExtElemInfo(e.desc) - v := e.value - p := toAddrPointer(&v, ei.isptr, ei.deref) - b, err = ei.marshaler(b, p, ei.wiretag, deterministic) - if !nerr.Merge(err) { - return b, err - } - } - return b, nerr.E -} - -// message set format is: -// message MessageSet { -// repeated group Item = 1 { -// required int32 type_id = 2; -// required string message = 3; -// }; -// } - -// sizeMessageSet computes the size of encoded data for a XXX_InternalExtensions field -// in message set format (above). 
-func (u *marshalInfo) sizeMessageSet(ext *XXX_InternalExtensions) int { - m, mu := ext.extensionsRead() - if m == nil { - return 0 - } - mu.Lock() - - n := 0 - for id, e := range m { - n += 2 // start group, end group. tag = 1 (size=1) - n += SizeVarint(uint64(id)) + 1 // type_id, tag = 2 (size=1) - - if e.value == nil || e.desc == nil { - // Extension is only in its encoded form. - msgWithLen := skipVarint(e.enc) // skip old tag, but leave the length varint - siz := len(msgWithLen) - n += siz + 1 // message, tag = 3 (size=1) - continue - } - - // We don't skip extensions that have an encoded form set, - // because the extension value may have been mutated after - // the last time this function was called. - - ei := u.getExtElemInfo(e.desc) - v := e.value - p := toAddrPointer(&v, ei.isptr, ei.deref) - n += ei.sizer(p, 1) // message, tag = 3 (size=1) - } - mu.Unlock() - return n -} - -// appendMessageSet marshals a XXX_InternalExtensions field in message set format (above) -// to the end of byte slice b. -func (u *marshalInfo) appendMessageSet(b []byte, ext *XXX_InternalExtensions, deterministic bool) ([]byte, error) { - m, mu := ext.extensionsRead() - if m == nil { - return b, nil - } - mu.Lock() - defer mu.Unlock() - - var err error - var nerr nonFatal - - // Fast-path for common cases: zero or one extensions. - // Don't bother sorting the keys. - if len(m) <= 1 { - for id, e := range m { - b = append(b, 1<<3|WireStartGroup) - b = append(b, 2<<3|WireVarint) - b = appendVarint(b, uint64(id)) - - if e.value == nil || e.desc == nil { - // Extension is only in its encoded form. - msgWithLen := skipVarint(e.enc) // skip old tag, but leave the length varint - b = append(b, 3<<3|WireBytes) - b = append(b, msgWithLen...) - b = append(b, 1<<3|WireEndGroup) - continue - } - - // We don't skip extensions that have an encoded form set, - // because the extension value may have been mutated after - // the last time this function was called. 
- - ei := u.getExtElemInfo(e.desc) - v := e.value - p := toAddrPointer(&v, ei.isptr, ei.deref) - b, err = ei.marshaler(b, p, 3<<3|WireBytes, deterministic) - if !nerr.Merge(err) { - return b, err - } - b = append(b, 1<<3|WireEndGroup) - } - return b, nerr.E - } - - // Sort the keys to provide a deterministic encoding. - keys := make([]int, 0, len(m)) - for k := range m { - keys = append(keys, int(k)) - } - sort.Ints(keys) - - for _, id := range keys { - e := m[int32(id)] - b = append(b, 1<<3|WireStartGroup) - b = append(b, 2<<3|WireVarint) - b = appendVarint(b, uint64(id)) - - if e.value == nil || e.desc == nil { - // Extension is only in its encoded form. - msgWithLen := skipVarint(e.enc) // skip old tag, but leave the length varint - b = append(b, 3<<3|WireBytes) - b = append(b, msgWithLen...) - b = append(b, 1<<3|WireEndGroup) - continue - } - - // We don't skip extensions that have an encoded form set, - // because the extension value may have been mutated after - // the last time this function was called. - - ei := u.getExtElemInfo(e.desc) - v := e.value - p := toAddrPointer(&v, ei.isptr, ei.deref) - b, err = ei.marshaler(b, p, 3<<3|WireBytes, deterministic) - b = append(b, 1<<3|WireEndGroup) - if !nerr.Merge(err) { - return b, err - } - } - return b, nerr.E -} - -// sizeV1Extensions computes the size of encoded data for a V1-API extension field. -func (u *marshalInfo) sizeV1Extensions(m map[int32]Extension) int { - if m == nil { - return 0 - } - - n := 0 - for _, e := range m { - if e.value == nil || e.desc == nil { - // Extension is only in its encoded form. - n += len(e.enc) - continue - } - - // We don't skip extensions that have an encoded form set, - // because the extension value may have been mutated after - // the last time this function was called. 
- - ei := u.getExtElemInfo(e.desc) - v := e.value - p := toAddrPointer(&v, ei.isptr, ei.deref) - n += ei.sizer(p, ei.tagsize) - } - return n -} - -// appendV1Extensions marshals a V1-API extension field to the end of byte slice b. -func (u *marshalInfo) appendV1Extensions(b []byte, m map[int32]Extension, deterministic bool) ([]byte, error) { - if m == nil { - return b, nil - } - - // Sort the keys to provide a deterministic encoding. - keys := make([]int, 0, len(m)) - for k := range m { - keys = append(keys, int(k)) - } - sort.Ints(keys) - - var err error - var nerr nonFatal - for _, k := range keys { - e := m[int32(k)] - if e.value == nil || e.desc == nil { - // Extension is only in its encoded form. - b = append(b, e.enc...) - continue - } - - // We don't skip extensions that have an encoded form set, - // because the extension value may have been mutated after - // the last time this function was called. - - ei := u.getExtElemInfo(e.desc) - v := e.value - p := toAddrPointer(&v, ei.isptr, ei.deref) - b, err = ei.marshaler(b, p, ei.wiretag, deterministic) - if !nerr.Merge(err) { - return b, err - } - } - return b, nerr.E -} - -// newMarshaler is the interface representing objects that can marshal themselves. -// -// This exists to support protoc-gen-go generated messages. -// The proto package will stop type-asserting to this interface in the future. -// -// DO NOT DEPEND ON THIS. -type newMarshaler interface { - XXX_Size() int - XXX_Marshal(b []byte, deterministic bool) ([]byte, error) -} - -// Size returns the encoded size of a protocol buffer message. -// This is the main entry point. -func Size(pb Message) int { - if m, ok := pb.(newMarshaler); ok { - return m.XXX_Size() - } - if m, ok := pb.(Marshaler); ok { - // If the message can marshal itself, let it do it, for compatibility. - // NOTE: This is not efficient. 
- b, _ := m.Marshal() - return len(b) - } - // in case somehow we didn't generate the wrapper - if pb == nil { - return 0 - } - var info InternalMessageInfo - return info.Size(pb) -} - -// Marshal takes a protocol buffer message -// and encodes it into the wire format, returning the data. -// This is the main entry point. -func Marshal(pb Message) ([]byte, error) { - if m, ok := pb.(newMarshaler); ok { - siz := m.XXX_Size() - b := make([]byte, 0, siz) - return m.XXX_Marshal(b, false) - } - if m, ok := pb.(Marshaler); ok { - // If the message can marshal itself, let it do it, for compatibility. - // NOTE: This is not efficient. - return m.Marshal() - } - // in case somehow we didn't generate the wrapper - if pb == nil { - return nil, ErrNil - } - var info InternalMessageInfo - siz := info.Size(pb) - b := make([]byte, 0, siz) - return info.Marshal(b, pb, false) -} - -// Marshal takes a protocol buffer message -// and encodes it into the wire format, writing the result to the -// Buffer. -// This is an alternative entry point. It is not necessary to use -// a Buffer for most applications. -func (p *Buffer) Marshal(pb Message) error { - var err error - if m, ok := pb.(newMarshaler); ok { - siz := m.XXX_Size() - p.grow(siz) // make sure buf has enough capacity - p.buf, err = m.XXX_Marshal(p.buf, p.deterministic) - return err - } - if m, ok := pb.(Marshaler); ok { - // If the message can marshal itself, let it do it, for compatibility. - // NOTE: This is not efficient. - b, err := m.Marshal() - p.buf = append(p.buf, b...) - return err - } - // in case somehow we didn't generate the wrapper - if pb == nil { - return ErrNil - } - var info InternalMessageInfo - siz := info.Size(pb) - p.grow(siz) // make sure buf has enough capacity - p.buf, err = info.Marshal(p.buf, pb, p.deterministic) - return err -} - -// grow grows the buffer's capacity, if necessary, to guarantee space for -// another n bytes. 
After grow(n), at least n bytes can be written to the -// buffer without another allocation. -func (p *Buffer) grow(n int) { - need := len(p.buf) + n - if need <= cap(p.buf) { - return - } - newCap := len(p.buf) * 2 - if newCap < need { - newCap = need - } - p.buf = append(make([]byte, 0, newCap), p.buf...) -} diff --git a/vendor/github.com/golang/protobuf/proto/table_merge.go b/vendor/github.com/golang/protobuf/proto/table_merge.go deleted file mode 100644 index 5525def..0000000 --- a/vendor/github.com/golang/protobuf/proto/table_merge.go +++ /dev/null @@ -1,654 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2016 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -package proto - -import ( - "fmt" - "reflect" - "strings" - "sync" - "sync/atomic" -) - -// Merge merges the src message into dst. -// This assumes that dst and src of the same type and are non-nil. -func (a *InternalMessageInfo) Merge(dst, src Message) { - mi := atomicLoadMergeInfo(&a.merge) - if mi == nil { - mi = getMergeInfo(reflect.TypeOf(dst).Elem()) - atomicStoreMergeInfo(&a.merge, mi) - } - mi.merge(toPointer(&dst), toPointer(&src)) -} - -type mergeInfo struct { - typ reflect.Type - - initialized int32 // 0: only typ is valid, 1: everything is valid - lock sync.Mutex - - fields []mergeFieldInfo - unrecognized field // Offset of XXX_unrecognized -} - -type mergeFieldInfo struct { - field field // Offset of field, guaranteed to be valid - - // isPointer reports whether the value in the field is a pointer. - // This is true for the following situations: - // * Pointer to struct - // * Pointer to basic type (proto2 only) - // * Slice (first value in slice header is a pointer) - // * String (first value in string header is a pointer) - isPointer bool - - // basicWidth reports the width of the field assuming that it is directly - // embedded in the struct (as is the case for basic types in proto3). - // The possible values are: - // 0: invalid - // 1: bool - // 4: int32, uint32, float32 - // 8: int64, uint64, float64 - basicWidth int - - // Where dst and src are pointers to the types being merged. 
- merge func(dst, src pointer) -} - -var ( - mergeInfoMap = map[reflect.Type]*mergeInfo{} - mergeInfoLock sync.Mutex -) - -func getMergeInfo(t reflect.Type) *mergeInfo { - mergeInfoLock.Lock() - defer mergeInfoLock.Unlock() - mi := mergeInfoMap[t] - if mi == nil { - mi = &mergeInfo{typ: t} - mergeInfoMap[t] = mi - } - return mi -} - -// merge merges src into dst assuming they are both of type *mi.typ. -func (mi *mergeInfo) merge(dst, src pointer) { - if dst.isNil() { - panic("proto: nil destination") - } - if src.isNil() { - return // Nothing to do. - } - - if atomic.LoadInt32(&mi.initialized) == 0 { - mi.computeMergeInfo() - } - - for _, fi := range mi.fields { - sfp := src.offset(fi.field) - - // As an optimization, we can avoid the merge function call cost - // if we know for sure that the source will have no effect - // by checking if it is the zero value. - if unsafeAllowed { - if fi.isPointer && sfp.getPointer().isNil() { // Could be slice or string - continue - } - if fi.basicWidth > 0 { - switch { - case fi.basicWidth == 1 && !*sfp.toBool(): - continue - case fi.basicWidth == 4 && *sfp.toUint32() == 0: - continue - case fi.basicWidth == 8 && *sfp.toUint64() == 0: - continue - } - } - } - - dfp := dst.offset(fi.field) - fi.merge(dfp, sfp) - } - - // TODO: Make this faster? - out := dst.asPointerTo(mi.typ).Elem() - in := src.asPointerTo(mi.typ).Elem() - if emIn, err := extendable(in.Addr().Interface()); err == nil { - emOut, _ := extendable(out.Addr().Interface()) - mIn, muIn := emIn.extensionsRead() - if mIn != nil { - mOut := emOut.extensionsWrite() - muIn.Lock() - mergeExtension(mOut, mIn) - muIn.Unlock() - } - } - - if mi.unrecognized.IsValid() { - if b := *src.offset(mi.unrecognized).toBytes(); len(b) > 0 { - *dst.offset(mi.unrecognized).toBytes() = append([]byte(nil), b...) 
- } - } -} - -func (mi *mergeInfo) computeMergeInfo() { - mi.lock.Lock() - defer mi.lock.Unlock() - if mi.initialized != 0 { - return - } - t := mi.typ - n := t.NumField() - - props := GetProperties(t) - for i := 0; i < n; i++ { - f := t.Field(i) - if strings.HasPrefix(f.Name, "XXX_") { - continue - } - - mfi := mergeFieldInfo{field: toField(&f)} - tf := f.Type - - // As an optimization, we can avoid the merge function call cost - // if we know for sure that the source will have no effect - // by checking if it is the zero value. - if unsafeAllowed { - switch tf.Kind() { - case reflect.Ptr, reflect.Slice, reflect.String: - // As a special case, we assume slices and strings are pointers - // since we know that the first field in the SliceSlice or - // StringHeader is a data pointer. - mfi.isPointer = true - case reflect.Bool: - mfi.basicWidth = 1 - case reflect.Int32, reflect.Uint32, reflect.Float32: - mfi.basicWidth = 4 - case reflect.Int64, reflect.Uint64, reflect.Float64: - mfi.basicWidth = 8 - } - } - - // Unwrap tf to get at its most basic type. - var isPointer, isSlice bool - if tf.Kind() == reflect.Slice && tf.Elem().Kind() != reflect.Uint8 { - isSlice = true - tf = tf.Elem() - } - if tf.Kind() == reflect.Ptr { - isPointer = true - tf = tf.Elem() - } - if isPointer && isSlice && tf.Kind() != reflect.Struct { - panic("both pointer and slice for basic type in " + tf.Name()) - } - - switch tf.Kind() { - case reflect.Int32: - switch { - case isSlice: // E.g., []int32 - mfi.merge = func(dst, src pointer) { - // NOTE: toInt32Slice is not defined (see pointer_reflect.go). - /* - sfsp := src.toInt32Slice() - if *sfsp != nil { - dfsp := dst.toInt32Slice() - *dfsp = append(*dfsp, *sfsp...) - if *dfsp == nil { - *dfsp = []int64{} - } - } - */ - sfs := src.getInt32Slice() - if sfs != nil { - dfs := dst.getInt32Slice() - dfs = append(dfs, sfs...) 
- if dfs == nil { - dfs = []int32{} - } - dst.setInt32Slice(dfs) - } - } - case isPointer: // E.g., *int32 - mfi.merge = func(dst, src pointer) { - // NOTE: toInt32Ptr is not defined (see pointer_reflect.go). - /* - sfpp := src.toInt32Ptr() - if *sfpp != nil { - dfpp := dst.toInt32Ptr() - if *dfpp == nil { - *dfpp = Int32(**sfpp) - } else { - **dfpp = **sfpp - } - } - */ - sfp := src.getInt32Ptr() - if sfp != nil { - dfp := dst.getInt32Ptr() - if dfp == nil { - dst.setInt32Ptr(*sfp) - } else { - *dfp = *sfp - } - } - } - default: // E.g., int32 - mfi.merge = func(dst, src pointer) { - if v := *src.toInt32(); v != 0 { - *dst.toInt32() = v - } - } - } - case reflect.Int64: - switch { - case isSlice: // E.g., []int64 - mfi.merge = func(dst, src pointer) { - sfsp := src.toInt64Slice() - if *sfsp != nil { - dfsp := dst.toInt64Slice() - *dfsp = append(*dfsp, *sfsp...) - if *dfsp == nil { - *dfsp = []int64{} - } - } - } - case isPointer: // E.g., *int64 - mfi.merge = func(dst, src pointer) { - sfpp := src.toInt64Ptr() - if *sfpp != nil { - dfpp := dst.toInt64Ptr() - if *dfpp == nil { - *dfpp = Int64(**sfpp) - } else { - **dfpp = **sfpp - } - } - } - default: // E.g., int64 - mfi.merge = func(dst, src pointer) { - if v := *src.toInt64(); v != 0 { - *dst.toInt64() = v - } - } - } - case reflect.Uint32: - switch { - case isSlice: // E.g., []uint32 - mfi.merge = func(dst, src pointer) { - sfsp := src.toUint32Slice() - if *sfsp != nil { - dfsp := dst.toUint32Slice() - *dfsp = append(*dfsp, *sfsp...) 
- if *dfsp == nil { - *dfsp = []uint32{} - } - } - } - case isPointer: // E.g., *uint32 - mfi.merge = func(dst, src pointer) { - sfpp := src.toUint32Ptr() - if *sfpp != nil { - dfpp := dst.toUint32Ptr() - if *dfpp == nil { - *dfpp = Uint32(**sfpp) - } else { - **dfpp = **sfpp - } - } - } - default: // E.g., uint32 - mfi.merge = func(dst, src pointer) { - if v := *src.toUint32(); v != 0 { - *dst.toUint32() = v - } - } - } - case reflect.Uint64: - switch { - case isSlice: // E.g., []uint64 - mfi.merge = func(dst, src pointer) { - sfsp := src.toUint64Slice() - if *sfsp != nil { - dfsp := dst.toUint64Slice() - *dfsp = append(*dfsp, *sfsp...) - if *dfsp == nil { - *dfsp = []uint64{} - } - } - } - case isPointer: // E.g., *uint64 - mfi.merge = func(dst, src pointer) { - sfpp := src.toUint64Ptr() - if *sfpp != nil { - dfpp := dst.toUint64Ptr() - if *dfpp == nil { - *dfpp = Uint64(**sfpp) - } else { - **dfpp = **sfpp - } - } - } - default: // E.g., uint64 - mfi.merge = func(dst, src pointer) { - if v := *src.toUint64(); v != 0 { - *dst.toUint64() = v - } - } - } - case reflect.Float32: - switch { - case isSlice: // E.g., []float32 - mfi.merge = func(dst, src pointer) { - sfsp := src.toFloat32Slice() - if *sfsp != nil { - dfsp := dst.toFloat32Slice() - *dfsp = append(*dfsp, *sfsp...) - if *dfsp == nil { - *dfsp = []float32{} - } - } - } - case isPointer: // E.g., *float32 - mfi.merge = func(dst, src pointer) { - sfpp := src.toFloat32Ptr() - if *sfpp != nil { - dfpp := dst.toFloat32Ptr() - if *dfpp == nil { - *dfpp = Float32(**sfpp) - } else { - **dfpp = **sfpp - } - } - } - default: // E.g., float32 - mfi.merge = func(dst, src pointer) { - if v := *src.toFloat32(); v != 0 { - *dst.toFloat32() = v - } - } - } - case reflect.Float64: - switch { - case isSlice: // E.g., []float64 - mfi.merge = func(dst, src pointer) { - sfsp := src.toFloat64Slice() - if *sfsp != nil { - dfsp := dst.toFloat64Slice() - *dfsp = append(*dfsp, *sfsp...) 
- if *dfsp == nil { - *dfsp = []float64{} - } - } - } - case isPointer: // E.g., *float64 - mfi.merge = func(dst, src pointer) { - sfpp := src.toFloat64Ptr() - if *sfpp != nil { - dfpp := dst.toFloat64Ptr() - if *dfpp == nil { - *dfpp = Float64(**sfpp) - } else { - **dfpp = **sfpp - } - } - } - default: // E.g., float64 - mfi.merge = func(dst, src pointer) { - if v := *src.toFloat64(); v != 0 { - *dst.toFloat64() = v - } - } - } - case reflect.Bool: - switch { - case isSlice: // E.g., []bool - mfi.merge = func(dst, src pointer) { - sfsp := src.toBoolSlice() - if *sfsp != nil { - dfsp := dst.toBoolSlice() - *dfsp = append(*dfsp, *sfsp...) - if *dfsp == nil { - *dfsp = []bool{} - } - } - } - case isPointer: // E.g., *bool - mfi.merge = func(dst, src pointer) { - sfpp := src.toBoolPtr() - if *sfpp != nil { - dfpp := dst.toBoolPtr() - if *dfpp == nil { - *dfpp = Bool(**sfpp) - } else { - **dfpp = **sfpp - } - } - } - default: // E.g., bool - mfi.merge = func(dst, src pointer) { - if v := *src.toBool(); v { - *dst.toBool() = v - } - } - } - case reflect.String: - switch { - case isSlice: // E.g., []string - mfi.merge = func(dst, src pointer) { - sfsp := src.toStringSlice() - if *sfsp != nil { - dfsp := dst.toStringSlice() - *dfsp = append(*dfsp, *sfsp...) 
- if *dfsp == nil { - *dfsp = []string{} - } - } - } - case isPointer: // E.g., *string - mfi.merge = func(dst, src pointer) { - sfpp := src.toStringPtr() - if *sfpp != nil { - dfpp := dst.toStringPtr() - if *dfpp == nil { - *dfpp = String(**sfpp) - } else { - **dfpp = **sfpp - } - } - } - default: // E.g., string - mfi.merge = func(dst, src pointer) { - if v := *src.toString(); v != "" { - *dst.toString() = v - } - } - } - case reflect.Slice: - isProto3 := props.Prop[i].proto3 - switch { - case isPointer: - panic("bad pointer in byte slice case in " + tf.Name()) - case tf.Elem().Kind() != reflect.Uint8: - panic("bad element kind in byte slice case in " + tf.Name()) - case isSlice: // E.g., [][]byte - mfi.merge = func(dst, src pointer) { - sbsp := src.toBytesSlice() - if *sbsp != nil { - dbsp := dst.toBytesSlice() - for _, sb := range *sbsp { - if sb == nil { - *dbsp = append(*dbsp, nil) - } else { - *dbsp = append(*dbsp, append([]byte{}, sb...)) - } - } - if *dbsp == nil { - *dbsp = [][]byte{} - } - } - } - default: // E.g., []byte - mfi.merge = func(dst, src pointer) { - sbp := src.toBytes() - if *sbp != nil { - dbp := dst.toBytes() - if !isProto3 || len(*sbp) > 0 { - *dbp = append([]byte{}, *sbp...) 
- } - } - } - } - case reflect.Struct: - switch { - case !isPointer: - panic(fmt.Sprintf("message field %s without pointer", tf)) - case isSlice: // E.g., []*pb.T - mi := getMergeInfo(tf) - mfi.merge = func(dst, src pointer) { - sps := src.getPointerSlice() - if sps != nil { - dps := dst.getPointerSlice() - for _, sp := range sps { - var dp pointer - if !sp.isNil() { - dp = valToPointer(reflect.New(tf)) - mi.merge(dp, sp) - } - dps = append(dps, dp) - } - if dps == nil { - dps = []pointer{} - } - dst.setPointerSlice(dps) - } - } - default: // E.g., *pb.T - mi := getMergeInfo(tf) - mfi.merge = func(dst, src pointer) { - sp := src.getPointer() - if !sp.isNil() { - dp := dst.getPointer() - if dp.isNil() { - dp = valToPointer(reflect.New(tf)) - dst.setPointer(dp) - } - mi.merge(dp, sp) - } - } - } - case reflect.Map: - switch { - case isPointer || isSlice: - panic("bad pointer or slice in map case in " + tf.Name()) - default: // E.g., map[K]V - mfi.merge = func(dst, src pointer) { - sm := src.asPointerTo(tf).Elem() - if sm.Len() == 0 { - return - } - dm := dst.asPointerTo(tf).Elem() - if dm.IsNil() { - dm.Set(reflect.MakeMap(tf)) - } - - switch tf.Elem().Kind() { - case reflect.Ptr: // Proto struct (e.g., *T) - for _, key := range sm.MapKeys() { - val := sm.MapIndex(key) - val = reflect.ValueOf(Clone(val.Interface().(Message))) - dm.SetMapIndex(key, val) - } - case reflect.Slice: // E.g. Bytes type (e.g., []byte) - for _, key := range sm.MapKeys() { - val := sm.MapIndex(key) - val = reflect.ValueOf(append([]byte{}, val.Bytes()...)) - dm.SetMapIndex(key, val) - } - default: // Basic type (e.g., string) - for _, key := range sm.MapKeys() { - val := sm.MapIndex(key) - dm.SetMapIndex(key, val) - } - } - } - } - case reflect.Interface: - // Must be oneof field. - switch { - case isPointer || isSlice: - panic("bad pointer or slice in interface case in " + tf.Name()) - default: // E.g., interface{} - // TODO: Make this faster? 
- mfi.merge = func(dst, src pointer) { - su := src.asPointerTo(tf).Elem() - if !su.IsNil() { - du := dst.asPointerTo(tf).Elem() - typ := su.Elem().Type() - if du.IsNil() || du.Elem().Type() != typ { - du.Set(reflect.New(typ.Elem())) // Initialize interface if empty - } - sv := su.Elem().Elem().Field(0) - if sv.Kind() == reflect.Ptr && sv.IsNil() { - return - } - dv := du.Elem().Elem().Field(0) - if dv.Kind() == reflect.Ptr && dv.IsNil() { - dv.Set(reflect.New(sv.Type().Elem())) // Initialize proto message if empty - } - switch sv.Type().Kind() { - case reflect.Ptr: // Proto struct (e.g., *T) - Merge(dv.Interface().(Message), sv.Interface().(Message)) - case reflect.Slice: // E.g. Bytes type (e.g., []byte) - dv.Set(reflect.ValueOf(append([]byte{}, sv.Bytes()...))) - default: // Basic type (e.g., string) - dv.Set(sv) - } - } - } - } - default: - panic(fmt.Sprintf("merger not found for type:%s", tf)) - } - mi.fields = append(mi.fields, mfi) - } - - mi.unrecognized = invalidField - if f, ok := t.FieldByName("XXX_unrecognized"); ok { - if f.Type != reflect.TypeOf([]byte{}) { - panic("expected XXX_unrecognized to be of type []byte") - } - mi.unrecognized = toField(&f) - } - - atomic.StoreInt32(&mi.initialized, 1) -} diff --git a/vendor/github.com/golang/protobuf/proto/table_unmarshal.go b/vendor/github.com/golang/protobuf/proto/table_unmarshal.go deleted file mode 100644 index acee2fc..0000000 --- a/vendor/github.com/golang/protobuf/proto/table_unmarshal.go +++ /dev/null @@ -1,2053 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2016 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. 
-// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -package proto - -import ( - "errors" - "fmt" - "io" - "math" - "reflect" - "strconv" - "strings" - "sync" - "sync/atomic" - "unicode/utf8" -) - -// Unmarshal is the entry point from the generated .pb.go files. -// This function is not intended to be used by non-generated code. -// This function is not subject to any compatibility guarantee. -// msg contains a pointer to a protocol buffer struct. -// b is the data to be unmarshaled into the protocol buffer. -// a is a pointer to a place to store cached unmarshal information. -func (a *InternalMessageInfo) Unmarshal(msg Message, b []byte) error { - // Load the unmarshal information for this message type. - // The atomic load ensures memory consistency. 
- u := atomicLoadUnmarshalInfo(&a.unmarshal) - if u == nil { - // Slow path: find unmarshal info for msg, update a with it. - u = getUnmarshalInfo(reflect.TypeOf(msg).Elem()) - atomicStoreUnmarshalInfo(&a.unmarshal, u) - } - // Then do the unmarshaling. - err := u.unmarshal(toPointer(&msg), b) - return err -} - -type unmarshalInfo struct { - typ reflect.Type // type of the protobuf struct - - // 0 = only typ field is initialized - // 1 = completely initialized - initialized int32 - lock sync.Mutex // prevents double initialization - dense []unmarshalFieldInfo // fields indexed by tag # - sparse map[uint64]unmarshalFieldInfo // fields indexed by tag # - reqFields []string // names of required fields - reqMask uint64 // 1< 0 { - // Read tag and wire type. - // Special case 1 and 2 byte varints. - var x uint64 - if b[0] < 128 { - x = uint64(b[0]) - b = b[1:] - } else if len(b) >= 2 && b[1] < 128 { - x = uint64(b[0]&0x7f) + uint64(b[1])<<7 - b = b[2:] - } else { - var n int - x, n = decodeVarint(b) - if n == 0 { - return io.ErrUnexpectedEOF - } - b = b[n:] - } - tag := x >> 3 - wire := int(x) & 7 - - // Dispatch on the tag to one of the unmarshal* functions below. - var f unmarshalFieldInfo - if tag < uint64(len(u.dense)) { - f = u.dense[tag] - } else { - f = u.sparse[tag] - } - if fn := f.unmarshal; fn != nil { - var err error - b, err = fn(b, m.offset(f.field), wire) - if err == nil { - reqMask |= f.reqMask - continue - } - if r, ok := err.(*RequiredNotSetError); ok { - // Remember this error, but keep parsing. We need to produce - // a full parse even if a required field is missing. - if errLater == nil { - errLater = r - } - reqMask |= f.reqMask - continue - } - if err != errInternalBadWireType { - if err == errInvalidUTF8 { - if errLater == nil { - fullName := revProtoTypes[reflect.PtrTo(u.typ)] + "." + f.name - errLater = &invalidUTF8Error{fullName} - } - continue - } - return err - } - // Fragments with bad wire type are treated as unknown fields. 
- } - - // Unknown tag. - if !u.unrecognized.IsValid() { - // Don't keep unrecognized data; just skip it. - var err error - b, err = skipField(b, wire) - if err != nil { - return err - } - continue - } - // Keep unrecognized data around. - // maybe in extensions, maybe in the unrecognized field. - z := m.offset(u.unrecognized).toBytes() - var emap map[int32]Extension - var e Extension - for _, r := range u.extensionRanges { - if uint64(r.Start) <= tag && tag <= uint64(r.End) { - if u.extensions.IsValid() { - mp := m.offset(u.extensions).toExtensions() - emap = mp.extensionsWrite() - e = emap[int32(tag)] - z = &e.enc - break - } - if u.oldExtensions.IsValid() { - p := m.offset(u.oldExtensions).toOldExtensions() - emap = *p - if emap == nil { - emap = map[int32]Extension{} - *p = emap - } - e = emap[int32(tag)] - z = &e.enc - break - } - panic("no extensions field available") - } - } - - // Use wire type to skip data. - var err error - b0 := b - b, err = skipField(b, wire) - if err != nil { - return err - } - *z = encodeVarint(*z, tag<<3|uint64(wire)) - *z = append(*z, b0[:len(b0)-len(b)]...) - - if emap != nil { - emap[int32(tag)] = e - } - } - if reqMask != u.reqMask && errLater == nil { - // A required field of this message is missing. - for _, n := range u.reqFields { - if reqMask&1 == 0 { - errLater = &RequiredNotSetError{n} - } - reqMask >>= 1 - } - } - return errLater -} - -// computeUnmarshalInfo fills in u with information for use -// in unmarshaling protocol buffers of type u.typ. -func (u *unmarshalInfo) computeUnmarshalInfo() { - u.lock.Lock() - defer u.lock.Unlock() - if u.initialized != 0 { - return - } - t := u.typ - n := t.NumField() - - // Set up the "not found" value for the unrecognized byte buffer. - // This is the default for proto3. - u.unrecognized = invalidField - u.extensions = invalidField - u.oldExtensions = invalidField - - // List of the generated type and offset for each oneof field. 
- type oneofField struct { - ityp reflect.Type // interface type of oneof field - field field // offset in containing message - } - var oneofFields []oneofField - - for i := 0; i < n; i++ { - f := t.Field(i) - if f.Name == "XXX_unrecognized" { - // The byte slice used to hold unrecognized input is special. - if f.Type != reflect.TypeOf(([]byte)(nil)) { - panic("bad type for XXX_unrecognized field: " + f.Type.Name()) - } - u.unrecognized = toField(&f) - continue - } - if f.Name == "XXX_InternalExtensions" { - // Ditto here. - if f.Type != reflect.TypeOf(XXX_InternalExtensions{}) { - panic("bad type for XXX_InternalExtensions field: " + f.Type.Name()) - } - u.extensions = toField(&f) - if f.Tag.Get("protobuf_messageset") == "1" { - u.isMessageSet = true - } - continue - } - if f.Name == "XXX_extensions" { - // An older form of the extensions field. - if f.Type != reflect.TypeOf((map[int32]Extension)(nil)) { - panic("bad type for XXX_extensions field: " + f.Type.Name()) - } - u.oldExtensions = toField(&f) - continue - } - if f.Name == "XXX_NoUnkeyedLiteral" || f.Name == "XXX_sizecache" { - continue - } - - oneof := f.Tag.Get("protobuf_oneof") - if oneof != "" { - oneofFields = append(oneofFields, oneofField{f.Type, toField(&f)}) - // The rest of oneof processing happens below. - continue - } - - tags := f.Tag.Get("protobuf") - tagArray := strings.Split(tags, ",") - if len(tagArray) < 2 { - panic("protobuf tag not enough fields in " + t.Name() + "." + f.Name + ": " + tags) - } - tag, err := strconv.Atoi(tagArray[1]) - if err != nil { - panic("protobuf tag field not an integer: " + tagArray[1]) - } - - name := "" - for _, tag := range tagArray[3:] { - if strings.HasPrefix(tag, "name=") { - name = tag[5:] - } - } - - // Extract unmarshaling function from the field (its type and tags). - unmarshal := fieldUnmarshaler(&f) - - // Required field? 
- var reqMask uint64 - if tagArray[2] == "req" { - bit := len(u.reqFields) - u.reqFields = append(u.reqFields, name) - reqMask = uint64(1) << uint(bit) - // TODO: if we have more than 64 required fields, we end up - // not verifying that all required fields are present. - // Fix this, perhaps using a count of required fields? - } - - // Store the info in the correct slot in the message. - u.setTag(tag, toField(&f), unmarshal, reqMask, name) - } - - // Find any types associated with oneof fields. - var oneofImplementers []interface{} - switch m := reflect.Zero(reflect.PtrTo(t)).Interface().(type) { - case oneofFuncsIface: - _, _, _, oneofImplementers = m.XXX_OneofFuncs() - case oneofWrappersIface: - oneofImplementers = m.XXX_OneofWrappers() - } - for _, v := range oneofImplementers { - tptr := reflect.TypeOf(v) // *Msg_X - typ := tptr.Elem() // Msg_X - - f := typ.Field(0) // oneof implementers have one field - baseUnmarshal := fieldUnmarshaler(&f) - tags := strings.Split(f.Tag.Get("protobuf"), ",") - fieldNum, err := strconv.Atoi(tags[1]) - if err != nil { - panic("protobuf tag field not an integer: " + tags[1]) - } - var name string - for _, tag := range tags { - if strings.HasPrefix(tag, "name=") { - name = strings.TrimPrefix(tag, "name=") - break - } - } - - // Find the oneof field that this struct implements. - // Might take O(n^2) to process all of the oneofs, but who cares. - for _, of := range oneofFields { - if tptr.Implements(of.ityp) { - // We have found the corresponding interface for this struct. - // That lets us know where this struct should be stored - // when we encounter it during unmarshaling. - unmarshal := makeUnmarshalOneof(typ, of.ityp, baseUnmarshal) - u.setTag(fieldNum, of.field, unmarshal, 0, name) - } - } - - } - - // Get extension ranges, if any. 
- fn := reflect.Zero(reflect.PtrTo(t)).MethodByName("ExtensionRangeArray") - if fn.IsValid() { - if !u.extensions.IsValid() && !u.oldExtensions.IsValid() { - panic("a message with extensions, but no extensions field in " + t.Name()) - } - u.extensionRanges = fn.Call(nil)[0].Interface().([]ExtensionRange) - } - - // Explicitly disallow tag 0. This will ensure we flag an error - // when decoding a buffer of all zeros. Without this code, we - // would decode and skip an all-zero buffer of even length. - // [0 0] is [tag=0/wiretype=varint varint-encoded-0]. - u.setTag(0, zeroField, func(b []byte, f pointer, w int) ([]byte, error) { - return nil, fmt.Errorf("proto: %s: illegal tag 0 (wire type %d)", t, w) - }, 0, "") - - // Set mask for required field check. - u.reqMask = uint64(1)<= 0 && (tag < 16 || tag < 2*n) { // TODO: what are the right numbers here? - for len(u.dense) <= tag { - u.dense = append(u.dense, unmarshalFieldInfo{}) - } - u.dense[tag] = i - return - } - if u.sparse == nil { - u.sparse = map[uint64]unmarshalFieldInfo{} - } - u.sparse[uint64(tag)] = i -} - -// fieldUnmarshaler returns an unmarshaler for the given field. -func fieldUnmarshaler(f *reflect.StructField) unmarshaler { - if f.Type.Kind() == reflect.Map { - return makeUnmarshalMap(f) - } - return typeUnmarshaler(f.Type, f.Tag.Get("protobuf")) -} - -// typeUnmarshaler returns an unmarshaler for the given field type / field tag pair. 
-func typeUnmarshaler(t reflect.Type, tags string) unmarshaler { - tagArray := strings.Split(tags, ",") - encoding := tagArray[0] - name := "unknown" - proto3 := false - validateUTF8 := true - for _, tag := range tagArray[3:] { - if strings.HasPrefix(tag, "name=") { - name = tag[5:] - } - if tag == "proto3" { - proto3 = true - } - } - validateUTF8 = validateUTF8 && proto3 - - // Figure out packaging (pointer, slice, or both) - slice := false - pointer := false - if t.Kind() == reflect.Slice && t.Elem().Kind() != reflect.Uint8 { - slice = true - t = t.Elem() - } - if t.Kind() == reflect.Ptr { - pointer = true - t = t.Elem() - } - - // We'll never have both pointer and slice for basic types. - if pointer && slice && t.Kind() != reflect.Struct { - panic("both pointer and slice for basic type in " + t.Name()) - } - - switch t.Kind() { - case reflect.Bool: - if pointer { - return unmarshalBoolPtr - } - if slice { - return unmarshalBoolSlice - } - return unmarshalBoolValue - case reflect.Int32: - switch encoding { - case "fixed32": - if pointer { - return unmarshalFixedS32Ptr - } - if slice { - return unmarshalFixedS32Slice - } - return unmarshalFixedS32Value - case "varint": - // this could be int32 or enum - if pointer { - return unmarshalInt32Ptr - } - if slice { - return unmarshalInt32Slice - } - return unmarshalInt32Value - case "zigzag32": - if pointer { - return unmarshalSint32Ptr - } - if slice { - return unmarshalSint32Slice - } - return unmarshalSint32Value - } - case reflect.Int64: - switch encoding { - case "fixed64": - if pointer { - return unmarshalFixedS64Ptr - } - if slice { - return unmarshalFixedS64Slice - } - return unmarshalFixedS64Value - case "varint": - if pointer { - return unmarshalInt64Ptr - } - if slice { - return unmarshalInt64Slice - } - return unmarshalInt64Value - case "zigzag64": - if pointer { - return unmarshalSint64Ptr - } - if slice { - return unmarshalSint64Slice - } - return unmarshalSint64Value - } - case reflect.Uint32: - switch 
encoding { - case "fixed32": - if pointer { - return unmarshalFixed32Ptr - } - if slice { - return unmarshalFixed32Slice - } - return unmarshalFixed32Value - case "varint": - if pointer { - return unmarshalUint32Ptr - } - if slice { - return unmarshalUint32Slice - } - return unmarshalUint32Value - } - case reflect.Uint64: - switch encoding { - case "fixed64": - if pointer { - return unmarshalFixed64Ptr - } - if slice { - return unmarshalFixed64Slice - } - return unmarshalFixed64Value - case "varint": - if pointer { - return unmarshalUint64Ptr - } - if slice { - return unmarshalUint64Slice - } - return unmarshalUint64Value - } - case reflect.Float32: - if pointer { - return unmarshalFloat32Ptr - } - if slice { - return unmarshalFloat32Slice - } - return unmarshalFloat32Value - case reflect.Float64: - if pointer { - return unmarshalFloat64Ptr - } - if slice { - return unmarshalFloat64Slice - } - return unmarshalFloat64Value - case reflect.Map: - panic("map type in typeUnmarshaler in " + t.Name()) - case reflect.Slice: - if pointer { - panic("bad pointer in slice case in " + t.Name()) - } - if slice { - return unmarshalBytesSlice - } - return unmarshalBytesValue - case reflect.String: - if validateUTF8 { - if pointer { - return unmarshalUTF8StringPtr - } - if slice { - return unmarshalUTF8StringSlice - } - return unmarshalUTF8StringValue - } - if pointer { - return unmarshalStringPtr - } - if slice { - return unmarshalStringSlice - } - return unmarshalStringValue - case reflect.Struct: - // message or group field - if !pointer { - panic(fmt.Sprintf("message/group field %s:%s without pointer", t, encoding)) - } - switch encoding { - case "bytes": - if slice { - return makeUnmarshalMessageSlicePtr(getUnmarshalInfo(t), name) - } - return makeUnmarshalMessagePtr(getUnmarshalInfo(t), name) - case "group": - if slice { - return makeUnmarshalGroupSlicePtr(getUnmarshalInfo(t), name) - } - return makeUnmarshalGroupPtr(getUnmarshalInfo(t), name) - } - } - 
panic(fmt.Sprintf("unmarshaler not found type:%s encoding:%s", t, encoding)) -} - -// Below are all the unmarshalers for individual fields of various types. - -func unmarshalInt64Value(b []byte, f pointer, w int) ([]byte, error) { - if w != WireVarint { - return b, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - v := int64(x) - *f.toInt64() = v - return b, nil -} - -func unmarshalInt64Ptr(b []byte, f pointer, w int) ([]byte, error) { - if w != WireVarint { - return b, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - v := int64(x) - *f.toInt64Ptr() = &v - return b, nil -} - -func unmarshalInt64Slice(b []byte, f pointer, w int) ([]byte, error) { - if w == WireBytes { // packed - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - res := b[x:] - b = b[:x] - for len(b) > 0 { - x, n = decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - v := int64(x) - s := f.toInt64Slice() - *s = append(*s, v) - } - return res, nil - } - if w != WireVarint { - return b, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - v := int64(x) - s := f.toInt64Slice() - *s = append(*s, v) - return b, nil -} - -func unmarshalSint64Value(b []byte, f pointer, w int) ([]byte, error) { - if w != WireVarint { - return b, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - v := int64(x>>1) ^ int64(x)<<63>>63 - *f.toInt64() = v - return b, nil -} - -func unmarshalSint64Ptr(b []byte, f pointer, w int) ([]byte, error) { - if w != WireVarint { - return b, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - v := int64(x>>1) ^ int64(x)<<63>>63 
- *f.toInt64Ptr() = &v - return b, nil -} - -func unmarshalSint64Slice(b []byte, f pointer, w int) ([]byte, error) { - if w == WireBytes { // packed - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - res := b[x:] - b = b[:x] - for len(b) > 0 { - x, n = decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - v := int64(x>>1) ^ int64(x)<<63>>63 - s := f.toInt64Slice() - *s = append(*s, v) - } - return res, nil - } - if w != WireVarint { - return b, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - v := int64(x>>1) ^ int64(x)<<63>>63 - s := f.toInt64Slice() - *s = append(*s, v) - return b, nil -} - -func unmarshalUint64Value(b []byte, f pointer, w int) ([]byte, error) { - if w != WireVarint { - return b, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - v := uint64(x) - *f.toUint64() = v - return b, nil -} - -func unmarshalUint64Ptr(b []byte, f pointer, w int) ([]byte, error) { - if w != WireVarint { - return b, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - v := uint64(x) - *f.toUint64Ptr() = &v - return b, nil -} - -func unmarshalUint64Slice(b []byte, f pointer, w int) ([]byte, error) { - if w == WireBytes { // packed - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - res := b[x:] - b = b[:x] - for len(b) > 0 { - x, n = decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - v := uint64(x) - s := f.toUint64Slice() - *s = append(*s, v) - } - return res, nil - } - if w != WireVarint { - return b, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = 
b[n:] - v := uint64(x) - s := f.toUint64Slice() - *s = append(*s, v) - return b, nil -} - -func unmarshalInt32Value(b []byte, f pointer, w int) ([]byte, error) { - if w != WireVarint { - return b, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - v := int32(x) - *f.toInt32() = v - return b, nil -} - -func unmarshalInt32Ptr(b []byte, f pointer, w int) ([]byte, error) { - if w != WireVarint { - return b, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - v := int32(x) - f.setInt32Ptr(v) - return b, nil -} - -func unmarshalInt32Slice(b []byte, f pointer, w int) ([]byte, error) { - if w == WireBytes { // packed - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - res := b[x:] - b = b[:x] - for len(b) > 0 { - x, n = decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - v := int32(x) - f.appendInt32Slice(v) - } - return res, nil - } - if w != WireVarint { - return b, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - v := int32(x) - f.appendInt32Slice(v) - return b, nil -} - -func unmarshalSint32Value(b []byte, f pointer, w int) ([]byte, error) { - if w != WireVarint { - return b, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - v := int32(x>>1) ^ int32(x)<<31>>31 - *f.toInt32() = v - return b, nil -} - -func unmarshalSint32Ptr(b []byte, f pointer, w int) ([]byte, error) { - if w != WireVarint { - return b, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - v := int32(x>>1) ^ int32(x)<<31>>31 - f.setInt32Ptr(v) - return b, nil -} - -func unmarshalSint32Slice(b []byte, f pointer, w int) ([]byte, error) { - 
if w == WireBytes { // packed - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - res := b[x:] - b = b[:x] - for len(b) > 0 { - x, n = decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - v := int32(x>>1) ^ int32(x)<<31>>31 - f.appendInt32Slice(v) - } - return res, nil - } - if w != WireVarint { - return b, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - v := int32(x>>1) ^ int32(x)<<31>>31 - f.appendInt32Slice(v) - return b, nil -} - -func unmarshalUint32Value(b []byte, f pointer, w int) ([]byte, error) { - if w != WireVarint { - return b, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - v := uint32(x) - *f.toUint32() = v - return b, nil -} - -func unmarshalUint32Ptr(b []byte, f pointer, w int) ([]byte, error) { - if w != WireVarint { - return b, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - v := uint32(x) - *f.toUint32Ptr() = &v - return b, nil -} - -func unmarshalUint32Slice(b []byte, f pointer, w int) ([]byte, error) { - if w == WireBytes { // packed - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - res := b[x:] - b = b[:x] - for len(b) > 0 { - x, n = decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - v := uint32(x) - s := f.toUint32Slice() - *s = append(*s, v) - } - return res, nil - } - if w != WireVarint { - return b, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - v := uint32(x) - s := f.toUint32Slice() - *s = append(*s, v) - return b, nil -} - -func unmarshalFixed64Value(b []byte, f pointer, w int) ([]byte, 
error) { - if w != WireFixed64 { - return b, errInternalBadWireType - } - if len(b) < 8 { - return nil, io.ErrUnexpectedEOF - } - v := uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56 - *f.toUint64() = v - return b[8:], nil -} - -func unmarshalFixed64Ptr(b []byte, f pointer, w int) ([]byte, error) { - if w != WireFixed64 { - return b, errInternalBadWireType - } - if len(b) < 8 { - return nil, io.ErrUnexpectedEOF - } - v := uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56 - *f.toUint64Ptr() = &v - return b[8:], nil -} - -func unmarshalFixed64Slice(b []byte, f pointer, w int) ([]byte, error) { - if w == WireBytes { // packed - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - res := b[x:] - b = b[:x] - for len(b) > 0 { - if len(b) < 8 { - return nil, io.ErrUnexpectedEOF - } - v := uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56 - s := f.toUint64Slice() - *s = append(*s, v) - b = b[8:] - } - return res, nil - } - if w != WireFixed64 { - return b, errInternalBadWireType - } - if len(b) < 8 { - return nil, io.ErrUnexpectedEOF - } - v := uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56 - s := f.toUint64Slice() - *s = append(*s, v) - return b[8:], nil -} - -func unmarshalFixedS64Value(b []byte, f pointer, w int) ([]byte, error) { - if w != WireFixed64 { - return b, errInternalBadWireType - } - if len(b) < 8 { - return nil, io.ErrUnexpectedEOF - } - v := int64(b[0]) | int64(b[1])<<8 | int64(b[2])<<16 | int64(b[3])<<24 | int64(b[4])<<32 | int64(b[5])<<40 | int64(b[6])<<48 | int64(b[7])<<56 - 
*f.toInt64() = v - return b[8:], nil -} - -func unmarshalFixedS64Ptr(b []byte, f pointer, w int) ([]byte, error) { - if w != WireFixed64 { - return b, errInternalBadWireType - } - if len(b) < 8 { - return nil, io.ErrUnexpectedEOF - } - v := int64(b[0]) | int64(b[1])<<8 | int64(b[2])<<16 | int64(b[3])<<24 | int64(b[4])<<32 | int64(b[5])<<40 | int64(b[6])<<48 | int64(b[7])<<56 - *f.toInt64Ptr() = &v - return b[8:], nil -} - -func unmarshalFixedS64Slice(b []byte, f pointer, w int) ([]byte, error) { - if w == WireBytes { // packed - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - res := b[x:] - b = b[:x] - for len(b) > 0 { - if len(b) < 8 { - return nil, io.ErrUnexpectedEOF - } - v := int64(b[0]) | int64(b[1])<<8 | int64(b[2])<<16 | int64(b[3])<<24 | int64(b[4])<<32 | int64(b[5])<<40 | int64(b[6])<<48 | int64(b[7])<<56 - s := f.toInt64Slice() - *s = append(*s, v) - b = b[8:] - } - return res, nil - } - if w != WireFixed64 { - return b, errInternalBadWireType - } - if len(b) < 8 { - return nil, io.ErrUnexpectedEOF - } - v := int64(b[0]) | int64(b[1])<<8 | int64(b[2])<<16 | int64(b[3])<<24 | int64(b[4])<<32 | int64(b[5])<<40 | int64(b[6])<<48 | int64(b[7])<<56 - s := f.toInt64Slice() - *s = append(*s, v) - return b[8:], nil -} - -func unmarshalFixed32Value(b []byte, f pointer, w int) ([]byte, error) { - if w != WireFixed32 { - return b, errInternalBadWireType - } - if len(b) < 4 { - return nil, io.ErrUnexpectedEOF - } - v := uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24 - *f.toUint32() = v - return b[4:], nil -} - -func unmarshalFixed32Ptr(b []byte, f pointer, w int) ([]byte, error) { - if w != WireFixed32 { - return b, errInternalBadWireType - } - if len(b) < 4 { - return nil, io.ErrUnexpectedEOF - } - v := uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24 - *f.toUint32Ptr() = &v - return b[4:], nil -} - -func 
unmarshalFixed32Slice(b []byte, f pointer, w int) ([]byte, error) { - if w == WireBytes { // packed - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - res := b[x:] - b = b[:x] - for len(b) > 0 { - if len(b) < 4 { - return nil, io.ErrUnexpectedEOF - } - v := uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24 - s := f.toUint32Slice() - *s = append(*s, v) - b = b[4:] - } - return res, nil - } - if w != WireFixed32 { - return b, errInternalBadWireType - } - if len(b) < 4 { - return nil, io.ErrUnexpectedEOF - } - v := uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24 - s := f.toUint32Slice() - *s = append(*s, v) - return b[4:], nil -} - -func unmarshalFixedS32Value(b []byte, f pointer, w int) ([]byte, error) { - if w != WireFixed32 { - return b, errInternalBadWireType - } - if len(b) < 4 { - return nil, io.ErrUnexpectedEOF - } - v := int32(b[0]) | int32(b[1])<<8 | int32(b[2])<<16 | int32(b[3])<<24 - *f.toInt32() = v - return b[4:], nil -} - -func unmarshalFixedS32Ptr(b []byte, f pointer, w int) ([]byte, error) { - if w != WireFixed32 { - return b, errInternalBadWireType - } - if len(b) < 4 { - return nil, io.ErrUnexpectedEOF - } - v := int32(b[0]) | int32(b[1])<<8 | int32(b[2])<<16 | int32(b[3])<<24 - f.setInt32Ptr(v) - return b[4:], nil -} - -func unmarshalFixedS32Slice(b []byte, f pointer, w int) ([]byte, error) { - if w == WireBytes { // packed - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - res := b[x:] - b = b[:x] - for len(b) > 0 { - if len(b) < 4 { - return nil, io.ErrUnexpectedEOF - } - v := int32(b[0]) | int32(b[1])<<8 | int32(b[2])<<16 | int32(b[3])<<24 - f.appendInt32Slice(v) - b = b[4:] - } - return res, nil - } - if w != WireFixed32 { - return b, errInternalBadWireType - } - if len(b) < 4 { - return nil, 
io.ErrUnexpectedEOF - } - v := int32(b[0]) | int32(b[1])<<8 | int32(b[2])<<16 | int32(b[3])<<24 - f.appendInt32Slice(v) - return b[4:], nil -} - -func unmarshalBoolValue(b []byte, f pointer, w int) ([]byte, error) { - if w != WireVarint { - return b, errInternalBadWireType - } - // Note: any length varint is allowed, even though any sane - // encoder will use one byte. - // See https://github.com/golang/protobuf/issues/76 - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - // TODO: check if x>1? Tests seem to indicate no. - v := x != 0 - *f.toBool() = v - return b[n:], nil -} - -func unmarshalBoolPtr(b []byte, f pointer, w int) ([]byte, error) { - if w != WireVarint { - return b, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - v := x != 0 - *f.toBoolPtr() = &v - return b[n:], nil -} - -func unmarshalBoolSlice(b []byte, f pointer, w int) ([]byte, error) { - if w == WireBytes { // packed - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - res := b[x:] - b = b[:x] - for len(b) > 0 { - x, n = decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - v := x != 0 - s := f.toBoolSlice() - *s = append(*s, v) - b = b[n:] - } - return res, nil - } - if w != WireVarint { - return b, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - v := x != 0 - s := f.toBoolSlice() - *s = append(*s, v) - return b[n:], nil -} - -func unmarshalFloat64Value(b []byte, f pointer, w int) ([]byte, error) { - if w != WireFixed64 { - return b, errInternalBadWireType - } - if len(b) < 8 { - return nil, io.ErrUnexpectedEOF - } - v := math.Float64frombits(uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56) - *f.toFloat64() = v - return b[8:], nil -} - 
-func unmarshalFloat64Ptr(b []byte, f pointer, w int) ([]byte, error) { - if w != WireFixed64 { - return b, errInternalBadWireType - } - if len(b) < 8 { - return nil, io.ErrUnexpectedEOF - } - v := math.Float64frombits(uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56) - *f.toFloat64Ptr() = &v - return b[8:], nil -} - -func unmarshalFloat64Slice(b []byte, f pointer, w int) ([]byte, error) { - if w == WireBytes { // packed - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - res := b[x:] - b = b[:x] - for len(b) > 0 { - if len(b) < 8 { - return nil, io.ErrUnexpectedEOF - } - v := math.Float64frombits(uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56) - s := f.toFloat64Slice() - *s = append(*s, v) - b = b[8:] - } - return res, nil - } - if w != WireFixed64 { - return b, errInternalBadWireType - } - if len(b) < 8 { - return nil, io.ErrUnexpectedEOF - } - v := math.Float64frombits(uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56) - s := f.toFloat64Slice() - *s = append(*s, v) - return b[8:], nil -} - -func unmarshalFloat32Value(b []byte, f pointer, w int) ([]byte, error) { - if w != WireFixed32 { - return b, errInternalBadWireType - } - if len(b) < 4 { - return nil, io.ErrUnexpectedEOF - } - v := math.Float32frombits(uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24) - *f.toFloat32() = v - return b[4:], nil -} - -func unmarshalFloat32Ptr(b []byte, f pointer, w int) ([]byte, error) { - if w != WireFixed32 { - return b, errInternalBadWireType - } - if len(b) < 4 { - return nil, io.ErrUnexpectedEOF - } - v := math.Float32frombits(uint32(b[0]) | uint32(b[1])<<8 | 
uint32(b[2])<<16 | uint32(b[3])<<24) - *f.toFloat32Ptr() = &v - return b[4:], nil -} - -func unmarshalFloat32Slice(b []byte, f pointer, w int) ([]byte, error) { - if w == WireBytes { // packed - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - res := b[x:] - b = b[:x] - for len(b) > 0 { - if len(b) < 4 { - return nil, io.ErrUnexpectedEOF - } - v := math.Float32frombits(uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24) - s := f.toFloat32Slice() - *s = append(*s, v) - b = b[4:] - } - return res, nil - } - if w != WireFixed32 { - return b, errInternalBadWireType - } - if len(b) < 4 { - return nil, io.ErrUnexpectedEOF - } - v := math.Float32frombits(uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24) - s := f.toFloat32Slice() - *s = append(*s, v) - return b[4:], nil -} - -func unmarshalStringValue(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return b, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - v := string(b[:x]) - *f.toString() = v - return b[x:], nil -} - -func unmarshalStringPtr(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return b, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - v := string(b[:x]) - *f.toStringPtr() = &v - return b[x:], nil -} - -func unmarshalStringSlice(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return b, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - v := string(b[:x]) - s := f.toStringSlice() - *s = append(*s, v) - return 
b[x:], nil -} - -func unmarshalUTF8StringValue(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return b, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - v := string(b[:x]) - *f.toString() = v - if !utf8.ValidString(v) { - return b[x:], errInvalidUTF8 - } - return b[x:], nil -} - -func unmarshalUTF8StringPtr(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return b, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - v := string(b[:x]) - *f.toStringPtr() = &v - if !utf8.ValidString(v) { - return b[x:], errInvalidUTF8 - } - return b[x:], nil -} - -func unmarshalUTF8StringSlice(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return b, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - v := string(b[:x]) - s := f.toStringSlice() - *s = append(*s, v) - if !utf8.ValidString(v) { - return b[x:], errInvalidUTF8 - } - return b[x:], nil -} - -var emptyBuf [0]byte - -func unmarshalBytesValue(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return b, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - // The use of append here is a trick which avoids the zeroing - // that would be required if we used a make/copy pair. - // We append to emptyBuf instead of nil because we want - // a non-nil result even when the length is 0. - v := append(emptyBuf[:], b[:x]...) 
- *f.toBytes() = v - return b[x:], nil -} - -func unmarshalBytesSlice(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return b, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - v := append(emptyBuf[:], b[:x]...) - s := f.toBytesSlice() - *s = append(*s, v) - return b[x:], nil -} - -func makeUnmarshalMessagePtr(sub *unmarshalInfo, name string) unmarshaler { - return func(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return b, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - // First read the message field to see if something is there. - // The semantics of multiple submessages are weird. Instead of - // the last one winning (as it is for all other fields), multiple - // submessages are merged. - v := f.getPointer() - if v.isNil() { - v = valToPointer(reflect.New(sub.typ)) - f.setPointer(v) - } - err := sub.unmarshal(v, b[:x]) - if err != nil { - if r, ok := err.(*RequiredNotSetError); ok { - r.field = name + "." + r.field - } else { - return nil, err - } - } - return b[x:], err - } -} - -func makeUnmarshalMessageSlicePtr(sub *unmarshalInfo, name string) unmarshaler { - return func(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return b, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - v := valToPointer(reflect.New(sub.typ)) - err := sub.unmarshal(v, b[:x]) - if err != nil { - if r, ok := err.(*RequiredNotSetError); ok { - r.field = name + "." 
+ r.field - } else { - return nil, err - } - } - f.appendPointer(v) - return b[x:], err - } -} - -func makeUnmarshalGroupPtr(sub *unmarshalInfo, name string) unmarshaler { - return func(b []byte, f pointer, w int) ([]byte, error) { - if w != WireStartGroup { - return b, errInternalBadWireType - } - x, y := findEndGroup(b) - if x < 0 { - return nil, io.ErrUnexpectedEOF - } - v := f.getPointer() - if v.isNil() { - v = valToPointer(reflect.New(sub.typ)) - f.setPointer(v) - } - err := sub.unmarshal(v, b[:x]) - if err != nil { - if r, ok := err.(*RequiredNotSetError); ok { - r.field = name + "." + r.field - } else { - return nil, err - } - } - return b[y:], err - } -} - -func makeUnmarshalGroupSlicePtr(sub *unmarshalInfo, name string) unmarshaler { - return func(b []byte, f pointer, w int) ([]byte, error) { - if w != WireStartGroup { - return b, errInternalBadWireType - } - x, y := findEndGroup(b) - if x < 0 { - return nil, io.ErrUnexpectedEOF - } - v := valToPointer(reflect.New(sub.typ)) - err := sub.unmarshal(v, b[:x]) - if err != nil { - if r, ok := err.(*RequiredNotSetError); ok { - r.field = name + "." + r.field - } else { - return nil, err - } - } - f.appendPointer(v) - return b[y:], err - } -} - -func makeUnmarshalMap(f *reflect.StructField) unmarshaler { - t := f.Type - kt := t.Key() - vt := t.Elem() - unmarshalKey := typeUnmarshaler(kt, f.Tag.Get("protobuf_key")) - unmarshalVal := typeUnmarshaler(vt, f.Tag.Get("protobuf_val")) - return func(b []byte, f pointer, w int) ([]byte, error) { - // The map entry is a submessage. Figure out how big it is. 
- if w != WireBytes { - return nil, fmt.Errorf("proto: bad wiretype for map field: got %d want %d", w, WireBytes) - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - r := b[x:] // unused data to return - b = b[:x] // data for map entry - - // Note: we could use #keys * #values ~= 200 functions - // to do map decoding without reflection. Probably not worth it. - // Maps will be somewhat slow. Oh well. - - // Read key and value from data. - var nerr nonFatal - k := reflect.New(kt) - v := reflect.New(vt) - for len(b) > 0 { - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - wire := int(x) & 7 - b = b[n:] - - var err error - switch x >> 3 { - case 1: - b, err = unmarshalKey(b, valToPointer(k), wire) - case 2: - b, err = unmarshalVal(b, valToPointer(v), wire) - default: - err = errInternalBadWireType // skip unknown tag - } - - if nerr.Merge(err) { - continue - } - if err != errInternalBadWireType { - return nil, err - } - - // Skip past unknown fields. - b, err = skipField(b, wire) - if err != nil { - return nil, err - } - } - - // Get map, allocate if needed. - m := f.asPointerTo(t).Elem() // an addressable map[K]T - if m.IsNil() { - m.Set(reflect.MakeMap(t)) - } - - // Insert into map. - m.SetMapIndex(k.Elem(), v.Elem()) - - return r, nerr.E - } -} - -// makeUnmarshalOneof makes an unmarshaler for oneof fields. -// for: -// message Msg { -// oneof F { -// int64 X = 1; -// float64 Y = 2; -// } -// } -// typ is the type of the concrete entry for a oneof case (e.g. Msg_X). -// ityp is the interface type of the oneof field (e.g. isMsg_F). -// unmarshal is the unmarshaler for the base type of the oneof case (e.g. int64). -// Note that this function will be called once for each case in the oneof. 
-func makeUnmarshalOneof(typ, ityp reflect.Type, unmarshal unmarshaler) unmarshaler { - sf := typ.Field(0) - field0 := toField(&sf) - return func(b []byte, f pointer, w int) ([]byte, error) { - // Allocate holder for value. - v := reflect.New(typ) - - // Unmarshal data into holder. - // We unmarshal into the first field of the holder object. - var err error - var nerr nonFatal - b, err = unmarshal(b, valToPointer(v).offset(field0), w) - if !nerr.Merge(err) { - return nil, err - } - - // Write pointer to holder into target field. - f.asPointerTo(ityp).Elem().Set(v) - - return b, nerr.E - } -} - -// Error used by decode internally. -var errInternalBadWireType = errors.New("proto: internal error: bad wiretype") - -// skipField skips past a field of type wire and returns the remaining bytes. -func skipField(b []byte, wire int) ([]byte, error) { - switch wire { - case WireVarint: - _, k := decodeVarint(b) - if k == 0 { - return b, io.ErrUnexpectedEOF - } - b = b[k:] - case WireFixed32: - if len(b) < 4 { - return b, io.ErrUnexpectedEOF - } - b = b[4:] - case WireFixed64: - if len(b) < 8 { - return b, io.ErrUnexpectedEOF - } - b = b[8:] - case WireBytes: - m, k := decodeVarint(b) - if k == 0 || uint64(len(b)-k) < m { - return b, io.ErrUnexpectedEOF - } - b = b[uint64(k)+m:] - case WireStartGroup: - _, i := findEndGroup(b) - if i == -1 { - return b, io.ErrUnexpectedEOF - } - b = b[i:] - default: - return b, fmt.Errorf("proto: can't skip unknown wire type %d", wire) - } - return b, nil -} - -// findEndGroup finds the index of the next EndGroup tag. -// Groups may be nested, so the "next" EndGroup tag is the first -// unpaired EndGroup. -// findEndGroup returns the indexes of the start and end of the EndGroup tag. -// Returns (-1,-1) if it can't find one. 
-func findEndGroup(b []byte) (int, int) { - depth := 1 - i := 0 - for { - x, n := decodeVarint(b[i:]) - if n == 0 { - return -1, -1 - } - j := i - i += n - switch x & 7 { - case WireVarint: - _, k := decodeVarint(b[i:]) - if k == 0 { - return -1, -1 - } - i += k - case WireFixed32: - if len(b)-4 < i { - return -1, -1 - } - i += 4 - case WireFixed64: - if len(b)-8 < i { - return -1, -1 - } - i += 8 - case WireBytes: - m, k := decodeVarint(b[i:]) - if k == 0 { - return -1, -1 - } - i += k - if uint64(len(b)-i) < m { - return -1, -1 - } - i += int(m) - case WireStartGroup: - depth++ - case WireEndGroup: - depth-- - if depth == 0 { - return j, i - } - default: - return -1, -1 - } - } -} - -// encodeVarint appends a varint-encoded integer to b and returns the result. -func encodeVarint(b []byte, x uint64) []byte { - for x >= 1<<7 { - b = append(b, byte(x&0x7f|0x80)) - x >>= 7 - } - return append(b, byte(x)) -} - -// decodeVarint reads a varint-encoded integer from b. -// Returns the decoded integer and the number of bytes read. -// If there is an error, it returns 0,0. 
-func decodeVarint(b []byte) (uint64, int) { - var x, y uint64 - if len(b) == 0 { - goto bad - } - x = uint64(b[0]) - if x < 0x80 { - return x, 1 - } - x -= 0x80 - - if len(b) <= 1 { - goto bad - } - y = uint64(b[1]) - x += y << 7 - if y < 0x80 { - return x, 2 - } - x -= 0x80 << 7 - - if len(b) <= 2 { - goto bad - } - y = uint64(b[2]) - x += y << 14 - if y < 0x80 { - return x, 3 - } - x -= 0x80 << 14 - - if len(b) <= 3 { - goto bad - } - y = uint64(b[3]) - x += y << 21 - if y < 0x80 { - return x, 4 - } - x -= 0x80 << 21 - - if len(b) <= 4 { - goto bad - } - y = uint64(b[4]) - x += y << 28 - if y < 0x80 { - return x, 5 - } - x -= 0x80 << 28 - - if len(b) <= 5 { - goto bad - } - y = uint64(b[5]) - x += y << 35 - if y < 0x80 { - return x, 6 - } - x -= 0x80 << 35 - - if len(b) <= 6 { - goto bad - } - y = uint64(b[6]) - x += y << 42 - if y < 0x80 { - return x, 7 - } - x -= 0x80 << 42 - - if len(b) <= 7 { - goto bad - } - y = uint64(b[7]) - x += y << 49 - if y < 0x80 { - return x, 8 - } - x -= 0x80 << 49 - - if len(b) <= 8 { - goto bad - } - y = uint64(b[8]) - x += y << 56 - if y < 0x80 { - return x, 9 - } - x -= 0x80 << 56 - - if len(b) <= 9 { - goto bad - } - y = uint64(b[9]) - x += y << 63 - if y < 2 { - return x, 10 - } - -bad: - return 0, 0 -} diff --git a/vendor/github.com/golang/protobuf/proto/text.go b/vendor/github.com/golang/protobuf/proto/text.go deleted file mode 100644 index 1aaee72..0000000 --- a/vendor/github.com/golang/protobuf/proto/text.go +++ /dev/null @@ -1,843 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2010 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. 
-// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -package proto - -// Functions for writing the text protocol buffer format. - -import ( - "bufio" - "bytes" - "encoding" - "errors" - "fmt" - "io" - "log" - "math" - "reflect" - "sort" - "strings" -) - -var ( - newline = []byte("\n") - spaces = []byte(" ") - endBraceNewline = []byte("}\n") - backslashN = []byte{'\\', 'n'} - backslashR = []byte{'\\', 'r'} - backslashT = []byte{'\\', 't'} - backslashDQ = []byte{'\\', '"'} - backslashBS = []byte{'\\', '\\'} - posInf = []byte("inf") - negInf = []byte("-inf") - nan = []byte("nan") -) - -type writer interface { - io.Writer - WriteByte(byte) error -} - -// textWriter is an io.Writer that tracks its indentation level. 
-type textWriter struct { - ind int - complete bool // if the current position is a complete line - compact bool // whether to write out as a one-liner - w writer -} - -func (w *textWriter) WriteString(s string) (n int, err error) { - if !strings.Contains(s, "\n") { - if !w.compact && w.complete { - w.writeIndent() - } - w.complete = false - return io.WriteString(w.w, s) - } - // WriteString is typically called without newlines, so this - // codepath and its copy are rare. We copy to avoid - // duplicating all of Write's logic here. - return w.Write([]byte(s)) -} - -func (w *textWriter) Write(p []byte) (n int, err error) { - newlines := bytes.Count(p, newline) - if newlines == 0 { - if !w.compact && w.complete { - w.writeIndent() - } - n, err = w.w.Write(p) - w.complete = false - return n, err - } - - frags := bytes.SplitN(p, newline, newlines+1) - if w.compact { - for i, frag := range frags { - if i > 0 { - if err := w.w.WriteByte(' '); err != nil { - return n, err - } - n++ - } - nn, err := w.w.Write(frag) - n += nn - if err != nil { - return n, err - } - } - return n, nil - } - - for i, frag := range frags { - if w.complete { - w.writeIndent() - } - nn, err := w.w.Write(frag) - n += nn - if err != nil { - return n, err - } - if i+1 < len(frags) { - if err := w.w.WriteByte('\n'); err != nil { - return n, err - } - n++ - } - } - w.complete = len(frags[len(frags)-1]) == 0 - return n, nil -} - -func (w *textWriter) WriteByte(c byte) error { - if w.compact && c == '\n' { - c = ' ' - } - if !w.compact && w.complete { - w.writeIndent() - } - err := w.w.WriteByte(c) - w.complete = c == '\n' - return err -} - -func (w *textWriter) indent() { w.ind++ } - -func (w *textWriter) unindent() { - if w.ind == 0 { - log.Print("proto: textWriter unindented too far") - return - } - w.ind-- -} - -func writeName(w *textWriter, props *Properties) error { - if _, err := w.WriteString(props.OrigName); err != nil { - return err - } - if props.Wire != "group" { - return w.WriteByte(':') - 
} - return nil -} - -func requiresQuotes(u string) bool { - // When type URL contains any characters except [0-9A-Za-z./\-]*, it must be quoted. - for _, ch := range u { - switch { - case ch == '.' || ch == '/' || ch == '_': - continue - case '0' <= ch && ch <= '9': - continue - case 'A' <= ch && ch <= 'Z': - continue - case 'a' <= ch && ch <= 'z': - continue - default: - return true - } - } - return false -} - -// isAny reports whether sv is a google.protobuf.Any message -func isAny(sv reflect.Value) bool { - type wkt interface { - XXX_WellKnownType() string - } - t, ok := sv.Addr().Interface().(wkt) - return ok && t.XXX_WellKnownType() == "Any" -} - -// writeProto3Any writes an expanded google.protobuf.Any message. -// -// It returns (false, nil) if sv value can't be unmarshaled (e.g. because -// required messages are not linked in). -// -// It returns (true, error) when sv was written in expanded format or an error -// was encountered. -func (tm *TextMarshaler) writeProto3Any(w *textWriter, sv reflect.Value) (bool, error) { - turl := sv.FieldByName("TypeUrl") - val := sv.FieldByName("Value") - if !turl.IsValid() || !val.IsValid() { - return true, errors.New("proto: invalid google.protobuf.Any message") - } - - b, ok := val.Interface().([]byte) - if !ok { - return true, errors.New("proto: invalid google.protobuf.Any message") - } - - parts := strings.Split(turl.String(), "/") - mt := MessageType(parts[len(parts)-1]) - if mt == nil { - return false, nil - } - m := reflect.New(mt.Elem()) - if err := Unmarshal(b, m.Interface().(Message)); err != nil { - return false, nil - } - w.Write([]byte("[")) - u := turl.String() - if requiresQuotes(u) { - writeString(w, u) - } else { - w.Write([]byte(u)) - } - if w.compact { - w.Write([]byte("]:<")) - } else { - w.Write([]byte("]: <\n")) - w.ind++ - } - if err := tm.writeStruct(w, m.Elem()); err != nil { - return true, err - } - if w.compact { - w.Write([]byte("> ")) - } else { - w.ind-- - w.Write([]byte(">\n")) - } - return 
true, nil -} - -func (tm *TextMarshaler) writeStruct(w *textWriter, sv reflect.Value) error { - if tm.ExpandAny && isAny(sv) { - if canExpand, err := tm.writeProto3Any(w, sv); canExpand { - return err - } - } - st := sv.Type() - sprops := GetProperties(st) - for i := 0; i < sv.NumField(); i++ { - fv := sv.Field(i) - props := sprops.Prop[i] - name := st.Field(i).Name - - if name == "XXX_NoUnkeyedLiteral" { - continue - } - - if strings.HasPrefix(name, "XXX_") { - // There are two XXX_ fields: - // XXX_unrecognized []byte - // XXX_extensions map[int32]proto.Extension - // The first is handled here; - // the second is handled at the bottom of this function. - if name == "XXX_unrecognized" && !fv.IsNil() { - if err := writeUnknownStruct(w, fv.Interface().([]byte)); err != nil { - return err - } - } - continue - } - if fv.Kind() == reflect.Ptr && fv.IsNil() { - // Field not filled in. This could be an optional field or - // a required field that wasn't filled in. Either way, there - // isn't anything we can show for it. - continue - } - if fv.Kind() == reflect.Slice && fv.IsNil() { - // Repeated field that is empty, or a bytes field that is unused. - continue - } - - if props.Repeated && fv.Kind() == reflect.Slice { - // Repeated field. - for j := 0; j < fv.Len(); j++ { - if err := writeName(w, props); err != nil { - return err - } - if !w.compact { - if err := w.WriteByte(' '); err != nil { - return err - } - } - v := fv.Index(j) - if v.Kind() == reflect.Ptr && v.IsNil() { - // A nil message in a repeated field is not valid, - // but we can handle that more gracefully than panicking. - if _, err := w.Write([]byte("\n")); err != nil { - return err - } - continue - } - if err := tm.writeAny(w, v, props); err != nil { - return err - } - if err := w.WriteByte('\n'); err != nil { - return err - } - } - continue - } - if fv.Kind() == reflect.Map { - // Map fields are rendered as a repeated struct with key/value fields. 
- keys := fv.MapKeys() - sort.Sort(mapKeys(keys)) - for _, key := range keys { - val := fv.MapIndex(key) - if err := writeName(w, props); err != nil { - return err - } - if !w.compact { - if err := w.WriteByte(' '); err != nil { - return err - } - } - // open struct - if err := w.WriteByte('<'); err != nil { - return err - } - if !w.compact { - if err := w.WriteByte('\n'); err != nil { - return err - } - } - w.indent() - // key - if _, err := w.WriteString("key:"); err != nil { - return err - } - if !w.compact { - if err := w.WriteByte(' '); err != nil { - return err - } - } - if err := tm.writeAny(w, key, props.MapKeyProp); err != nil { - return err - } - if err := w.WriteByte('\n'); err != nil { - return err - } - // nil values aren't legal, but we can avoid panicking because of them. - if val.Kind() != reflect.Ptr || !val.IsNil() { - // value - if _, err := w.WriteString("value:"); err != nil { - return err - } - if !w.compact { - if err := w.WriteByte(' '); err != nil { - return err - } - } - if err := tm.writeAny(w, val, props.MapValProp); err != nil { - return err - } - if err := w.WriteByte('\n'); err != nil { - return err - } - } - // close struct - w.unindent() - if err := w.WriteByte('>'); err != nil { - return err - } - if err := w.WriteByte('\n'); err != nil { - return err - } - } - continue - } - if props.proto3 && fv.Kind() == reflect.Slice && fv.Len() == 0 { - // empty bytes field - continue - } - if fv.Kind() != reflect.Ptr && fv.Kind() != reflect.Slice { - // proto3 non-repeated scalar field; skip if zero value - if isProto3Zero(fv) { - continue - } - } - - if fv.Kind() == reflect.Interface { - // Check if it is a oneof. - if st.Field(i).Tag.Get("protobuf_oneof") != "" { - // fv is nil, or holds a pointer to generated struct. - // That generated struct has exactly one field, - // which has a protobuf struct tag. 
- if fv.IsNil() { - continue - } - inner := fv.Elem().Elem() // interface -> *T -> T - tag := inner.Type().Field(0).Tag.Get("protobuf") - props = new(Properties) // Overwrite the outer props var, but not its pointee. - props.Parse(tag) - // Write the value in the oneof, not the oneof itself. - fv = inner.Field(0) - - // Special case to cope with malformed messages gracefully: - // If the value in the oneof is a nil pointer, don't panic - // in writeAny. - if fv.Kind() == reflect.Ptr && fv.IsNil() { - // Use errors.New so writeAny won't render quotes. - msg := errors.New("/* nil */") - fv = reflect.ValueOf(&msg).Elem() - } - } - } - - if err := writeName(w, props); err != nil { - return err - } - if !w.compact { - if err := w.WriteByte(' '); err != nil { - return err - } - } - - // Enums have a String method, so writeAny will work fine. - if err := tm.writeAny(w, fv, props); err != nil { - return err - } - - if err := w.WriteByte('\n'); err != nil { - return err - } - } - - // Extensions (the XXX_extensions field). - pv := sv.Addr() - if _, err := extendable(pv.Interface()); err == nil { - if err := tm.writeExtensions(w, pv); err != nil { - return err - } - } - - return nil -} - -// writeAny writes an arbitrary field. -func (tm *TextMarshaler) writeAny(w *textWriter, v reflect.Value, props *Properties) error { - v = reflect.Indirect(v) - - // Floats have special cases. - if v.Kind() == reflect.Float32 || v.Kind() == reflect.Float64 { - x := v.Float() - var b []byte - switch { - case math.IsInf(x, 1): - b = posInf - case math.IsInf(x, -1): - b = negInf - case math.IsNaN(x): - b = nan - } - if b != nil { - _, err := w.Write(b) - return err - } - // Other values are handled below. - } - - // We don't attempt to serialise every possible value type; only those - // that can occur in protocol buffers. - switch v.Kind() { - case reflect.Slice: - // Should only be a []byte; repeated fields are handled in writeStruct. 
- if err := writeString(w, string(v.Bytes())); err != nil { - return err - } - case reflect.String: - if err := writeString(w, v.String()); err != nil { - return err - } - case reflect.Struct: - // Required/optional group/message. - var bra, ket byte = '<', '>' - if props != nil && props.Wire == "group" { - bra, ket = '{', '}' - } - if err := w.WriteByte(bra); err != nil { - return err - } - if !w.compact { - if err := w.WriteByte('\n'); err != nil { - return err - } - } - w.indent() - if v.CanAddr() { - // Calling v.Interface on a struct causes the reflect package to - // copy the entire struct. This is racy with the new Marshaler - // since we atomically update the XXX_sizecache. - // - // Thus, we retrieve a pointer to the struct if possible to avoid - // a race since v.Interface on the pointer doesn't copy the struct. - // - // If v is not addressable, then we are not worried about a race - // since it implies that the binary Marshaler cannot possibly be - // mutating this value. - v = v.Addr() - } - if etm, ok := v.Interface().(encoding.TextMarshaler); ok { - text, err := etm.MarshalText() - if err != nil { - return err - } - if _, err = w.Write(text); err != nil { - return err - } - } else { - if v.Kind() == reflect.Ptr { - v = v.Elem() - } - if err := tm.writeStruct(w, v); err != nil { - return err - } - } - w.unindent() - if err := w.WriteByte(ket); err != nil { - return err - } - default: - _, err := fmt.Fprint(w, v.Interface()) - return err - } - return nil -} - -// equivalent to C's isprint. -func isprint(c byte) bool { - return c >= 0x20 && c < 0x7f -} - -// writeString writes a string in the protocol buffer text format. -// It is similar to strconv.Quote except we don't use Go escape sequences, -// we treat the string as a byte sequence, and we use octal escapes. -// These differences are to maintain interoperability with the other -// languages' implementations of the text format. 
-func writeString(w *textWriter, s string) error { - // use WriteByte here to get any needed indent - if err := w.WriteByte('"'); err != nil { - return err - } - // Loop over the bytes, not the runes. - for i := 0; i < len(s); i++ { - var err error - // Divergence from C++: we don't escape apostrophes. - // There's no need to escape them, and the C++ parser - // copes with a naked apostrophe. - switch c := s[i]; c { - case '\n': - _, err = w.w.Write(backslashN) - case '\r': - _, err = w.w.Write(backslashR) - case '\t': - _, err = w.w.Write(backslashT) - case '"': - _, err = w.w.Write(backslashDQ) - case '\\': - _, err = w.w.Write(backslashBS) - default: - if isprint(c) { - err = w.w.WriteByte(c) - } else { - _, err = fmt.Fprintf(w.w, "\\%03o", c) - } - } - if err != nil { - return err - } - } - return w.WriteByte('"') -} - -func writeUnknownStruct(w *textWriter, data []byte) (err error) { - if !w.compact { - if _, err := fmt.Fprintf(w, "/* %d unknown bytes */\n", len(data)); err != nil { - return err - } - } - b := NewBuffer(data) - for b.index < len(b.buf) { - x, err := b.DecodeVarint() - if err != nil { - _, err := fmt.Fprintf(w, "/* %v */\n", err) - return err - } - wire, tag := x&7, x>>3 - if wire == WireEndGroup { - w.unindent() - if _, err := w.Write(endBraceNewline); err != nil { - return err - } - continue - } - if _, err := fmt.Fprint(w, tag); err != nil { - return err - } - if wire != WireStartGroup { - if err := w.WriteByte(':'); err != nil { - return err - } - } - if !w.compact || wire == WireStartGroup { - if err := w.WriteByte(' '); err != nil { - return err - } - } - switch wire { - case WireBytes: - buf, e := b.DecodeRawBytes(false) - if e == nil { - _, err = fmt.Fprintf(w, "%q", buf) - } else { - _, err = fmt.Fprintf(w, "/* %v */", e) - } - case WireFixed32: - x, err = b.DecodeFixed32() - err = writeUnknownInt(w, x, err) - case WireFixed64: - x, err = b.DecodeFixed64() - err = writeUnknownInt(w, x, err) - case WireStartGroup: - err = 
w.WriteByte('{') - w.indent() - case WireVarint: - x, err = b.DecodeVarint() - err = writeUnknownInt(w, x, err) - default: - _, err = fmt.Fprintf(w, "/* unknown wire type %d */", wire) - } - if err != nil { - return err - } - if err = w.WriteByte('\n'); err != nil { - return err - } - } - return nil -} - -func writeUnknownInt(w *textWriter, x uint64, err error) error { - if err == nil { - _, err = fmt.Fprint(w, x) - } else { - _, err = fmt.Fprintf(w, "/* %v */", err) - } - return err -} - -type int32Slice []int32 - -func (s int32Slice) Len() int { return len(s) } -func (s int32Slice) Less(i, j int) bool { return s[i] < s[j] } -func (s int32Slice) Swap(i, j int) { s[i], s[j] = s[j], s[i] } - -// writeExtensions writes all the extensions in pv. -// pv is assumed to be a pointer to a protocol message struct that is extendable. -func (tm *TextMarshaler) writeExtensions(w *textWriter, pv reflect.Value) error { - emap := extensionMaps[pv.Type().Elem()] - ep, _ := extendable(pv.Interface()) - - // Order the extensions by ID. - // This isn't strictly necessary, but it will give us - // canonical output, which will also make testing easier. - m, mu := ep.extensionsRead() - if m == nil { - return nil - } - mu.Lock() - ids := make([]int32, 0, len(m)) - for id := range m { - ids = append(ids, id) - } - sort.Sort(int32Slice(ids)) - mu.Unlock() - - for _, extNum := range ids { - ext := m[extNum] - var desc *ExtensionDesc - if emap != nil { - desc = emap[extNum] - } - if desc == nil { - // Unknown extension. - if err := writeUnknownStruct(w, ext.enc); err != nil { - return err - } - continue - } - - pb, err := GetExtension(ep, desc) - if err != nil { - return fmt.Errorf("failed getting extension: %v", err) - } - - // Repeated extensions will appear as a slice. 
- if !desc.repeated() { - if err := tm.writeExtension(w, desc.Name, pb); err != nil { - return err - } - } else { - v := reflect.ValueOf(pb) - for i := 0; i < v.Len(); i++ { - if err := tm.writeExtension(w, desc.Name, v.Index(i).Interface()); err != nil { - return err - } - } - } - } - return nil -} - -func (tm *TextMarshaler) writeExtension(w *textWriter, name string, pb interface{}) error { - if _, err := fmt.Fprintf(w, "[%s]:", name); err != nil { - return err - } - if !w.compact { - if err := w.WriteByte(' '); err != nil { - return err - } - } - if err := tm.writeAny(w, reflect.ValueOf(pb), nil); err != nil { - return err - } - if err := w.WriteByte('\n'); err != nil { - return err - } - return nil -} - -func (w *textWriter) writeIndent() { - if !w.complete { - return - } - remain := w.ind * 2 - for remain > 0 { - n := remain - if n > len(spaces) { - n = len(spaces) - } - w.w.Write(spaces[:n]) - remain -= n - } - w.complete = false -} - -// TextMarshaler is a configurable text format marshaler. -type TextMarshaler struct { - Compact bool // use compact text format (one line). - ExpandAny bool // expand google.protobuf.Any messages of known types -} - -// Marshal writes a given protocol buffer in text format. -// The only errors returned are from w. -func (tm *TextMarshaler) Marshal(w io.Writer, pb Message) error { - val := reflect.ValueOf(pb) - if pb == nil || val.IsNil() { - w.Write([]byte("")) - return nil - } - var bw *bufio.Writer - ww, ok := w.(writer) - if !ok { - bw = bufio.NewWriter(w) - ww = bw - } - aw := &textWriter{ - w: ww, - complete: true, - compact: tm.Compact, - } - - if etm, ok := pb.(encoding.TextMarshaler); ok { - text, err := etm.MarshalText() - if err != nil { - return err - } - if _, err = aw.Write(text); err != nil { - return err - } - if bw != nil { - return bw.Flush() - } - return nil - } - // Dereference the received pointer so we don't have outer < and >. 
- v := reflect.Indirect(val) - if err := tm.writeStruct(aw, v); err != nil { - return err - } - if bw != nil { - return bw.Flush() - } - return nil -} - -// Text is the same as Marshal, but returns the string directly. -func (tm *TextMarshaler) Text(pb Message) string { - var buf bytes.Buffer - tm.Marshal(&buf, pb) - return buf.String() -} - -var ( - defaultTextMarshaler = TextMarshaler{} - compactTextMarshaler = TextMarshaler{Compact: true} -) - -// TODO: consider removing some of the Marshal functions below. - -// MarshalText writes a given protocol buffer in text format. -// The only errors returned are from w. -func MarshalText(w io.Writer, pb Message) error { return defaultTextMarshaler.Marshal(w, pb) } - -// MarshalTextString is the same as MarshalText, but returns the string directly. -func MarshalTextString(pb Message) string { return defaultTextMarshaler.Text(pb) } - -// CompactText writes a given protocol buffer in compact text format (one line). -func CompactText(w io.Writer, pb Message) error { return compactTextMarshaler.Marshal(w, pb) } - -// CompactTextString is the same as CompactText, but returns the string directly. -func CompactTextString(pb Message) string { return compactTextMarshaler.Text(pb) } diff --git a/vendor/github.com/golang/protobuf/proto/text_parser.go b/vendor/github.com/golang/protobuf/proto/text_parser.go deleted file mode 100644 index bb55a3a..0000000 --- a/vendor/github.com/golang/protobuf/proto/text_parser.go +++ /dev/null @@ -1,880 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2010 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. 
-// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -package proto - -// Functions for parsing the Text protocol buffer format. -// TODO: message sets. 
- -import ( - "encoding" - "errors" - "fmt" - "reflect" - "strconv" - "strings" - "unicode/utf8" -) - -// Error string emitted when deserializing Any and fields are already set -const anyRepeatedlyUnpacked = "Any message unpacked multiple times, or %q already set" - -type ParseError struct { - Message string - Line int // 1-based line number - Offset int // 0-based byte offset from start of input -} - -func (p *ParseError) Error() string { - if p.Line == 1 { - // show offset only for first line - return fmt.Sprintf("line 1.%d: %v", p.Offset, p.Message) - } - return fmt.Sprintf("line %d: %v", p.Line, p.Message) -} - -type token struct { - value string - err *ParseError - line int // line number - offset int // byte number from start of input, not start of line - unquoted string // the unquoted version of value, if it was a quoted string -} - -func (t *token) String() string { - if t.err == nil { - return fmt.Sprintf("%q (line=%d, offset=%d)", t.value, t.line, t.offset) - } - return fmt.Sprintf("parse error: %v", t.err) -} - -type textParser struct { - s string // remaining input - done bool // whether the parsing is finished (success or error) - backed bool // whether back() was called - offset, line int - cur token -} - -func newTextParser(s string) *textParser { - p := new(textParser) - p.s = s - p.line = 1 - p.cur.line = 1 - return p -} - -func (p *textParser) errorf(format string, a ...interface{}) *ParseError { - pe := &ParseError{fmt.Sprintf(format, a...), p.cur.line, p.cur.offset} - p.cur.err = pe - p.done = true - return pe -} - -// Numbers and identifiers are matched by [-+._A-Za-z0-9] -func isIdentOrNumberChar(c byte) bool { - switch { - case 'A' <= c && c <= 'Z', 'a' <= c && c <= 'z': - return true - case '0' <= c && c <= '9': - return true - } - switch c { - case '-', '+', '.', '_': - return true - } - return false -} - -func isWhitespace(c byte) bool { - switch c { - case ' ', '\t', '\n', '\r': - return true - } - return false -} - -func isQuote(c byte) 
bool { - switch c { - case '"', '\'': - return true - } - return false -} - -func (p *textParser) skipWhitespace() { - i := 0 - for i < len(p.s) && (isWhitespace(p.s[i]) || p.s[i] == '#') { - if p.s[i] == '#' { - // comment; skip to end of line or input - for i < len(p.s) && p.s[i] != '\n' { - i++ - } - if i == len(p.s) { - break - } - } - if p.s[i] == '\n' { - p.line++ - } - i++ - } - p.offset += i - p.s = p.s[i:len(p.s)] - if len(p.s) == 0 { - p.done = true - } -} - -func (p *textParser) advance() { - // Skip whitespace - p.skipWhitespace() - if p.done { - return - } - - // Start of non-whitespace - p.cur.err = nil - p.cur.offset, p.cur.line = p.offset, p.line - p.cur.unquoted = "" - switch p.s[0] { - case '<', '>', '{', '}', ':', '[', ']', ';', ',', '/': - // Single symbol - p.cur.value, p.s = p.s[0:1], p.s[1:len(p.s)] - case '"', '\'': - // Quoted string - i := 1 - for i < len(p.s) && p.s[i] != p.s[0] && p.s[i] != '\n' { - if p.s[i] == '\\' && i+1 < len(p.s) { - // skip escaped char - i++ - } - i++ - } - if i >= len(p.s) || p.s[i] != p.s[0] { - p.errorf("unmatched quote") - return - } - unq, err := unquoteC(p.s[1:i], rune(p.s[0])) - if err != nil { - p.errorf("invalid quoted string %s: %v", p.s[0:i+1], err) - return - } - p.cur.value, p.s = p.s[0:i+1], p.s[i+1:len(p.s)] - p.cur.unquoted = unq - default: - i := 0 - for i < len(p.s) && isIdentOrNumberChar(p.s[i]) { - i++ - } - if i == 0 { - p.errorf("unexpected byte %#x", p.s[0]) - return - } - p.cur.value, p.s = p.s[0:i], p.s[i:len(p.s)] - } - p.offset += len(p.cur.value) -} - -var ( - errBadUTF8 = errors.New("proto: bad UTF-8") -) - -func unquoteC(s string, quote rune) (string, error) { - // This is based on C++'s tokenizer.cc. - // Despite its name, this is *not* parsing C syntax. - // For instance, "\0" is an invalid quoted string. - - // Avoid allocation in trivial cases. 
- simple := true - for _, r := range s { - if r == '\\' || r == quote { - simple = false - break - } - } - if simple { - return s, nil - } - - buf := make([]byte, 0, 3*len(s)/2) - for len(s) > 0 { - r, n := utf8.DecodeRuneInString(s) - if r == utf8.RuneError && n == 1 { - return "", errBadUTF8 - } - s = s[n:] - if r != '\\' { - if r < utf8.RuneSelf { - buf = append(buf, byte(r)) - } else { - buf = append(buf, string(r)...) - } - continue - } - - ch, tail, err := unescape(s) - if err != nil { - return "", err - } - buf = append(buf, ch...) - s = tail - } - return string(buf), nil -} - -func unescape(s string) (ch string, tail string, err error) { - r, n := utf8.DecodeRuneInString(s) - if r == utf8.RuneError && n == 1 { - return "", "", errBadUTF8 - } - s = s[n:] - switch r { - case 'a': - return "\a", s, nil - case 'b': - return "\b", s, nil - case 'f': - return "\f", s, nil - case 'n': - return "\n", s, nil - case 'r': - return "\r", s, nil - case 't': - return "\t", s, nil - case 'v': - return "\v", s, nil - case '?': - return "?", s, nil // trigraph workaround - case '\'', '"', '\\': - return string(r), s, nil - case '0', '1', '2', '3', '4', '5', '6', '7': - if len(s) < 2 { - return "", "", fmt.Errorf(`\%c requires 2 following digits`, r) - } - ss := string(r) + s[:2] - s = s[2:] - i, err := strconv.ParseUint(ss, 8, 8) - if err != nil { - return "", "", fmt.Errorf(`\%s contains non-octal digits`, ss) - } - return string([]byte{byte(i)}), s, nil - case 'x', 'X', 'u', 'U': - var n int - switch r { - case 'x', 'X': - n = 2 - case 'u': - n = 4 - case 'U': - n = 8 - } - if len(s) < n { - return "", "", fmt.Errorf(`\%c requires %d following digits`, r, n) - } - ss := s[:n] - s = s[n:] - i, err := strconv.ParseUint(ss, 16, 64) - if err != nil { - return "", "", fmt.Errorf(`\%c%s contains non-hexadecimal digits`, r, ss) - } - if r == 'x' || r == 'X' { - return string([]byte{byte(i)}), s, nil - } - if i > utf8.MaxRune { - return "", "", fmt.Errorf(`\%c%s is not a valid 
Unicode code point`, r, ss) - } - return string(i), s, nil - } - return "", "", fmt.Errorf(`unknown escape \%c`, r) -} - -// Back off the parser by one token. Can only be done between calls to next(). -// It makes the next advance() a no-op. -func (p *textParser) back() { p.backed = true } - -// Advances the parser and returns the new current token. -func (p *textParser) next() *token { - if p.backed || p.done { - p.backed = false - return &p.cur - } - p.advance() - if p.done { - p.cur.value = "" - } else if len(p.cur.value) > 0 && isQuote(p.cur.value[0]) { - // Look for multiple quoted strings separated by whitespace, - // and concatenate them. - cat := p.cur - for { - p.skipWhitespace() - if p.done || !isQuote(p.s[0]) { - break - } - p.advance() - if p.cur.err != nil { - return &p.cur - } - cat.value += " " + p.cur.value - cat.unquoted += p.cur.unquoted - } - p.done = false // parser may have seen EOF, but we want to return cat - p.cur = cat - } - return &p.cur -} - -func (p *textParser) consumeToken(s string) error { - tok := p.next() - if tok.err != nil { - return tok.err - } - if tok.value != s { - p.back() - return p.errorf("expected %q, found %q", s, tok.value) - } - return nil -} - -// Return a RequiredNotSetError indicating which required field was not set. -func (p *textParser) missingRequiredFieldError(sv reflect.Value) *RequiredNotSetError { - st := sv.Type() - sprops := GetProperties(st) - for i := 0; i < st.NumField(); i++ { - if !isNil(sv.Field(i)) { - continue - } - - props := sprops.Prop[i] - if props.Required { - return &RequiredNotSetError{fmt.Sprintf("%v.%v", st, props.OrigName)} - } - } - return &RequiredNotSetError{fmt.Sprintf("%v.", st)} // should not happen -} - -// Returns the index in the struct for the named field, as well as the parsed tag properties. 
-func structFieldByName(sprops *StructProperties, name string) (int, *Properties, bool) { - i, ok := sprops.decoderOrigNames[name] - if ok { - return i, sprops.Prop[i], true - } - return -1, nil, false -} - -// Consume a ':' from the input stream (if the next token is a colon), -// returning an error if a colon is needed but not present. -func (p *textParser) checkForColon(props *Properties, typ reflect.Type) *ParseError { - tok := p.next() - if tok.err != nil { - return tok.err - } - if tok.value != ":" { - // Colon is optional when the field is a group or message. - needColon := true - switch props.Wire { - case "group": - needColon = false - case "bytes": - // A "bytes" field is either a message, a string, or a repeated field; - // those three become *T, *string and []T respectively, so we can check for - // this field being a pointer to a non-string. - if typ.Kind() == reflect.Ptr { - // *T or *string - if typ.Elem().Kind() == reflect.String { - break - } - } else if typ.Kind() == reflect.Slice { - // []T or []*T - if typ.Elem().Kind() != reflect.Ptr { - break - } - } else if typ.Kind() == reflect.String { - // The proto3 exception is for a string field, - // which requires a colon. - break - } - needColon = false - } - if needColon { - return p.errorf("expected ':', found %q", tok.value) - } - p.back() - } - return nil -} - -func (p *textParser) readStruct(sv reflect.Value, terminator string) error { - st := sv.Type() - sprops := GetProperties(st) - reqCount := sprops.reqCount - var reqFieldErr error - fieldSet := make(map[string]bool) - // A struct is a sequence of "name: value", terminated by one of - // '>' or '}', or the end of the input. A name may also be - // "[extension]" or "[type/url]". - // - // The whole struct can also be an expanded Any message, like: - // [type/url] < ... struct contents ... 
> - for { - tok := p.next() - if tok.err != nil { - return tok.err - } - if tok.value == terminator { - break - } - if tok.value == "[" { - // Looks like an extension or an Any. - // - // TODO: Check whether we need to handle - // namespace rooted names (e.g. ".something.Foo"). - extName, err := p.consumeExtName() - if err != nil { - return err - } - - if s := strings.LastIndex(extName, "/"); s >= 0 { - // If it contains a slash, it's an Any type URL. - messageName := extName[s+1:] - mt := MessageType(messageName) - if mt == nil { - return p.errorf("unrecognized message %q in google.protobuf.Any", messageName) - } - tok = p.next() - if tok.err != nil { - return tok.err - } - // consume an optional colon - if tok.value == ":" { - tok = p.next() - if tok.err != nil { - return tok.err - } - } - var terminator string - switch tok.value { - case "<": - terminator = ">" - case "{": - terminator = "}" - default: - return p.errorf("expected '{' or '<', found %q", tok.value) - } - v := reflect.New(mt.Elem()) - if pe := p.readStruct(v.Elem(), terminator); pe != nil { - return pe - } - b, err := Marshal(v.Interface().(Message)) - if err != nil { - return p.errorf("failed to marshal message of type %q: %v", messageName, err) - } - if fieldSet["type_url"] { - return p.errorf(anyRepeatedlyUnpacked, "type_url") - } - if fieldSet["value"] { - return p.errorf(anyRepeatedlyUnpacked, "value") - } - sv.FieldByName("TypeUrl").SetString(extName) - sv.FieldByName("Value").SetBytes(b) - fieldSet["type_url"] = true - fieldSet["value"] = true - continue - } - - var desc *ExtensionDesc - // This could be faster, but it's functional. - // TODO: Do something smarter than a linear scan. 
- for _, d := range RegisteredExtensions(reflect.New(st).Interface().(Message)) { - if d.Name == extName { - desc = d - break - } - } - if desc == nil { - return p.errorf("unrecognized extension %q", extName) - } - - props := &Properties{} - props.Parse(desc.Tag) - - typ := reflect.TypeOf(desc.ExtensionType) - if err := p.checkForColon(props, typ); err != nil { - return err - } - - rep := desc.repeated() - - // Read the extension structure, and set it in - // the value we're constructing. - var ext reflect.Value - if !rep { - ext = reflect.New(typ).Elem() - } else { - ext = reflect.New(typ.Elem()).Elem() - } - if err := p.readAny(ext, props); err != nil { - if _, ok := err.(*RequiredNotSetError); !ok { - return err - } - reqFieldErr = err - } - ep := sv.Addr().Interface().(Message) - if !rep { - SetExtension(ep, desc, ext.Interface()) - } else { - old, err := GetExtension(ep, desc) - var sl reflect.Value - if err == nil { - sl = reflect.ValueOf(old) // existing slice - } else { - sl = reflect.MakeSlice(typ, 0, 1) - } - sl = reflect.Append(sl, ext) - SetExtension(ep, desc, sl.Interface()) - } - if err := p.consumeOptionalSeparator(); err != nil { - return err - } - continue - } - - // This is a normal, non-extension field. - name := tok.value - var dst reflect.Value - fi, props, ok := structFieldByName(sprops, name) - if ok { - dst = sv.Field(fi) - } else if oop, ok := sprops.OneofTypes[name]; ok { - // It is a oneof. - props = oop.Prop - nv := reflect.New(oop.Type.Elem()) - dst = nv.Elem().Field(0) - field := sv.Field(oop.Field) - if !field.IsNil() { - return p.errorf("field '%s' would overwrite already parsed oneof '%s'", name, sv.Type().Field(oop.Field).Name) - } - field.Set(nv) - } - if !dst.IsValid() { - return p.errorf("unknown field name %q in %v", name, st) - } - - if dst.Kind() == reflect.Map { - // Consume any colon. - if err := p.checkForColon(props, dst.Type()); err != nil { - return err - } - - // Construct the map if it doesn't already exist. 
- if dst.IsNil() { - dst.Set(reflect.MakeMap(dst.Type())) - } - key := reflect.New(dst.Type().Key()).Elem() - val := reflect.New(dst.Type().Elem()).Elem() - - // The map entry should be this sequence of tokens: - // < key : KEY value : VALUE > - // However, implementations may omit key or value, and technically - // we should support them in any order. See b/28924776 for a time - // this went wrong. - - tok := p.next() - var terminator string - switch tok.value { - case "<": - terminator = ">" - case "{": - terminator = "}" - default: - return p.errorf("expected '{' or '<', found %q", tok.value) - } - for { - tok := p.next() - if tok.err != nil { - return tok.err - } - if tok.value == terminator { - break - } - switch tok.value { - case "key": - if err := p.consumeToken(":"); err != nil { - return err - } - if err := p.readAny(key, props.MapKeyProp); err != nil { - return err - } - if err := p.consumeOptionalSeparator(); err != nil { - return err - } - case "value": - if err := p.checkForColon(props.MapValProp, dst.Type().Elem()); err != nil { - return err - } - if err := p.readAny(val, props.MapValProp); err != nil { - return err - } - if err := p.consumeOptionalSeparator(); err != nil { - return err - } - default: - p.back() - return p.errorf(`expected "key", "value", or %q, found %q`, terminator, tok.value) - } - } - - dst.SetMapIndex(key, val) - continue - } - - // Check that it's not already set if it's not a repeated field. - if !props.Repeated && fieldSet[name] { - return p.errorf("non-repeated field %q was repeated", name) - } - - if err := p.checkForColon(props, dst.Type()); err != nil { - return err - } - - // Parse into the field. 
- fieldSet[name] = true - if err := p.readAny(dst, props); err != nil { - if _, ok := err.(*RequiredNotSetError); !ok { - return err - } - reqFieldErr = err - } - if props.Required { - reqCount-- - } - - if err := p.consumeOptionalSeparator(); err != nil { - return err - } - - } - - if reqCount > 0 { - return p.missingRequiredFieldError(sv) - } - return reqFieldErr -} - -// consumeExtName consumes extension name or expanded Any type URL and the -// following ']'. It returns the name or URL consumed. -func (p *textParser) consumeExtName() (string, error) { - tok := p.next() - if tok.err != nil { - return "", tok.err - } - - // If extension name or type url is quoted, it's a single token. - if len(tok.value) > 2 && isQuote(tok.value[0]) && tok.value[len(tok.value)-1] == tok.value[0] { - name, err := unquoteC(tok.value[1:len(tok.value)-1], rune(tok.value[0])) - if err != nil { - return "", err - } - return name, p.consumeToken("]") - } - - // Consume everything up to "]" - var parts []string - for tok.value != "]" { - parts = append(parts, tok.value) - tok = p.next() - if tok.err != nil { - return "", p.errorf("unrecognized type_url or extension name: %s", tok.err) - } - if p.done && tok.value != "]" { - return "", p.errorf("unclosed type_url or extension name") - } - } - return strings.Join(parts, ""), nil -} - -// consumeOptionalSeparator consumes an optional semicolon or comma. -// It is used in readStruct to provide backward compatibility. 
-func (p *textParser) consumeOptionalSeparator() error { - tok := p.next() - if tok.err != nil { - return tok.err - } - if tok.value != ";" && tok.value != "," { - p.back() - } - return nil -} - -func (p *textParser) readAny(v reflect.Value, props *Properties) error { - tok := p.next() - if tok.err != nil { - return tok.err - } - if tok.value == "" { - return p.errorf("unexpected EOF") - } - - switch fv := v; fv.Kind() { - case reflect.Slice: - at := v.Type() - if at.Elem().Kind() == reflect.Uint8 { - // Special case for []byte - if tok.value[0] != '"' && tok.value[0] != '\'' { - // Deliberately written out here, as the error after - // this switch statement would write "invalid []byte: ...", - // which is not as user-friendly. - return p.errorf("invalid string: %v", tok.value) - } - bytes := []byte(tok.unquoted) - fv.Set(reflect.ValueOf(bytes)) - return nil - } - // Repeated field. - if tok.value == "[" { - // Repeated field with list notation, like [1,2,3]. - for { - fv.Set(reflect.Append(fv, reflect.New(at.Elem()).Elem())) - err := p.readAny(fv.Index(fv.Len()-1), props) - if err != nil { - return err - } - tok := p.next() - if tok.err != nil { - return tok.err - } - if tok.value == "]" { - break - } - if tok.value != "," { - return p.errorf("Expected ']' or ',' found %q", tok.value) - } - } - return nil - } - // One value of the repeated field. - p.back() - fv.Set(reflect.Append(fv, reflect.New(at.Elem()).Elem())) - return p.readAny(fv.Index(fv.Len()-1), props) - case reflect.Bool: - // true/1/t/True or false/f/0/False. - switch tok.value { - case "true", "1", "t", "True": - fv.SetBool(true) - return nil - case "false", "0", "f", "False": - fv.SetBool(false) - return nil - } - case reflect.Float32, reflect.Float64: - v := tok.value - // Ignore 'f' for compatibility with output generated by C++, but don't - // remove 'f' when the value is "-inf" or "inf". 
- if strings.HasSuffix(v, "f") && tok.value != "-inf" && tok.value != "inf" { - v = v[:len(v)-1] - } - if f, err := strconv.ParseFloat(v, fv.Type().Bits()); err == nil { - fv.SetFloat(f) - return nil - } - case reflect.Int32: - if x, err := strconv.ParseInt(tok.value, 0, 32); err == nil { - fv.SetInt(x) - return nil - } - - if len(props.Enum) == 0 { - break - } - m, ok := enumValueMaps[props.Enum] - if !ok { - break - } - x, ok := m[tok.value] - if !ok { - break - } - fv.SetInt(int64(x)) - return nil - case reflect.Int64: - if x, err := strconv.ParseInt(tok.value, 0, 64); err == nil { - fv.SetInt(x) - return nil - } - - case reflect.Ptr: - // A basic field (indirected through pointer), or a repeated message/group - p.back() - fv.Set(reflect.New(fv.Type().Elem())) - return p.readAny(fv.Elem(), props) - case reflect.String: - if tok.value[0] == '"' || tok.value[0] == '\'' { - fv.SetString(tok.unquoted) - return nil - } - case reflect.Struct: - var terminator string - switch tok.value { - case "{": - terminator = "}" - case "<": - terminator = ">" - default: - return p.errorf("expected '{' or '<', found %q", tok.value) - } - // TODO: Handle nested messages which implement encoding.TextUnmarshaler. - return p.readStruct(fv, terminator) - case reflect.Uint32: - if x, err := strconv.ParseUint(tok.value, 0, 32); err == nil { - fv.SetUint(uint64(x)) - return nil - } - case reflect.Uint64: - if x, err := strconv.ParseUint(tok.value, 0, 64); err == nil { - fv.SetUint(x) - return nil - } - } - return p.errorf("invalid %v: %v", v.Type(), tok.value) -} - -// UnmarshalText reads a protocol buffer in Text format. UnmarshalText resets pb -// before starting to unmarshal, so any existing data in pb is always removed. -// If a required field is not set and no other error occurs, -// UnmarshalText returns *RequiredNotSetError. 
-func UnmarshalText(s string, pb Message) error { - if um, ok := pb.(encoding.TextUnmarshaler); ok { - return um.UnmarshalText([]byte(s)) - } - pb.Reset() - v := reflect.ValueOf(pb) - return newTextParser(s).readStruct(v.Elem(), "") -} diff --git a/vendor/github.com/golang/snappy/.gitignore b/vendor/github.com/golang/snappy/.gitignore deleted file mode 100644 index 042091d..0000000 --- a/vendor/github.com/golang/snappy/.gitignore +++ /dev/null @@ -1,16 +0,0 @@ -cmd/snappytool/snappytool -testdata/bench - -# These explicitly listed benchmark data files are for an obsolete version of -# snappy_test.go. -testdata/alice29.txt -testdata/asyoulik.txt -testdata/fireworks.jpeg -testdata/geo.protodata -testdata/html -testdata/html_x_4 -testdata/kppkn.gtb -testdata/lcet10.txt -testdata/paper-100k.pdf -testdata/plrabn12.txt -testdata/urls.10K diff --git a/vendor/github.com/golang/snappy/AUTHORS b/vendor/github.com/golang/snappy/AUTHORS deleted file mode 100644 index bcfa195..0000000 --- a/vendor/github.com/golang/snappy/AUTHORS +++ /dev/null @@ -1,15 +0,0 @@ -# This is the official list of Snappy-Go authors for copyright purposes. -# This file is distinct from the CONTRIBUTORS files. -# See the latter for an explanation. - -# Names should be added to this file as -# Name or Organization -# The email address is not required for organizations. - -# Please keep the list sorted. - -Damian Gryski -Google Inc. -Jan Mercl <0xjnml@gmail.com> -Rodolfo Carvalho -Sebastien Binet diff --git a/vendor/github.com/golang/snappy/CONTRIBUTORS b/vendor/github.com/golang/snappy/CONTRIBUTORS deleted file mode 100644 index 931ae31..0000000 --- a/vendor/github.com/golang/snappy/CONTRIBUTORS +++ /dev/null @@ -1,37 +0,0 @@ -# This is the official list of people who can contribute -# (and typically have contributed) code to the Snappy-Go repository. -# The AUTHORS file lists the copyright holders; this file -# lists people. 
For example, Google employees are listed here -# but not in AUTHORS, because Google holds the copyright. -# -# The submission process automatically checks to make sure -# that people submitting code are listed in this file (by email address). -# -# Names should be added to this file only after verifying that -# the individual or the individual's organization has agreed to -# the appropriate Contributor License Agreement, found here: -# -# http://code.google.com/legal/individual-cla-v1.0.html -# http://code.google.com/legal/corporate-cla-v1.0.html -# -# The agreement for individuals can be filled out on the web. -# -# When adding J Random Contributor's name to this file, -# either J's name or J's organization's name should be -# added to the AUTHORS file, depending on whether the -# individual or corporate CLA was used. - -# Names should be added to this file like so: -# Name - -# Please keep the list sorted. - -Damian Gryski -Jan Mercl <0xjnml@gmail.com> -Kai Backman -Marc-Antoine Ruel -Nigel Tao -Rob Pike -Rodolfo Carvalho -Russ Cox -Sebastien Binet diff --git a/vendor/github.com/golang/snappy/LICENSE b/vendor/github.com/golang/snappy/LICENSE deleted file mode 100644 index 6050c10..0000000 --- a/vendor/github.com/golang/snappy/LICENSE +++ /dev/null @@ -1,27 +0,0 @@ -Copyright (c) 2011 The Snappy-Go Authors. All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - * Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the following disclaimer -in the documentation and/or other materials provided with the -distribution. - * Neither the name of Google Inc. 
nor the names of its -contributors may be used to endorse or promote products derived from -this software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/golang/snappy/README b/vendor/github.com/golang/snappy/README deleted file mode 100644 index cea1287..0000000 --- a/vendor/github.com/golang/snappy/README +++ /dev/null @@ -1,107 +0,0 @@ -The Snappy compression format in the Go programming language. - -To download and install from source: -$ go get github.com/golang/snappy - -Unless otherwise noted, the Snappy-Go source files are distributed -under the BSD-style license found in the LICENSE file. - - - -Benchmarks. - -The golang/snappy benchmarks include compressing (Z) and decompressing (U) ten -or so files, the same set used by the C++ Snappy code (github.com/google/snappy -and note the "google", not "golang"). On an "Intel(R) Core(TM) i7-3770 CPU @ -3.40GHz", Go's GOARCH=amd64 numbers as of 2016-05-29: - -"go test -test.bench=." 
- -_UFlat0-8 2.19GB/s ± 0% html -_UFlat1-8 1.41GB/s ± 0% urls -_UFlat2-8 23.5GB/s ± 2% jpg -_UFlat3-8 1.91GB/s ± 0% jpg_200 -_UFlat4-8 14.0GB/s ± 1% pdf -_UFlat5-8 1.97GB/s ± 0% html4 -_UFlat6-8 814MB/s ± 0% txt1 -_UFlat7-8 785MB/s ± 0% txt2 -_UFlat8-8 857MB/s ± 0% txt3 -_UFlat9-8 719MB/s ± 1% txt4 -_UFlat10-8 2.84GB/s ± 0% pb -_UFlat11-8 1.05GB/s ± 0% gaviota - -_ZFlat0-8 1.04GB/s ± 0% html -_ZFlat1-8 534MB/s ± 0% urls -_ZFlat2-8 15.7GB/s ± 1% jpg -_ZFlat3-8 740MB/s ± 3% jpg_200 -_ZFlat4-8 9.20GB/s ± 1% pdf -_ZFlat5-8 991MB/s ± 0% html4 -_ZFlat6-8 379MB/s ± 0% txt1 -_ZFlat7-8 352MB/s ± 0% txt2 -_ZFlat8-8 396MB/s ± 1% txt3 -_ZFlat9-8 327MB/s ± 1% txt4 -_ZFlat10-8 1.33GB/s ± 1% pb -_ZFlat11-8 605MB/s ± 1% gaviota - - - -"go test -test.bench=. -tags=noasm" - -_UFlat0-8 621MB/s ± 2% html -_UFlat1-8 494MB/s ± 1% urls -_UFlat2-8 23.2GB/s ± 1% jpg -_UFlat3-8 1.12GB/s ± 1% jpg_200 -_UFlat4-8 4.35GB/s ± 1% pdf -_UFlat5-8 609MB/s ± 0% html4 -_UFlat6-8 296MB/s ± 0% txt1 -_UFlat7-8 288MB/s ± 0% txt2 -_UFlat8-8 309MB/s ± 1% txt3 -_UFlat9-8 280MB/s ± 1% txt4 -_UFlat10-8 753MB/s ± 0% pb -_UFlat11-8 400MB/s ± 0% gaviota - -_ZFlat0-8 409MB/s ± 1% html -_ZFlat1-8 250MB/s ± 1% urls -_ZFlat2-8 12.3GB/s ± 1% jpg -_ZFlat3-8 132MB/s ± 0% jpg_200 -_ZFlat4-8 2.92GB/s ± 0% pdf -_ZFlat5-8 405MB/s ± 1% html4 -_ZFlat6-8 179MB/s ± 1% txt1 -_ZFlat7-8 170MB/s ± 1% txt2 -_ZFlat8-8 189MB/s ± 1% txt3 -_ZFlat9-8 164MB/s ± 1% txt4 -_ZFlat10-8 479MB/s ± 1% pb -_ZFlat11-8 270MB/s ± 1% gaviota - - - -For comparison (Go's encoded output is byte-for-byte identical to C++'s), here -are the numbers from C++ Snappy's - -make CXXFLAGS="-O2 -DNDEBUG -g" clean snappy_unittest.log && cat snappy_unittest.log - -BM_UFlat/0 2.4GB/s html -BM_UFlat/1 1.4GB/s urls -BM_UFlat/2 21.8GB/s jpg -BM_UFlat/3 1.5GB/s jpg_200 -BM_UFlat/4 13.3GB/s pdf -BM_UFlat/5 2.1GB/s html4 -BM_UFlat/6 1.0GB/s txt1 -BM_UFlat/7 959.4MB/s txt2 -BM_UFlat/8 1.0GB/s txt3 -BM_UFlat/9 864.5MB/s txt4 -BM_UFlat/10 2.9GB/s pb -BM_UFlat/11 1.2GB/s 
gaviota - -BM_ZFlat/0 944.3MB/s html (22.31 %) -BM_ZFlat/1 501.6MB/s urls (47.78 %) -BM_ZFlat/2 14.3GB/s jpg (99.95 %) -BM_ZFlat/3 538.3MB/s jpg_200 (73.00 %) -BM_ZFlat/4 8.3GB/s pdf (83.30 %) -BM_ZFlat/5 903.5MB/s html4 (22.52 %) -BM_ZFlat/6 336.0MB/s txt1 (57.88 %) -BM_ZFlat/7 312.3MB/s txt2 (61.91 %) -BM_ZFlat/8 353.1MB/s txt3 (54.99 %) -BM_ZFlat/9 289.9MB/s txt4 (66.26 %) -BM_ZFlat/10 1.2GB/s pb (19.68 %) -BM_ZFlat/11 527.4MB/s gaviota (37.72 %) diff --git a/vendor/github.com/golang/snappy/decode.go b/vendor/github.com/golang/snappy/decode.go deleted file mode 100644 index 72efb03..0000000 --- a/vendor/github.com/golang/snappy/decode.go +++ /dev/null @@ -1,237 +0,0 @@ -// Copyright 2011 The Snappy-Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package snappy - -import ( - "encoding/binary" - "errors" - "io" -) - -var ( - // ErrCorrupt reports that the input is invalid. - ErrCorrupt = errors.New("snappy: corrupt input") - // ErrTooLarge reports that the uncompressed length is too large. - ErrTooLarge = errors.New("snappy: decoded block is too large") - // ErrUnsupported reports that the input isn't supported. - ErrUnsupported = errors.New("snappy: unsupported input") - - errUnsupportedLiteralLength = errors.New("snappy: unsupported literal length") -) - -// DecodedLen returns the length of the decoded block. -func DecodedLen(src []byte) (int, error) { - v, _, err := decodedLen(src) - return v, err -} - -// decodedLen returns the length of the decoded block and the number of bytes -// that the length header occupied. 
-func decodedLen(src []byte) (blockLen, headerLen int, err error) { - v, n := binary.Uvarint(src) - if n <= 0 || v > 0xffffffff { - return 0, 0, ErrCorrupt - } - - const wordSize = 32 << (^uint(0) >> 32 & 1) - if wordSize == 32 && v > 0x7fffffff { - return 0, 0, ErrTooLarge - } - return int(v), n, nil -} - -const ( - decodeErrCodeCorrupt = 1 - decodeErrCodeUnsupportedLiteralLength = 2 -) - -// Decode returns the decoded form of src. The returned slice may be a sub- -// slice of dst if dst was large enough to hold the entire decoded block. -// Otherwise, a newly allocated slice will be returned. -// -// The dst and src must not overlap. It is valid to pass a nil dst. -func Decode(dst, src []byte) ([]byte, error) { - dLen, s, err := decodedLen(src) - if err != nil { - return nil, err - } - if dLen <= len(dst) { - dst = dst[:dLen] - } else { - dst = make([]byte, dLen) - } - switch decode(dst, src[s:]) { - case 0: - return dst, nil - case decodeErrCodeUnsupportedLiteralLength: - return nil, errUnsupportedLiteralLength - } - return nil, ErrCorrupt -} - -// NewReader returns a new Reader that decompresses from r, using the framing -// format described at -// https://github.com/google/snappy/blob/master/framing_format.txt -func NewReader(r io.Reader) *Reader { - return &Reader{ - r: r, - decoded: make([]byte, maxBlockSize), - buf: make([]byte, maxEncodedLenOfMaxBlockSize+checksumSize), - } -} - -// Reader is an io.Reader that can read Snappy-compressed bytes. -type Reader struct { - r io.Reader - err error - decoded []byte - buf []byte - // decoded[i:j] contains decoded bytes that have not yet been passed on. - i, j int - readHeader bool -} - -// Reset discards any buffered data, resets all state, and switches the Snappy -// reader to read from r. This permits reusing a Reader rather than allocating -// a new one. 
-func (r *Reader) Reset(reader io.Reader) { - r.r = reader - r.err = nil - r.i = 0 - r.j = 0 - r.readHeader = false -} - -func (r *Reader) readFull(p []byte, allowEOF bool) (ok bool) { - if _, r.err = io.ReadFull(r.r, p); r.err != nil { - if r.err == io.ErrUnexpectedEOF || (r.err == io.EOF && !allowEOF) { - r.err = ErrCorrupt - } - return false - } - return true -} - -// Read satisfies the io.Reader interface. -func (r *Reader) Read(p []byte) (int, error) { - if r.err != nil { - return 0, r.err - } - for { - if r.i < r.j { - n := copy(p, r.decoded[r.i:r.j]) - r.i += n - return n, nil - } - if !r.readFull(r.buf[:4], true) { - return 0, r.err - } - chunkType := r.buf[0] - if !r.readHeader { - if chunkType != chunkTypeStreamIdentifier { - r.err = ErrCorrupt - return 0, r.err - } - r.readHeader = true - } - chunkLen := int(r.buf[1]) | int(r.buf[2])<<8 | int(r.buf[3])<<16 - if chunkLen > len(r.buf) { - r.err = ErrUnsupported - return 0, r.err - } - - // The chunk types are specified at - // https://github.com/google/snappy/blob/master/framing_format.txt - switch chunkType { - case chunkTypeCompressedData: - // Section 4.2. Compressed data (chunk type 0x00). - if chunkLen < checksumSize { - r.err = ErrCorrupt - return 0, r.err - } - buf := r.buf[:chunkLen] - if !r.readFull(buf, false) { - return 0, r.err - } - checksum := uint32(buf[0]) | uint32(buf[1])<<8 | uint32(buf[2])<<16 | uint32(buf[3])<<24 - buf = buf[checksumSize:] - - n, err := DecodedLen(buf) - if err != nil { - r.err = err - return 0, r.err - } - if n > len(r.decoded) { - r.err = ErrCorrupt - return 0, r.err - } - if _, err := Decode(r.decoded, buf); err != nil { - r.err = err - return 0, r.err - } - if crc(r.decoded[:n]) != checksum { - r.err = ErrCorrupt - return 0, r.err - } - r.i, r.j = 0, n - continue - - case chunkTypeUncompressedData: - // Section 4.3. Uncompressed data (chunk type 0x01). 
- if chunkLen < checksumSize { - r.err = ErrCorrupt - return 0, r.err - } - buf := r.buf[:checksumSize] - if !r.readFull(buf, false) { - return 0, r.err - } - checksum := uint32(buf[0]) | uint32(buf[1])<<8 | uint32(buf[2])<<16 | uint32(buf[3])<<24 - // Read directly into r.decoded instead of via r.buf. - n := chunkLen - checksumSize - if n > len(r.decoded) { - r.err = ErrCorrupt - return 0, r.err - } - if !r.readFull(r.decoded[:n], false) { - return 0, r.err - } - if crc(r.decoded[:n]) != checksum { - r.err = ErrCorrupt - return 0, r.err - } - r.i, r.j = 0, n - continue - - case chunkTypeStreamIdentifier: - // Section 4.1. Stream identifier (chunk type 0xff). - if chunkLen != len(magicBody) { - r.err = ErrCorrupt - return 0, r.err - } - if !r.readFull(r.buf[:len(magicBody)], false) { - return 0, r.err - } - for i := 0; i < len(magicBody); i++ { - if r.buf[i] != magicBody[i] { - r.err = ErrCorrupt - return 0, r.err - } - } - continue - } - - if chunkType <= 0x7f { - // Section 4.5. Reserved unskippable chunks (chunk types 0x02-0x7f). - r.err = ErrUnsupported - return 0, r.err - } - // Section 4.4 Padding (chunk type 0xfe). - // Section 4.6. Reserved skippable chunks (chunk types 0x80-0xfd). - if !r.readFull(r.buf[:chunkLen], false) { - return 0, r.err - } - } -} diff --git a/vendor/github.com/golang/snappy/decode_amd64.go b/vendor/github.com/golang/snappy/decode_amd64.go deleted file mode 100644 index fcd192b..0000000 --- a/vendor/github.com/golang/snappy/decode_amd64.go +++ /dev/null @@ -1,14 +0,0 @@ -// Copyright 2016 The Snappy-Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build !appengine -// +build gc -// +build !noasm - -package snappy - -// decode has the same semantics as in decode_other.go. 
-// -//go:noescape -func decode(dst, src []byte) int diff --git a/vendor/github.com/golang/snappy/decode_amd64.s b/vendor/github.com/golang/snappy/decode_amd64.s deleted file mode 100644 index e6179f6..0000000 --- a/vendor/github.com/golang/snappy/decode_amd64.s +++ /dev/null @@ -1,490 +0,0 @@ -// Copyright 2016 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build !appengine -// +build gc -// +build !noasm - -#include "textflag.h" - -// The asm code generally follows the pure Go code in decode_other.go, except -// where marked with a "!!!". - -// func decode(dst, src []byte) int -// -// All local variables fit into registers. The non-zero stack size is only to -// spill registers and push args when issuing a CALL. The register allocation: -// - AX scratch -// - BX scratch -// - CX length or x -// - DX offset -// - SI &src[s] -// - DI &dst[d] -// + R8 dst_base -// + R9 dst_len -// + R10 dst_base + dst_len -// + R11 src_base -// + R12 src_len -// + R13 src_base + src_len -// - R14 used by doCopy -// - R15 used by doCopy -// -// The registers R8-R13 (marked with a "+") are set at the start of the -// function, and after a CALL returns, and are not otherwise modified. -// -// The d variable is implicitly DI - R8, and len(dst)-d is R10 - DI. -// The s variable is implicitly SI - R11, and len(src)-s is R13 - SI. -TEXT ·decode(SB), NOSPLIT, $48-56 - // Initialize SI, DI and R8-R13. - MOVQ dst_base+0(FP), R8 - MOVQ dst_len+8(FP), R9 - MOVQ R8, DI - MOVQ R8, R10 - ADDQ R9, R10 - MOVQ src_base+24(FP), R11 - MOVQ src_len+32(FP), R12 - MOVQ R11, SI - MOVQ R11, R13 - ADDQ R12, R13 - -loop: - // for s < len(src) - CMPQ SI, R13 - JEQ end - - // CX = uint32(src[s]) - // - // switch src[s] & 0x03 - MOVBLZX (SI), CX - MOVL CX, BX - ANDL $3, BX - CMPL BX, $1 - JAE tagCopy - - // ---------------------------------------- - // The code below handles literal tags. 
- - // case tagLiteral: - // x := uint32(src[s] >> 2) - // switch - SHRL $2, CX - CMPL CX, $60 - JAE tagLit60Plus - - // case x < 60: - // s++ - INCQ SI - -doLit: - // This is the end of the inner "switch", when we have a literal tag. - // - // We assume that CX == x and x fits in a uint32, where x is the variable - // used in the pure Go decode_other.go code. - - // length = int(x) + 1 - // - // Unlike the pure Go code, we don't need to check if length <= 0 because - // CX can hold 64 bits, so the increment cannot overflow. - INCQ CX - - // Prepare to check if copying length bytes will run past the end of dst or - // src. - // - // AX = len(dst) - d - // BX = len(src) - s - MOVQ R10, AX - SUBQ DI, AX - MOVQ R13, BX - SUBQ SI, BX - - // !!! Try a faster technique for short (16 or fewer bytes) copies. - // - // if length > 16 || len(dst)-d < 16 || len(src)-s < 16 { - // goto callMemmove // Fall back on calling runtime·memmove. - // } - // - // The C++ snappy code calls this TryFastAppend. It also checks len(src)-s - // against 21 instead of 16, because it cannot assume that all of its input - // is contiguous in memory and so it needs to leave enough source bytes to - // read the next tag without refilling buffers, but Go's Decode assumes - // contiguousness (the src argument is a []byte). - CMPQ CX, $16 - JGT callMemmove - CMPQ AX, $16 - JLT callMemmove - CMPQ BX, $16 - JLT callMemmove - - // !!! Implement the copy from src to dst as a 16-byte load and store. - // (Decode's documentation says that dst and src must not overlap.) - // - // This always copies 16 bytes, instead of only length bytes, but that's - // OK. If the input is a valid Snappy encoding then subsequent iterations - // will fix up the overrun. Otherwise, Decode returns a nil []byte (and a - // non-nil error), so the overrun will be ignored. - // - // Note that on amd64, it is legal and cheap to issue unaligned 8-byte or - // 16-byte loads and stores. 
This technique probably wouldn't be as - // effective on architectures that are fussier about alignment. - MOVOU 0(SI), X0 - MOVOU X0, 0(DI) - - // d += length - // s += length - ADDQ CX, DI - ADDQ CX, SI - JMP loop - -callMemmove: - // if length > len(dst)-d || length > len(src)-s { etc } - CMPQ CX, AX - JGT errCorrupt - CMPQ CX, BX - JGT errCorrupt - - // copy(dst[d:], src[s:s+length]) - // - // This means calling runtime·memmove(&dst[d], &src[s], length), so we push - // DI, SI and CX as arguments. Coincidentally, we also need to spill those - // three registers to the stack, to save local variables across the CALL. - MOVQ DI, 0(SP) - MOVQ SI, 8(SP) - MOVQ CX, 16(SP) - MOVQ DI, 24(SP) - MOVQ SI, 32(SP) - MOVQ CX, 40(SP) - CALL runtime·memmove(SB) - - // Restore local variables: unspill registers from the stack and - // re-calculate R8-R13. - MOVQ 24(SP), DI - MOVQ 32(SP), SI - MOVQ 40(SP), CX - MOVQ dst_base+0(FP), R8 - MOVQ dst_len+8(FP), R9 - MOVQ R8, R10 - ADDQ R9, R10 - MOVQ src_base+24(FP), R11 - MOVQ src_len+32(FP), R12 - MOVQ R11, R13 - ADDQ R12, R13 - - // d += length - // s += length - ADDQ CX, DI - ADDQ CX, SI - JMP loop - -tagLit60Plus: - // !!! This fragment does the - // - // s += x - 58; if uint(s) > uint(len(src)) { etc } - // - // checks. In the asm version, we code it once instead of once per switch case. 
- ADDQ CX, SI - SUBQ $58, SI - MOVQ SI, BX - SUBQ R11, BX - CMPQ BX, R12 - JA errCorrupt - - // case x == 60: - CMPL CX, $61 - JEQ tagLit61 - JA tagLit62Plus - - // x = uint32(src[s-1]) - MOVBLZX -1(SI), CX - JMP doLit - -tagLit61: - // case x == 61: - // x = uint32(src[s-2]) | uint32(src[s-1])<<8 - MOVWLZX -2(SI), CX - JMP doLit - -tagLit62Plus: - CMPL CX, $62 - JA tagLit63 - - // case x == 62: - // x = uint32(src[s-3]) | uint32(src[s-2])<<8 | uint32(src[s-1])<<16 - MOVWLZX -3(SI), CX - MOVBLZX -1(SI), BX - SHLL $16, BX - ORL BX, CX - JMP doLit - -tagLit63: - // case x == 63: - // x = uint32(src[s-4]) | uint32(src[s-3])<<8 | uint32(src[s-2])<<16 | uint32(src[s-1])<<24 - MOVL -4(SI), CX - JMP doLit - -// The code above handles literal tags. -// ---------------------------------------- -// The code below handles copy tags. - -tagCopy4: - // case tagCopy4: - // s += 5 - ADDQ $5, SI - - // if uint(s) > uint(len(src)) { etc } - MOVQ SI, BX - SUBQ R11, BX - CMPQ BX, R12 - JA errCorrupt - - // length = 1 + int(src[s-5])>>2 - SHRQ $2, CX - INCQ CX - - // offset = int(uint32(src[s-4]) | uint32(src[s-3])<<8 | uint32(src[s-2])<<16 | uint32(src[s-1])<<24) - MOVLQZX -4(SI), DX - JMP doCopy - -tagCopy2: - // case tagCopy2: - // s += 3 - ADDQ $3, SI - - // if uint(s) > uint(len(src)) { etc } - MOVQ SI, BX - SUBQ R11, BX - CMPQ BX, R12 - JA errCorrupt - - // length = 1 + int(src[s-3])>>2 - SHRQ $2, CX - INCQ CX - - // offset = int(uint32(src[s-2]) | uint32(src[s-1])<<8) - MOVWQZX -2(SI), DX - JMP doCopy - -tagCopy: - // We have a copy tag. 
We assume that: - // - BX == src[s] & 0x03 - // - CX == src[s] - CMPQ BX, $2 - JEQ tagCopy2 - JA tagCopy4 - - // case tagCopy1: - // s += 2 - ADDQ $2, SI - - // if uint(s) > uint(len(src)) { etc } - MOVQ SI, BX - SUBQ R11, BX - CMPQ BX, R12 - JA errCorrupt - - // offset = int(uint32(src[s-2])&0xe0<<3 | uint32(src[s-1])) - MOVQ CX, DX - ANDQ $0xe0, DX - SHLQ $3, DX - MOVBQZX -1(SI), BX - ORQ BX, DX - - // length = 4 + int(src[s-2])>>2&0x7 - SHRQ $2, CX - ANDQ $7, CX - ADDQ $4, CX - -doCopy: - // This is the end of the outer "switch", when we have a copy tag. - // - // We assume that: - // - CX == length && CX > 0 - // - DX == offset - - // if offset <= 0 { etc } - CMPQ DX, $0 - JLE errCorrupt - - // if d < offset { etc } - MOVQ DI, BX - SUBQ R8, BX - CMPQ BX, DX - JLT errCorrupt - - // if length > len(dst)-d { etc } - MOVQ R10, BX - SUBQ DI, BX - CMPQ CX, BX - JGT errCorrupt - - // forwardCopy(dst[d:d+length], dst[d-offset:]); d += length - // - // Set: - // - R14 = len(dst)-d - // - R15 = &dst[d-offset] - MOVQ R10, R14 - SUBQ DI, R14 - MOVQ DI, R15 - SUBQ DX, R15 - - // !!! Try a faster technique for short (16 or fewer bytes) forward copies. - // - // First, try using two 8-byte load/stores, similar to the doLit technique - // above. Even if dst[d:d+length] and dst[d-offset:] can overlap, this is - // still OK if offset >= 8. Note that this has to be two 8-byte load/stores - // and not one 16-byte load/store, and the first store has to be before the - // second load, due to the overlap if offset is in the range [8, 16). - // - // if length > 16 || offset < 8 || len(dst)-d < 16 { - // goto slowForwardCopy - // } - // copy 16 bytes - // d += length - CMPQ CX, $16 - JGT slowForwardCopy - CMPQ DX, $8 - JLT slowForwardCopy - CMPQ R14, $16 - JLT slowForwardCopy - MOVQ 0(R15), AX - MOVQ AX, 0(DI) - MOVQ 8(R15), BX - MOVQ BX, 8(DI) - ADDQ CX, DI - JMP loop - -slowForwardCopy: - // !!! 
If the forward copy is longer than 16 bytes, or if offset < 8, we - // can still try 8-byte load stores, provided we can overrun up to 10 extra - // bytes. As above, the overrun will be fixed up by subsequent iterations - // of the outermost loop. - // - // The C++ snappy code calls this technique IncrementalCopyFastPath. Its - // commentary says: - // - // ---- - // - // The main part of this loop is a simple copy of eight bytes at a time - // until we've copied (at least) the requested amount of bytes. However, - // if d and d-offset are less than eight bytes apart (indicating a - // repeating pattern of length < 8), we first need to expand the pattern in - // order to get the correct results. For instance, if the buffer looks like - // this, with the eight-byte and patterns marked as - // intervals: - // - // abxxxxxxxxxxxx - // [------] d-offset - // [------] d - // - // a single eight-byte copy from to will repeat the pattern - // once, after which we can move two bytes without moving : - // - // ababxxxxxxxxxx - // [------] d-offset - // [------] d - // - // and repeat the exercise until the two no longer overlap. - // - // This allows us to do very well in the special case of one single byte - // repeated many times, without taking a big hit for more general cases. - // - // The worst case of extra writing past the end of the match occurs when - // offset == 1 and length == 1; the last copy will read from byte positions - // [0..7] and write to [4..11], whereas it was only supposed to write to - // position 1. Thus, ten excess bytes. - // - // ---- - // - // That "10 byte overrun" worst case is confirmed by Go's - // TestSlowForwardCopyOverrun, which also tests the fixUpSlowForwardCopy - // and finishSlowForwardCopy algorithm. - // - // if length > len(dst)-d-10 { - // goto verySlowForwardCopy - // } - SUBQ $10, R14 - CMPQ CX, R14 - JGT verySlowForwardCopy - -makeOffsetAtLeast8: - // !!! 
As above, expand the pattern so that offset >= 8 and we can use - // 8-byte load/stores. - // - // for offset < 8 { - // copy 8 bytes from dst[d-offset:] to dst[d:] - // length -= offset - // d += offset - // offset += offset - // // The two previous lines together means that d-offset, and therefore - // // R15, is unchanged. - // } - CMPQ DX, $8 - JGE fixUpSlowForwardCopy - MOVQ (R15), BX - MOVQ BX, (DI) - SUBQ DX, CX - ADDQ DX, DI - ADDQ DX, DX - JMP makeOffsetAtLeast8 - -fixUpSlowForwardCopy: - // !!! Add length (which might be negative now) to d (implied by DI being - // &dst[d]) so that d ends up at the right place when we jump back to the - // top of the loop. Before we do that, though, we save DI to AX so that, if - // length is positive, copying the remaining length bytes will write to the - // right place. - MOVQ DI, AX - ADDQ CX, DI - -finishSlowForwardCopy: - // !!! Repeat 8-byte load/stores until length <= 0. Ending with a negative - // length means that we overrun, but as above, that will be fixed up by - // subsequent iterations of the outermost loop. - CMPQ CX, $0 - JLE loop - MOVQ (R15), BX - MOVQ BX, (AX) - ADDQ $8, R15 - ADDQ $8, AX - SUBQ $8, CX - JMP finishSlowForwardCopy - -verySlowForwardCopy: - // verySlowForwardCopy is a simple implementation of forward copy. In C - // parlance, this is a do/while loop instead of a while loop, since we know - // that length > 0. In Go syntax: - // - // for { - // dst[d] = dst[d - offset] - // d++ - // length-- - // if length == 0 { - // break - // } - // } - MOVB (R15), BX - MOVB BX, (DI) - INCQ R15 - INCQ DI - DECQ CX - JNZ verySlowForwardCopy - JMP loop - -// The code above handles copy tags. -// ---------------------------------------- - -end: - // This is the end of the "for s < len(src)". 
- // - // if d != len(dst) { etc } - CMPQ DI, R10 - JNE errCorrupt - - // return 0 - MOVQ $0, ret+48(FP) - RET - -errCorrupt: - // return decodeErrCodeCorrupt - MOVQ $1, ret+48(FP) - RET diff --git a/vendor/github.com/golang/snappy/decode_other.go b/vendor/github.com/golang/snappy/decode_other.go deleted file mode 100644 index 8c9f204..0000000 --- a/vendor/github.com/golang/snappy/decode_other.go +++ /dev/null @@ -1,101 +0,0 @@ -// Copyright 2016 The Snappy-Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build !amd64 appengine !gc noasm - -package snappy - -// decode writes the decoding of src to dst. It assumes that the varint-encoded -// length of the decompressed bytes has already been read, and that len(dst) -// equals that length. -// -// It returns 0 on success or a decodeErrCodeXxx error code on failure. -func decode(dst, src []byte) int { - var d, s, offset, length int - for s < len(src) { - switch src[s] & 0x03 { - case tagLiteral: - x := uint32(src[s] >> 2) - switch { - case x < 60: - s++ - case x == 60: - s += 2 - if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. - return decodeErrCodeCorrupt - } - x = uint32(src[s-1]) - case x == 61: - s += 3 - if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. - return decodeErrCodeCorrupt - } - x = uint32(src[s-2]) | uint32(src[s-1])<<8 - case x == 62: - s += 4 - if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. - return decodeErrCodeCorrupt - } - x = uint32(src[s-3]) | uint32(src[s-2])<<8 | uint32(src[s-1])<<16 - case x == 63: - s += 5 - if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. 
- return decodeErrCodeCorrupt - } - x = uint32(src[s-4]) | uint32(src[s-3])<<8 | uint32(src[s-2])<<16 | uint32(src[s-1])<<24 - } - length = int(x) + 1 - if length <= 0 { - return decodeErrCodeUnsupportedLiteralLength - } - if length > len(dst)-d || length > len(src)-s { - return decodeErrCodeCorrupt - } - copy(dst[d:], src[s:s+length]) - d += length - s += length - continue - - case tagCopy1: - s += 2 - if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. - return decodeErrCodeCorrupt - } - length = 4 + int(src[s-2])>>2&0x7 - offset = int(uint32(src[s-2])&0xe0<<3 | uint32(src[s-1])) - - case tagCopy2: - s += 3 - if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. - return decodeErrCodeCorrupt - } - length = 1 + int(src[s-3])>>2 - offset = int(uint32(src[s-2]) | uint32(src[s-1])<<8) - - case tagCopy4: - s += 5 - if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. - return decodeErrCodeCorrupt - } - length = 1 + int(src[s-5])>>2 - offset = int(uint32(src[s-4]) | uint32(src[s-3])<<8 | uint32(src[s-2])<<16 | uint32(src[s-1])<<24) - } - - if offset <= 0 || d < offset || length > len(dst)-d { - return decodeErrCodeCorrupt - } - // Copy from an earlier sub-slice of dst to a later sub-slice. Unlike - // the built-in copy function, this byte-by-byte copy always runs - // forwards, even if the slices overlap. Conceptually, this is: - // - // d += forwardCopy(dst[d:d+length], dst[d-offset:]) - for end := d + length; d != end; d++ { - dst[d] = dst[d-offset] - } - } - if d != len(dst) { - return decodeErrCodeCorrupt - } - return 0 -} diff --git a/vendor/github.com/golang/snappy/encode.go b/vendor/github.com/golang/snappy/encode.go deleted file mode 100644 index 8d393e9..0000000 --- a/vendor/github.com/golang/snappy/encode.go +++ /dev/null @@ -1,285 +0,0 @@ -// Copyright 2011 The Snappy-Go Authors. All rights reserved. 
-// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package snappy - -import ( - "encoding/binary" - "errors" - "io" -) - -// Encode returns the encoded form of src. The returned slice may be a sub- -// slice of dst if dst was large enough to hold the entire encoded block. -// Otherwise, a newly allocated slice will be returned. -// -// The dst and src must not overlap. It is valid to pass a nil dst. -func Encode(dst, src []byte) []byte { - if n := MaxEncodedLen(len(src)); n < 0 { - panic(ErrTooLarge) - } else if len(dst) < n { - dst = make([]byte, n) - } - - // The block starts with the varint-encoded length of the decompressed bytes. - d := binary.PutUvarint(dst, uint64(len(src))) - - for len(src) > 0 { - p := src - src = nil - if len(p) > maxBlockSize { - p, src = p[:maxBlockSize], p[maxBlockSize:] - } - if len(p) < minNonLiteralBlockSize { - d += emitLiteral(dst[d:], p) - } else { - d += encodeBlock(dst[d:], p) - } - } - return dst[:d] -} - -// inputMargin is the minimum number of extra input bytes to keep, inside -// encodeBlock's inner loop. On some architectures, this margin lets us -// implement a fast path for emitLiteral, where the copy of short (<= 16 byte) -// literals can be implemented as a single load to and store from a 16-byte -// register. That literal's actual length can be as short as 1 byte, so this -// can copy up to 15 bytes too much, but that's OK as subsequent iterations of -// the encoding loop will fix up the copy overrun, and this inputMargin ensures -// that we don't overrun the dst and src buffers. -const inputMargin = 16 - 1 - -// minNonLiteralBlockSize is the minimum size of the input to encodeBlock that -// could be encoded with a copy tag. This is the minimum with respect to the -// algorithm used by encodeBlock, not a minimum enforced by the file format. -// -// The encoded output must start with at least a 1 byte literal, as there are -// no previous bytes to copy. 
A minimal (1 byte) copy after that, generated -// from an emitCopy call in encodeBlock's main loop, would require at least -// another inputMargin bytes, for the reason above: we want any emitLiteral -// calls inside encodeBlock's main loop to use the fast path if possible, which -// requires being able to overrun by inputMargin bytes. Thus, -// minNonLiteralBlockSize equals 1 + 1 + inputMargin. -// -// The C++ code doesn't use this exact threshold, but it could, as discussed at -// https://groups.google.com/d/topic/snappy-compression/oGbhsdIJSJ8/discussion -// The difference between Go (2+inputMargin) and C++ (inputMargin) is purely an -// optimization. It should not affect the encoded form. This is tested by -// TestSameEncodingAsCppShortCopies. -const minNonLiteralBlockSize = 1 + 1 + inputMargin - -// MaxEncodedLen returns the maximum length of a snappy block, given its -// uncompressed length. -// -// It will return a negative value if srcLen is too large to encode. -func MaxEncodedLen(srcLen int) int { - n := uint64(srcLen) - if n > 0xffffffff { - return -1 - } - // Compressed data can be defined as: - // compressed := item* literal* - // item := literal* copy - // - // The trailing literal sequence has a space blowup of at most 62/60 - // since a literal of length 60 needs one tag byte + one extra byte - // for length information. - // - // Item blowup is trickier to measure. Suppose the "copy" op copies - // 4 bytes of data. Because of a special check in the encoding code, - // we produce a 4-byte copy only if the offset is < 65536. Therefore - // the copy op takes 3 bytes to encode, and this type of item leads - // to at most the 62/60 blowup for representing literals. - // - // Suppose the "copy" op copies 5 bytes of data. If the offset is big - // enough, it will take 5 bytes to encode the copy op. Therefore the - // worst case here is a one-byte literal followed by a five-byte copy. - // That is, 6 bytes of input turn into 7 bytes of "compressed" data. 
- // - // This last factor dominates the blowup, so the final estimate is: - n = 32 + n + n/6 - if n > 0xffffffff { - return -1 - } - return int(n) -} - -var errClosed = errors.New("snappy: Writer is closed") - -// NewWriter returns a new Writer that compresses to w. -// -// The Writer returned does not buffer writes. There is no need to Flush or -// Close such a Writer. -// -// Deprecated: the Writer returned is not suitable for many small writes, only -// for few large writes. Use NewBufferedWriter instead, which is efficient -// regardless of the frequency and shape of the writes, and remember to Close -// that Writer when done. -func NewWriter(w io.Writer) *Writer { - return &Writer{ - w: w, - obuf: make([]byte, obufLen), - } -} - -// NewBufferedWriter returns a new Writer that compresses to w, using the -// framing format described at -// https://github.com/google/snappy/blob/master/framing_format.txt -// -// The Writer returned buffers writes. Users must call Close to guarantee all -// data has been forwarded to the underlying io.Writer. They may also call -// Flush zero or more times before calling Close. -func NewBufferedWriter(w io.Writer) *Writer { - return &Writer{ - w: w, - ibuf: make([]byte, 0, maxBlockSize), - obuf: make([]byte, obufLen), - } -} - -// Writer is an io.Writer that can write Snappy-compressed bytes. -type Writer struct { - w io.Writer - err error - - // ibuf is a buffer for the incoming (uncompressed) bytes. - // - // Its use is optional. For backwards compatibility, Writers created by the - // NewWriter function have ibuf == nil, do not buffer incoming bytes, and - // therefore do not need to be Flush'ed or Close'd. - ibuf []byte - - // obuf is a buffer for the outgoing (compressed) bytes. - obuf []byte - - // wroteStreamHeader is whether we have written the stream header. - wroteStreamHeader bool -} - -// Reset discards the writer's state and switches the Snappy writer to write to -// w. 
This permits reusing a Writer rather than allocating a new one. -func (w *Writer) Reset(writer io.Writer) { - w.w = writer - w.err = nil - if w.ibuf != nil { - w.ibuf = w.ibuf[:0] - } - w.wroteStreamHeader = false -} - -// Write satisfies the io.Writer interface. -func (w *Writer) Write(p []byte) (nRet int, errRet error) { - if w.ibuf == nil { - // Do not buffer incoming bytes. This does not perform or compress well - // if the caller of Writer.Write writes many small slices. This - // behavior is therefore deprecated, but still supported for backwards - // compatibility with code that doesn't explicitly Flush or Close. - return w.write(p) - } - - // The remainder of this method is based on bufio.Writer.Write from the - // standard library. - - for len(p) > (cap(w.ibuf)-len(w.ibuf)) && w.err == nil { - var n int - if len(w.ibuf) == 0 { - // Large write, empty buffer. - // Write directly from p to avoid copy. - n, _ = w.write(p) - } else { - n = copy(w.ibuf[len(w.ibuf):cap(w.ibuf)], p) - w.ibuf = w.ibuf[:len(w.ibuf)+n] - w.Flush() - } - nRet += n - p = p[n:] - } - if w.err != nil { - return nRet, w.err - } - n := copy(w.ibuf[len(w.ibuf):cap(w.ibuf)], p) - w.ibuf = w.ibuf[:len(w.ibuf)+n] - nRet += n - return nRet, nil -} - -func (w *Writer) write(p []byte) (nRet int, errRet error) { - if w.err != nil { - return 0, w.err - } - for len(p) > 0 { - obufStart := len(magicChunk) - if !w.wroteStreamHeader { - w.wroteStreamHeader = true - copy(w.obuf, magicChunk) - obufStart = 0 - } - - var uncompressed []byte - if len(p) > maxBlockSize { - uncompressed, p = p[:maxBlockSize], p[maxBlockSize:] - } else { - uncompressed, p = p, nil - } - checksum := crc(uncompressed) - - // Compress the buffer, discarding the result if the improvement - // isn't at least 12.5%. 
- compressed := Encode(w.obuf[obufHeaderLen:], uncompressed) - chunkType := uint8(chunkTypeCompressedData) - chunkLen := 4 + len(compressed) - obufEnd := obufHeaderLen + len(compressed) - if len(compressed) >= len(uncompressed)-len(uncompressed)/8 { - chunkType = chunkTypeUncompressedData - chunkLen = 4 + len(uncompressed) - obufEnd = obufHeaderLen - } - - // Fill in the per-chunk header that comes before the body. - w.obuf[len(magicChunk)+0] = chunkType - w.obuf[len(magicChunk)+1] = uint8(chunkLen >> 0) - w.obuf[len(magicChunk)+2] = uint8(chunkLen >> 8) - w.obuf[len(magicChunk)+3] = uint8(chunkLen >> 16) - w.obuf[len(magicChunk)+4] = uint8(checksum >> 0) - w.obuf[len(magicChunk)+5] = uint8(checksum >> 8) - w.obuf[len(magicChunk)+6] = uint8(checksum >> 16) - w.obuf[len(magicChunk)+7] = uint8(checksum >> 24) - - if _, err := w.w.Write(w.obuf[obufStart:obufEnd]); err != nil { - w.err = err - return nRet, err - } - if chunkType == chunkTypeUncompressedData { - if _, err := w.w.Write(uncompressed); err != nil { - w.err = err - return nRet, err - } - } - nRet += len(uncompressed) - } - return nRet, nil -} - -// Flush flushes the Writer to its underlying io.Writer. -func (w *Writer) Flush() error { - if w.err != nil { - return w.err - } - if len(w.ibuf) == 0 { - return nil - } - w.write(w.ibuf) - w.ibuf = w.ibuf[:0] - return w.err -} - -// Close calls Flush and then closes the Writer. -func (w *Writer) Close() error { - w.Flush() - ret := w.err - if w.err == nil { - w.err = errClosed - } - return ret -} diff --git a/vendor/github.com/golang/snappy/encode_amd64.go b/vendor/github.com/golang/snappy/encode_amd64.go deleted file mode 100644 index 150d91b..0000000 --- a/vendor/github.com/golang/snappy/encode_amd64.go +++ /dev/null @@ -1,29 +0,0 @@ -// Copyright 2016 The Snappy-Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -// +build !appengine -// +build gc -// +build !noasm - -package snappy - -// emitLiteral has the same semantics as in encode_other.go. -// -//go:noescape -func emitLiteral(dst, lit []byte) int - -// emitCopy has the same semantics as in encode_other.go. -// -//go:noescape -func emitCopy(dst []byte, offset, length int) int - -// extendMatch has the same semantics as in encode_other.go. -// -//go:noescape -func extendMatch(src []byte, i, j int) int - -// encodeBlock has the same semantics as in encode_other.go. -// -//go:noescape -func encodeBlock(dst, src []byte) (d int) diff --git a/vendor/github.com/golang/snappy/encode_amd64.s b/vendor/github.com/golang/snappy/encode_amd64.s deleted file mode 100644 index adfd979..0000000 --- a/vendor/github.com/golang/snappy/encode_amd64.s +++ /dev/null @@ -1,730 +0,0 @@ -// Copyright 2016 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build !appengine -// +build gc -// +build !noasm - -#include "textflag.h" - -// The XXX lines assemble on Go 1.4, 1.5 and 1.7, but not 1.6, due to a -// Go toolchain regression. See https://github.com/golang/go/issues/15426 and -// https://github.com/golang/snappy/issues/29 -// -// As a workaround, the package was built with a known good assembler, and -// those instructions were disassembled by "objdump -d" to yield the -// 4e 0f b7 7c 5c 78 movzwq 0x78(%rsp,%r11,2),%r15 -// style comments, in AT&T asm syntax. Note that rsp here is a physical -// register, not Go/asm's SP pseudo-register (see https://golang.org/doc/asm). -// The instructions were then encoded as "BYTE $0x.." sequences, which assemble -// fine on Go 1.6. - -// The asm code generally follows the pure Go code in encode_other.go, except -// where marked with a "!!!". 
- -// ---------------------------------------------------------------------------- - -// func emitLiteral(dst, lit []byte) int -// -// All local variables fit into registers. The register allocation: -// - AX len(lit) -// - BX n -// - DX return value -// - DI &dst[i] -// - R10 &lit[0] -// -// The 24 bytes of stack space is to call runtime·memmove. -// -// The unusual register allocation of local variables, such as R10 for the -// source pointer, matches the allocation used at the call site in encodeBlock, -// which makes it easier to manually inline this function. -TEXT ·emitLiteral(SB), NOSPLIT, $24-56 - MOVQ dst_base+0(FP), DI - MOVQ lit_base+24(FP), R10 - MOVQ lit_len+32(FP), AX - MOVQ AX, DX - MOVL AX, BX - SUBL $1, BX - - CMPL BX, $60 - JLT oneByte - CMPL BX, $256 - JLT twoBytes - -threeBytes: - MOVB $0xf4, 0(DI) - MOVW BX, 1(DI) - ADDQ $3, DI - ADDQ $3, DX - JMP memmove - -twoBytes: - MOVB $0xf0, 0(DI) - MOVB BX, 1(DI) - ADDQ $2, DI - ADDQ $2, DX - JMP memmove - -oneByte: - SHLB $2, BX - MOVB BX, 0(DI) - ADDQ $1, DI - ADDQ $1, DX - -memmove: - MOVQ DX, ret+48(FP) - - // copy(dst[i:], lit) - // - // This means calling runtime·memmove(&dst[i], &lit[0], len(lit)), so we push - // DI, R10 and AX as arguments. - MOVQ DI, 0(SP) - MOVQ R10, 8(SP) - MOVQ AX, 16(SP) - CALL runtime·memmove(SB) - RET - -// ---------------------------------------------------------------------------- - -// func emitCopy(dst []byte, offset, length int) int -// -// All local variables fit into registers. The register allocation: -// - AX length -// - SI &dst[0] -// - DI &dst[i] -// - R11 offset -// -// The unusual register allocation of local variables, such as R11 for the -// offset, matches the allocation used at the call site in encodeBlock, which -// makes it easier to manually inline this function. 
-TEXT ·emitCopy(SB), NOSPLIT, $0-48 - MOVQ dst_base+0(FP), DI - MOVQ DI, SI - MOVQ offset+24(FP), R11 - MOVQ length+32(FP), AX - -loop0: - // for length >= 68 { etc } - CMPL AX, $68 - JLT step1 - - // Emit a length 64 copy, encoded as 3 bytes. - MOVB $0xfe, 0(DI) - MOVW R11, 1(DI) - ADDQ $3, DI - SUBL $64, AX - JMP loop0 - -step1: - // if length > 64 { etc } - CMPL AX, $64 - JLE step2 - - // Emit a length 60 copy, encoded as 3 bytes. - MOVB $0xee, 0(DI) - MOVW R11, 1(DI) - ADDQ $3, DI - SUBL $60, AX - -step2: - // if length >= 12 || offset >= 2048 { goto step3 } - CMPL AX, $12 - JGE step3 - CMPL R11, $2048 - JGE step3 - - // Emit the remaining copy, encoded as 2 bytes. - MOVB R11, 1(DI) - SHRL $8, R11 - SHLB $5, R11 - SUBB $4, AX - SHLB $2, AX - ORB AX, R11 - ORB $1, R11 - MOVB R11, 0(DI) - ADDQ $2, DI - - // Return the number of bytes written. - SUBQ SI, DI - MOVQ DI, ret+40(FP) - RET - -step3: - // Emit the remaining copy, encoded as 3 bytes. - SUBL $1, AX - SHLB $2, AX - ORB $2, AX - MOVB AX, 0(DI) - MOVW R11, 1(DI) - ADDQ $3, DI - - // Return the number of bytes written. - SUBQ SI, DI - MOVQ DI, ret+40(FP) - RET - -// ---------------------------------------------------------------------------- - -// func extendMatch(src []byte, i, j int) int -// -// All local variables fit into registers. The register allocation: -// - DX &src[0] -// - SI &src[j] -// - R13 &src[len(src) - 8] -// - R14 &src[len(src)] -// - R15 &src[i] -// -// The unusual register allocation of local variables, such as R15 for a source -// pointer, matches the allocation used at the call site in encodeBlock, which -// makes it easier to manually inline this function. -TEXT ·extendMatch(SB), NOSPLIT, $0-48 - MOVQ src_base+0(FP), DX - MOVQ src_len+8(FP), R14 - MOVQ i+24(FP), R15 - MOVQ j+32(FP), SI - ADDQ DX, R14 - ADDQ DX, R15 - ADDQ DX, SI - MOVQ R14, R13 - SUBQ $8, R13 - -cmp8: - // As long as we are 8 or more bytes before the end of src, we can load and - // compare 8 bytes at a time. 
If those 8 bytes are equal, repeat. - CMPQ SI, R13 - JA cmp1 - MOVQ (R15), AX - MOVQ (SI), BX - CMPQ AX, BX - JNE bsf - ADDQ $8, R15 - ADDQ $8, SI - JMP cmp8 - -bsf: - // If those 8 bytes were not equal, XOR the two 8 byte values, and return - // the index of the first byte that differs. The BSF instruction finds the - // least significant 1 bit, the amd64 architecture is little-endian, and - // the shift by 3 converts a bit index to a byte index. - XORQ AX, BX - BSFQ BX, BX - SHRQ $3, BX - ADDQ BX, SI - - // Convert from &src[ret] to ret. - SUBQ DX, SI - MOVQ SI, ret+40(FP) - RET - -cmp1: - // In src's tail, compare 1 byte at a time. - CMPQ SI, R14 - JAE extendMatchEnd - MOVB (R15), AX - MOVB (SI), BX - CMPB AX, BX - JNE extendMatchEnd - ADDQ $1, R15 - ADDQ $1, SI - JMP cmp1 - -extendMatchEnd: - // Convert from &src[ret] to ret. - SUBQ DX, SI - MOVQ SI, ret+40(FP) - RET - -// ---------------------------------------------------------------------------- - -// func encodeBlock(dst, src []byte) (d int) -// -// All local variables fit into registers, other than "var table". The register -// allocation: -// - AX . . -// - BX . . -// - CX 56 shift (note that amd64 shifts by non-immediates must use CX). -// - DX 64 &src[0], tableSize -// - SI 72 &src[s] -// - DI 80 &dst[d] -// - R9 88 sLimit -// - R10 . &src[nextEmit] -// - R11 96 prevHash, currHash, nextHash, offset -// - R12 104 &src[base], skip -// - R13 . &src[nextS], &src[len(src) - 8] -// - R14 . len(src), bytesBetweenHashLookups, &src[len(src)], x -// - R15 112 candidate -// -// The second column (56, 64, etc) is the stack offset to spill the registers -// when calling other functions. We could pack this slightly tighter, but it's -// simpler to have a dedicated spill map independent of the function called. -// -// "var table [maxTableSize]uint16" takes up 32768 bytes of stack space. 
An -// extra 56 bytes, to call other functions, and an extra 64 bytes, to spill -// local variables (registers) during calls gives 32768 + 56 + 64 = 32888. -TEXT ·encodeBlock(SB), 0, $32888-56 - MOVQ dst_base+0(FP), DI - MOVQ src_base+24(FP), SI - MOVQ src_len+32(FP), R14 - - // shift, tableSize := uint32(32-8), 1<<8 - MOVQ $24, CX - MOVQ $256, DX - -calcShift: - // for ; tableSize < maxTableSize && tableSize < len(src); tableSize *= 2 { - // shift-- - // } - CMPQ DX, $16384 - JGE varTable - CMPQ DX, R14 - JGE varTable - SUBQ $1, CX - SHLQ $1, DX - JMP calcShift - -varTable: - // var table [maxTableSize]uint16 - // - // In the asm code, unlike the Go code, we can zero-initialize only the - // first tableSize elements. Each uint16 element is 2 bytes and each MOVOU - // writes 16 bytes, so we can do only tableSize/8 writes instead of the - // 2048 writes that would zero-initialize all of table's 32768 bytes. - SHRQ $3, DX - LEAQ table-32768(SP), BX - PXOR X0, X0 - -memclr: - MOVOU X0, 0(BX) - ADDQ $16, BX - SUBQ $1, DX - JNZ memclr - - // !!! DX = &src[0] - MOVQ SI, DX - - // sLimit := len(src) - inputMargin - MOVQ R14, R9 - SUBQ $15, R9 - - // !!! Pre-emptively spill CX, DX and R9 to the stack. Their values don't - // change for the rest of the function. 
- MOVQ CX, 56(SP) - MOVQ DX, 64(SP) - MOVQ R9, 88(SP) - - // nextEmit := 0 - MOVQ DX, R10 - - // s := 1 - ADDQ $1, SI - - // nextHash := hash(load32(src, s), shift) - MOVL 0(SI), R11 - IMULL $0x1e35a7bd, R11 - SHRL CX, R11 - -outer: - // for { etc } - - // skip := 32 - MOVQ $32, R12 - - // nextS := s - MOVQ SI, R13 - - // candidate := 0 - MOVQ $0, R15 - -inner0: - // for { etc } - - // s := nextS - MOVQ R13, SI - - // bytesBetweenHashLookups := skip >> 5 - MOVQ R12, R14 - SHRQ $5, R14 - - // nextS = s + bytesBetweenHashLookups - ADDQ R14, R13 - - // skip += bytesBetweenHashLookups - ADDQ R14, R12 - - // if nextS > sLimit { goto emitRemainder } - MOVQ R13, AX - SUBQ DX, AX - CMPQ AX, R9 - JA emitRemainder - - // candidate = int(table[nextHash]) - // XXX: MOVWQZX table-32768(SP)(R11*2), R15 - // XXX: 4e 0f b7 7c 5c 78 movzwq 0x78(%rsp,%r11,2),%r15 - BYTE $0x4e - BYTE $0x0f - BYTE $0xb7 - BYTE $0x7c - BYTE $0x5c - BYTE $0x78 - - // table[nextHash] = uint16(s) - MOVQ SI, AX - SUBQ DX, AX - - // XXX: MOVW AX, table-32768(SP)(R11*2) - // XXX: 66 42 89 44 5c 78 mov %ax,0x78(%rsp,%r11,2) - BYTE $0x66 - BYTE $0x42 - BYTE $0x89 - BYTE $0x44 - BYTE $0x5c - BYTE $0x78 - - // nextHash = hash(load32(src, nextS), shift) - MOVL 0(R13), R11 - IMULL $0x1e35a7bd, R11 - SHRL CX, R11 - - // if load32(src, s) != load32(src, candidate) { continue } break - MOVL 0(SI), AX - MOVL (DX)(R15*1), BX - CMPL AX, BX - JNE inner0 - -fourByteMatch: - // As per the encode_other.go code: - // - // A 4-byte match has been found. We'll later see etc. - - // !!! Jump to a fast path for short (<= 16 byte) literals. See the comment - // on inputMargin in encode.go. - MOVQ SI, AX - SUBQ R10, AX - CMPQ AX, $16 - JLE emitLiteralFastPath - - // ---------------------------------------- - // Begin inline of the emitLiteral call. 
- // - // d += emitLiteral(dst[d:], src[nextEmit:s]) - - MOVL AX, BX - SUBL $1, BX - - CMPL BX, $60 - JLT inlineEmitLiteralOneByte - CMPL BX, $256 - JLT inlineEmitLiteralTwoBytes - -inlineEmitLiteralThreeBytes: - MOVB $0xf4, 0(DI) - MOVW BX, 1(DI) - ADDQ $3, DI - JMP inlineEmitLiteralMemmove - -inlineEmitLiteralTwoBytes: - MOVB $0xf0, 0(DI) - MOVB BX, 1(DI) - ADDQ $2, DI - JMP inlineEmitLiteralMemmove - -inlineEmitLiteralOneByte: - SHLB $2, BX - MOVB BX, 0(DI) - ADDQ $1, DI - -inlineEmitLiteralMemmove: - // Spill local variables (registers) onto the stack; call; unspill. - // - // copy(dst[i:], lit) - // - // This means calling runtime·memmove(&dst[i], &lit[0], len(lit)), so we push - // DI, R10 and AX as arguments. - MOVQ DI, 0(SP) - MOVQ R10, 8(SP) - MOVQ AX, 16(SP) - ADDQ AX, DI // Finish the "d +=" part of "d += emitLiteral(etc)". - MOVQ SI, 72(SP) - MOVQ DI, 80(SP) - MOVQ R15, 112(SP) - CALL runtime·memmove(SB) - MOVQ 56(SP), CX - MOVQ 64(SP), DX - MOVQ 72(SP), SI - MOVQ 80(SP), DI - MOVQ 88(SP), R9 - MOVQ 112(SP), R15 - JMP inner1 - -inlineEmitLiteralEnd: - // End inline of the emitLiteral call. - // ---------------------------------------- - -emitLiteralFastPath: - // !!! Emit the 1-byte encoding "uint8(len(lit)-1)<<2". - MOVB AX, BX - SUBB $1, BX - SHLB $2, BX - MOVB BX, (DI) - ADDQ $1, DI - - // !!! Implement the copy from lit to dst as a 16-byte load and store. - // (Encode's documentation says that dst and src must not overlap.) - // - // This always copies 16 bytes, instead of only len(lit) bytes, but that's - // OK. Subsequent iterations will fix up the overrun. - // - // Note that on amd64, it is legal and cheap to issue unaligned 8-byte or - // 16-byte loads and stores. This technique probably wouldn't be as - // effective on architectures that are fussier about alignment. - MOVOU 0(R10), X0 - MOVOU X0, 0(DI) - ADDQ AX, DI - -inner1: - // for { etc } - - // base := s - MOVQ SI, R12 - - // !!! 
offset := base - candidate - MOVQ R12, R11 - SUBQ R15, R11 - SUBQ DX, R11 - - // ---------------------------------------- - // Begin inline of the extendMatch call. - // - // s = extendMatch(src, candidate+4, s+4) - - // !!! R14 = &src[len(src)] - MOVQ src_len+32(FP), R14 - ADDQ DX, R14 - - // !!! R13 = &src[len(src) - 8] - MOVQ R14, R13 - SUBQ $8, R13 - - // !!! R15 = &src[candidate + 4] - ADDQ $4, R15 - ADDQ DX, R15 - - // !!! s += 4 - ADDQ $4, SI - -inlineExtendMatchCmp8: - // As long as we are 8 or more bytes before the end of src, we can load and - // compare 8 bytes at a time. If those 8 bytes are equal, repeat. - CMPQ SI, R13 - JA inlineExtendMatchCmp1 - MOVQ (R15), AX - MOVQ (SI), BX - CMPQ AX, BX - JNE inlineExtendMatchBSF - ADDQ $8, R15 - ADDQ $8, SI - JMP inlineExtendMatchCmp8 - -inlineExtendMatchBSF: - // If those 8 bytes were not equal, XOR the two 8 byte values, and return - // the index of the first byte that differs. The BSF instruction finds the - // least significant 1 bit, the amd64 architecture is little-endian, and - // the shift by 3 converts a bit index to a byte index. - XORQ AX, BX - BSFQ BX, BX - SHRQ $3, BX - ADDQ BX, SI - JMP inlineExtendMatchEnd - -inlineExtendMatchCmp1: - // In src's tail, compare 1 byte at a time. - CMPQ SI, R14 - JAE inlineExtendMatchEnd - MOVB (R15), AX - MOVB (SI), BX - CMPB AX, BX - JNE inlineExtendMatchEnd - ADDQ $1, R15 - ADDQ $1, SI - JMP inlineExtendMatchCmp1 - -inlineExtendMatchEnd: - // End inline of the extendMatch call. - // ---------------------------------------- - - // ---------------------------------------- - // Begin inline of the emitCopy call. - // - // d += emitCopy(dst[d:], base-candidate, s-base) - - // !!! length := s - base - MOVQ SI, AX - SUBQ R12, AX - -inlineEmitCopyLoop0: - // for length >= 68 { etc } - CMPL AX, $68 - JLT inlineEmitCopyStep1 - - // Emit a length 64 copy, encoded as 3 bytes. 
- MOVB $0xfe, 0(DI) - MOVW R11, 1(DI) - ADDQ $3, DI - SUBL $64, AX - JMP inlineEmitCopyLoop0 - -inlineEmitCopyStep1: - // if length > 64 { etc } - CMPL AX, $64 - JLE inlineEmitCopyStep2 - - // Emit a length 60 copy, encoded as 3 bytes. - MOVB $0xee, 0(DI) - MOVW R11, 1(DI) - ADDQ $3, DI - SUBL $60, AX - -inlineEmitCopyStep2: - // if length >= 12 || offset >= 2048 { goto inlineEmitCopyStep3 } - CMPL AX, $12 - JGE inlineEmitCopyStep3 - CMPL R11, $2048 - JGE inlineEmitCopyStep3 - - // Emit the remaining copy, encoded as 2 bytes. - MOVB R11, 1(DI) - SHRL $8, R11 - SHLB $5, R11 - SUBB $4, AX - SHLB $2, AX - ORB AX, R11 - ORB $1, R11 - MOVB R11, 0(DI) - ADDQ $2, DI - JMP inlineEmitCopyEnd - -inlineEmitCopyStep3: - // Emit the remaining copy, encoded as 3 bytes. - SUBL $1, AX - SHLB $2, AX - ORB $2, AX - MOVB AX, 0(DI) - MOVW R11, 1(DI) - ADDQ $3, DI - -inlineEmitCopyEnd: - // End inline of the emitCopy call. - // ---------------------------------------- - - // nextEmit = s - MOVQ SI, R10 - - // if s >= sLimit { goto emitRemainder } - MOVQ SI, AX - SUBQ DX, AX - CMPQ AX, R9 - JAE emitRemainder - - // As per the encode_other.go code: - // - // We could immediately etc. 
- - // x := load64(src, s-1) - MOVQ -1(SI), R14 - - // prevHash := hash(uint32(x>>0), shift) - MOVL R14, R11 - IMULL $0x1e35a7bd, R11 - SHRL CX, R11 - - // table[prevHash] = uint16(s-1) - MOVQ SI, AX - SUBQ DX, AX - SUBQ $1, AX - - // XXX: MOVW AX, table-32768(SP)(R11*2) - // XXX: 66 42 89 44 5c 78 mov %ax,0x78(%rsp,%r11,2) - BYTE $0x66 - BYTE $0x42 - BYTE $0x89 - BYTE $0x44 - BYTE $0x5c - BYTE $0x78 - - // currHash := hash(uint32(x>>8), shift) - SHRQ $8, R14 - MOVL R14, R11 - IMULL $0x1e35a7bd, R11 - SHRL CX, R11 - - // candidate = int(table[currHash]) - // XXX: MOVWQZX table-32768(SP)(R11*2), R15 - // XXX: 4e 0f b7 7c 5c 78 movzwq 0x78(%rsp,%r11,2),%r15 - BYTE $0x4e - BYTE $0x0f - BYTE $0xb7 - BYTE $0x7c - BYTE $0x5c - BYTE $0x78 - - // table[currHash] = uint16(s) - ADDQ $1, AX - - // XXX: MOVW AX, table-32768(SP)(R11*2) - // XXX: 66 42 89 44 5c 78 mov %ax,0x78(%rsp,%r11,2) - BYTE $0x66 - BYTE $0x42 - BYTE $0x89 - BYTE $0x44 - BYTE $0x5c - BYTE $0x78 - - // if uint32(x>>8) == load32(src, candidate) { continue } - MOVL (DX)(R15*1), BX - CMPL R14, BX - JEQ inner1 - - // nextHash = hash(uint32(x>>16), shift) - SHRQ $8, R14 - MOVL R14, R11 - IMULL $0x1e35a7bd, R11 - SHRL CX, R11 - - // s++ - ADDQ $1, SI - - // break out of the inner1 for loop, i.e. continue the outer loop. - JMP outer - -emitRemainder: - // if nextEmit < len(src) { etc } - MOVQ src_len+32(FP), AX - ADDQ DX, AX - CMPQ R10, AX - JEQ encodeBlockEnd - - // d += emitLiteral(dst[d:], src[nextEmit:]) - // - // Push args. - MOVQ DI, 0(SP) - MOVQ $0, 8(SP) // Unnecessary, as the callee ignores it, but conservative. - MOVQ $0, 16(SP) // Unnecessary, as the callee ignores it, but conservative. - MOVQ R10, 24(SP) - SUBQ R10, AX - MOVQ AX, 32(SP) - MOVQ AX, 40(SP) // Unnecessary, as the callee ignores it, but conservative. - - // Spill local variables (registers) onto the stack; call; unspill. - MOVQ DI, 80(SP) - CALL ·emitLiteral(SB) - MOVQ 80(SP), DI - - // Finish the "d +=" part of "d += emitLiteral(etc)". 
- ADDQ 48(SP), DI - -encodeBlockEnd: - MOVQ dst_base+0(FP), AX - SUBQ AX, DI - MOVQ DI, d+48(FP) - RET diff --git a/vendor/github.com/golang/snappy/encode_other.go b/vendor/github.com/golang/snappy/encode_other.go deleted file mode 100644 index dbcae90..0000000 --- a/vendor/github.com/golang/snappy/encode_other.go +++ /dev/null @@ -1,238 +0,0 @@ -// Copyright 2016 The Snappy-Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build !amd64 appengine !gc noasm - -package snappy - -func load32(b []byte, i int) uint32 { - b = b[i : i+4 : len(b)] // Help the compiler eliminate bounds checks on the next line. - return uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24 -} - -func load64(b []byte, i int) uint64 { - b = b[i : i+8 : len(b)] // Help the compiler eliminate bounds checks on the next line. - return uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | - uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56 -} - -// emitLiteral writes a literal chunk and returns the number of bytes written. -// -// It assumes that: -// dst is long enough to hold the encoded bytes -// 1 <= len(lit) && len(lit) <= 65536 -func emitLiteral(dst, lit []byte) int { - i, n := 0, uint(len(lit)-1) - switch { - case n < 60: - dst[0] = uint8(n)<<2 | tagLiteral - i = 1 - case n < 1<<8: - dst[0] = 60<<2 | tagLiteral - dst[1] = uint8(n) - i = 2 - default: - dst[0] = 61<<2 | tagLiteral - dst[1] = uint8(n) - dst[2] = uint8(n >> 8) - i = 3 - } - return i + copy(dst[i:], lit) -} - -// emitCopy writes a copy chunk and returns the number of bytes written. -// -// It assumes that: -// dst is long enough to hold the encoded bytes -// 1 <= offset && offset <= 65535 -// 4 <= length && length <= 65535 -func emitCopy(dst []byte, offset, length int) int { - i := 0 - // The maximum length for a single tagCopy1 or tagCopy2 op is 64 bytes. 
The - // threshold for this loop is a little higher (at 68 = 64 + 4), and the - // length emitted down below is is a little lower (at 60 = 64 - 4), because - // it's shorter to encode a length 67 copy as a length 60 tagCopy2 followed - // by a length 7 tagCopy1 (which encodes as 3+2 bytes) than to encode it as - // a length 64 tagCopy2 followed by a length 3 tagCopy2 (which encodes as - // 3+3 bytes). The magic 4 in the 64±4 is because the minimum length for a - // tagCopy1 op is 4 bytes, which is why a length 3 copy has to be an - // encodes-as-3-bytes tagCopy2 instead of an encodes-as-2-bytes tagCopy1. - for length >= 68 { - // Emit a length 64 copy, encoded as 3 bytes. - dst[i+0] = 63<<2 | tagCopy2 - dst[i+1] = uint8(offset) - dst[i+2] = uint8(offset >> 8) - i += 3 - length -= 64 - } - if length > 64 { - // Emit a length 60 copy, encoded as 3 bytes. - dst[i+0] = 59<<2 | tagCopy2 - dst[i+1] = uint8(offset) - dst[i+2] = uint8(offset >> 8) - i += 3 - length -= 60 - } - if length >= 12 || offset >= 2048 { - // Emit the remaining copy, encoded as 3 bytes. - dst[i+0] = uint8(length-1)<<2 | tagCopy2 - dst[i+1] = uint8(offset) - dst[i+2] = uint8(offset >> 8) - return i + 3 - } - // Emit the remaining copy, encoded as 2 bytes. - dst[i+0] = uint8(offset>>8)<<5 | uint8(length-4)<<2 | tagCopy1 - dst[i+1] = uint8(offset) - return i + 2 -} - -// extendMatch returns the largest k such that k <= len(src) and that -// src[i:i+k-j] and src[j:k] have the same contents. -// -// It assumes that: -// 0 <= i && i < j && j <= len(src) -func extendMatch(src []byte, i, j int) int { - for ; j < len(src) && src[i] == src[j]; i, j = i+1, j+1 { - } - return j -} - -func hash(u, shift uint32) uint32 { - return (u * 0x1e35a7bd) >> shift -} - -// encodeBlock encodes a non-empty src to a guaranteed-large-enough dst. It -// assumes that the varint-encoded length of the decompressed bytes has already -// been written. 
-// -// It also assumes that: -// len(dst) >= MaxEncodedLen(len(src)) && -// minNonLiteralBlockSize <= len(src) && len(src) <= maxBlockSize -func encodeBlock(dst, src []byte) (d int) { - // Initialize the hash table. Its size ranges from 1<<8 to 1<<14 inclusive. - // The table element type is uint16, as s < sLimit and sLimit < len(src) - // and len(src) <= maxBlockSize and maxBlockSize == 65536. - const ( - maxTableSize = 1 << 14 - // tableMask is redundant, but helps the compiler eliminate bounds - // checks. - tableMask = maxTableSize - 1 - ) - shift := uint32(32 - 8) - for tableSize := 1 << 8; tableSize < maxTableSize && tableSize < len(src); tableSize *= 2 { - shift-- - } - // In Go, all array elements are zero-initialized, so there is no advantage - // to a smaller tableSize per se. However, it matches the C++ algorithm, - // and in the asm versions of this code, we can get away with zeroing only - // the first tableSize elements. - var table [maxTableSize]uint16 - - // sLimit is when to stop looking for offset/length copies. The inputMargin - // lets us use a fast path for emitLiteral in the main loop, while we are - // looking for copies. - sLimit := len(src) - inputMargin - - // nextEmit is where in src the next emitLiteral should start from. - nextEmit := 0 - - // The encoded form must start with a literal, as there are no previous - // bytes to copy, so we start looking for hash matches at s == 1. - s := 1 - nextHash := hash(load32(src, s), shift) - - for { - // Copied from the C++ snappy implementation: - // - // Heuristic match skipping: If 32 bytes are scanned with no matches - // found, start looking only at every other byte. If 32 more bytes are - // scanned (or skipped), look at every third byte, etc.. When a match - // is found, immediately go back to looking at every byte. 
This is a - // small loss (~5% performance, ~0.1% density) for compressible data - // due to more bookkeeping, but for non-compressible data (such as - // JPEG) it's a huge win since the compressor quickly "realizes" the - // data is incompressible and doesn't bother looking for matches - // everywhere. - // - // The "skip" variable keeps track of how many bytes there are since - // the last match; dividing it by 32 (ie. right-shifting by five) gives - // the number of bytes to move ahead for each iteration. - skip := 32 - - nextS := s - candidate := 0 - for { - s = nextS - bytesBetweenHashLookups := skip >> 5 - nextS = s + bytesBetweenHashLookups - skip += bytesBetweenHashLookups - if nextS > sLimit { - goto emitRemainder - } - candidate = int(table[nextHash&tableMask]) - table[nextHash&tableMask] = uint16(s) - nextHash = hash(load32(src, nextS), shift) - if load32(src, s) == load32(src, candidate) { - break - } - } - - // A 4-byte match has been found. We'll later see if more than 4 bytes - // match. But, prior to the match, src[nextEmit:s] are unmatched. Emit - // them as literal bytes. - d += emitLiteral(dst[d:], src[nextEmit:s]) - - // Call emitCopy, and then see if another emitCopy could be our next - // move. Repeat until we find no match for the input immediately after - // what was consumed by the last emitCopy call. - // - // If we exit this loop normally then we need to call emitLiteral next, - // though we don't yet know how big the literal will be. We handle that - // by proceeding to the next iteration of the main loop. We also can - // exit this loop via goto if we get close to exhausting the input. - for { - // Invariant: we have a 4-byte match at s, and no need to emit any - // literal bytes prior to s. - base := s - - // Extend the 4-byte match as long as possible. 
- // - // This is an inlined version of: - // s = extendMatch(src, candidate+4, s+4) - s += 4 - for i := candidate + 4; s < len(src) && src[i] == src[s]; i, s = i+1, s+1 { - } - - d += emitCopy(dst[d:], base-candidate, s-base) - nextEmit = s - if s >= sLimit { - goto emitRemainder - } - - // We could immediately start working at s now, but to improve - // compression we first update the hash table at s-1 and at s. If - // another emitCopy is not our next move, also calculate nextHash - // at s+1. At least on GOARCH=amd64, these three hash calculations - // are faster as one load64 call (with some shifts) instead of - // three load32 calls. - x := load64(src, s-1) - prevHash := hash(uint32(x>>0), shift) - table[prevHash&tableMask] = uint16(s - 1) - currHash := hash(uint32(x>>8), shift) - candidate = int(table[currHash&tableMask]) - table[currHash&tableMask] = uint16(s) - if uint32(x>>8) != load32(src, candidate) { - nextHash = hash(uint32(x>>16), shift) - s++ - break - } - } - } - -emitRemainder: - if nextEmit < len(src) { - d += emitLiteral(dst[d:], src[nextEmit:]) - } - return d -} diff --git a/vendor/github.com/golang/snappy/go.mod b/vendor/github.com/golang/snappy/go.mod deleted file mode 100644 index f6406bb..0000000 --- a/vendor/github.com/golang/snappy/go.mod +++ /dev/null @@ -1 +0,0 @@ -module github.com/golang/snappy diff --git a/vendor/github.com/golang/snappy/snappy.go b/vendor/github.com/golang/snappy/snappy.go deleted file mode 100644 index ece692e..0000000 --- a/vendor/github.com/golang/snappy/snappy.go +++ /dev/null @@ -1,98 +0,0 @@ -// Copyright 2011 The Snappy-Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package snappy implements the Snappy compression format. It aims for very -// high speeds and reasonable compression. -// -// There are actually two Snappy formats: block and stream. 
They are related, -// but different: trying to decompress block-compressed data as a Snappy stream -// will fail, and vice versa. The block format is the Decode and Encode -// functions and the stream format is the Reader and Writer types. -// -// The block format, the more common case, is used when the complete size (the -// number of bytes) of the original data is known upfront, at the time -// compression starts. The stream format, also known as the framing format, is -// for when that isn't always true. -// -// The canonical, C++ implementation is at https://github.com/google/snappy and -// it only implements the block format. -package snappy // import "github.com/golang/snappy" - -import ( - "hash/crc32" -) - -/* -Each encoded block begins with the varint-encoded length of the decoded data, -followed by a sequence of chunks. Chunks begin and end on byte boundaries. The -first byte of each chunk is broken into its 2 least and 6 most significant bits -called l and m: l ranges in [0, 4) and m ranges in [0, 64). l is the chunk tag. -Zero means a literal tag. All other values mean a copy tag. - -For literal tags: - - If m < 60, the next 1 + m bytes are literal bytes. - - Otherwise, let n be the little-endian unsigned integer denoted by the next - m - 59 bytes. The next 1 + n bytes after that are literal bytes. - -For copy tags, length bytes are copied from offset bytes ago, in the style of -Lempel-Ziv compression algorithms. In particular: - - For l == 1, the offset ranges in [0, 1<<11) and the length in [4, 12). - The length is 4 + the low 3 bits of m. The high 3 bits of m form bits 8-10 - of the offset. The next byte is bits 0-7 of the offset. - - For l == 2, the offset ranges in [0, 1<<16) and the length in [1, 65). - The length is 1 + m. The offset is the little-endian unsigned integer - denoted by the next 2 bytes. - - For l == 3, this tag is a legacy format that is no longer issued by most - encoders. 
Nonetheless, the offset ranges in [0, 1<<32) and the length in - [1, 65). The length is 1 + m. The offset is the little-endian unsigned - integer denoted by the next 4 bytes. -*/ -const ( - tagLiteral = 0x00 - tagCopy1 = 0x01 - tagCopy2 = 0x02 - tagCopy4 = 0x03 -) - -const ( - checksumSize = 4 - chunkHeaderSize = 4 - magicChunk = "\xff\x06\x00\x00" + magicBody - magicBody = "sNaPpY" - - // maxBlockSize is the maximum size of the input to encodeBlock. It is not - // part of the wire format per se, but some parts of the encoder assume - // that an offset fits into a uint16. - // - // Also, for the framing format (Writer type instead of Encode function), - // https://github.com/google/snappy/blob/master/framing_format.txt says - // that "the uncompressed data in a chunk must be no longer than 65536 - // bytes". - maxBlockSize = 65536 - - // maxEncodedLenOfMaxBlockSize equals MaxEncodedLen(maxBlockSize), but is - // hard coded to be a const instead of a variable, so that obufLen can also - // be a const. Their equivalence is confirmed by - // TestMaxEncodedLenOfMaxBlockSize. 
- maxEncodedLenOfMaxBlockSize = 76490 - - obufHeaderLen = len(magicChunk) + checksumSize + chunkHeaderSize - obufLen = obufHeaderLen + maxEncodedLenOfMaxBlockSize -) - -const ( - chunkTypeCompressedData = 0x00 - chunkTypeUncompressedData = 0x01 - chunkTypePadding = 0xfe - chunkTypeStreamIdentifier = 0xff -) - -var crcTable = crc32.MakeTable(crc32.Castagnoli) - -// crc implements the checksum specified in section 3 of -// https://github.com/google/snappy/blob/master/framing_format.txt -func crc(b []byte) uint32 { - c := crc32.Update(0, crcTable, b) - return uint32(c>>15|c<<17) + 0xa282ead8 -} diff --git a/vendor/github.com/matttproud/golang_protobuf_extensions/LICENSE b/vendor/github.com/matttproud/golang_protobuf_extensions/LICENSE deleted file mode 100644 index 8dada3e..0000000 --- a/vendor/github.com/matttproud/golang_protobuf_extensions/LICENSE +++ /dev/null @@ -1,201 +0,0 @@ - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. 
- - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. 
For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. 
If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. 
You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. 
Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "{}" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. 
- - Copyright {yyyy} {name of copyright owner} - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. diff --git a/vendor/github.com/matttproud/golang_protobuf_extensions/NOTICE b/vendor/github.com/matttproud/golang_protobuf_extensions/NOTICE deleted file mode 100644 index 5d8cb5b..0000000 --- a/vendor/github.com/matttproud/golang_protobuf_extensions/NOTICE +++ /dev/null @@ -1 +0,0 @@ -Copyright 2012 Matt T. Proud (matt.proud@gmail.com) diff --git a/vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/.gitignore b/vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/.gitignore deleted file mode 100644 index e16fb94..0000000 --- a/vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/.gitignore +++ /dev/null @@ -1 +0,0 @@ -cover.dat diff --git a/vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/Makefile b/vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/Makefile deleted file mode 100644 index 81be214..0000000 --- a/vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/Makefile +++ /dev/null @@ -1,7 +0,0 @@ -all: - -cover: - go test -cover -v -coverprofile=cover.dat ./... 
- go tool cover -func cover.dat - -.PHONY: cover diff --git a/vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/decode.go b/vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/decode.go deleted file mode 100644 index 258c063..0000000 --- a/vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/decode.go +++ /dev/null @@ -1,75 +0,0 @@ -// Copyright 2013 Matt T. Proud -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package pbutil - -import ( - "encoding/binary" - "errors" - "io" - - "github.com/golang/protobuf/proto" -) - -var errInvalidVarint = errors.New("invalid varint32 encountered") - -// ReadDelimited decodes a message from the provided length-delimited stream, -// where the length is encoded as 32-bit varint prefix to the message body. -// It returns the total number of bytes read and any applicable error. This is -// roughly equivalent to the companion Java API's -// MessageLite#parseDelimitedFrom. As per the reader contract, this function -// calls r.Read repeatedly as required until exactly one message including its -// prefix is read and decoded (or an error has occurred). The function never -// reads more bytes from the stream than required. The function never returns -// an error if a message has been read and decoded correctly, even if the end -// of the stream has been reached in doing so. In that case, any subsequent -// calls return (0, io.EOF). 
-func ReadDelimited(r io.Reader, m proto.Message) (n int, err error) { - // Per AbstractParser#parsePartialDelimitedFrom with - // CodedInputStream#readRawVarint32. - var headerBuf [binary.MaxVarintLen32]byte - var bytesRead, varIntBytes int - var messageLength uint64 - for varIntBytes == 0 { // i.e. no varint has been decoded yet. - if bytesRead >= len(headerBuf) { - return bytesRead, errInvalidVarint - } - // We have to read byte by byte here to avoid reading more bytes - // than required. Each read byte is appended to what we have - // read before. - newBytesRead, err := r.Read(headerBuf[bytesRead : bytesRead+1]) - if newBytesRead == 0 { - if err != nil { - return bytesRead, err - } - // A Reader should not return (0, nil), but if it does, - // it should be treated as no-op (according to the - // Reader contract). So let's go on... - continue - } - bytesRead += newBytesRead - // Now present everything read so far to the varint decoder and - // see if a varint can be decoded already. - messageLength, varIntBytes = proto.DecodeVarint(headerBuf[:bytesRead]) - } - - messageBuf := make([]byte, messageLength) - newBytesRead, err := io.ReadFull(r, messageBuf) - bytesRead += newBytesRead - if err != nil { - return bytesRead, err - } - - return bytesRead, proto.Unmarshal(messageBuf, m) -} diff --git a/vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/doc.go b/vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/doc.go deleted file mode 100644 index c318385..0000000 --- a/vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/doc.go +++ /dev/null @@ -1,16 +0,0 @@ -// Copyright 2013 Matt T. Proud -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Package pbutil provides record length-delimited Protocol Buffer streaming. -package pbutil diff --git a/vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/encode.go b/vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/encode.go deleted file mode 100644 index 8fb59ad..0000000 --- a/vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/encode.go +++ /dev/null @@ -1,46 +0,0 @@ -// Copyright 2013 Matt T. Proud -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package pbutil - -import ( - "encoding/binary" - "io" - - "github.com/golang/protobuf/proto" -) - -// WriteDelimited encodes and dumps a message to the provided writer prefixed -// with a 32-bit varint indicating the length of the encoded message, producing -// a length-delimited record stream, which can be used to chain together -// encoded messages of the same type together in a file. It returns the total -// number of bytes written and any applicable error. This is roughly -// equivalent to the companion Java API's MessageLite#writeDelimitedTo. 
-func WriteDelimited(w io.Writer, m proto.Message) (n int, err error) { - buffer, err := proto.Marshal(m) - if err != nil { - return 0, err - } - - var buf [binary.MaxVarintLen32]byte - encodedLength := binary.PutUvarint(buf[:], uint64(len(buffer))) - - sync, err := w.Write(buf[:encodedLength]) - if err != nil { - return sync, err - } - - n, err = w.Write(buffer) - return n + sync, err -} diff --git a/vendor/github.com/pkg/errors/.gitignore b/vendor/github.com/pkg/errors/.gitignore deleted file mode 100644 index daf913b..0000000 --- a/vendor/github.com/pkg/errors/.gitignore +++ /dev/null @@ -1,24 +0,0 @@ -# Compiled Object files, Static and Dynamic libs (Shared Objects) -*.o -*.a -*.so - -# Folders -_obj -_test - -# Architecture specific extensions/prefixes -*.[568vq] -[568vq].out - -*.cgo1.go -*.cgo2.c -_cgo_defun.c -_cgo_gotypes.go -_cgo_export.* - -_testmain.go - -*.exe -*.test -*.prof diff --git a/vendor/github.com/pkg/errors/.travis.yml b/vendor/github.com/pkg/errors/.travis.yml deleted file mode 100644 index d4b9266..0000000 --- a/vendor/github.com/pkg/errors/.travis.yml +++ /dev/null @@ -1,15 +0,0 @@ -language: go -go_import_path: github.com/pkg/errors -go: - - 1.4.x - - 1.5.x - - 1.6.x - - 1.7.x - - 1.8.x - - 1.9.x - - 1.10.x - - 1.11.x - - tip - -script: - - go test -v ./... diff --git a/vendor/github.com/pkg/errors/LICENSE b/vendor/github.com/pkg/errors/LICENSE deleted file mode 100644 index 835ba3e..0000000 --- a/vendor/github.com/pkg/errors/LICENSE +++ /dev/null @@ -1,23 +0,0 @@ -Copyright (c) 2015, Dave Cheney -All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are met: - -* Redistributions of source code must retain the above copyright notice, this - list of conditions and the following disclaimer. 
- -* Redistributions in binary form must reproduce the above copyright notice, - this list of conditions and the following disclaimer in the documentation - and/or other materials provided with the distribution. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" -AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE -FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL -DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR -SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER -CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, -OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/pkg/errors/README.md b/vendor/github.com/pkg/errors/README.md deleted file mode 100644 index 6483ba2..0000000 --- a/vendor/github.com/pkg/errors/README.md +++ /dev/null @@ -1,52 +0,0 @@ -# errors [![Travis-CI](https://travis-ci.org/pkg/errors.svg)](https://travis-ci.org/pkg/errors) [![AppVeyor](https://ci.appveyor.com/api/projects/status/b98mptawhudj53ep/branch/master?svg=true)](https://ci.appveyor.com/project/davecheney/errors/branch/master) [![GoDoc](https://godoc.org/github.com/pkg/errors?status.svg)](http://godoc.org/github.com/pkg/errors) [![Report card](https://goreportcard.com/badge/github.com/pkg/errors)](https://goreportcard.com/report/github.com/pkg/errors) [![Sourcegraph](https://sourcegraph.com/github.com/pkg/errors/-/badge.svg)](https://sourcegraph.com/github.com/pkg/errors?badge) - -Package errors provides simple error handling primitives. 
- -`go get github.com/pkg/errors` - -The traditional error handling idiom in Go is roughly akin to -```go -if err != nil { - return err -} -``` -which applied recursively up the call stack results in error reports without context or debugging information. The errors package allows programmers to add context to the failure path in their code in a way that does not destroy the original value of the error. - -## Adding context to an error - -The errors.Wrap function returns a new error that adds context to the original error. For example -```go -_, err := ioutil.ReadAll(r) -if err != nil { - return errors.Wrap(err, "read failed") -} -``` -## Retrieving the cause of an error - -Using `errors.Wrap` constructs a stack of errors, adding context to the preceding error. Depending on the nature of the error it may be necessary to reverse the operation of errors.Wrap to retrieve the original error for inspection. Any error value which implements this interface can be inspected by `errors.Cause`. -```go -type causer interface { - Cause() error -} -``` -`errors.Cause` will recursively retrieve the topmost error which does not implement `causer`, which is assumed to be the original cause. For example: -```go -switch err := errors.Cause(err).(type) { -case *MyError: - // handle specifically -default: - // unknown error -} -``` - -[Read the package documentation for more information](https://godoc.org/github.com/pkg/errors). - -## Contributing - -We welcome pull requests, bug fixes and issue reports. With that said, the bar for adding new symbols to this package is intentionally set high. - -Before proposing a change, please discuss your change by raising an issue. 
- -## License - -BSD-2-Clause diff --git a/vendor/github.com/pkg/errors/appveyor.yml b/vendor/github.com/pkg/errors/appveyor.yml deleted file mode 100644 index a932ead..0000000 --- a/vendor/github.com/pkg/errors/appveyor.yml +++ /dev/null @@ -1,32 +0,0 @@ -version: build-{build}.{branch} - -clone_folder: C:\gopath\src\github.com\pkg\errors -shallow_clone: true # for startup speed - -environment: - GOPATH: C:\gopath - -platform: - - x64 - -# http://www.appveyor.com/docs/installed-software -install: - # some helpful output for debugging builds - - go version - - go env - # pre-installed MinGW at C:\MinGW is 32bit only - # but MSYS2 at C:\msys64 has mingw64 - - set PATH=C:\msys64\mingw64\bin;%PATH% - - gcc --version - - g++ --version - -build_script: - - go install -v ./... - -test_script: - - set PATH=C:\gopath\bin;%PATH% - - go test -v ./... - -#artifacts: -# - path: '%GOPATH%\bin\*.exe' -deploy: off diff --git a/vendor/github.com/pkg/errors/errors.go b/vendor/github.com/pkg/errors/errors.go deleted file mode 100644 index 7421f32..0000000 --- a/vendor/github.com/pkg/errors/errors.go +++ /dev/null @@ -1,282 +0,0 @@ -// Package errors provides simple error handling primitives. -// -// The traditional error handling idiom in Go is roughly akin to -// -// if err != nil { -// return err -// } -// -// which when applied recursively up the call stack results in error reports -// without context or debugging information. The errors package allows -// programmers to add context to the failure path in their code in a way -// that does not destroy the original value of the error. -// -// Adding context to an error -// -// The errors.Wrap function returns a new error that adds context to the -// original error by recording a stack trace at the point Wrap is called, -// together with the supplied message. 
For example -// -// _, err := ioutil.ReadAll(r) -// if err != nil { -// return errors.Wrap(err, "read failed") -// } -// -// If additional control is required, the errors.WithStack and -// errors.WithMessage functions destructure errors.Wrap into its component -// operations: annotating an error with a stack trace and with a message, -// respectively. -// -// Retrieving the cause of an error -// -// Using errors.Wrap constructs a stack of errors, adding context to the -// preceding error. Depending on the nature of the error it may be necessary -// to reverse the operation of errors.Wrap to retrieve the original error -// for inspection. Any error value which implements this interface -// -// type causer interface { -// Cause() error -// } -// -// can be inspected by errors.Cause. errors.Cause will recursively retrieve -// the topmost error that does not implement causer, which is assumed to be -// the original cause. For example: -// -// switch err := errors.Cause(err).(type) { -// case *MyError: -// // handle specifically -// default: -// // unknown error -// } -// -// Although the causer interface is not exported by this package, it is -// considered a part of its stable public interface. -// -// Formatted printing of errors -// -// All error values returned from this package implement fmt.Formatter and can -// be formatted by the fmt package. The following verbs are supported: -// -// %s print the error. If the error has a Cause it will be -// printed recursively. -// %v see %s -// %+v extended format. Each Frame of the error's StackTrace will -// be printed in detail. -// -// Retrieving the stack trace of an error or wrapper -// -// New, Errorf, Wrap, and Wrapf record a stack trace at the point they are -// invoked. 
This information can be retrieved with the following interface: -// -// type stackTracer interface { -// StackTrace() errors.StackTrace -// } -// -// The returned errors.StackTrace type is defined as -// -// type StackTrace []Frame -// -// The Frame type represents a call site in the stack trace. Frame supports -// the fmt.Formatter interface that can be used for printing information about -// the stack trace of this error. For example: -// -// if err, ok := err.(stackTracer); ok { -// for _, f := range err.StackTrace() { -// fmt.Printf("%+s:%d", f) -// } -// } -// -// Although the stackTracer interface is not exported by this package, it is -// considered a part of its stable public interface. -// -// See the documentation for Frame.Format for more details. -package errors - -import ( - "fmt" - "io" -) - -// New returns an error with the supplied message. -// New also records the stack trace at the point it was called. -func New(message string) error { - return &fundamental{ - msg: message, - stack: callers(), - } -} - -// Errorf formats according to a format specifier and returns the string -// as a value that satisfies error. -// Errorf also records the stack trace at the point it was called. -func Errorf(format string, args ...interface{}) error { - return &fundamental{ - msg: fmt.Sprintf(format, args...), - stack: callers(), - } -} - -// fundamental is an error that has a message and a stack, but no caller. -type fundamental struct { - msg string - *stack -} - -func (f *fundamental) Error() string { return f.msg } - -func (f *fundamental) Format(s fmt.State, verb rune) { - switch verb { - case 'v': - if s.Flag('+') { - io.WriteString(s, f.msg) - f.stack.Format(s, verb) - return - } - fallthrough - case 's': - io.WriteString(s, f.msg) - case 'q': - fmt.Fprintf(s, "%q", f.msg) - } -} - -// WithStack annotates err with a stack trace at the point WithStack was called. -// If err is nil, WithStack returns nil. 
-func WithStack(err error) error { - if err == nil { - return nil - } - return &withStack{ - err, - callers(), - } -} - -type withStack struct { - error - *stack -} - -func (w *withStack) Cause() error { return w.error } - -func (w *withStack) Format(s fmt.State, verb rune) { - switch verb { - case 'v': - if s.Flag('+') { - fmt.Fprintf(s, "%+v", w.Cause()) - w.stack.Format(s, verb) - return - } - fallthrough - case 's': - io.WriteString(s, w.Error()) - case 'q': - fmt.Fprintf(s, "%q", w.Error()) - } -} - -// Wrap returns an error annotating err with a stack trace -// at the point Wrap is called, and the supplied message. -// If err is nil, Wrap returns nil. -func Wrap(err error, message string) error { - if err == nil { - return nil - } - err = &withMessage{ - cause: err, - msg: message, - } - return &withStack{ - err, - callers(), - } -} - -// Wrapf returns an error annotating err with a stack trace -// at the point Wrapf is called, and the format specifier. -// If err is nil, Wrapf returns nil. -func Wrapf(err error, format string, args ...interface{}) error { - if err == nil { - return nil - } - err = &withMessage{ - cause: err, - msg: fmt.Sprintf(format, args...), - } - return &withStack{ - err, - callers(), - } -} - -// WithMessage annotates err with a new message. -// If err is nil, WithMessage returns nil. -func WithMessage(err error, message string) error { - if err == nil { - return nil - } - return &withMessage{ - cause: err, - msg: message, - } -} - -// WithMessagef annotates err with the format specifier. -// If err is nil, WithMessagef returns nil. 
-func WithMessagef(err error, format string, args ...interface{}) error { - if err == nil { - return nil - } - return &withMessage{ - cause: err, - msg: fmt.Sprintf(format, args...), - } -} - -type withMessage struct { - cause error - msg string -} - -func (w *withMessage) Error() string { return w.msg + ": " + w.cause.Error() } -func (w *withMessage) Cause() error { return w.cause } - -func (w *withMessage) Format(s fmt.State, verb rune) { - switch verb { - case 'v': - if s.Flag('+') { - fmt.Fprintf(s, "%+v\n", w.Cause()) - io.WriteString(s, w.msg) - return - } - fallthrough - case 's', 'q': - io.WriteString(s, w.Error()) - } -} - -// Cause returns the underlying cause of the error, if possible. -// An error value has a cause if it implements the following -// interface: -// -// type causer interface { -// Cause() error -// } -// -// If the error does not implement Cause, the original error will -// be returned. If the error is nil, nil will be returned without further -// investigation. -func Cause(err error) error { - type causer interface { - Cause() error - } - - for err != nil { - cause, ok := err.(causer) - if !ok { - break - } - err = cause.Cause() - } - return err -} diff --git a/vendor/github.com/pkg/errors/stack.go b/vendor/github.com/pkg/errors/stack.go deleted file mode 100644 index 2874a04..0000000 --- a/vendor/github.com/pkg/errors/stack.go +++ /dev/null @@ -1,147 +0,0 @@ -package errors - -import ( - "fmt" - "io" - "path" - "runtime" - "strings" -) - -// Frame represents a program counter inside a stack frame. -type Frame uintptr - -// pc returns the program counter for this frame; -// multiple frames may have the same PC value. -func (f Frame) pc() uintptr { return uintptr(f) - 1 } - -// file returns the full path to the file that contains the -// function for this Frame's pc. 
-func (f Frame) file() string { - fn := runtime.FuncForPC(f.pc()) - if fn == nil { - return "unknown" - } - file, _ := fn.FileLine(f.pc()) - return file -} - -// line returns the line number of source code of the -// function for this Frame's pc. -func (f Frame) line() int { - fn := runtime.FuncForPC(f.pc()) - if fn == nil { - return 0 - } - _, line := fn.FileLine(f.pc()) - return line -} - -// Format formats the frame according to the fmt.Formatter interface. -// -// %s source file -// %d source line -// %n function name -// %v equivalent to %s:%d -// -// Format accepts flags that alter the printing of some verbs, as follows: -// -// %+s function name and path of source file relative to the compile time -// GOPATH separated by \n\t (\n\t) -// %+v equivalent to %+s:%d -func (f Frame) Format(s fmt.State, verb rune) { - switch verb { - case 's': - switch { - case s.Flag('+'): - pc := f.pc() - fn := runtime.FuncForPC(pc) - if fn == nil { - io.WriteString(s, "unknown") - } else { - file, _ := fn.FileLine(pc) - fmt.Fprintf(s, "%s\n\t%s", fn.Name(), file) - } - default: - io.WriteString(s, path.Base(f.file())) - } - case 'd': - fmt.Fprintf(s, "%d", f.line()) - case 'n': - name := runtime.FuncForPC(f.pc()).Name() - io.WriteString(s, funcname(name)) - case 'v': - f.Format(s, 's') - io.WriteString(s, ":") - f.Format(s, 'd') - } -} - -// StackTrace is stack of Frames from innermost (newest) to outermost (oldest). -type StackTrace []Frame - -// Format formats the stack of Frames according to the fmt.Formatter interface. -// -// %s lists source files for each Frame in the stack -// %v lists the source file and line number for each Frame in the stack -// -// Format accepts flags that alter the printing of some verbs, as follows: -// -// %+v Prints filename, function, and line number for each Frame in the stack. 
-func (st StackTrace) Format(s fmt.State, verb rune) { - switch verb { - case 'v': - switch { - case s.Flag('+'): - for _, f := range st { - fmt.Fprintf(s, "\n%+v", f) - } - case s.Flag('#'): - fmt.Fprintf(s, "%#v", []Frame(st)) - default: - fmt.Fprintf(s, "%v", []Frame(st)) - } - case 's': - fmt.Fprintf(s, "%s", []Frame(st)) - } -} - -// stack represents a stack of program counters. -type stack []uintptr - -func (s *stack) Format(st fmt.State, verb rune) { - switch verb { - case 'v': - switch { - case st.Flag('+'): - for _, pc := range *s { - f := Frame(pc) - fmt.Fprintf(st, "\n%+v", f) - } - } - } -} - -func (s *stack) StackTrace() StackTrace { - f := make([]Frame, len(*s)) - for i := 0; i < len(f); i++ { - f[i] = Frame((*s)[i]) - } - return f -} - -func callers() *stack { - const depth = 32 - var pcs [depth]uintptr - n := runtime.Callers(3, pcs[:]) - var st stack = pcs[0:n] - return &st -} - -// funcname removes the path prefix component of a function's name reported by func.Name(). -func funcname(name string) string { - i := strings.LastIndex(name, "/") - name = name[i+1:] - i = strings.Index(name, ".") - return name[i+1:] -} diff --git a/vendor/github.com/prometheus/client_golang/LICENSE b/vendor/github.com/prometheus/client_golang/LICENSE deleted file mode 100644 index 261eeb9..0000000 --- a/vendor/github.com/prometheus/client_golang/LICENSE +++ /dev/null @@ -1,201 +0,0 @@ - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. 
For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. 
For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. 
If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. 
You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. 
Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. 
- - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. diff --git a/vendor/github.com/prometheus/client_golang/NOTICE b/vendor/github.com/prometheus/client_golang/NOTICE deleted file mode 100644 index dd878a3..0000000 --- a/vendor/github.com/prometheus/client_golang/NOTICE +++ /dev/null @@ -1,23 +0,0 @@ -Prometheus instrumentation library for Go applications -Copyright 2012-2015 The Prometheus Authors - -This product includes software developed at -SoundCloud Ltd. (http://soundcloud.com/). - - -The following components are included in this product: - -perks - a fork of https://github.com/bmizerany/perks -https://github.com/beorn7/perks -Copyright 2013-2015 Blake Mizerany, Björn Rabenstein -See https://github.com/beorn7/perks/blob/master/README.md for license details. - -Go support for Protocol Buffers - Google's data interchange format -http://github.com/golang/protobuf/ -Copyright 2010 The Go Authors -See source code for license details. - -Support for streaming Protocol Buffer messages for the Go language (golang). -https://github.com/matttproud/golang_protobuf_extensions -Copyright 2013 Matt T. 
Proud -Licensed under the Apache License, Version 2.0 diff --git a/vendor/github.com/prometheus/client_golang/prometheus/.gitignore b/vendor/github.com/prometheus/client_golang/prometheus/.gitignore deleted file mode 100644 index 3460f03..0000000 --- a/vendor/github.com/prometheus/client_golang/prometheus/.gitignore +++ /dev/null @@ -1 +0,0 @@ -command-line-arguments.test diff --git a/vendor/github.com/prometheus/client_golang/prometheus/README.md b/vendor/github.com/prometheus/client_golang/prometheus/README.md deleted file mode 100644 index 44986bf..0000000 --- a/vendor/github.com/prometheus/client_golang/prometheus/README.md +++ /dev/null @@ -1 +0,0 @@ -See [![go-doc](https://godoc.org/github.com/prometheus/client_golang/prometheus?status.svg)](https://godoc.org/github.com/prometheus/client_golang/prometheus). diff --git a/vendor/github.com/prometheus/client_golang/prometheus/collector.go b/vendor/github.com/prometheus/client_golang/prometheus/collector.go deleted file mode 100644 index c0d70b2..0000000 --- a/vendor/github.com/prometheus/client_golang/prometheus/collector.go +++ /dev/null @@ -1,120 +0,0 @@ -// Copyright 2014 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package prometheus - -// Collector is the interface implemented by anything that can be used by -// Prometheus to collect metrics. A Collector has to be registered for -// collection. See Registerer.Register. 
-// -// The stock metrics provided by this package (Gauge, Counter, Summary, -// Histogram, Untyped) are also Collectors (which only ever collect one metric, -// namely itself). An implementer of Collector may, however, collect multiple -// metrics in a coordinated fashion and/or create metrics on the fly. Examples -// for collectors already implemented in this library are the metric vectors -// (i.e. collection of multiple instances of the same Metric but with different -// label values) like GaugeVec or SummaryVec, and the ExpvarCollector. -type Collector interface { - // Describe sends the super-set of all possible descriptors of metrics - // collected by this Collector to the provided channel and returns once - // the last descriptor has been sent. The sent descriptors fulfill the - // consistency and uniqueness requirements described in the Desc - // documentation. - // - // It is valid if one and the same Collector sends duplicate - // descriptors. Those duplicates are simply ignored. However, two - // different Collectors must not send duplicate descriptors. - // - // Sending no descriptor at all marks the Collector as “unchecked”, - // i.e. no checks will be performed at registration time, and the - // Collector may yield any Metric it sees fit in its Collect method. - // - // This method idempotently sends the same descriptors throughout the - // lifetime of the Collector. It may be called concurrently and - // therefore must be implemented in a concurrency safe way. - // - // If a Collector encounters an error while executing this method, it - // must send an invalid descriptor (created with NewInvalidDesc) to - // signal the error to the registry. - Describe(chan<- *Desc) - // Collect is called by the Prometheus registry when collecting - // metrics. The implementation sends each collected metric via the - // provided channel and returns once the last metric has been sent. 
The - // descriptor of each sent metric is one of those returned by Describe - // (unless the Collector is unchecked, see above). Returned metrics that - // share the same descriptor must differ in their variable label - // values. - // - // This method may be called concurrently and must therefore be - // implemented in a concurrency safe way. Blocking occurs at the expense - // of total performance of rendering all registered metrics. Ideally, - // Collector implementations support concurrent readers. - Collect(chan<- Metric) -} - -// DescribeByCollect is a helper to implement the Describe method of a custom -// Collector. It collects the metrics from the provided Collector and sends -// their descriptors to the provided channel. -// -// If a Collector collects the same metrics throughout its lifetime, its -// Describe method can simply be implemented as: -// -// func (c customCollector) Describe(ch chan<- *Desc) { -// DescribeByCollect(c, ch) -// } -// -// However, this will not work if the metrics collected change dynamically over -// the lifetime of the Collector in a way that their combined set of descriptors -// changes as well. The shortcut implementation will then violate the contract -// of the Describe method. If a Collector sometimes collects no metrics at all -// (for example vectors like CounterVec, GaugeVec, etc., which only collect -// metrics after a metric with a fully specified label set has been accessed), -// it might even get registered as an unchecked Collecter (cf. the Register -// method of the Registerer interface). Hence, only use this shortcut -// implementation of Describe if you are certain to fulfill the contract. -// -// The Collector example demonstrates a use of DescribeByCollect. 
-func DescribeByCollect(c Collector, descs chan<- *Desc) { - metrics := make(chan Metric) - go func() { - c.Collect(metrics) - close(metrics) - }() - for m := range metrics { - descs <- m.Desc() - } -} - -// selfCollector implements Collector for a single Metric so that the Metric -// collects itself. Add it as an anonymous field to a struct that implements -// Metric, and call init with the Metric itself as an argument. -type selfCollector struct { - self Metric -} - -// init provides the selfCollector with a reference to the metric it is supposed -// to collect. It is usually called within the factory function to create a -// metric. See example. -func (c *selfCollector) init(self Metric) { - c.self = self -} - -// Describe implements Collector. -func (c *selfCollector) Describe(ch chan<- *Desc) { - ch <- c.self.Desc() -} - -// Collect implements Collector. -func (c *selfCollector) Collect(ch chan<- Metric) { - ch <- c.self -} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/counter.go b/vendor/github.com/prometheus/client_golang/prometheus/counter.go deleted file mode 100644 index d463e36..0000000 --- a/vendor/github.com/prometheus/client_golang/prometheus/counter.go +++ /dev/null @@ -1,277 +0,0 @@ -// Copyright 2014 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package prometheus - -import ( - "errors" - "math" - "sync/atomic" - - dto "github.com/prometheus/client_model/go" -) - -// Counter is a Metric that represents a single numerical value that only ever -// goes up. That implies that it cannot be used to count items whose number can -// also go down, e.g. the number of currently running goroutines. Those -// "counters" are represented by Gauges. -// -// A Counter is typically used to count requests served, tasks completed, errors -// occurred, etc. -// -// To create Counter instances, use NewCounter. -type Counter interface { - Metric - Collector - - // Inc increments the counter by 1. Use Add to increment it by arbitrary - // non-negative values. - Inc() - // Add adds the given value to the counter. It panics if the value is < - // 0. - Add(float64) -} - -// CounterOpts is an alias for Opts. See there for doc comments. -type CounterOpts Opts - -// NewCounter creates a new Counter based on the provided CounterOpts. -// -// The returned implementation tracks the counter value in two separate -// variables, a float64 and a uint64. The latter is used to track calls of the -// Inc method and calls of the Add method with a value that can be represented -// as a uint64. This allows atomic increments of the counter with optimal -// performance. (It is common to have an Inc call in very hot execution paths.) -// Both internal tracking values are added up in the Write method. This has to -// be taken into account when it comes to precision and overflow behavior. -func NewCounter(opts CounterOpts) Counter { - desc := NewDesc( - BuildFQName(opts.Namespace, opts.Subsystem, opts.Name), - opts.Help, - nil, - opts.ConstLabels, - ) - result := &counter{desc: desc, labelPairs: desc.constLabelPairs} - result.init(result) // Init self-collection. - return result -} - -type counter struct { - // valBits contains the bits of the represented float64 value, while - // valInt stores values that are exact integers. 
Both have to go first - // in the struct to guarantee alignment for atomic operations. - // http://golang.org/pkg/sync/atomic/#pkg-note-BUG - valBits uint64 - valInt uint64 - - selfCollector - desc *Desc - - labelPairs []*dto.LabelPair -} - -func (c *counter) Desc() *Desc { - return c.desc -} - -func (c *counter) Add(v float64) { - if v < 0 { - panic(errors.New("counter cannot decrease in value")) - } - ival := uint64(v) - if float64(ival) == v { - atomic.AddUint64(&c.valInt, ival) - return - } - - for { - oldBits := atomic.LoadUint64(&c.valBits) - newBits := math.Float64bits(math.Float64frombits(oldBits) + v) - if atomic.CompareAndSwapUint64(&c.valBits, oldBits, newBits) { - return - } - } -} - -func (c *counter) Inc() { - atomic.AddUint64(&c.valInt, 1) -} - -func (c *counter) Write(out *dto.Metric) error { - fval := math.Float64frombits(atomic.LoadUint64(&c.valBits)) - ival := atomic.LoadUint64(&c.valInt) - val := fval + float64(ival) - - return populateMetric(CounterValue, val, c.labelPairs, out) -} - -// CounterVec is a Collector that bundles a set of Counters that all share the -// same Desc, but have different values for their variable labels. This is used -// if you want to count the same thing partitioned by various dimensions -// (e.g. number of HTTP requests, partitioned by response code and -// method). Create instances with NewCounterVec. -type CounterVec struct { - *metricVec -} - -// NewCounterVec creates a new CounterVec based on the provided CounterOpts and -// partitioned by the given label names. 
-func NewCounterVec(opts CounterOpts, labelNames []string) *CounterVec { - desc := NewDesc( - BuildFQName(opts.Namespace, opts.Subsystem, opts.Name), - opts.Help, - labelNames, - opts.ConstLabels, - ) - return &CounterVec{ - metricVec: newMetricVec(desc, func(lvs ...string) Metric { - if len(lvs) != len(desc.variableLabels) { - panic(makeInconsistentCardinalityError(desc.fqName, desc.variableLabels, lvs)) - } - result := &counter{desc: desc, labelPairs: makeLabelPairs(desc, lvs)} - result.init(result) // Init self-collection. - return result - }), - } -} - -// GetMetricWithLabelValues returns the Counter for the given slice of label -// values (same order as the VariableLabels in Desc). If that combination of -// label values is accessed for the first time, a new Counter is created. -// -// It is possible to call this method without using the returned Counter to only -// create the new Counter but leave it at its starting value 0. See also the -// SummaryVec example. -// -// Keeping the Counter for later use is possible (and should be considered if -// performance is critical), but keep in mind that Reset, DeleteLabelValues and -// Delete can be used to delete the Counter from the CounterVec. In that case, -// the Counter will still exist, but it will not be exported anymore, even if a -// Counter with the same label values is created later. -// -// An error is returned if the number of label values is not the same as the -// number of VariableLabels in Desc (minus any curried labels). -// -// Note that for more than one label value, this method is prone to mistakes -// caused by an incorrect order of arguments. Consider GetMetricWith(Labels) as -// an alternative to avoid that type of mistake. For higher label numbers, the -// latter has a much more readable (albeit more verbose) syntax, but it comes -// with a performance overhead (for creating and processing the Labels map). -// See also the GaugeVec example. 
-func (v *CounterVec) GetMetricWithLabelValues(lvs ...string) (Counter, error) { - metric, err := v.metricVec.getMetricWithLabelValues(lvs...) - if metric != nil { - return metric.(Counter), err - } - return nil, err -} - -// GetMetricWith returns the Counter for the given Labels map (the label names -// must match those of the VariableLabels in Desc). If that label map is -// accessed for the first time, a new Counter is created. Implications of -// creating a Counter without using it and keeping the Counter for later use are -// the same as for GetMetricWithLabelValues. -// -// An error is returned if the number and names of the Labels are inconsistent -// with those of the VariableLabels in Desc (minus any curried labels). -// -// This method is used for the same purpose as -// GetMetricWithLabelValues(...string). See there for pros and cons of the two -// methods. -func (v *CounterVec) GetMetricWith(labels Labels) (Counter, error) { - metric, err := v.metricVec.getMetricWith(labels) - if metric != nil { - return metric.(Counter), err - } - return nil, err -} - -// WithLabelValues works as GetMetricWithLabelValues, but panics where -// GetMetricWithLabelValues would have returned an error. Not returning an -// error allows shortcuts like -// myVec.WithLabelValues("404", "GET").Add(42) -func (v *CounterVec) WithLabelValues(lvs ...string) Counter { - c, err := v.GetMetricWithLabelValues(lvs...) - if err != nil { - panic(err) - } - return c -} - -// With works as GetMetricWith, but panics where GetMetricWithLabels would have -// returned an error. Not returning an error allows shortcuts like -// myVec.With(prometheus.Labels{"code": "404", "method": "GET"}).Add(42) -func (v *CounterVec) With(labels Labels) Counter { - c, err := v.GetMetricWith(labels) - if err != nil { - panic(err) - } - return c -} - -// CurryWith returns a vector curried with the provided labels, i.e. the -// returned vector has those labels pre-set for all labeled operations performed -// on it. 
The cardinality of the curried vector is reduced accordingly. The -// order of the remaining labels stays the same (just with the curried labels -// taken out of the sequence – which is relevant for the -// (GetMetric)WithLabelValues methods). It is possible to curry a curried -// vector, but only with labels not yet used for currying before. -// -// The metrics contained in the CounterVec are shared between the curried and -// uncurried vectors. They are just accessed differently. Curried and uncurried -// vectors behave identically in terms of collection. Only one must be -// registered with a given registry (usually the uncurried version). The Reset -// method deletes all metrics, even if called on a curried vector. -func (v *CounterVec) CurryWith(labels Labels) (*CounterVec, error) { - vec, err := v.curryWith(labels) - if vec != nil { - return &CounterVec{vec}, err - } - return nil, err -} - -// MustCurryWith works as CurryWith but panics where CurryWith would have -// returned an error. -func (v *CounterVec) MustCurryWith(labels Labels) *CounterVec { - vec, err := v.CurryWith(labels) - if err != nil { - panic(err) - } - return vec -} - -// CounterFunc is a Counter whose value is determined at collect time by calling a -// provided function. -// -// To create CounterFunc instances, use NewCounterFunc. -type CounterFunc interface { - Metric - Collector -} - -// NewCounterFunc creates a new CounterFunc based on the provided -// CounterOpts. The value reported is determined by calling the given function -// from within the Write method. Take into account that metric collection may -// happen concurrently. If that results in concurrent calls to Write, like in -// the case where a CounterFunc is directly registered with Prometheus, the -// provided function must be concurrency-safe. The function should also honor -// the contract for a Counter (values only go up, not down), but compliance will -// not be checked. 
-func NewCounterFunc(opts CounterOpts, function func() float64) CounterFunc { - return newValueFunc(NewDesc( - BuildFQName(opts.Namespace, opts.Subsystem, opts.Name), - opts.Help, - nil, - opts.ConstLabels, - ), CounterValue, function) -} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/desc.go b/vendor/github.com/prometheus/client_golang/prometheus/desc.go deleted file mode 100644 index 7b8827f..0000000 --- a/vendor/github.com/prometheus/client_golang/prometheus/desc.go +++ /dev/null @@ -1,184 +0,0 @@ -// Copyright 2016 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package prometheus - -import ( - "errors" - "fmt" - "sort" - "strings" - - "github.com/golang/protobuf/proto" - "github.com/prometheus/common/model" - - dto "github.com/prometheus/client_model/go" -) - -// Desc is the descriptor used by every Prometheus Metric. It is essentially -// the immutable meta-data of a Metric. The normal Metric implementations -// included in this package manage their Desc under the hood. Users only have to -// deal with Desc if they use advanced features like the ExpvarCollector or -// custom Collectors and Metrics. 
-// -// Descriptors registered with the same registry have to fulfill certain -// consistency and uniqueness criteria if they share the same fully-qualified -// name: They must have the same help string and the same label names (aka label -// dimensions) in each, constLabels and variableLabels, but they must differ in -// the values of the constLabels. -// -// Descriptors that share the same fully-qualified names and the same label -// values of their constLabels are considered equal. -// -// Use NewDesc to create new Desc instances. -type Desc struct { - // fqName has been built from Namespace, Subsystem, and Name. - fqName string - // help provides some helpful information about this metric. - help string - // constLabelPairs contains precalculated DTO label pairs based on - // the constant labels. - constLabelPairs []*dto.LabelPair - // VariableLabels contains names of labels for which the metric - // maintains variable values. - variableLabels []string - // id is a hash of the values of the ConstLabels and fqName. This - // must be unique among all registered descriptors and can therefore be - // used as an identifier of the descriptor. - id uint64 - // dimHash is a hash of the label names (preset and variable) and the - // Help string. Each Desc with the same fqName must have the same - // dimHash. - dimHash uint64 - // err is an error that occurred during construction. It is reported on - // registration time. - err error -} - -// NewDesc allocates and initializes a new Desc. Errors are recorded in the Desc -// and will be reported on registration time. variableLabels and constLabels can -// be nil if no such labels should be set. fqName must not be empty. -// -// variableLabels only contain the label names. Their label values are variable -// and therefore not part of the Desc. (They are managed within the Metric.) -// -// For constLabels, the label values are constant. Therefore, they are fully -// specified in the Desc. 
See the Collector example for a usage pattern. -func NewDesc(fqName, help string, variableLabels []string, constLabels Labels) *Desc { - d := &Desc{ - fqName: fqName, - help: help, - variableLabels: variableLabels, - } - if !model.IsValidMetricName(model.LabelValue(fqName)) { - d.err = fmt.Errorf("%q is not a valid metric name", fqName) - return d - } - // labelValues contains the label values of const labels (in order of - // their sorted label names) plus the fqName (at position 0). - labelValues := make([]string, 1, len(constLabels)+1) - labelValues[0] = fqName - labelNames := make([]string, 0, len(constLabels)+len(variableLabels)) - labelNameSet := map[string]struct{}{} - // First add only the const label names and sort them... - for labelName := range constLabels { - if !checkLabelName(labelName) { - d.err = fmt.Errorf("%q is not a valid label name", labelName) - return d - } - labelNames = append(labelNames, labelName) - labelNameSet[labelName] = struct{}{} - } - sort.Strings(labelNames) - // ... so that we can now add const label values in the order of their names. - for _, labelName := range labelNames { - labelValues = append(labelValues, constLabels[labelName]) - } - // Validate the const label values. They can't have a wrong cardinality, so - // use in len(labelValues) as expectedNumberOfValues. - if err := validateLabelValues(labelValues, len(labelValues)); err != nil { - d.err = err - return d - } - // Now add the variable label names, but prefix them with something that - // cannot be in a regular label name. That prevents matching the label - // dimension with a different mix between preset and variable labels. 
- for _, labelName := range variableLabels { - if !checkLabelName(labelName) { - d.err = fmt.Errorf("%q is not a valid label name", labelName) - return d - } - labelNames = append(labelNames, "$"+labelName) - labelNameSet[labelName] = struct{}{} - } - if len(labelNames) != len(labelNameSet) { - d.err = errors.New("duplicate label names") - return d - } - - vh := hashNew() - for _, val := range labelValues { - vh = hashAdd(vh, val) - vh = hashAddByte(vh, separatorByte) - } - d.id = vh - // Sort labelNames so that order doesn't matter for the hash. - sort.Strings(labelNames) - // Now hash together (in this order) the help string and the sorted - // label names. - lh := hashNew() - lh = hashAdd(lh, help) - lh = hashAddByte(lh, separatorByte) - for _, labelName := range labelNames { - lh = hashAdd(lh, labelName) - lh = hashAddByte(lh, separatorByte) - } - d.dimHash = lh - - d.constLabelPairs = make([]*dto.LabelPair, 0, len(constLabels)) - for n, v := range constLabels { - d.constLabelPairs = append(d.constLabelPairs, &dto.LabelPair{ - Name: proto.String(n), - Value: proto.String(v), - }) - } - sort.Sort(labelPairSorter(d.constLabelPairs)) - return d -} - -// NewInvalidDesc returns an invalid descriptor, i.e. a descriptor with the -// provided error set. If a collector returning such a descriptor is registered, -// registration will fail with the provided error. NewInvalidDesc can be used by -// a Collector to signal inability to describe itself. 
-func NewInvalidDesc(err error) *Desc { - return &Desc{ - err: err, - } -} - -func (d *Desc) String() string { - lpStrings := make([]string, 0, len(d.constLabelPairs)) - for _, lp := range d.constLabelPairs { - lpStrings = append( - lpStrings, - fmt.Sprintf("%s=%q", lp.GetName(), lp.GetValue()), - ) - } - return fmt.Sprintf( - "Desc{fqName: %q, help: %q, constLabels: {%s}, variableLabels: %v}", - d.fqName, - d.help, - strings.Join(lpStrings, ","), - d.variableLabels, - ) -} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/doc.go b/vendor/github.com/prometheus/client_golang/prometheus/doc.go deleted file mode 100644 index 5d9525d..0000000 --- a/vendor/github.com/prometheus/client_golang/prometheus/doc.go +++ /dev/null @@ -1,201 +0,0 @@ -// Copyright 2014 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Package prometheus is the core instrumentation package. It provides metrics -// primitives to instrument code for monitoring. It also offers a registry for -// metrics. Sub-packages allow to expose the registered metrics via HTTP -// (package promhttp) or push them to a Pushgateway (package push). There is -// also a sub-package promauto, which provides metrics constructors with -// automatic registration. -// -// All exported functions and methods are safe to be used concurrently unless -// specified otherwise. 
-// -// A Basic Example -// -// As a starting point, a very basic usage example: -// -// package main -// -// import ( -// "log" -// "net/http" -// -// "github.com/prometheus/client_golang/prometheus" -// "github.com/prometheus/client_golang/prometheus/promhttp" -// ) -// -// var ( -// cpuTemp = prometheus.NewGauge(prometheus.GaugeOpts{ -// Name: "cpu_temperature_celsius", -// Help: "Current temperature of the CPU.", -// }) -// hdFailures = prometheus.NewCounterVec( -// prometheus.CounterOpts{ -// Name: "hd_errors_total", -// Help: "Number of hard-disk errors.", -// }, -// []string{"device"}, -// ) -// ) -// -// func init() { -// // Metrics have to be registered to be exposed: -// prometheus.MustRegister(cpuTemp) -// prometheus.MustRegister(hdFailures) -// } -// -// func main() { -// cpuTemp.Set(65.3) -// hdFailures.With(prometheus.Labels{"device":"/dev/sda"}).Inc() -// -// // The Handler function provides a default handler to expose metrics -// // via an HTTP server. "/metrics" is the usual endpoint for that. -// http.Handle("/metrics", promhttp.Handler()) -// log.Fatal(http.ListenAndServe(":8080", nil)) -// } -// -// -// This is a complete program that exports two metrics, a Gauge and a Counter, -// the latter with a label attached to turn it into a (one-dimensional) vector. -// -// Metrics -// -// The number of exported identifiers in this package might appear a bit -// overwhelming. However, in addition to the basic plumbing shown in the example -// above, you only need to understand the different metric types and their -// vector versions for basic usage. Furthermore, if you are not concerned with -// fine-grained control of when and how to register metrics with the registry, -// have a look at the promauto package, which will effectively allow you to -// ignore registration altogether in simple cases. -// -// Above, you have already touched the Counter and the Gauge. There are two more -// advanced metric types: the Summary and Histogram. 
A more thorough description -// of those four metric types can be found in the Prometheus docs: -// https://prometheus.io/docs/concepts/metric_types/ -// -// A fifth "type" of metric is Untyped. It behaves like a Gauge, but signals the -// Prometheus server not to assume anything about its type. -// -// In addition to the fundamental metric types Gauge, Counter, Summary, -// Histogram, and Untyped, a very important part of the Prometheus data model is -// the partitioning of samples along dimensions called labels, which results in -// metric vectors. The fundamental types are GaugeVec, CounterVec, SummaryVec, -// HistogramVec, and UntypedVec. -// -// While only the fundamental metric types implement the Metric interface, both -// the metrics and their vector versions implement the Collector interface. A -// Collector manages the collection of a number of Metrics, but for convenience, -// a Metric can also “collect itself”. Note that Gauge, Counter, Summary, -// Histogram, and Untyped are interfaces themselves while GaugeVec, CounterVec, -// SummaryVec, HistogramVec, and UntypedVec are not. -// -// To create instances of Metrics and their vector versions, you need a suitable -// …Opts struct, i.e. GaugeOpts, CounterOpts, SummaryOpts, HistogramOpts, or -// UntypedOpts. -// -// Custom Collectors and constant Metrics -// -// While you could create your own implementations of Metric, most likely you -// will only ever implement the Collector interface on your own. At a first -// glance, a custom Collector seems handy to bundle Metrics for common -// registration (with the prime example of the different metric vectors above, -// which bundle all the metrics of the same name but with different labels). -// -// There is a more involved use case, too: If you already have metrics -// available, created outside of the Prometheus context, you don't need the -// interface of the various Metric types. 
You essentially want to mirror the -// existing numbers into Prometheus Metrics during collection. An own -// implementation of the Collector interface is perfect for that. You can create -// Metric instances “on the fly” using NewConstMetric, NewConstHistogram, and -// NewConstSummary (and their respective Must… versions). That will happen in -// the Collect method. The Describe method has to return separate Desc -// instances, representative of the “throw-away” metrics to be created later. -// NewDesc comes in handy to create those Desc instances. Alternatively, you -// could return no Desc at all, which will marke the Collector “unchecked”. No -// checks are porformed at registration time, but metric consistency will still -// be ensured at scrape time, i.e. any inconsistencies will lead to scrape -// errors. Thus, with unchecked Collectors, the responsibility to not collect -// metrics that lead to inconsistencies in the total scrape result lies with the -// implementer of the Collector. While this is not a desirable state, it is -// sometimes necessary. The typical use case is a situatios where the exact -// metrics to be returned by a Collector cannot be predicted at registration -// time, but the implementer has sufficient knowledge of the whole system to -// guarantee metric consistency. -// -// The Collector example illustrates the use case. You can also look at the -// source code of the processCollector (mirroring process metrics), the -// goCollector (mirroring Go metrics), or the expvarCollector (mirroring expvar -// metrics) as examples that are used in this package itself. -// -// If you just need to call a function to get a single float value to collect as -// a metric, GaugeFunc, CounterFunc, or UntypedFunc might be interesting -// shortcuts. -// -// Advanced Uses of the Registry -// -// While MustRegister is the by far most common way of registering a Collector, -// sometimes you might want to handle the errors the registration might cause. 
-// As suggested by the name, MustRegister panics if an error occurs. With the -// Register function, the error is returned and can be handled. -// -// An error is returned if the registered Collector is incompatible or -// inconsistent with already registered metrics. The registry aims for -// consistency of the collected metrics according to the Prometheus data model. -// Inconsistencies are ideally detected at registration time, not at collect -// time. The former will usually be detected at start-up time of a program, -// while the latter will only happen at scrape time, possibly not even on the -// first scrape if the inconsistency only becomes relevant later. That is the -// main reason why a Collector and a Metric have to describe themselves to the -// registry. -// -// So far, everything we did operated on the so-called default registry, as it -// can be found in the global DefaultRegisterer variable. With NewRegistry, you -// can create a custom registry, or you can even implement the Registerer or -// Gatherer interfaces yourself. The methods Register and Unregister work in the -// same way on a custom registry as the global functions Register and Unregister -// on the default registry. -// -// There are a number of uses for custom registries: You can use registries with -// special properties, see NewPedanticRegistry. You can avoid global state, as -// it is imposed by the DefaultRegisterer. You can use multiple registries at -// the same time to expose different metrics in different ways. You can use -// separate registries for testing purposes. -// -// Also note that the DefaultRegisterer comes registered with a Collector for Go -// runtime metrics (via NewGoCollector) and a Collector for process metrics (via -// NewProcessCollector). With a custom registry, you are in control and decide -// yourself about the Collectors to register. -// -// HTTP Exposition -// -// The Registry implements the Gatherer interface. 
The caller of the Gather -// method can then expose the gathered metrics in some way. Usually, the metrics -// are served via HTTP on the /metrics endpoint. That's happening in the example -// above. The tools to expose metrics via HTTP are in the promhttp sub-package. -// (The top-level functions in the prometheus package are deprecated.) -// -// Pushing to the Pushgateway -// -// Function for pushing to the Pushgateway can be found in the push sub-package. -// -// Graphite Bridge -// -// Functions and examples to push metrics from a Gatherer to Graphite can be -// found in the graphite sub-package. -// -// Other Means of Exposition -// -// More ways of exposing metrics can easily be added by following the approaches -// of the existing implementations. -package prometheus diff --git a/vendor/github.com/prometheus/client_golang/prometheus/expvar_collector.go b/vendor/github.com/prometheus/client_golang/prometheus/expvar_collector.go deleted file mode 100644 index 18a99d5..0000000 --- a/vendor/github.com/prometheus/client_golang/prometheus/expvar_collector.go +++ /dev/null @@ -1,119 +0,0 @@ -// Copyright 2014 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package prometheus - -import ( - "encoding/json" - "expvar" -) - -type expvarCollector struct { - exports map[string]*Desc -} - -// NewExpvarCollector returns a newly allocated expvar Collector that still has -// to be registered with a Prometheus registry. 
-// -// An expvar Collector collects metrics from the expvar interface. It provides a -// quick way to expose numeric values that are already exported via expvar as -// Prometheus metrics. Note that the data models of expvar and Prometheus are -// fundamentally different, and that the expvar Collector is inherently slower -// than native Prometheus metrics. Thus, the expvar Collector is probably great -// for experiments and prototying, but you should seriously consider a more -// direct implementation of Prometheus metrics for monitoring production -// systems. -// -// The exports map has the following meaning: -// -// The keys in the map correspond to expvar keys, i.e. for every expvar key you -// want to export as Prometheus metric, you need an entry in the exports -// map. The descriptor mapped to each key describes how to export the expvar -// value. It defines the name and the help string of the Prometheus metric -// proxying the expvar value. The type will always be Untyped. -// -// For descriptors without variable labels, the expvar value must be a number or -// a bool. The number is then directly exported as the Prometheus sample -// value. (For a bool, 'false' translates to 0 and 'true' to 1). Expvar values -// that are not numbers or bools are silently ignored. -// -// If the descriptor has one variable label, the expvar value must be an expvar -// map. The keys in the expvar map become the various values of the one -// Prometheus label. The values in the expvar map must be numbers or bools again -// as above. -// -// For descriptors with more than one variable label, the expvar must be a -// nested expvar map, i.e. where the values of the topmost map are maps again -// etc. until a depth is reached that corresponds to the number of labels. The -// leaves of that structure must be numbers or bools as above to serve as the -// sample values. -// -// Anything that does not fit into the scheme above is silently ignored. 
-func NewExpvarCollector(exports map[string]*Desc) Collector { - return &expvarCollector{ - exports: exports, - } -} - -// Describe implements Collector. -func (e *expvarCollector) Describe(ch chan<- *Desc) { - for _, desc := range e.exports { - ch <- desc - } -} - -// Collect implements Collector. -func (e *expvarCollector) Collect(ch chan<- Metric) { - for name, desc := range e.exports { - var m Metric - expVar := expvar.Get(name) - if expVar == nil { - continue - } - var v interface{} - labels := make([]string, len(desc.variableLabels)) - if err := json.Unmarshal([]byte(expVar.String()), &v); err != nil { - ch <- NewInvalidMetric(desc, err) - continue - } - var processValue func(v interface{}, i int) - processValue = func(v interface{}, i int) { - if i >= len(labels) { - copiedLabels := append(make([]string, 0, len(labels)), labels...) - switch v := v.(type) { - case float64: - m = MustNewConstMetric(desc, UntypedValue, v, copiedLabels...) - case bool: - if v { - m = MustNewConstMetric(desc, UntypedValue, 1, copiedLabels...) - } else { - m = MustNewConstMetric(desc, UntypedValue, 0, copiedLabels...) - } - default: - return - } - ch <- m - return - } - vm, ok := v.(map[string]interface{}) - if !ok { - return - } - for lv, val := range vm { - labels[i] = lv - processValue(val, i+1) - } - } - processValue(v, 0) - } -} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/fnv.go b/vendor/github.com/prometheus/client_golang/prometheus/fnv.go deleted file mode 100644 index 3d383a7..0000000 --- a/vendor/github.com/prometheus/client_golang/prometheus/fnv.go +++ /dev/null @@ -1,42 +0,0 @@ -// Copyright 2018 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package prometheus - -// Inline and byte-free variant of hash/fnv's fnv64a. - -const ( - offset64 = 14695981039346656037 - prime64 = 1099511628211 -) - -// hashNew initializies a new fnv64a hash value. -func hashNew() uint64 { - return offset64 -} - -// hashAdd adds a string to a fnv64a hash value, returning the updated hash. -func hashAdd(h uint64, s string) uint64 { - for i := 0; i < len(s); i++ { - h ^= uint64(s[i]) - h *= prime64 - } - return h -} - -// hashAddByte adds a byte to a fnv64a hash value, returning the updated hash. -func hashAddByte(h uint64, b byte) uint64 { - h ^= uint64(b) - h *= prime64 - return h -} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/gauge.go b/vendor/github.com/prometheus/client_golang/prometheus/gauge.go deleted file mode 100644 index 71d406b..0000000 --- a/vendor/github.com/prometheus/client_golang/prometheus/gauge.go +++ /dev/null @@ -1,286 +0,0 @@ -// Copyright 2014 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package prometheus - -import ( - "math" - "sync/atomic" - "time" - - dto "github.com/prometheus/client_model/go" -) - -// Gauge is a Metric that represents a single numerical value that can -// arbitrarily go up and down. -// -// A Gauge is typically used for measured values like temperatures or current -// memory usage, but also "counts" that can go up and down, like the number of -// running goroutines. -// -// To create Gauge instances, use NewGauge. -type Gauge interface { - Metric - Collector - - // Set sets the Gauge to an arbitrary value. - Set(float64) - // Inc increments the Gauge by 1. Use Add to increment it by arbitrary - // values. - Inc() - // Dec decrements the Gauge by 1. Use Sub to decrement it by arbitrary - // values. - Dec() - // Add adds the given value to the Gauge. (The value can be negative, - // resulting in a decrease of the Gauge.) - Add(float64) - // Sub subtracts the given value from the Gauge. (The value can be - // negative, resulting in an increase of the Gauge.) - Sub(float64) - - // SetToCurrentTime sets the Gauge to the current Unix time in seconds. - SetToCurrentTime() -} - -// GaugeOpts is an alias for Opts. See there for doc comments. -type GaugeOpts Opts - -// NewGauge creates a new Gauge based on the provided GaugeOpts. -// -// The returned implementation is optimized for a fast Set method. If you have a -// choice for managing the value of a Gauge via Set vs. Inc/Dec/Add/Sub, pick -// the former. For example, the Inc method of the returned Gauge is slower than -// the Inc method of a Counter returned by NewCounter. This matches the typical -// scenarios for Gauges and Counters, where the former tends to be Set-heavy and -// the latter Inc-heavy. -func NewGauge(opts GaugeOpts) Gauge { - desc := NewDesc( - BuildFQName(opts.Namespace, opts.Subsystem, opts.Name), - opts.Help, - nil, - opts.ConstLabels, - ) - result := &gauge{desc: desc, labelPairs: desc.constLabelPairs} - result.init(result) // Init self-collection. 
- return result -} - -type gauge struct { - // valBits contains the bits of the represented float64 value. It has - // to go first in the struct to guarantee alignment for atomic - // operations. http://golang.org/pkg/sync/atomic/#pkg-note-BUG - valBits uint64 - - selfCollector - - desc *Desc - labelPairs []*dto.LabelPair -} - -func (g *gauge) Desc() *Desc { - return g.desc -} - -func (g *gauge) Set(val float64) { - atomic.StoreUint64(&g.valBits, math.Float64bits(val)) -} - -func (g *gauge) SetToCurrentTime() { - g.Set(float64(time.Now().UnixNano()) / 1e9) -} - -func (g *gauge) Inc() { - g.Add(1) -} - -func (g *gauge) Dec() { - g.Add(-1) -} - -func (g *gauge) Add(val float64) { - for { - oldBits := atomic.LoadUint64(&g.valBits) - newBits := math.Float64bits(math.Float64frombits(oldBits) + val) - if atomic.CompareAndSwapUint64(&g.valBits, oldBits, newBits) { - return - } - } -} - -func (g *gauge) Sub(val float64) { - g.Add(val * -1) -} - -func (g *gauge) Write(out *dto.Metric) error { - val := math.Float64frombits(atomic.LoadUint64(&g.valBits)) - return populateMetric(GaugeValue, val, g.labelPairs, out) -} - -// GaugeVec is a Collector that bundles a set of Gauges that all share the same -// Desc, but have different values for their variable labels. This is used if -// you want to count the same thing partitioned by various dimensions -// (e.g. number of operations queued, partitioned by user and operation -// type). Create instances with NewGaugeVec. -type GaugeVec struct { - *metricVec -} - -// NewGaugeVec creates a new GaugeVec based on the provided GaugeOpts and -// partitioned by the given label names. 
-func NewGaugeVec(opts GaugeOpts, labelNames []string) *GaugeVec { - desc := NewDesc( - BuildFQName(opts.Namespace, opts.Subsystem, opts.Name), - opts.Help, - labelNames, - opts.ConstLabels, - ) - return &GaugeVec{ - metricVec: newMetricVec(desc, func(lvs ...string) Metric { - if len(lvs) != len(desc.variableLabels) { - panic(makeInconsistentCardinalityError(desc.fqName, desc.variableLabels, lvs)) - } - result := &gauge{desc: desc, labelPairs: makeLabelPairs(desc, lvs)} - result.init(result) // Init self-collection. - return result - }), - } -} - -// GetMetricWithLabelValues returns the Gauge for the given slice of label -// values (same order as the VariableLabels in Desc). If that combination of -// label values is accessed for the first time, a new Gauge is created. -// -// It is possible to call this method without using the returned Gauge to only -// create the new Gauge but leave it at its starting value 0. See also the -// SummaryVec example. -// -// Keeping the Gauge for later use is possible (and should be considered if -// performance is critical), but keep in mind that Reset, DeleteLabelValues and -// Delete can be used to delete the Gauge from the GaugeVec. In that case, the -// Gauge will still exist, but it will not be exported anymore, even if a -// Gauge with the same label values is created later. See also the CounterVec -// example. -// -// An error is returned if the number of label values is not the same as the -// number of VariableLabels in Desc (minus any curried labels). -// -// Note that for more than one label value, this method is prone to mistakes -// caused by an incorrect order of arguments. Consider GetMetricWith(Labels) as -// an alternative to avoid that type of mistake. For higher label numbers, the -// latter has a much more readable (albeit more verbose) syntax, but it comes -// with a performance overhead (for creating and processing the Labels map). 
-func (v *GaugeVec) GetMetricWithLabelValues(lvs ...string) (Gauge, error) { - metric, err := v.metricVec.getMetricWithLabelValues(lvs...) - if metric != nil { - return metric.(Gauge), err - } - return nil, err -} - -// GetMetricWith returns the Gauge for the given Labels map (the label names -// must match those of the VariableLabels in Desc). If that label map is -// accessed for the first time, a new Gauge is created. Implications of -// creating a Gauge without using it and keeping the Gauge for later use are -// the same as for GetMetricWithLabelValues. -// -// An error is returned if the number and names of the Labels are inconsistent -// with those of the VariableLabels in Desc (minus any curried labels). -// -// This method is used for the same purpose as -// GetMetricWithLabelValues(...string). See there for pros and cons of the two -// methods. -func (v *GaugeVec) GetMetricWith(labels Labels) (Gauge, error) { - metric, err := v.metricVec.getMetricWith(labels) - if metric != nil { - return metric.(Gauge), err - } - return nil, err -} - -// WithLabelValues works as GetMetricWithLabelValues, but panics where -// GetMetricWithLabelValues would have returned an error. Not returning an -// error allows shortcuts like -// myVec.WithLabelValues("404", "GET").Add(42) -func (v *GaugeVec) WithLabelValues(lvs ...string) Gauge { - g, err := v.GetMetricWithLabelValues(lvs...) - if err != nil { - panic(err) - } - return g -} - -// With works as GetMetricWith, but panics where GetMetricWithLabels would have -// returned an error. Not returning an error allows shortcuts like -// myVec.With(prometheus.Labels{"code": "404", "method": "GET"}).Add(42) -func (v *GaugeVec) With(labels Labels) Gauge { - g, err := v.GetMetricWith(labels) - if err != nil { - panic(err) - } - return g -} - -// CurryWith returns a vector curried with the provided labels, i.e. the -// returned vector has those labels pre-set for all labeled operations performed -// on it. 
The cardinality of the curried vector is reduced accordingly. The -// order of the remaining labels stays the same (just with the curried labels -// taken out of the sequence – which is relevant for the -// (GetMetric)WithLabelValues methods). It is possible to curry a curried -// vector, but only with labels not yet used for currying before. -// -// The metrics contained in the GaugeVec are shared between the curried and -// uncurried vectors. They are just accessed differently. Curried and uncurried -// vectors behave identically in terms of collection. Only one must be -// registered with a given registry (usually the uncurried version). The Reset -// method deletes all metrics, even if called on a curried vector. -func (v *GaugeVec) CurryWith(labels Labels) (*GaugeVec, error) { - vec, err := v.curryWith(labels) - if vec != nil { - return &GaugeVec{vec}, err - } - return nil, err -} - -// MustCurryWith works as CurryWith but panics where CurryWith would have -// returned an error. -func (v *GaugeVec) MustCurryWith(labels Labels) *GaugeVec { - vec, err := v.CurryWith(labels) - if err != nil { - panic(err) - } - return vec -} - -// GaugeFunc is a Gauge whose value is determined at collect time by calling a -// provided function. -// -// To create GaugeFunc instances, use NewGaugeFunc. -type GaugeFunc interface { - Metric - Collector -} - -// NewGaugeFunc creates a new GaugeFunc based on the provided GaugeOpts. The -// value reported is determined by calling the given function from within the -// Write method. Take into account that metric collection may happen -// concurrently. If that results in concurrent calls to Write, like in the case -// where a GaugeFunc is directly registered with Prometheus, the provided -// function must be concurrency-safe. 
-func NewGaugeFunc(opts GaugeOpts, function func() float64) GaugeFunc { - return newValueFunc(NewDesc( - BuildFQName(opts.Namespace, opts.Subsystem, opts.Name), - opts.Help, - nil, - opts.ConstLabels, - ), GaugeValue, function) -} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/go_collector.go b/vendor/github.com/prometheus/client_golang/prometheus/go_collector.go deleted file mode 100644 index ba3b933..0000000 --- a/vendor/github.com/prometheus/client_golang/prometheus/go_collector.go +++ /dev/null @@ -1,301 +0,0 @@ -// Copyright 2018 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package prometheus - -import ( - "fmt" - "runtime" - "runtime/debug" - "time" -) - -type goCollector struct { - goroutinesDesc *Desc - threadsDesc *Desc - gcDesc *Desc - goInfoDesc *Desc - - // metrics to describe and collect - metrics memStatsMetrics -} - -// NewGoCollector returns a collector which exports metrics about the current Go -// process. This includes memory stats. To collect those, runtime.ReadMemStats -// is called. This causes a stop-the-world, which is very short with Go1.9+ -// (~25µs). However, with older Go versions, the stop-the-world duration depends -// on the heap size and can be quite significant (~1.7 ms/GiB as per -// https://go-review.googlesource.com/c/go/+/34937). 
-func NewGoCollector() Collector { - return &goCollector{ - goroutinesDesc: NewDesc( - "go_goroutines", - "Number of goroutines that currently exist.", - nil, nil), - threadsDesc: NewDesc( - "go_threads", - "Number of OS threads created.", - nil, nil), - gcDesc: NewDesc( - "go_gc_duration_seconds", - "A summary of the GC invocation durations.", - nil, nil), - goInfoDesc: NewDesc( - "go_info", - "Information about the Go environment.", - nil, Labels{"version": runtime.Version()}), - metrics: memStatsMetrics{ - { - desc: NewDesc( - memstatNamespace("alloc_bytes"), - "Number of bytes allocated and still in use.", - nil, nil, - ), - eval: func(ms *runtime.MemStats) float64 { return float64(ms.Alloc) }, - valType: GaugeValue, - }, { - desc: NewDesc( - memstatNamespace("alloc_bytes_total"), - "Total number of bytes allocated, even if freed.", - nil, nil, - ), - eval: func(ms *runtime.MemStats) float64 { return float64(ms.TotalAlloc) }, - valType: CounterValue, - }, { - desc: NewDesc( - memstatNamespace("sys_bytes"), - "Number of bytes obtained from system.", - nil, nil, - ), - eval: func(ms *runtime.MemStats) float64 { return float64(ms.Sys) }, - valType: GaugeValue, - }, { - desc: NewDesc( - memstatNamespace("lookups_total"), - "Total number of pointer lookups.", - nil, nil, - ), - eval: func(ms *runtime.MemStats) float64 { return float64(ms.Lookups) }, - valType: CounterValue, - }, { - desc: NewDesc( - memstatNamespace("mallocs_total"), - "Total number of mallocs.", - nil, nil, - ), - eval: func(ms *runtime.MemStats) float64 { return float64(ms.Mallocs) }, - valType: CounterValue, - }, { - desc: NewDesc( - memstatNamespace("frees_total"), - "Total number of frees.", - nil, nil, - ), - eval: func(ms *runtime.MemStats) float64 { return float64(ms.Frees) }, - valType: CounterValue, - }, { - desc: NewDesc( - memstatNamespace("heap_alloc_bytes"), - "Number of heap bytes allocated and still in use.", - nil, nil, - ), - eval: func(ms *runtime.MemStats) float64 { return 
float64(ms.HeapAlloc) }, - valType: GaugeValue, - }, { - desc: NewDesc( - memstatNamespace("heap_sys_bytes"), - "Number of heap bytes obtained from system.", - nil, nil, - ), - eval: func(ms *runtime.MemStats) float64 { return float64(ms.HeapSys) }, - valType: GaugeValue, - }, { - desc: NewDesc( - memstatNamespace("heap_idle_bytes"), - "Number of heap bytes waiting to be used.", - nil, nil, - ), - eval: func(ms *runtime.MemStats) float64 { return float64(ms.HeapIdle) }, - valType: GaugeValue, - }, { - desc: NewDesc( - memstatNamespace("heap_inuse_bytes"), - "Number of heap bytes that are in use.", - nil, nil, - ), - eval: func(ms *runtime.MemStats) float64 { return float64(ms.HeapInuse) }, - valType: GaugeValue, - }, { - desc: NewDesc( - memstatNamespace("heap_released_bytes"), - "Number of heap bytes released to OS.", - nil, nil, - ), - eval: func(ms *runtime.MemStats) float64 { return float64(ms.HeapReleased) }, - valType: GaugeValue, - }, { - desc: NewDesc( - memstatNamespace("heap_objects"), - "Number of allocated objects.", - nil, nil, - ), - eval: func(ms *runtime.MemStats) float64 { return float64(ms.HeapObjects) }, - valType: GaugeValue, - }, { - desc: NewDesc( - memstatNamespace("stack_inuse_bytes"), - "Number of bytes in use by the stack allocator.", - nil, nil, - ), - eval: func(ms *runtime.MemStats) float64 { return float64(ms.StackInuse) }, - valType: GaugeValue, - }, { - desc: NewDesc( - memstatNamespace("stack_sys_bytes"), - "Number of bytes obtained from system for stack allocator.", - nil, nil, - ), - eval: func(ms *runtime.MemStats) float64 { return float64(ms.StackSys) }, - valType: GaugeValue, - }, { - desc: NewDesc( - memstatNamespace("mspan_inuse_bytes"), - "Number of bytes in use by mspan structures.", - nil, nil, - ), - eval: func(ms *runtime.MemStats) float64 { return float64(ms.MSpanInuse) }, - valType: GaugeValue, - }, { - desc: NewDesc( - memstatNamespace("mspan_sys_bytes"), - "Number of bytes used for mspan structures obtained from 
system.", - nil, nil, - ), - eval: func(ms *runtime.MemStats) float64 { return float64(ms.MSpanSys) }, - valType: GaugeValue, - }, { - desc: NewDesc( - memstatNamespace("mcache_inuse_bytes"), - "Number of bytes in use by mcache structures.", - nil, nil, - ), - eval: func(ms *runtime.MemStats) float64 { return float64(ms.MCacheInuse) }, - valType: GaugeValue, - }, { - desc: NewDesc( - memstatNamespace("mcache_sys_bytes"), - "Number of bytes used for mcache structures obtained from system.", - nil, nil, - ), - eval: func(ms *runtime.MemStats) float64 { return float64(ms.MCacheSys) }, - valType: GaugeValue, - }, { - desc: NewDesc( - memstatNamespace("buck_hash_sys_bytes"), - "Number of bytes used by the profiling bucket hash table.", - nil, nil, - ), - eval: func(ms *runtime.MemStats) float64 { return float64(ms.BuckHashSys) }, - valType: GaugeValue, - }, { - desc: NewDesc( - memstatNamespace("gc_sys_bytes"), - "Number of bytes used for garbage collection system metadata.", - nil, nil, - ), - eval: func(ms *runtime.MemStats) float64 { return float64(ms.GCSys) }, - valType: GaugeValue, - }, { - desc: NewDesc( - memstatNamespace("other_sys_bytes"), - "Number of bytes used for other system allocations.", - nil, nil, - ), - eval: func(ms *runtime.MemStats) float64 { return float64(ms.OtherSys) }, - valType: GaugeValue, - }, { - desc: NewDesc( - memstatNamespace("next_gc_bytes"), - "Number of heap bytes when next garbage collection will take place.", - nil, nil, - ), - eval: func(ms *runtime.MemStats) float64 { return float64(ms.NextGC) }, - valType: GaugeValue, - }, { - desc: NewDesc( - memstatNamespace("last_gc_time_seconds"), - "Number of seconds since 1970 of last garbage collection.", - nil, nil, - ), - eval: func(ms *runtime.MemStats) float64 { return float64(ms.LastGC) / 1e9 }, - valType: GaugeValue, - }, { - desc: NewDesc( - memstatNamespace("gc_cpu_fraction"), - "The fraction of this program's available CPU time used by the GC since the program started.", - nil, 
nil, - ), - eval: func(ms *runtime.MemStats) float64 { return ms.GCCPUFraction }, - valType: GaugeValue, - }, - }, - } -} - -func memstatNamespace(s string) string { - return fmt.Sprintf("go_memstats_%s", s) -} - -// Describe returns all descriptions of the collector. -func (c *goCollector) Describe(ch chan<- *Desc) { - ch <- c.goroutinesDesc - ch <- c.threadsDesc - ch <- c.gcDesc - ch <- c.goInfoDesc - for _, i := range c.metrics { - ch <- i.desc - } -} - -// Collect returns the current state of all metrics of the collector. -func (c *goCollector) Collect(ch chan<- Metric) { - ch <- MustNewConstMetric(c.goroutinesDesc, GaugeValue, float64(runtime.NumGoroutine())) - n, _ := runtime.ThreadCreateProfile(nil) - ch <- MustNewConstMetric(c.threadsDesc, GaugeValue, float64(n)) - - var stats debug.GCStats - stats.PauseQuantiles = make([]time.Duration, 5) - debug.ReadGCStats(&stats) - - quantiles := make(map[float64]float64) - for idx, pq := range stats.PauseQuantiles[1:] { - quantiles[float64(idx+1)/float64(len(stats.PauseQuantiles)-1)] = pq.Seconds() - } - quantiles[0.0] = stats.PauseQuantiles[0].Seconds() - ch <- MustNewConstSummary(c.gcDesc, uint64(stats.NumGC), stats.PauseTotal.Seconds(), quantiles) - - ch <- MustNewConstMetric(c.goInfoDesc, GaugeValue, 1) - - ms := &runtime.MemStats{} - runtime.ReadMemStats(ms) - for _, i := range c.metrics { - ch <- MustNewConstMetric(i.desc, i.valType, i.eval(ms)) - } -} - -// memStatsMetrics provide description, value, and value type for memstat metrics. 
-type memStatsMetrics []struct { - desc *Desc - eval func(*runtime.MemStats) float64 - valType ValueType -} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/histogram.go b/vendor/github.com/prometheus/client_golang/prometheus/histogram.go deleted file mode 100644 index f88da70..0000000 --- a/vendor/github.com/prometheus/client_golang/prometheus/histogram.go +++ /dev/null @@ -1,614 +0,0 @@ -// Copyright 2015 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package prometheus - -import ( - "fmt" - "math" - "runtime" - "sort" - "sync" - "sync/atomic" - - "github.com/golang/protobuf/proto" - - dto "github.com/prometheus/client_model/go" -) - -// A Histogram counts individual observations from an event or sample stream in -// configurable buckets. Similar to a summary, it also provides a sum of -// observations and an observation count. -// -// On the Prometheus server, quantiles can be calculated from a Histogram using -// the histogram_quantile function in the query language. -// -// Note that Histograms, in contrast to Summaries, can be aggregated with the -// Prometheus query language (see the documentation for detailed -// procedures). However, Histograms require the user to pre-define suitable -// buckets, and they are in general less accurate. The Observe method of a -// Histogram has a very low performance overhead in comparison with the Observe -// method of a Summary. 
-// -// To create Histogram instances, use NewHistogram. -type Histogram interface { - Metric - Collector - - // Observe adds a single observation to the histogram. - Observe(float64) -} - -// bucketLabel is used for the label that defines the upper bound of a -// bucket of a histogram ("le" -> "less or equal"). -const bucketLabel = "le" - -// DefBuckets are the default Histogram buckets. The default buckets are -// tailored to broadly measure the response time (in seconds) of a network -// service. Most likely, however, you will be required to define buckets -// customized to your use case. -var ( - DefBuckets = []float64{.005, .01, .025, .05, .1, .25, .5, 1, 2.5, 5, 10} - - errBucketLabelNotAllowed = fmt.Errorf( - "%q is not allowed as label name in histograms", bucketLabel, - ) -) - -// LinearBuckets creates 'count' buckets, each 'width' wide, where the lowest -// bucket has an upper bound of 'start'. The final +Inf bucket is not counted -// and not included in the returned slice. The returned slice is meant to be -// used for the Buckets field of HistogramOpts. -// -// The function panics if 'count' is zero or negative. -func LinearBuckets(start, width float64, count int) []float64 { - if count < 1 { - panic("LinearBuckets needs a positive count") - } - buckets := make([]float64, count) - for i := range buckets { - buckets[i] = start - start += width - } - return buckets -} - -// ExponentialBuckets creates 'count' buckets, where the lowest bucket has an -// upper bound of 'start' and each following bucket's upper bound is 'factor' -// times the previous bucket's upper bound. The final +Inf bucket is not counted -// and not included in the returned slice. The returned slice is meant to be -// used for the Buckets field of HistogramOpts. -// -// The function panics if 'count' is 0 or negative, if 'start' is 0 or negative, -// or if 'factor' is less than or equal 1. 
-func ExponentialBuckets(start, factor float64, count int) []float64 { - if count < 1 { - panic("ExponentialBuckets needs a positive count") - } - if start <= 0 { - panic("ExponentialBuckets needs a positive start value") - } - if factor <= 1 { - panic("ExponentialBuckets needs a factor greater than 1") - } - buckets := make([]float64, count) - for i := range buckets { - buckets[i] = start - start *= factor - } - return buckets -} - -// HistogramOpts bundles the options for creating a Histogram metric. It is -// mandatory to set Name to a non-empty string. All other fields are optional -// and can safely be left at their zero value, although it is strongly -// encouraged to set a Help string. -type HistogramOpts struct { - // Namespace, Subsystem, and Name are components of the fully-qualified - // name of the Histogram (created by joining these components with - // "_"). Only Name is mandatory, the others merely help structuring the - // name. Note that the fully-qualified name of the Histogram must be a - // valid Prometheus metric name. - Namespace string - Subsystem string - Name string - - // Help provides information about this Histogram. - // - // Metrics with the same fully-qualified name must have the same Help - // string. - Help string - - // ConstLabels are used to attach fixed labels to this metric. Metrics - // with the same fully-qualified name must have the same label names in - // their ConstLabels. - // - // ConstLabels are only used rarely. In particular, do not use them to - // attach the same labels to all your metrics. Those use cases are - // better covered by target labels set by the scraping Prometheus - // server, or by one specific metric (e.g. a build_info or a - // machine_role metric). See also - // https://prometheus.io/docs/instrumenting/writing_exporters/#target-labels,-not-static-scraped-labels - ConstLabels Labels - - // Buckets defines the buckets into which observations are counted. 
Each - // element in the slice is the upper inclusive bound of a bucket. The - // values must be sorted in strictly increasing order. There is no need - // to add a highest bucket with +Inf bound, it will be added - // implicitly. The default value is DefBuckets. - Buckets []float64 -} - -// NewHistogram creates a new Histogram based on the provided HistogramOpts. It -// panics if the buckets in HistogramOpts are not in strictly increasing order. -func NewHistogram(opts HistogramOpts) Histogram { - return newHistogram( - NewDesc( - BuildFQName(opts.Namespace, opts.Subsystem, opts.Name), - opts.Help, - nil, - opts.ConstLabels, - ), - opts, - ) -} - -func newHistogram(desc *Desc, opts HistogramOpts, labelValues ...string) Histogram { - if len(desc.variableLabels) != len(labelValues) { - panic(makeInconsistentCardinalityError(desc.fqName, desc.variableLabels, labelValues)) - } - - for _, n := range desc.variableLabels { - if n == bucketLabel { - panic(errBucketLabelNotAllowed) - } - } - for _, lp := range desc.constLabelPairs { - if lp.GetName() == bucketLabel { - panic(errBucketLabelNotAllowed) - } - } - - if len(opts.Buckets) == 0 { - opts.Buckets = DefBuckets - } - - h := &histogram{ - desc: desc, - upperBounds: opts.Buckets, - labelPairs: makeLabelPairs(desc, labelValues), - counts: [2]*histogramCounts{&histogramCounts{}, &histogramCounts{}}, - } - for i, upperBound := range h.upperBounds { - if i < len(h.upperBounds)-1 { - if upperBound >= h.upperBounds[i+1] { - panic(fmt.Errorf( - "histogram buckets must be in increasing order: %f >= %f", - upperBound, h.upperBounds[i+1], - )) - } - } else { - if math.IsInf(upperBound, +1) { - // The +Inf bucket is implicit. Remove it here. 
- h.upperBounds = h.upperBounds[:i] - } - } - } - // Finally we know the final length of h.upperBounds and can make counts - // for both states: - h.counts[0].buckets = make([]uint64, len(h.upperBounds)) - h.counts[1].buckets = make([]uint64, len(h.upperBounds)) - - h.init(h) // Init self-collection. - return h -} - -type histogramCounts struct { - // sumBits contains the bits of the float64 representing the sum of all - // observations. sumBits and count have to go first in the struct to - // guarantee alignment for atomic operations. - // http://golang.org/pkg/sync/atomic/#pkg-note-BUG - sumBits uint64 - count uint64 - buckets []uint64 -} - -type histogram struct { - // countAndHotIdx is a complicated one. For lock-free yet atomic - // observations, we need to save the total count of observations again, - // combined with the index of the currently-hot counts struct, so that - // we can perform the operation on both values atomically. The least - // significant bit defines the hot counts struct. The remaining 63 bits - // represent the total count of observations. This happens under the - // assumption that the 63bit count will never overflow. Rationale: An - // observations takes about 30ns. Let's assume it could happen in - // 10ns. Overflowing the counter will then take at least (2^63)*10ns, - // which is about 3000 years. - // - // This has to be first in the struct for 64bit alignment. See - // http://golang.org/pkg/sync/atomic/#pkg-note-BUG - countAndHotIdx uint64 - - selfCollector - desc *Desc - writeMtx sync.Mutex // Only used in the Write method. - - upperBounds []float64 - - // Two counts, one is "hot" for lock-free observations, the other is - // "cold" for writing out a dto.Metric. It has to be an array of - // pointers to guarantee 64bit alignment of the histogramCounts, see - // http://golang.org/pkg/sync/atomic/#pkg-note-BUG. - counts [2]*histogramCounts - hotIdx int // Index of currently-hot counts. Only used within Write. 
- - labelPairs []*dto.LabelPair -} - -func (h *histogram) Desc() *Desc { - return h.desc -} - -func (h *histogram) Observe(v float64) { - // TODO(beorn7): For small numbers of buckets (<30), a linear search is - // slightly faster than the binary search. If we really care, we could - // switch from one search strategy to the other depending on the number - // of buckets. - // - // Microbenchmarks (BenchmarkHistogramNoLabels): - // 11 buckets: 38.3 ns/op linear - binary 48.7 ns/op - // 100 buckets: 78.1 ns/op linear - binary 54.9 ns/op - // 300 buckets: 154 ns/op linear - binary 61.6 ns/op - i := sort.SearchFloat64s(h.upperBounds, v) - - // We increment h.countAndHotIdx by 2 so that the counter in the upper - // 63 bits gets incremented by 1. At the same time, we get the new value - // back, which we can use to find the currently-hot counts. - n := atomic.AddUint64(&h.countAndHotIdx, 2) - hotCounts := h.counts[n%2] - - if i < len(h.upperBounds) { - atomic.AddUint64(&hotCounts.buckets[i], 1) - } - for { - oldBits := atomic.LoadUint64(&hotCounts.sumBits) - newBits := math.Float64bits(math.Float64frombits(oldBits) + v) - if atomic.CompareAndSwapUint64(&hotCounts.sumBits, oldBits, newBits) { - break - } - } - // Increment count last as we take it as a signal that the observation - // is complete. - atomic.AddUint64(&hotCounts.count, 1) -} - -func (h *histogram) Write(out *dto.Metric) error { - var ( - his = &dto.Histogram{} - buckets = make([]*dto.Bucket, len(h.upperBounds)) - hotCounts, coldCounts *histogramCounts - count uint64 - ) - - // For simplicity, we mutex the rest of this method. It is not in the - // hot path, i.e. Observe is called much more often than Write. The - // complication of making Write lock-free isn't worth it. 
- h.writeMtx.Lock() - defer h.writeMtx.Unlock() - - // This is a bit arcane, which is why the following spells out this if - // clause in English: - // - // If the currently-hot counts struct is #0, we atomically increment - // h.countAndHotIdx by 1 so that from now on Observe will use the counts - // struct #1. Furthermore, the atomic increment gives us the new value, - // which, in its most significant 63 bits, tells us the count of - // observations done so far up to and including currently ongoing - // observations still using the counts struct just changed from hot to - // cold. To have a normal uint64 for the count, we bitshift by 1 and - // save the result in count. We also set h.hotIdx to 1 for the next - // Write call, and we will refer to counts #1 as hotCounts and to counts - // #0 as coldCounts. - // - // If the currently-hot counts struct is #1, we do the corresponding - // things the other way round. We have to _decrement_ h.countAndHotIdx - // (which is a bit arcane in itself, as we have to express -1 with an - // unsigned int...). - if h.hotIdx == 0 { - count = atomic.AddUint64(&h.countAndHotIdx, 1) >> 1 - h.hotIdx = 1 - hotCounts = h.counts[1] - coldCounts = h.counts[0] - } else { - count = atomic.AddUint64(&h.countAndHotIdx, ^uint64(0)) >> 1 // Decrement. - h.hotIdx = 0 - hotCounts = h.counts[0] - coldCounts = h.counts[1] - } - - // Now we have to wait for the now-declared-cold counts to actually cool - // down, i.e. wait for all observations still using it to finish. That's - // the case once the count in the cold counts struct is the same as the - // one atomically retrieved from the upper 63bits of h.countAndHotIdx. - for { - if count == atomic.LoadUint64(&coldCounts.count) { - break - } - runtime.Gosched() // Let observations get work done. 
- } - - his.SampleCount = proto.Uint64(count) - his.SampleSum = proto.Float64(math.Float64frombits(atomic.LoadUint64(&coldCounts.sumBits))) - var cumCount uint64 - for i, upperBound := range h.upperBounds { - cumCount += atomic.LoadUint64(&coldCounts.buckets[i]) - buckets[i] = &dto.Bucket{ - CumulativeCount: proto.Uint64(cumCount), - UpperBound: proto.Float64(upperBound), - } - } - - his.Bucket = buckets - out.Histogram = his - out.Label = h.labelPairs - - // Finally add all the cold counts to the new hot counts and reset the cold counts. - atomic.AddUint64(&hotCounts.count, count) - atomic.StoreUint64(&coldCounts.count, 0) - for { - oldBits := atomic.LoadUint64(&hotCounts.sumBits) - newBits := math.Float64bits(math.Float64frombits(oldBits) + his.GetSampleSum()) - if atomic.CompareAndSwapUint64(&hotCounts.sumBits, oldBits, newBits) { - atomic.StoreUint64(&coldCounts.sumBits, 0) - break - } - } - for i := range h.upperBounds { - atomic.AddUint64(&hotCounts.buckets[i], atomic.LoadUint64(&coldCounts.buckets[i])) - atomic.StoreUint64(&coldCounts.buckets[i], 0) - } - return nil -} - -// HistogramVec is a Collector that bundles a set of Histograms that all share the -// same Desc, but have different values for their variable labels. This is used -// if you want to count the same thing partitioned by various dimensions -// (e.g. HTTP request latencies, partitioned by status code and method). Create -// instances with NewHistogramVec. -type HistogramVec struct { - *metricVec -} - -// NewHistogramVec creates a new HistogramVec based on the provided HistogramOpts and -// partitioned by the given label names. -func NewHistogramVec(opts HistogramOpts, labelNames []string) *HistogramVec { - desc := NewDesc( - BuildFQName(opts.Namespace, opts.Subsystem, opts.Name), - opts.Help, - labelNames, - opts.ConstLabels, - ) - return &HistogramVec{ - metricVec: newMetricVec(desc, func(lvs ...string) Metric { - return newHistogram(desc, opts, lvs...) 
- }), - } -} - -// GetMetricWithLabelValues returns the Histogram for the given slice of label -// values (same order as the VariableLabels in Desc). If that combination of -// label values is accessed for the first time, a new Histogram is created. -// -// It is possible to call this method without using the returned Histogram to only -// create the new Histogram but leave it at its starting value, a Histogram without -// any observations. -// -// Keeping the Histogram for later use is possible (and should be considered if -// performance is critical), but keep in mind that Reset, DeleteLabelValues and -// Delete can be used to delete the Histogram from the HistogramVec. In that case, the -// Histogram will still exist, but it will not be exported anymore, even if a -// Histogram with the same label values is created later. See also the CounterVec -// example. -// -// An error is returned if the number of label values is not the same as the -// number of VariableLabels in Desc (minus any curried labels). -// -// Note that for more than one label value, this method is prone to mistakes -// caused by an incorrect order of arguments. Consider GetMetricWith(Labels) as -// an alternative to avoid that type of mistake. For higher label numbers, the -// latter has a much more readable (albeit more verbose) syntax, but it comes -// with a performance overhead (for creating and processing the Labels map). -// See also the GaugeVec example. -func (v *HistogramVec) GetMetricWithLabelValues(lvs ...string) (Observer, error) { - metric, err := v.metricVec.getMetricWithLabelValues(lvs...) - if metric != nil { - return metric.(Observer), err - } - return nil, err -} - -// GetMetricWith returns the Histogram for the given Labels map (the label names -// must match those of the VariableLabels in Desc). If that label map is -// accessed for the first time, a new Histogram is created. 
Implications of -// creating a Histogram without using it and keeping the Histogram for later use -// are the same as for GetMetricWithLabelValues. -// -// An error is returned if the number and names of the Labels are inconsistent -// with those of the VariableLabels in Desc (minus any curried labels). -// -// This method is used for the same purpose as -// GetMetricWithLabelValues(...string). See there for pros and cons of the two -// methods. -func (v *HistogramVec) GetMetricWith(labels Labels) (Observer, error) { - metric, err := v.metricVec.getMetricWith(labels) - if metric != nil { - return metric.(Observer), err - } - return nil, err -} - -// WithLabelValues works as GetMetricWithLabelValues, but panics where -// GetMetricWithLabelValues would have returned an error. Not returning an -// error allows shortcuts like -// myVec.WithLabelValues("404", "GET").Observe(42.21) -func (v *HistogramVec) WithLabelValues(lvs ...string) Observer { - h, err := v.GetMetricWithLabelValues(lvs...) - if err != nil { - panic(err) - } - return h -} - -// With works as GetMetricWith but panics where GetMetricWithLabels would have -// returned an error. Not returning an error allows shortcuts like -// myVec.With(prometheus.Labels{"code": "404", "method": "GET"}).Observe(42.21) -func (v *HistogramVec) With(labels Labels) Observer { - h, err := v.GetMetricWith(labels) - if err != nil { - panic(err) - } - return h -} - -// CurryWith returns a vector curried with the provided labels, i.e. the -// returned vector has those labels pre-set for all labeled operations performed -// on it. The cardinality of the curried vector is reduced accordingly. The -// order of the remaining labels stays the same (just with the curried labels -// taken out of the sequence – which is relevant for the -// (GetMetric)WithLabelValues methods). It is possible to curry a curried -// vector, but only with labels not yet used for currying before. 
-// -// The metrics contained in the HistogramVec are shared between the curried and -// uncurried vectors. They are just accessed differently. Curried and uncurried -// vectors behave identically in terms of collection. Only one must be -// registered with a given registry (usually the uncurried version). The Reset -// method deletes all metrics, even if called on a curried vector. -func (v *HistogramVec) CurryWith(labels Labels) (ObserverVec, error) { - vec, err := v.curryWith(labels) - if vec != nil { - return &HistogramVec{vec}, err - } - return nil, err -} - -// MustCurryWith works as CurryWith but panics where CurryWith would have -// returned an error. -func (v *HistogramVec) MustCurryWith(labels Labels) ObserverVec { - vec, err := v.CurryWith(labels) - if err != nil { - panic(err) - } - return vec -} - -type constHistogram struct { - desc *Desc - count uint64 - sum float64 - buckets map[float64]uint64 - labelPairs []*dto.LabelPair -} - -func (h *constHistogram) Desc() *Desc { - return h.desc -} - -func (h *constHistogram) Write(out *dto.Metric) error { - his := &dto.Histogram{} - buckets := make([]*dto.Bucket, 0, len(h.buckets)) - - his.SampleCount = proto.Uint64(h.count) - his.SampleSum = proto.Float64(h.sum) - - for upperBound, count := range h.buckets { - buckets = append(buckets, &dto.Bucket{ - CumulativeCount: proto.Uint64(count), - UpperBound: proto.Float64(upperBound), - }) - } - - if len(buckets) > 0 { - sort.Sort(buckSort(buckets)) - } - his.Bucket = buckets - - out.Histogram = his - out.Label = h.labelPairs - - return nil -} - -// NewConstHistogram returns a metric representing a Prometheus histogram with -// fixed values for the count, sum, and bucket counts. As those parameters -// cannot be changed, the returned value does not implement the Histogram -// interface (but only the Metric interface). Users of this package will not -// have much use for it in regular operations. 
However, when implementing custom -// Collectors, it is useful as a throw-away metric that is generated on the fly -// to send it to Prometheus in the Collect method. -// -// buckets is a map of upper bounds to cumulative counts, excluding the +Inf -// bucket. -// -// NewConstHistogram returns an error if the length of labelValues is not -// consistent with the variable labels in Desc or if Desc is invalid. -func NewConstHistogram( - desc *Desc, - count uint64, - sum float64, - buckets map[float64]uint64, - labelValues ...string, -) (Metric, error) { - if desc.err != nil { - return nil, desc.err - } - if err := validateLabelValues(labelValues, len(desc.variableLabels)); err != nil { - return nil, err - } - return &constHistogram{ - desc: desc, - count: count, - sum: sum, - buckets: buckets, - labelPairs: makeLabelPairs(desc, labelValues), - }, nil -} - -// MustNewConstHistogram is a version of NewConstHistogram that panics where -// NewConstMetric would have returned an error. -func MustNewConstHistogram( - desc *Desc, - count uint64, - sum float64, - buckets map[float64]uint64, - labelValues ...string, -) Metric { - m, err := NewConstHistogram(desc, count, sum, buckets, labelValues...) - if err != nil { - panic(err) - } - return m -} - -type buckSort []*dto.Bucket - -func (s buckSort) Len() int { - return len(s) -} - -func (s buckSort) Swap(i, j int) { - s[i], s[j] = s[j], s[i] -} - -func (s buckSort) Less(i, j int) bool { - return s[i].GetUpperBound() < s[j].GetUpperBound() -} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/http.go b/vendor/github.com/prometheus/client_golang/prometheus/http.go deleted file mode 100644 index 9f0875b..0000000 --- a/vendor/github.com/prometheus/client_golang/prometheus/http.go +++ /dev/null @@ -1,504 +0,0 @@ -// Copyright 2014 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package prometheus - -import ( - "bufio" - "compress/gzip" - "io" - "net" - "net/http" - "strconv" - "strings" - "sync" - "time" - - "github.com/prometheus/common/expfmt" -) - -// TODO(beorn7): Remove this whole file. It is a partial mirror of -// promhttp/http.go (to avoid circular import chains) where everything HTTP -// related should live. The functions here are just for avoiding -// breakage. Everything is deprecated. - -const ( - contentTypeHeader = "Content-Type" - contentLengthHeader = "Content-Length" - contentEncodingHeader = "Content-Encoding" - acceptEncodingHeader = "Accept-Encoding" -) - -var gzipPool = sync.Pool{ - New: func() interface{} { - return gzip.NewWriter(nil) - }, -} - -// Handler returns an HTTP handler for the DefaultGatherer. It is -// already instrumented with InstrumentHandler (using "prometheus" as handler -// name). -// -// Deprecated: Please note the issues described in the doc comment of -// InstrumentHandler. You might want to consider using promhttp.Handler instead. -func Handler() http.Handler { - return InstrumentHandler("prometheus", UninstrumentedHandler()) -} - -// UninstrumentedHandler returns an HTTP handler for the DefaultGatherer. -// -// Deprecated: Use promhttp.HandlerFor(DefaultGatherer, promhttp.HandlerOpts{}) -// instead. See there for further documentation. 
-func UninstrumentedHandler() http.Handler { - return http.HandlerFunc(func(rsp http.ResponseWriter, req *http.Request) { - mfs, err := DefaultGatherer.Gather() - if err != nil { - httpError(rsp, err) - return - } - - contentType := expfmt.Negotiate(req.Header) - header := rsp.Header() - header.Set(contentTypeHeader, string(contentType)) - - w := io.Writer(rsp) - if gzipAccepted(req.Header) { - header.Set(contentEncodingHeader, "gzip") - gz := gzipPool.Get().(*gzip.Writer) - defer gzipPool.Put(gz) - - gz.Reset(w) - defer gz.Close() - - w = gz - } - - enc := expfmt.NewEncoder(w, contentType) - - for _, mf := range mfs { - if err := enc.Encode(mf); err != nil { - httpError(rsp, err) - return - } - } - }) -} - -var instLabels = []string{"method", "code"} - -type nower interface { - Now() time.Time -} - -type nowFunc func() time.Time - -func (n nowFunc) Now() time.Time { - return n() -} - -var now nower = nowFunc(func() time.Time { - return time.Now() -}) - -// InstrumentHandler wraps the given HTTP handler for instrumentation. It -// registers four metric collectors (if not already done) and reports HTTP -// metrics to the (newly or already) registered collectors: http_requests_total -// (CounterVec), http_request_duration_microseconds (Summary), -// http_request_size_bytes (Summary), http_response_size_bytes (Summary). Each -// has a constant label named "handler" with the provided handlerName as -// value. http_requests_total is a metric vector partitioned by HTTP method -// (label name "method") and HTTP status code (label name "code"). -// -// Deprecated: InstrumentHandler has several issues. Use the tooling provided in -// package promhttp instead. The issues are the following: (1) It uses Summaries -// rather than Histograms. Summaries are not useful if aggregation across -// multiple instances is required. (2) It uses microseconds as unit, which is -// deprecated and should be replaced by seconds. 
(3) The size of the request is -// calculated in a separate goroutine. Since this calculator requires access to -// the request header, it creates a race with any writes to the header performed -// during request handling. httputil.ReverseProxy is a prominent example for a -// handler performing such writes. (4) It has additional issues with HTTP/2, cf. -// https://github.com/prometheus/client_golang/issues/272. -func InstrumentHandler(handlerName string, handler http.Handler) http.HandlerFunc { - return InstrumentHandlerFunc(handlerName, handler.ServeHTTP) -} - -// InstrumentHandlerFunc wraps the given function for instrumentation. It -// otherwise works in the same way as InstrumentHandler (and shares the same -// issues). -// -// Deprecated: InstrumentHandlerFunc is deprecated for the same reasons as -// InstrumentHandler is. Use the tooling provided in package promhttp instead. -func InstrumentHandlerFunc(handlerName string, handlerFunc func(http.ResponseWriter, *http.Request)) http.HandlerFunc { - return InstrumentHandlerFuncWithOpts( - SummaryOpts{ - Subsystem: "http", - ConstLabels: Labels{"handler": handlerName}, - Objectives: map[float64]float64{0.5: 0.05, 0.9: 0.01, 0.99: 0.001}, - }, - handlerFunc, - ) -} - -// InstrumentHandlerWithOpts works like InstrumentHandler (and shares the same -// issues) but provides more flexibility (at the cost of a more complex call -// syntax). As InstrumentHandler, this function registers four metric -// collectors, but it uses the provided SummaryOpts to create them. However, the -// fields "Name" and "Help" in the SummaryOpts are ignored. "Name" is replaced -// by "requests_total", "request_duration_microseconds", "request_size_bytes", -// and "response_size_bytes", respectively. "Help" is replaced by an appropriate -// help string. The names of the variable labels of the http_requests_total -// CounterVec are "method" (get, post, etc.), and "code" (HTTP status code). 
-// -// If InstrumentHandlerWithOpts is called as follows, it mimics exactly the -// behavior of InstrumentHandler: -// -// prometheus.InstrumentHandlerWithOpts( -// prometheus.SummaryOpts{ -// Subsystem: "http", -// ConstLabels: prometheus.Labels{"handler": handlerName}, -// }, -// handler, -// ) -// -// Technical detail: "requests_total" is a CounterVec, not a SummaryVec, so it -// cannot use SummaryOpts. Instead, a CounterOpts struct is created internally, -// and all its fields are set to the equally named fields in the provided -// SummaryOpts. -// -// Deprecated: InstrumentHandlerWithOpts is deprecated for the same reasons as -// InstrumentHandler is. Use the tooling provided in package promhttp instead. -func InstrumentHandlerWithOpts(opts SummaryOpts, handler http.Handler) http.HandlerFunc { - return InstrumentHandlerFuncWithOpts(opts, handler.ServeHTTP) -} - -// InstrumentHandlerFuncWithOpts works like InstrumentHandlerFunc (and shares -// the same issues) but provides more flexibility (at the cost of a more complex -// call syntax). See InstrumentHandlerWithOpts for details how the provided -// SummaryOpts are used. -// -// Deprecated: InstrumentHandlerFuncWithOpts is deprecated for the same reasons -// as InstrumentHandler is. Use the tooling provided in package promhttp instead. -func InstrumentHandlerFuncWithOpts(opts SummaryOpts, handlerFunc func(http.ResponseWriter, *http.Request)) http.HandlerFunc { - reqCnt := NewCounterVec( - CounterOpts{ - Namespace: opts.Namespace, - Subsystem: opts.Subsystem, - Name: "requests_total", - Help: "Total number of HTTP requests made.", - ConstLabels: opts.ConstLabels, - }, - instLabels, - ) - if err := Register(reqCnt); err != nil { - if are, ok := err.(AlreadyRegisteredError); ok { - reqCnt = are.ExistingCollector.(*CounterVec) - } else { - panic(err) - } - } - - opts.Name = "request_duration_microseconds" - opts.Help = "The HTTP request latencies in microseconds." 
- reqDur := NewSummary(opts) - if err := Register(reqDur); err != nil { - if are, ok := err.(AlreadyRegisteredError); ok { - reqDur = are.ExistingCollector.(Summary) - } else { - panic(err) - } - } - - opts.Name = "request_size_bytes" - opts.Help = "The HTTP request sizes in bytes." - reqSz := NewSummary(opts) - if err := Register(reqSz); err != nil { - if are, ok := err.(AlreadyRegisteredError); ok { - reqSz = are.ExistingCollector.(Summary) - } else { - panic(err) - } - } - - opts.Name = "response_size_bytes" - opts.Help = "The HTTP response sizes in bytes." - resSz := NewSummary(opts) - if err := Register(resSz); err != nil { - if are, ok := err.(AlreadyRegisteredError); ok { - resSz = are.ExistingCollector.(Summary) - } else { - panic(err) - } - } - - return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - now := time.Now() - - delegate := &responseWriterDelegator{ResponseWriter: w} - out := computeApproximateRequestSize(r) - - _, cn := w.(http.CloseNotifier) - _, fl := w.(http.Flusher) - _, hj := w.(http.Hijacker) - _, rf := w.(io.ReaderFrom) - var rw http.ResponseWriter - if cn && fl && hj && rf { - rw = &fancyResponseWriterDelegator{delegate} - } else { - rw = delegate - } - handlerFunc(rw, r) - - elapsed := float64(time.Since(now)) / float64(time.Microsecond) - - method := sanitizeMethod(r.Method) - code := sanitizeCode(delegate.status) - reqCnt.WithLabelValues(method, code).Inc() - reqDur.Observe(elapsed) - resSz.Observe(float64(delegate.written)) - reqSz.Observe(float64(<-out)) - }) -} - -func computeApproximateRequestSize(r *http.Request) <-chan int { - // Get URL length in current goroutine for avoiding a race condition. - // HandlerFunc that runs in parallel may modify the URL. 
- s := 0 - if r.URL != nil { - s += len(r.URL.String()) - } - - out := make(chan int, 1) - - go func() { - s += len(r.Method) - s += len(r.Proto) - for name, values := range r.Header { - s += len(name) - for _, value := range values { - s += len(value) - } - } - s += len(r.Host) - - // N.B. r.Form and r.MultipartForm are assumed to be included in r.URL. - - if r.ContentLength != -1 { - s += int(r.ContentLength) - } - out <- s - close(out) - }() - - return out -} - -type responseWriterDelegator struct { - http.ResponseWriter - - status int - written int64 - wroteHeader bool -} - -func (r *responseWriterDelegator) WriteHeader(code int) { - r.status = code - r.wroteHeader = true - r.ResponseWriter.WriteHeader(code) -} - -func (r *responseWriterDelegator) Write(b []byte) (int, error) { - if !r.wroteHeader { - r.WriteHeader(http.StatusOK) - } - n, err := r.ResponseWriter.Write(b) - r.written += int64(n) - return n, err -} - -type fancyResponseWriterDelegator struct { - *responseWriterDelegator -} - -func (f *fancyResponseWriterDelegator) CloseNotify() <-chan bool { - return f.ResponseWriter.(http.CloseNotifier).CloseNotify() -} - -func (f *fancyResponseWriterDelegator) Flush() { - f.ResponseWriter.(http.Flusher).Flush() -} - -func (f *fancyResponseWriterDelegator) Hijack() (net.Conn, *bufio.ReadWriter, error) { - return f.ResponseWriter.(http.Hijacker).Hijack() -} - -func (f *fancyResponseWriterDelegator) ReadFrom(r io.Reader) (int64, error) { - if !f.wroteHeader { - f.WriteHeader(http.StatusOK) - } - n, err := f.ResponseWriter.(io.ReaderFrom).ReadFrom(r) - f.written += n - return n, err -} - -func sanitizeMethod(m string) string { - switch m { - case "GET", "get": - return "get" - case "PUT", "put": - return "put" - case "HEAD", "head": - return "head" - case "POST", "post": - return "post" - case "DELETE", "delete": - return "delete" - case "CONNECT", "connect": - return "connect" - case "OPTIONS", "options": - return "options" - case "NOTIFY", "notify": - return 
"notify" - default: - return strings.ToLower(m) - } -} - -func sanitizeCode(s int) string { - switch s { - case 100: - return "100" - case 101: - return "101" - - case 200: - return "200" - case 201: - return "201" - case 202: - return "202" - case 203: - return "203" - case 204: - return "204" - case 205: - return "205" - case 206: - return "206" - - case 300: - return "300" - case 301: - return "301" - case 302: - return "302" - case 304: - return "304" - case 305: - return "305" - case 307: - return "307" - - case 400: - return "400" - case 401: - return "401" - case 402: - return "402" - case 403: - return "403" - case 404: - return "404" - case 405: - return "405" - case 406: - return "406" - case 407: - return "407" - case 408: - return "408" - case 409: - return "409" - case 410: - return "410" - case 411: - return "411" - case 412: - return "412" - case 413: - return "413" - case 414: - return "414" - case 415: - return "415" - case 416: - return "416" - case 417: - return "417" - case 418: - return "418" - - case 500: - return "500" - case 501: - return "501" - case 502: - return "502" - case 503: - return "503" - case 504: - return "504" - case 505: - return "505" - - case 428: - return "428" - case 429: - return "429" - case 431: - return "431" - case 511: - return "511" - - default: - return strconv.Itoa(s) - } -} - -// gzipAccepted returns whether the client will accept gzip-encoded content. -func gzipAccepted(header http.Header) bool { - a := header.Get(acceptEncodingHeader) - parts := strings.Split(a, ",") - for _, part := range parts { - part = strings.TrimSpace(part) - if part == "gzip" || strings.HasPrefix(part, "gzip;") { - return true - } - } - return false -} - -// httpError removes any content-encoding header and then calls http.Error with -// the provided error and http.StatusInternalServerErrer. Error contents is -// supposed to be uncompressed plain text. 
However, same as with a plain -// http.Error, any header settings will be void if the header has already been -// sent. The error message will still be written to the writer, but it will -// probably be of limited use. -func httpError(rsp http.ResponseWriter, err error) { - rsp.Header().Del(contentEncodingHeader) - http.Error( - rsp, - "An error has occurred while serving metrics:\n\n"+err.Error(), - http.StatusInternalServerError, - ) -} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/internal/metric.go b/vendor/github.com/prometheus/client_golang/prometheus/internal/metric.go deleted file mode 100644 index 351c26e..0000000 --- a/vendor/github.com/prometheus/client_golang/prometheus/internal/metric.go +++ /dev/null @@ -1,85 +0,0 @@ -// Copyright 2018 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package internal - -import ( - "sort" - - dto "github.com/prometheus/client_model/go" -) - -// metricSorter is a sortable slice of *dto.Metric. -type metricSorter []*dto.Metric - -func (s metricSorter) Len() int { - return len(s) -} - -func (s metricSorter) Swap(i, j int) { - s[i], s[j] = s[j], s[i] -} - -func (s metricSorter) Less(i, j int) bool { - if len(s[i].Label) != len(s[j].Label) { - // This should not happen. The metrics are - // inconsistent. However, we have to deal with the fact, as - // people might use custom collectors or metric family injection - // to create inconsistent metrics. 
So let's simply compare the - // number of labels in this case. That will still yield - // reproducible sorting. - return len(s[i].Label) < len(s[j].Label) - } - for n, lp := range s[i].Label { - vi := lp.GetValue() - vj := s[j].Label[n].GetValue() - if vi != vj { - return vi < vj - } - } - - // We should never arrive here. Multiple metrics with the same - // label set in the same scrape will lead to undefined ingestion - // behavior. However, as above, we have to provide stable sorting - // here, even for inconsistent metrics. So sort equal metrics - // by their timestamp, with missing timestamps (implying "now") - // coming last. - if s[i].TimestampMs == nil { - return false - } - if s[j].TimestampMs == nil { - return true - } - return s[i].GetTimestampMs() < s[j].GetTimestampMs() -} - -// NormalizeMetricFamilies returns a MetricFamily slice with empty -// MetricFamilies pruned and the remaining MetricFamilies sorted by name within -// the slice, with the contained Metrics sorted within each MetricFamily. 
-func NormalizeMetricFamilies(metricFamiliesByName map[string]*dto.MetricFamily) []*dto.MetricFamily { - for _, mf := range metricFamiliesByName { - sort.Sort(metricSorter(mf.Metric)) - } - names := make([]string, 0, len(metricFamiliesByName)) - for name, mf := range metricFamiliesByName { - if len(mf.Metric) > 0 { - names = append(names, name) - } - } - sort.Strings(names) - result := make([]*dto.MetricFamily, 0, len(names)) - for _, name := range names { - result = append(result, metricFamiliesByName[name]) - } - return result -} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/labels.go b/vendor/github.com/prometheus/client_golang/prometheus/labels.go deleted file mode 100644 index 2744443..0000000 --- a/vendor/github.com/prometheus/client_golang/prometheus/labels.go +++ /dev/null @@ -1,87 +0,0 @@ -// Copyright 2018 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package prometheus - -import ( - "errors" - "fmt" - "strings" - "unicode/utf8" - - "github.com/prometheus/common/model" -) - -// Labels represents a collection of label name -> value mappings. This type is -// commonly used with the With(Labels) and GetMetricWith(Labels) methods of -// metric vector Collectors, e.g.: -// myVec.With(Labels{"code": "404", "method": "GET"}).Add(42) -// -// The other use-case is the specification of constant label pairs in Opts or to -// create a Desc. 
-type Labels map[string]string - -// reservedLabelPrefix is a prefix which is not legal in user-supplied -// label names. -const reservedLabelPrefix = "__" - -var errInconsistentCardinality = errors.New("inconsistent label cardinality") - -func makeInconsistentCardinalityError(fqName string, labels, labelValues []string) error { - return fmt.Errorf( - "%s: %q has %d variable labels named %q but %d values %q were provided", - errInconsistentCardinality, fqName, - len(labels), labels, - len(labelValues), labelValues, - ) -} - -func validateValuesInLabels(labels Labels, expectedNumberOfValues int) error { - if len(labels) != expectedNumberOfValues { - return fmt.Errorf( - "%s: expected %d label values but got %d in %#v", - errInconsistentCardinality, expectedNumberOfValues, - len(labels), labels, - ) - } - - for name, val := range labels { - if !utf8.ValidString(val) { - return fmt.Errorf("label %s: value %q is not valid UTF-8", name, val) - } - } - - return nil -} - -func validateLabelValues(vals []string, expectedNumberOfValues int) error { - if len(vals) != expectedNumberOfValues { - return fmt.Errorf( - "%s: expected %d label values but got %d in %#v", - errInconsistentCardinality, expectedNumberOfValues, - len(vals), vals, - ) - } - - for _, val := range vals { - if !utf8.ValidString(val) { - return fmt.Errorf("label value %q is not valid UTF-8", val) - } - } - - return nil -} - -func checkLabelName(l string) bool { - return model.LabelName(l).IsValid() && !strings.HasPrefix(l, reservedLabelPrefix) -} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/metric.go b/vendor/github.com/prometheus/client_golang/prometheus/metric.go deleted file mode 100644 index 55e6d86..0000000 --- a/vendor/github.com/prometheus/client_golang/prometheus/metric.go +++ /dev/null @@ -1,174 +0,0 @@ -// Copyright 2014 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the 
License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package prometheus - -import ( - "strings" - "time" - - "github.com/golang/protobuf/proto" - - dto "github.com/prometheus/client_model/go" -) - -const separatorByte byte = 255 - -// A Metric models a single sample value with its meta data being exported to -// Prometheus. Implementations of Metric in this package are Gauge, Counter, -// Histogram, Summary, and Untyped. -type Metric interface { - // Desc returns the descriptor for the Metric. This method idempotently - // returns the same descriptor throughout the lifetime of the - // Metric. The returned descriptor is immutable by contract. A Metric - // unable to describe itself must return an invalid descriptor (created - // with NewInvalidDesc). - Desc() *Desc - // Write encodes the Metric into a "Metric" Protocol Buffer data - // transmission object. - // - // Metric implementations must observe concurrency safety as reads of - // this metric may occur at any time, and any blocking occurs at the - // expense of total performance of rendering all registered - // metrics. Ideally, Metric implementations should support concurrent - // readers. - // - // While populating dto.Metric, it is the responsibility of the - // implementation to ensure validity of the Metric protobuf (like valid - // UTF-8 strings or syntactically valid metric and label names). It is - // recommended to sort labels lexicographically. Callers of Write should - // still make sure of sorting if they depend on it. 
- Write(*dto.Metric) error - // TODO(beorn7): The original rationale of passing in a pre-allocated - // dto.Metric protobuf to save allocations has disappeared. The - // signature of this method should be changed to "Write() (*dto.Metric, - // error)". -} - -// Opts bundles the options for creating most Metric types. Each metric -// implementation XXX has its own XXXOpts type, but in most cases, it is just be -// an alias of this type (which might change when the requirement arises.) -// -// It is mandatory to set Name to a non-empty string. All other fields are -// optional and can safely be left at their zero value, although it is strongly -// encouraged to set a Help string. -type Opts struct { - // Namespace, Subsystem, and Name are components of the fully-qualified - // name of the Metric (created by joining these components with - // "_"). Only Name is mandatory, the others merely help structuring the - // name. Note that the fully-qualified name of the metric must be a - // valid Prometheus metric name. - Namespace string - Subsystem string - Name string - - // Help provides information about this metric. - // - // Metrics with the same fully-qualified name must have the same Help - // string. - Help string - - // ConstLabels are used to attach fixed labels to this metric. Metrics - // with the same fully-qualified name must have the same label names in - // their ConstLabels. - // - // ConstLabels are only used rarely. In particular, do not use them to - // attach the same labels to all your metrics. Those use cases are - // better covered by target labels set by the scraping Prometheus - // server, or by one specific metric (e.g. a build_info or a - // machine_role metric). See also - // https://prometheus.io/docs/instrumenting/writing_exporters/#target-labels,-not-static-scraped-labels - ConstLabels Labels -} - -// BuildFQName joins the given three name components by "_". Empty name -// components are ignored. 
If the name parameter itself is empty, an empty -// string is returned, no matter what. Metric implementations included in this -// library use this function internally to generate the fully-qualified metric -// name from the name component in their Opts. Users of the library will only -// need this function if they implement their own Metric or instantiate a Desc -// (with NewDesc) directly. -func BuildFQName(namespace, subsystem, name string) string { - if name == "" { - return "" - } - switch { - case namespace != "" && subsystem != "": - return strings.Join([]string{namespace, subsystem, name}, "_") - case namespace != "": - return strings.Join([]string{namespace, name}, "_") - case subsystem != "": - return strings.Join([]string{subsystem, name}, "_") - } - return name -} - -// labelPairSorter implements sort.Interface. It is used to sort a slice of -// dto.LabelPair pointers. -type labelPairSorter []*dto.LabelPair - -func (s labelPairSorter) Len() int { - return len(s) -} - -func (s labelPairSorter) Swap(i, j int) { - s[i], s[j] = s[j], s[i] -} - -func (s labelPairSorter) Less(i, j int) bool { - return s[i].GetName() < s[j].GetName() -} - -type invalidMetric struct { - desc *Desc - err error -} - -// NewInvalidMetric returns a metric whose Write method always returns the -// provided error. It is useful if a Collector finds itself unable to collect -// a metric and wishes to report an error to the registry. 
-func NewInvalidMetric(desc *Desc, err error) Metric { - return &invalidMetric{desc, err} -} - -func (m *invalidMetric) Desc() *Desc { return m.desc } - -func (m *invalidMetric) Write(*dto.Metric) error { return m.err } - -type timestampedMetric struct { - Metric - t time.Time -} - -func (m timestampedMetric) Write(pb *dto.Metric) error { - e := m.Metric.Write(pb) - pb.TimestampMs = proto.Int64(m.t.Unix()*1000 + int64(m.t.Nanosecond()/1000000)) - return e -} - -// NewMetricWithTimestamp returns a new Metric wrapping the provided Metric in a -// way that it has an explicit timestamp set to the provided Time. This is only -// useful in rare cases as the timestamp of a Prometheus metric should usually -// be set by the Prometheus server during scraping. Exceptions include mirroring -// metrics with given timestamps from other metric -// sources. -// -// NewMetricWithTimestamp works best with MustNewConstMetric, -// MustNewConstHistogram, and MustNewConstSummary, see example. -// -// Currently, the exposition formats used by Prometheus are limited to -// millisecond resolution. Thus, the provided time will be rounded down to the -// next full millisecond value. -func NewMetricWithTimestamp(t time.Time, m Metric) Metric { - return timestampedMetric{Metric: m, t: t} -} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/observer.go b/vendor/github.com/prometheus/client_golang/prometheus/observer.go deleted file mode 100644 index 5806cd0..0000000 --- a/vendor/github.com/prometheus/client_golang/prometheus/observer.go +++ /dev/null @@ -1,52 +0,0 @@ -// Copyright 2017 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package prometheus - -// Observer is the interface that wraps the Observe method, which is used by -// Histogram and Summary to add observations. -type Observer interface { - Observe(float64) -} - -// The ObserverFunc type is an adapter to allow the use of ordinary -// functions as Observers. If f is a function with the appropriate -// signature, ObserverFunc(f) is an Observer that calls f. -// -// This adapter is usually used in connection with the Timer type, and there are -// two general use cases: -// -// The most common one is to use a Gauge as the Observer for a Timer. -// See the "Gauge" Timer example. -// -// The more advanced use case is to create a function that dynamically decides -// which Observer to use for observing the duration. See the "Complex" Timer -// example. -type ObserverFunc func(float64) - -// Observe calls f(value). It implements Observer. -func (f ObserverFunc) Observe(value float64) { - f(value) -} - -// ObserverVec is an interface implemented by `HistogramVec` and `SummaryVec`. 
-type ObserverVec interface { - GetMetricWith(Labels) (Observer, error) - GetMetricWithLabelValues(lvs ...string) (Observer, error) - With(Labels) Observer - WithLabelValues(...string) Observer - CurryWith(Labels) (ObserverVec, error) - MustCurryWith(Labels) ObserverVec - - Collector -} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/process_collector.go b/vendor/github.com/prometheus/client_golang/prometheus/process_collector.go deleted file mode 100644 index 55176d5..0000000 --- a/vendor/github.com/prometheus/client_golang/prometheus/process_collector.go +++ /dev/null @@ -1,204 +0,0 @@ -// Copyright 2015 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package prometheus - -import ( - "errors" - "os" - - "github.com/prometheus/procfs" -) - -type processCollector struct { - collectFn func(chan<- Metric) - pidFn func() (int, error) - reportErrors bool - cpuTotal *Desc - openFDs, maxFDs *Desc - vsize, maxVsize *Desc - rss *Desc - startTime *Desc -} - -// ProcessCollectorOpts defines the behavior of a process metrics collector -// created with NewProcessCollector. -type ProcessCollectorOpts struct { - // PidFn returns the PID of the process the collector collects metrics - // for. It is called upon each collection. By default, the PID of the - // current process is used, as determined on construction time by - // calling os.Getpid(). 
- PidFn func() (int, error) - // If non-empty, each of the collected metrics is prefixed by the - // provided string and an underscore ("_"). - Namespace string - // If true, any error encountered during collection is reported as an - // invalid metric (see NewInvalidMetric). Otherwise, errors are ignored - // and the collected metrics will be incomplete. (Possibly, no metrics - // will be collected at all.) While that's usually not desired, it is - // appropriate for the common "mix-in" of process metrics, where process - // metrics are nice to have, but failing to collect them should not - // disrupt the collection of the remaining metrics. - ReportErrors bool -} - -// NewProcessCollector returns a collector which exports the current state of -// process metrics including CPU, memory and file descriptor usage as well as -// the process start time. The detailed behavior is defined by the provided -// ProcessCollectorOpts. The zero value of ProcessCollectorOpts creates a -// collector for the current process with an empty namespace string and no error -// reporting. -// -// Currently, the collector depends on a Linux-style proc filesystem and -// therefore only exports metrics for Linux. 
-// -// Note: An older version of this function had the following signature: -// -// NewProcessCollector(pid int, namespace string) Collector -// -// Most commonly, it was called as -// -// NewProcessCollector(os.Getpid(), "") -// -// The following call of the current version is equivalent to the above: -// -// NewProcessCollector(ProcessCollectorOpts{}) -func NewProcessCollector(opts ProcessCollectorOpts) Collector { - ns := "" - if len(opts.Namespace) > 0 { - ns = opts.Namespace + "_" - } - - c := &processCollector{ - reportErrors: opts.ReportErrors, - cpuTotal: NewDesc( - ns+"process_cpu_seconds_total", - "Total user and system CPU time spent in seconds.", - nil, nil, - ), - openFDs: NewDesc( - ns+"process_open_fds", - "Number of open file descriptors.", - nil, nil, - ), - maxFDs: NewDesc( - ns+"process_max_fds", - "Maximum number of open file descriptors.", - nil, nil, - ), - vsize: NewDesc( - ns+"process_virtual_memory_bytes", - "Virtual memory size in bytes.", - nil, nil, - ), - maxVsize: NewDesc( - ns+"process_virtual_memory_max_bytes", - "Maximum amount of virtual memory available in bytes.", - nil, nil, - ), - rss: NewDesc( - ns+"process_resident_memory_bytes", - "Resident memory size in bytes.", - nil, nil, - ), - startTime: NewDesc( - ns+"process_start_time_seconds", - "Start time of the process since unix epoch in seconds.", - nil, nil, - ), - } - - if opts.PidFn == nil { - pid := os.Getpid() - c.pidFn = func() (int, error) { return pid, nil } - } else { - c.pidFn = opts.PidFn - } - - // Set up process metric collection if supported by the runtime. - if _, err := procfs.NewStat(); err == nil { - c.collectFn = c.processCollect - } else { - c.collectFn = func(ch chan<- Metric) { - c.reportError(ch, nil, errors.New("process metrics not supported on this platform")) - } - } - - return c -} - -// Describe returns all descriptions of the collector. 
-func (c *processCollector) Describe(ch chan<- *Desc) { - ch <- c.cpuTotal - ch <- c.openFDs - ch <- c.maxFDs - ch <- c.vsize - ch <- c.maxVsize - ch <- c.rss - ch <- c.startTime -} - -// Collect returns the current state of all metrics of the collector. -func (c *processCollector) Collect(ch chan<- Metric) { - c.collectFn(ch) -} - -func (c *processCollector) processCollect(ch chan<- Metric) { - pid, err := c.pidFn() - if err != nil { - c.reportError(ch, nil, err) - return - } - - p, err := procfs.NewProc(pid) - if err != nil { - c.reportError(ch, nil, err) - return - } - - if stat, err := p.NewStat(); err == nil { - ch <- MustNewConstMetric(c.cpuTotal, CounterValue, stat.CPUTime()) - ch <- MustNewConstMetric(c.vsize, GaugeValue, float64(stat.VirtualMemory())) - ch <- MustNewConstMetric(c.rss, GaugeValue, float64(stat.ResidentMemory())) - if startTime, err := stat.StartTime(); err == nil { - ch <- MustNewConstMetric(c.startTime, GaugeValue, startTime) - } else { - c.reportError(ch, c.startTime, err) - } - } else { - c.reportError(ch, nil, err) - } - - if fds, err := p.FileDescriptorsLen(); err == nil { - ch <- MustNewConstMetric(c.openFDs, GaugeValue, float64(fds)) - } else { - c.reportError(ch, c.openFDs, err) - } - - if limits, err := p.NewLimits(); err == nil { - ch <- MustNewConstMetric(c.maxFDs, GaugeValue, float64(limits.OpenFiles)) - ch <- MustNewConstMetric(c.maxVsize, GaugeValue, float64(limits.AddressSpace)) - } else { - c.reportError(ch, nil, err) - } -} - -func (c *processCollector) reportError(ch chan<- Metric, desc *Desc, err error) { - if !c.reportErrors { - return - } - if desc == nil { - desc = NewInvalidDesc(err) - } - ch <- NewInvalidMetric(desc, err) -} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/promhttp/delegator.go b/vendor/github.com/prometheus/client_golang/prometheus/promhttp/delegator.go deleted file mode 100644 index 67b56d3..0000000 --- a/vendor/github.com/prometheus/client_golang/prometheus/promhttp/delegator.go 
+++ /dev/null @@ -1,199 +0,0 @@ -// Copyright 2017 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package promhttp - -import ( - "bufio" - "io" - "net" - "net/http" -) - -const ( - closeNotifier = 1 << iota - flusher - hijacker - readerFrom - pusher -) - -type delegator interface { - http.ResponseWriter - - Status() int - Written() int64 -} - -type responseWriterDelegator struct { - http.ResponseWriter - - handler, method string - status int - written int64 - wroteHeader bool - observeWriteHeader func(int) -} - -func (r *responseWriterDelegator) Status() int { - return r.status -} - -func (r *responseWriterDelegator) Written() int64 { - return r.written -} - -func (r *responseWriterDelegator) WriteHeader(code int) { - r.status = code - r.wroteHeader = true - r.ResponseWriter.WriteHeader(code) - if r.observeWriteHeader != nil { - r.observeWriteHeader(code) - } -} - -func (r *responseWriterDelegator) Write(b []byte) (int, error) { - if !r.wroteHeader { - r.WriteHeader(http.StatusOK) - } - n, err := r.ResponseWriter.Write(b) - r.written += int64(n) - return n, err -} - -type closeNotifierDelegator struct{ *responseWriterDelegator } -type flusherDelegator struct{ *responseWriterDelegator } -type hijackerDelegator struct{ *responseWriterDelegator } -type readerFromDelegator struct{ *responseWriterDelegator } - -func (d closeNotifierDelegator) CloseNotify() <-chan bool { - return d.ResponseWriter.(http.CloseNotifier).CloseNotify() -} -func 
(d flusherDelegator) Flush() { - d.ResponseWriter.(http.Flusher).Flush() -} -func (d hijackerDelegator) Hijack() (net.Conn, *bufio.ReadWriter, error) { - return d.ResponseWriter.(http.Hijacker).Hijack() -} -func (d readerFromDelegator) ReadFrom(re io.Reader) (int64, error) { - if !d.wroteHeader { - d.WriteHeader(http.StatusOK) - } - n, err := d.ResponseWriter.(io.ReaderFrom).ReadFrom(re) - d.written += n - return n, err -} - -var pickDelegator = make([]func(*responseWriterDelegator) delegator, 32) - -func init() { - // TODO(beorn7): Code generation would help here. - pickDelegator[0] = func(d *responseWriterDelegator) delegator { // 0 - return d - } - pickDelegator[closeNotifier] = func(d *responseWriterDelegator) delegator { // 1 - return closeNotifierDelegator{d} - } - pickDelegator[flusher] = func(d *responseWriterDelegator) delegator { // 2 - return flusherDelegator{d} - } - pickDelegator[flusher+closeNotifier] = func(d *responseWriterDelegator) delegator { // 3 - return struct { - *responseWriterDelegator - http.Flusher - http.CloseNotifier - }{d, flusherDelegator{d}, closeNotifierDelegator{d}} - } - pickDelegator[hijacker] = func(d *responseWriterDelegator) delegator { // 4 - return hijackerDelegator{d} - } - pickDelegator[hijacker+closeNotifier] = func(d *responseWriterDelegator) delegator { // 5 - return struct { - *responseWriterDelegator - http.Hijacker - http.CloseNotifier - }{d, hijackerDelegator{d}, closeNotifierDelegator{d}} - } - pickDelegator[hijacker+flusher] = func(d *responseWriterDelegator) delegator { // 6 - return struct { - *responseWriterDelegator - http.Hijacker - http.Flusher - }{d, hijackerDelegator{d}, flusherDelegator{d}} - } - pickDelegator[hijacker+flusher+closeNotifier] = func(d *responseWriterDelegator) delegator { // 7 - return struct { - *responseWriterDelegator - http.Hijacker - http.Flusher - http.CloseNotifier - }{d, hijackerDelegator{d}, flusherDelegator{d}, closeNotifierDelegator{d}} - } - pickDelegator[readerFrom] = func(d 
*responseWriterDelegator) delegator { // 8 - return readerFromDelegator{d} - } - pickDelegator[readerFrom+closeNotifier] = func(d *responseWriterDelegator) delegator { // 9 - return struct { - *responseWriterDelegator - io.ReaderFrom - http.CloseNotifier - }{d, readerFromDelegator{d}, closeNotifierDelegator{d}} - } - pickDelegator[readerFrom+flusher] = func(d *responseWriterDelegator) delegator { // 10 - return struct { - *responseWriterDelegator - io.ReaderFrom - http.Flusher - }{d, readerFromDelegator{d}, flusherDelegator{d}} - } - pickDelegator[readerFrom+flusher+closeNotifier] = func(d *responseWriterDelegator) delegator { // 11 - return struct { - *responseWriterDelegator - io.ReaderFrom - http.Flusher - http.CloseNotifier - }{d, readerFromDelegator{d}, flusherDelegator{d}, closeNotifierDelegator{d}} - } - pickDelegator[readerFrom+hijacker] = func(d *responseWriterDelegator) delegator { // 12 - return struct { - *responseWriterDelegator - io.ReaderFrom - http.Hijacker - }{d, readerFromDelegator{d}, hijackerDelegator{d}} - } - pickDelegator[readerFrom+hijacker+closeNotifier] = func(d *responseWriterDelegator) delegator { // 13 - return struct { - *responseWriterDelegator - io.ReaderFrom - http.Hijacker - http.CloseNotifier - }{d, readerFromDelegator{d}, hijackerDelegator{d}, closeNotifierDelegator{d}} - } - pickDelegator[readerFrom+hijacker+flusher] = func(d *responseWriterDelegator) delegator { // 14 - return struct { - *responseWriterDelegator - io.ReaderFrom - http.Hijacker - http.Flusher - }{d, readerFromDelegator{d}, hijackerDelegator{d}, flusherDelegator{d}} - } - pickDelegator[readerFrom+hijacker+flusher+closeNotifier] = func(d *responseWriterDelegator) delegator { // 15 - return struct { - *responseWriterDelegator - io.ReaderFrom - http.Hijacker - http.Flusher - http.CloseNotifier - }{d, readerFromDelegator{d}, hijackerDelegator{d}, flusherDelegator{d}, closeNotifierDelegator{d}} - } -} diff --git 
a/vendor/github.com/prometheus/client_golang/prometheus/promhttp/delegator_1_8.go b/vendor/github.com/prometheus/client_golang/prometheus/promhttp/delegator_1_8.go deleted file mode 100644 index 31a7069..0000000 --- a/vendor/github.com/prometheus/client_golang/prometheus/promhttp/delegator_1_8.go +++ /dev/null @@ -1,181 +0,0 @@ -// Copyright 2017 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// +build go1.8 - -package promhttp - -import ( - "io" - "net/http" -) - -type pusherDelegator struct{ *responseWriterDelegator } - -func (d pusherDelegator) Push(target string, opts *http.PushOptions) error { - return d.ResponseWriter.(http.Pusher).Push(target, opts) -} - -func init() { - pickDelegator[pusher] = func(d *responseWriterDelegator) delegator { // 16 - return pusherDelegator{d} - } - pickDelegator[pusher+closeNotifier] = func(d *responseWriterDelegator) delegator { // 17 - return struct { - *responseWriterDelegator - http.Pusher - http.CloseNotifier - }{d, pusherDelegator{d}, closeNotifierDelegator{d}} - } - pickDelegator[pusher+flusher] = func(d *responseWriterDelegator) delegator { // 18 - return struct { - *responseWriterDelegator - http.Pusher - http.Flusher - }{d, pusherDelegator{d}, flusherDelegator{d}} - } - pickDelegator[pusher+flusher+closeNotifier] = func(d *responseWriterDelegator) delegator { // 19 - return struct { - *responseWriterDelegator - http.Pusher - http.Flusher - http.CloseNotifier - }{d, pusherDelegator{d}, 
flusherDelegator{d}, closeNotifierDelegator{d}} - } - pickDelegator[pusher+hijacker] = func(d *responseWriterDelegator) delegator { // 20 - return struct { - *responseWriterDelegator - http.Pusher - http.Hijacker - }{d, pusherDelegator{d}, hijackerDelegator{d}} - } - pickDelegator[pusher+hijacker+closeNotifier] = func(d *responseWriterDelegator) delegator { // 21 - return struct { - *responseWriterDelegator - http.Pusher - http.Hijacker - http.CloseNotifier - }{d, pusherDelegator{d}, hijackerDelegator{d}, closeNotifierDelegator{d}} - } - pickDelegator[pusher+hijacker+flusher] = func(d *responseWriterDelegator) delegator { // 22 - return struct { - *responseWriterDelegator - http.Pusher - http.Hijacker - http.Flusher - }{d, pusherDelegator{d}, hijackerDelegator{d}, flusherDelegator{d}} - } - pickDelegator[pusher+hijacker+flusher+closeNotifier] = func(d *responseWriterDelegator) delegator { //23 - return struct { - *responseWriterDelegator - http.Pusher - http.Hijacker - http.Flusher - http.CloseNotifier - }{d, pusherDelegator{d}, hijackerDelegator{d}, flusherDelegator{d}, closeNotifierDelegator{d}} - } - pickDelegator[pusher+readerFrom] = func(d *responseWriterDelegator) delegator { // 24 - return struct { - *responseWriterDelegator - http.Pusher - io.ReaderFrom - }{d, pusherDelegator{d}, readerFromDelegator{d}} - } - pickDelegator[pusher+readerFrom+closeNotifier] = func(d *responseWriterDelegator) delegator { // 25 - return struct { - *responseWriterDelegator - http.Pusher - io.ReaderFrom - http.CloseNotifier - }{d, pusherDelegator{d}, readerFromDelegator{d}, closeNotifierDelegator{d}} - } - pickDelegator[pusher+readerFrom+flusher] = func(d *responseWriterDelegator) delegator { // 26 - return struct { - *responseWriterDelegator - http.Pusher - io.ReaderFrom - http.Flusher - }{d, pusherDelegator{d}, readerFromDelegator{d}, flusherDelegator{d}} - } - pickDelegator[pusher+readerFrom+flusher+closeNotifier] = func(d *responseWriterDelegator) delegator { // 27 - return 
struct { - *responseWriterDelegator - http.Pusher - io.ReaderFrom - http.Flusher - http.CloseNotifier - }{d, pusherDelegator{d}, readerFromDelegator{d}, flusherDelegator{d}, closeNotifierDelegator{d}} - } - pickDelegator[pusher+readerFrom+hijacker] = func(d *responseWriterDelegator) delegator { // 28 - return struct { - *responseWriterDelegator - http.Pusher - io.ReaderFrom - http.Hijacker - }{d, pusherDelegator{d}, readerFromDelegator{d}, hijackerDelegator{d}} - } - pickDelegator[pusher+readerFrom+hijacker+closeNotifier] = func(d *responseWriterDelegator) delegator { // 29 - return struct { - *responseWriterDelegator - http.Pusher - io.ReaderFrom - http.Hijacker - http.CloseNotifier - }{d, pusherDelegator{d}, readerFromDelegator{d}, hijackerDelegator{d}, closeNotifierDelegator{d}} - } - pickDelegator[pusher+readerFrom+hijacker+flusher] = func(d *responseWriterDelegator) delegator { // 30 - return struct { - *responseWriterDelegator - http.Pusher - io.ReaderFrom - http.Hijacker - http.Flusher - }{d, pusherDelegator{d}, readerFromDelegator{d}, hijackerDelegator{d}, flusherDelegator{d}} - } - pickDelegator[pusher+readerFrom+hijacker+flusher+closeNotifier] = func(d *responseWriterDelegator) delegator { // 31 - return struct { - *responseWriterDelegator - http.Pusher - io.ReaderFrom - http.Hijacker - http.Flusher - http.CloseNotifier - }{d, pusherDelegator{d}, readerFromDelegator{d}, hijackerDelegator{d}, flusherDelegator{d}, closeNotifierDelegator{d}} - } -} - -func newDelegator(w http.ResponseWriter, observeWriteHeaderFunc func(int)) delegator { - d := &responseWriterDelegator{ - ResponseWriter: w, - observeWriteHeader: observeWriteHeaderFunc, - } - - id := 0 - if _, ok := w.(http.CloseNotifier); ok { - id += closeNotifier - } - if _, ok := w.(http.Flusher); ok { - id += flusher - } - if _, ok := w.(http.Hijacker); ok { - id += hijacker - } - if _, ok := w.(io.ReaderFrom); ok { - id += readerFrom - } - if _, ok := w.(http.Pusher); ok { - id += pusher - } - - return 
pickDelegator[id](d) -} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/promhttp/delegator_pre_1_8.go b/vendor/github.com/prometheus/client_golang/prometheus/promhttp/delegator_pre_1_8.go deleted file mode 100644 index 8bb9b8b..0000000 --- a/vendor/github.com/prometheus/client_golang/prometheus/promhttp/delegator_pre_1_8.go +++ /dev/null @@ -1,44 +0,0 @@ -// Copyright 2017 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// +build !go1.8 - -package promhttp - -import ( - "io" - "net/http" -) - -func newDelegator(w http.ResponseWriter, observeWriteHeaderFunc func(int)) delegator { - d := &responseWriterDelegator{ - ResponseWriter: w, - observeWriteHeader: observeWriteHeaderFunc, - } - - id := 0 - if _, ok := w.(http.CloseNotifier); ok { - id += closeNotifier - } - if _, ok := w.(http.Flusher); ok { - id += flusher - } - if _, ok := w.(http.Hijacker); ok { - id += hijacker - } - if _, ok := w.(io.ReaderFrom); ok { - id += readerFrom - } - - return pickDelegator[id](d) -} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/promhttp/http.go b/vendor/github.com/prometheus/client_golang/prometheus/promhttp/http.go deleted file mode 100644 index 668eb6b..0000000 --- a/vendor/github.com/prometheus/client_golang/prometheus/promhttp/http.go +++ /dev/null @@ -1,311 +0,0 @@ -// Copyright 2016 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file 
except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Package promhttp provides tooling around HTTP servers and clients. -// -// First, the package allows the creation of http.Handler instances to expose -// Prometheus metrics via HTTP. promhttp.Handler acts on the -// prometheus.DefaultGatherer. With HandlerFor, you can create a handler for a -// custom registry or anything that implements the Gatherer interface. It also -// allows the creation of handlers that act differently on errors or allow to -// log errors. -// -// Second, the package provides tooling to instrument instances of http.Handler -// via middleware. Middleware wrappers follow the naming scheme -// InstrumentHandlerX, where X describes the intended use of the middleware. -// See each function's doc comment for specific details. -// -// Finally, the package allows for an http.RoundTripper to be instrumented via -// middleware. Middleware wrappers follow the naming scheme -// InstrumentRoundTripperX, where X describes the intended use of the -// middleware. See each function's doc comment for specific details. 
-package promhttp - -import ( - "compress/gzip" - "fmt" - "io" - "net/http" - "strings" - "sync" - "time" - - "github.com/prometheus/common/expfmt" - - "github.com/prometheus/client_golang/prometheus" -) - -const ( - contentTypeHeader = "Content-Type" - contentLengthHeader = "Content-Length" - contentEncodingHeader = "Content-Encoding" - acceptEncodingHeader = "Accept-Encoding" -) - -var gzipPool = sync.Pool{ - New: func() interface{} { - return gzip.NewWriter(nil) - }, -} - -// Handler returns an http.Handler for the prometheus.DefaultGatherer, using -// default HandlerOpts, i.e. it reports the first error as an HTTP error, it has -// no error logging, and it applies compression if requested by the client. -// -// The returned http.Handler is already instrumented using the -// InstrumentMetricHandler function and the prometheus.DefaultRegisterer. If you -// create multiple http.Handlers by separate calls of the Handler function, the -// metrics used for instrumentation will be shared between them, providing -// global scrape counts. -// -// This function is meant to cover the bulk of basic use cases. If you are doing -// anything that requires more customization (including using a non-default -// Gatherer, different instrumentation, and non-default HandlerOpts), use the -// HandlerFor function. See there for details. -func Handler() http.Handler { - return InstrumentMetricHandler( - prometheus.DefaultRegisterer, HandlerFor(prometheus.DefaultGatherer, HandlerOpts{}), - ) -} - -// HandlerFor returns an uninstrumented http.Handler for the provided -// Gatherer. The behavior of the Handler is defined by the provided -// HandlerOpts. Thus, HandlerFor is useful to create http.Handlers for custom -// Gatherers, with non-default HandlerOpts, and/or with custom (or no) -// instrumentation. Use the InstrumentMetricHandler function to apply the same -// kind of instrumentation as it is used by the Handler function. 
-func HandlerFor(reg prometheus.Gatherer, opts HandlerOpts) http.Handler { - var inFlightSem chan struct{} - if opts.MaxRequestsInFlight > 0 { - inFlightSem = make(chan struct{}, opts.MaxRequestsInFlight) - } - - h := http.HandlerFunc(func(rsp http.ResponseWriter, req *http.Request) { - if inFlightSem != nil { - select { - case inFlightSem <- struct{}{}: // All good, carry on. - defer func() { <-inFlightSem }() - default: - http.Error(rsp, fmt.Sprintf( - "Limit of concurrent requests reached (%d), try again later.", opts.MaxRequestsInFlight, - ), http.StatusServiceUnavailable) - return - } - } - mfs, err := reg.Gather() - if err != nil { - if opts.ErrorLog != nil { - opts.ErrorLog.Println("error gathering metrics:", err) - } - switch opts.ErrorHandling { - case PanicOnError: - panic(err) - case ContinueOnError: - if len(mfs) == 0 { - // Still report the error if no metrics have been gathered. - httpError(rsp, err) - return - } - case HTTPErrorOnError: - httpError(rsp, err) - return - } - } - - contentType := expfmt.Negotiate(req.Header) - header := rsp.Header() - header.Set(contentTypeHeader, string(contentType)) - - w := io.Writer(rsp) - if !opts.DisableCompression && gzipAccepted(req.Header) { - header.Set(contentEncodingHeader, "gzip") - gz := gzipPool.Get().(*gzip.Writer) - defer gzipPool.Put(gz) - - gz.Reset(w) - defer gz.Close() - - w = gz - } - - enc := expfmt.NewEncoder(w, contentType) - - var lastErr error - for _, mf := range mfs { - if err := enc.Encode(mf); err != nil { - lastErr = err - if opts.ErrorLog != nil { - opts.ErrorLog.Println("error encoding and sending metric family:", err) - } - switch opts.ErrorHandling { - case PanicOnError: - panic(err) - case ContinueOnError: - // Handled later. 
- case HTTPErrorOnError: - httpError(rsp, err) - return - } - } - } - - if lastErr != nil { - httpError(rsp, lastErr) - } - }) - - if opts.Timeout <= 0 { - return h - } - return http.TimeoutHandler(h, opts.Timeout, fmt.Sprintf( - "Exceeded configured timeout of %v.\n", - opts.Timeout, - )) -} - -// InstrumentMetricHandler is usually used with an http.Handler returned by the -// HandlerFor function. It instruments the provided http.Handler with two -// metrics: A counter vector "promhttp_metric_handler_requests_total" to count -// scrapes partitioned by HTTP status code, and a gauge -// "promhttp_metric_handler_requests_in_flight" to track the number of -// simultaneous scrapes. This function idempotently registers collectors for -// both metrics with the provided Registerer. It panics if the registration -// fails. The provided metrics are useful to see how many scrapes hit the -// monitored target (which could be from different Prometheus servers or other -// scrapers), and how often they overlap (which would result in more than one -// scrape in flight at the same time). Note that the scrapes-in-flight gauge -// will contain the scrape by which it is exposed, while the scrape counter will -// only get incremented after the scrape is complete (as only then the status -// code is known). For tracking scrape durations, use the -// "scrape_duration_seconds" gauge created by the Prometheus server upon each -// scrape. -func InstrumentMetricHandler(reg prometheus.Registerer, handler http.Handler) http.Handler { - cnt := prometheus.NewCounterVec( - prometheus.CounterOpts{ - Name: "promhttp_metric_handler_requests_total", - Help: "Total number of scrapes by HTTP status code.", - }, - []string{"code"}, - ) - // Initialize the most likely HTTP status codes. 
- cnt.WithLabelValues("200") - cnt.WithLabelValues("500") - cnt.WithLabelValues("503") - if err := reg.Register(cnt); err != nil { - if are, ok := err.(prometheus.AlreadyRegisteredError); ok { - cnt = are.ExistingCollector.(*prometheus.CounterVec) - } else { - panic(err) - } - } - - gge := prometheus.NewGauge(prometheus.GaugeOpts{ - Name: "promhttp_metric_handler_requests_in_flight", - Help: "Current number of scrapes being served.", - }) - if err := reg.Register(gge); err != nil { - if are, ok := err.(prometheus.AlreadyRegisteredError); ok { - gge = are.ExistingCollector.(prometheus.Gauge) - } else { - panic(err) - } - } - - return InstrumentHandlerCounter(cnt, InstrumentHandlerInFlight(gge, handler)) -} - -// HandlerErrorHandling defines how a Handler serving metrics will handle -// errors. -type HandlerErrorHandling int - -// These constants cause handlers serving metrics to behave as described if -// errors are encountered. -const ( - // Serve an HTTP status code 500 upon the first error - // encountered. Report the error message in the body. - HTTPErrorOnError HandlerErrorHandling = iota - // Ignore errors and try to serve as many metrics as possible. However, - // if no metrics can be served, serve an HTTP status code 500 and the - // last error message in the body. Only use this in deliberate "best - // effort" metrics collection scenarios. It is recommended to at least - // log errors (by providing an ErrorLog in HandlerOpts) to not mask - // errors completely. - ContinueOnError - // Panic upon the first error encountered (useful for "crash only" apps). - PanicOnError -) - -// Logger is the minimal interface HandlerOpts needs for logging. Note that -// log.Logger from the standard library implements this interface, and it is -// easy to implement by custom loggers, if they don't do so already anyway. -type Logger interface { - Println(v ...interface{}) -} - -// HandlerOpts specifies options how to serve metrics via an http.Handler. 
The -// zero value of HandlerOpts is a reasonable default. -type HandlerOpts struct { - // ErrorLog specifies an optional logger for errors collecting and - // serving metrics. If nil, errors are not logged at all. - ErrorLog Logger - // ErrorHandling defines how errors are handled. Note that errors are - // logged regardless of the configured ErrorHandling provided ErrorLog - // is not nil. - ErrorHandling HandlerErrorHandling - // If DisableCompression is true, the handler will never compress the - // response, even if requested by the client. - DisableCompression bool - // The number of concurrent HTTP requests is limited to - // MaxRequestsInFlight. Additional requests are responded to with 503 - // Service Unavailable and a suitable message in the body. If - // MaxRequestsInFlight is 0 or negative, no limit is applied. - MaxRequestsInFlight int - // If handling a request takes longer than Timeout, it is responded to - // with 503 ServiceUnavailable and a suitable Message. No timeout is - // applied if Timeout is 0 or negative. Note that with the current - // implementation, reaching the timeout simply ends the HTTP requests as - // described above (and even that only if sending of the body hasn't - // started yet), while the bulk work of gathering all the metrics keeps - // running in the background (with the eventual result to be thrown - // away). Until the implementation is improved, it is recommended to - // implement a separate timeout in potentially slow Collectors. - Timeout time.Duration -} - -// gzipAccepted returns whether the client will accept gzip-encoded content. 
-func gzipAccepted(header http.Header) bool { - a := header.Get(acceptEncodingHeader) - parts := strings.Split(a, ",") - for _, part := range parts { - part = strings.TrimSpace(part) - if part == "gzip" || strings.HasPrefix(part, "gzip;") { - return true - } - } - return false -} - -// httpError removes any content-encoding header and then calls http.Error with -// the provided error and http.StatusInternalServerErrer. Error contents is -// supposed to be uncompressed plain text. However, same as with a plain -// http.Error, any header settings will be void if the header has already been -// sent. The error message will still be written to the writer, but it will -// probably be of limited use. -func httpError(rsp http.ResponseWriter, err error) { - rsp.Header().Del(contentEncodingHeader) - http.Error( - rsp, - "An error has occurred while serving metrics:\n\n"+err.Error(), - http.StatusInternalServerError, - ) -} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/promhttp/instrument_client.go b/vendor/github.com/prometheus/client_golang/prometheus/promhttp/instrument_client.go deleted file mode 100644 index 86fd564..0000000 --- a/vendor/github.com/prometheus/client_golang/prometheus/promhttp/instrument_client.go +++ /dev/null @@ -1,97 +0,0 @@ -// Copyright 2017 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package promhttp - -import ( - "net/http" - "time" - - "github.com/prometheus/client_golang/prometheus" -) - -// The RoundTripperFunc type is an adapter to allow the use of ordinary -// functions as RoundTrippers. If f is a function with the appropriate -// signature, RountTripperFunc(f) is a RoundTripper that calls f. -type RoundTripperFunc func(req *http.Request) (*http.Response, error) - -// RoundTrip implements the RoundTripper interface. -func (rt RoundTripperFunc) RoundTrip(r *http.Request) (*http.Response, error) { - return rt(r) -} - -// InstrumentRoundTripperInFlight is a middleware that wraps the provided -// http.RoundTripper. It sets the provided prometheus.Gauge to the number of -// requests currently handled by the wrapped http.RoundTripper. -// -// See the example for ExampleInstrumentRoundTripperDuration for example usage. -func InstrumentRoundTripperInFlight(gauge prometheus.Gauge, next http.RoundTripper) RoundTripperFunc { - return RoundTripperFunc(func(r *http.Request) (*http.Response, error) { - gauge.Inc() - defer gauge.Dec() - return next.RoundTrip(r) - }) -} - -// InstrumentRoundTripperCounter is a middleware that wraps the provided -// http.RoundTripper to observe the request result with the provided CounterVec. -// The CounterVec must have zero, one, or two non-const non-curried labels. For -// those, the only allowed label names are "code" and "method". The function -// panics otherwise. Partitioning of the CounterVec happens by HTTP status code -// and/or HTTP method if the respective instance label names are present in the -// CounterVec. For unpartitioned counting, use a CounterVec with zero labels. -// -// If the wrapped RoundTripper panics or returns a non-nil error, the Counter -// is not incremented. -// -// See the example for ExampleInstrumentRoundTripperDuration for example usage. 
-func InstrumentRoundTripperCounter(counter *prometheus.CounterVec, next http.RoundTripper) RoundTripperFunc { - code, method := checkLabels(counter) - - return RoundTripperFunc(func(r *http.Request) (*http.Response, error) { - resp, err := next.RoundTrip(r) - if err == nil { - counter.With(labels(code, method, r.Method, resp.StatusCode)).Inc() - } - return resp, err - }) -} - -// InstrumentRoundTripperDuration is a middleware that wraps the provided -// http.RoundTripper to observe the request duration with the provided -// ObserverVec. The ObserverVec must have zero, one, or two non-const -// non-curried labels. For those, the only allowed label names are "code" and -// "method". The function panics otherwise. The Observe method of the Observer -// in the ObserverVec is called with the request duration in -// seconds. Partitioning happens by HTTP status code and/or HTTP method if the -// respective instance label names are present in the ObserverVec. For -// unpartitioned observations, use an ObserverVec with zero labels. Note that -// partitioning of Histograms is expensive and should be used judiciously. -// -// If the wrapped RoundTripper panics or returns a non-nil error, no values are -// reported. -// -// Note that this method is only guaranteed to never observe negative durations -// if used with Go1.9+. 
-func InstrumentRoundTripperDuration(obs prometheus.ObserverVec, next http.RoundTripper) RoundTripperFunc { - code, method := checkLabels(obs) - - return RoundTripperFunc(func(r *http.Request) (*http.Response, error) { - start := time.Now() - resp, err := next.RoundTrip(r) - if err == nil { - obs.With(labels(code, method, r.Method, resp.StatusCode)).Observe(time.Since(start).Seconds()) - } - return resp, err - }) -} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/promhttp/instrument_client_1_8.go b/vendor/github.com/prometheus/client_golang/prometheus/promhttp/instrument_client_1_8.go deleted file mode 100644 index a034d1e..0000000 --- a/vendor/github.com/prometheus/client_golang/prometheus/promhttp/instrument_client_1_8.go +++ /dev/null @@ -1,144 +0,0 @@ -// Copyright 2017 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// +build go1.8 - -package promhttp - -import ( - "context" - "crypto/tls" - "net/http" - "net/http/httptrace" - "time" -) - -// InstrumentTrace is used to offer flexibility in instrumenting the available -// httptrace.ClientTrace hook functions. Each function is passed a float64 -// representing the time in seconds since the start of the http request. A user -// may choose to use separately buckets Histograms, or implement custom -// instance labels on a per function basis. 
-type InstrumentTrace struct { - GotConn func(float64) - PutIdleConn func(float64) - GotFirstResponseByte func(float64) - Got100Continue func(float64) - DNSStart func(float64) - DNSDone func(float64) - ConnectStart func(float64) - ConnectDone func(float64) - TLSHandshakeStart func(float64) - TLSHandshakeDone func(float64) - WroteHeaders func(float64) - Wait100Continue func(float64) - WroteRequest func(float64) -} - -// InstrumentRoundTripperTrace is a middleware that wraps the provided -// RoundTripper and reports times to hook functions provided in the -// InstrumentTrace struct. Hook functions that are not present in the provided -// InstrumentTrace struct are ignored. Times reported to the hook functions are -// time since the start of the request. Only with Go1.9+, those times are -// guaranteed to never be negative. (Earlier Go versions are not using a -// monotonic clock.) Note that partitioning of Histograms is expensive and -// should be used judiciously. -// -// For hook functions that receive an error as an argument, no observations are -// made in the event of a non-nil error value. -// -// See the example for ExampleInstrumentRoundTripperDuration for example usage. 
-func InstrumentRoundTripperTrace(it *InstrumentTrace, next http.RoundTripper) RoundTripperFunc { - return RoundTripperFunc(func(r *http.Request) (*http.Response, error) { - start := time.Now() - - trace := &httptrace.ClientTrace{ - GotConn: func(_ httptrace.GotConnInfo) { - if it.GotConn != nil { - it.GotConn(time.Since(start).Seconds()) - } - }, - PutIdleConn: func(err error) { - if err != nil { - return - } - if it.PutIdleConn != nil { - it.PutIdleConn(time.Since(start).Seconds()) - } - }, - DNSStart: func(_ httptrace.DNSStartInfo) { - if it.DNSStart != nil { - it.DNSStart(time.Since(start).Seconds()) - } - }, - DNSDone: func(_ httptrace.DNSDoneInfo) { - if it.DNSDone != nil { - it.DNSDone(time.Since(start).Seconds()) - } - }, - ConnectStart: func(_, _ string) { - if it.ConnectStart != nil { - it.ConnectStart(time.Since(start).Seconds()) - } - }, - ConnectDone: func(_, _ string, err error) { - if err != nil { - return - } - if it.ConnectDone != nil { - it.ConnectDone(time.Since(start).Seconds()) - } - }, - GotFirstResponseByte: func() { - if it.GotFirstResponseByte != nil { - it.GotFirstResponseByte(time.Since(start).Seconds()) - } - }, - Got100Continue: func() { - if it.Got100Continue != nil { - it.Got100Continue(time.Since(start).Seconds()) - } - }, - TLSHandshakeStart: func() { - if it.TLSHandshakeStart != nil { - it.TLSHandshakeStart(time.Since(start).Seconds()) - } - }, - TLSHandshakeDone: func(_ tls.ConnectionState, err error) { - if err != nil { - return - } - if it.TLSHandshakeDone != nil { - it.TLSHandshakeDone(time.Since(start).Seconds()) - } - }, - WroteHeaders: func() { - if it.WroteHeaders != nil { - it.WroteHeaders(time.Since(start).Seconds()) - } - }, - Wait100Continue: func() { - if it.Wait100Continue != nil { - it.Wait100Continue(time.Since(start).Seconds()) - } - }, - WroteRequest: func(_ httptrace.WroteRequestInfo) { - if it.WroteRequest != nil { - it.WroteRequest(time.Since(start).Seconds()) - } - }, - } - r = 
r.WithContext(httptrace.WithClientTrace(context.Background(), trace)) - - return next.RoundTrip(r) - }) -} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/promhttp/instrument_server.go b/vendor/github.com/prometheus/client_golang/prometheus/promhttp/instrument_server.go deleted file mode 100644 index 9db2438..0000000 --- a/vendor/github.com/prometheus/client_golang/prometheus/promhttp/instrument_server.go +++ /dev/null @@ -1,447 +0,0 @@ -// Copyright 2017 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package promhttp - -import ( - "errors" - "net/http" - "strconv" - "strings" - "time" - - dto "github.com/prometheus/client_model/go" - - "github.com/prometheus/client_golang/prometheus" -) - -// magicString is used for the hacky label test in checkLabels. Remove once fixed. -const magicString = "zZgWfBxLqvG8kc8IMv3POi2Bb0tZI3vAnBx+gBaFi9FyPzB/CzKUer1yufDa" - -// InstrumentHandlerInFlight is a middleware that wraps the provided -// http.Handler. It sets the provided prometheus.Gauge to the number of -// requests currently handled by the wrapped http.Handler. -// -// See the example for InstrumentHandlerDuration for example usage. 
-func InstrumentHandlerInFlight(g prometheus.Gauge, next http.Handler) http.Handler { - return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - g.Inc() - defer g.Dec() - next.ServeHTTP(w, r) - }) -} - -// InstrumentHandlerDuration is a middleware that wraps the provided -// http.Handler to observe the request duration with the provided ObserverVec. -// The ObserverVec must have zero, one, or two non-const non-curried labels. For -// those, the only allowed label names are "code" and "method". The function -// panics otherwise. The Observe method of the Observer in the ObserverVec is -// called with the request duration in seconds. Partitioning happens by HTTP -// status code and/or HTTP method if the respective instance label names are -// present in the ObserverVec. For unpartitioned observations, use an -// ObserverVec with zero labels. Note that partitioning of Histograms is -// expensive and should be used judiciously. -// -// If the wrapped Handler does not set a status code, a status code of 200 is assumed. -// -// If the wrapped Handler panics, no values are reported. -// -// Note that this method is only guaranteed to never observe negative durations -// if used with Go1.9+. -func InstrumentHandlerDuration(obs prometheus.ObserverVec, next http.Handler) http.HandlerFunc { - code, method := checkLabels(obs) - - if code { - return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - now := time.Now() - d := newDelegator(w, nil) - next.ServeHTTP(d, r) - - obs.With(labels(code, method, r.Method, d.Status())).Observe(time.Since(now).Seconds()) - }) - } - - return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - now := time.Now() - next.ServeHTTP(w, r) - obs.With(labels(code, method, r.Method, 0)).Observe(time.Since(now).Seconds()) - }) -} - -// InstrumentHandlerCounter is a middleware that wraps the provided http.Handler -// to observe the request result with the provided CounterVec. 
The CounterVec -// must have zero, one, or two non-const non-curried labels. For those, the only -// allowed label names are "code" and "method". The function panics -// otherwise. Partitioning of the CounterVec happens by HTTP status code and/or -// HTTP method if the respective instance label names are present in the -// CounterVec. For unpartitioned counting, use a CounterVec with zero labels. -// -// If the wrapped Handler does not set a status code, a status code of 200 is assumed. -// -// If the wrapped Handler panics, the Counter is not incremented. -// -// See the example for InstrumentHandlerDuration for example usage. -func InstrumentHandlerCounter(counter *prometheus.CounterVec, next http.Handler) http.HandlerFunc { - code, method := checkLabels(counter) - - if code { - return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - d := newDelegator(w, nil) - next.ServeHTTP(d, r) - counter.With(labels(code, method, r.Method, d.Status())).Inc() - }) - } - - return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - next.ServeHTTP(w, r) - counter.With(labels(code, method, r.Method, 0)).Inc() - }) -} - -// InstrumentHandlerTimeToWriteHeader is a middleware that wraps the provided -// http.Handler to observe with the provided ObserverVec the request duration -// until the response headers are written. The ObserverVec must have zero, one, -// or two non-const non-curried labels. For those, the only allowed label names -// are "code" and "method". The function panics otherwise. The Observe method of -// the Observer in the ObserverVec is called with the request duration in -// seconds. Partitioning happens by HTTP status code and/or HTTP method if the -// respective instance label names are present in the ObserverVec. For -// unpartitioned observations, use an ObserverVec with zero labels. Note that -// partitioning of Histograms is expensive and should be used judiciously. 
-// -// If the wrapped Handler panics before calling WriteHeader, no value is -// reported. -// -// Note that this method is only guaranteed to never observe negative durations -// if used with Go1.9+. -// -// See the example for InstrumentHandlerDuration for example usage. -func InstrumentHandlerTimeToWriteHeader(obs prometheus.ObserverVec, next http.Handler) http.HandlerFunc { - code, method := checkLabels(obs) - - return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - now := time.Now() - d := newDelegator(w, func(status int) { - obs.With(labels(code, method, r.Method, status)).Observe(time.Since(now).Seconds()) - }) - next.ServeHTTP(d, r) - }) -} - -// InstrumentHandlerRequestSize is a middleware that wraps the provided -// http.Handler to observe the request size with the provided ObserverVec. The -// ObserverVec must have zero, one, or two non-const non-curried labels. For -// those, the only allowed label names are "code" and "method". The function -// panics otherwise. The Observe method of the Observer in the ObserverVec is -// called with the request size in bytes. Partitioning happens by HTTP status -// code and/or HTTP method if the respective instance label names are present in -// the ObserverVec. For unpartitioned observations, use an ObserverVec with zero -// labels. Note that partitioning of Histograms is expensive and should be used -// judiciously. -// -// If the wrapped Handler does not set a status code, a status code of 200 is assumed. -// -// If the wrapped Handler panics, no values are reported. -// -// See the example for InstrumentHandlerDuration for example usage. 
-func InstrumentHandlerRequestSize(obs prometheus.ObserverVec, next http.Handler) http.HandlerFunc { - code, method := checkLabels(obs) - - if code { - return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - d := newDelegator(w, nil) - next.ServeHTTP(d, r) - size := computeApproximateRequestSize(r) - obs.With(labels(code, method, r.Method, d.Status())).Observe(float64(size)) - }) - } - - return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - next.ServeHTTP(w, r) - size := computeApproximateRequestSize(r) - obs.With(labels(code, method, r.Method, 0)).Observe(float64(size)) - }) -} - -// InstrumentHandlerResponseSize is a middleware that wraps the provided -// http.Handler to observe the response size with the provided ObserverVec. The -// ObserverVec must have zero, one, or two non-const non-curried labels. For -// those, the only allowed label names are "code" and "method". The function -// panics otherwise. The Observe method of the Observer in the ObserverVec is -// called with the response size in bytes. Partitioning happens by HTTP status -// code and/or HTTP method if the respective instance label names are present in -// the ObserverVec. For unpartitioned observations, use an ObserverVec with zero -// labels. Note that partitioning of Histograms is expensive and should be used -// judiciously. -// -// If the wrapped Handler does not set a status code, a status code of 200 is assumed. -// -// If the wrapped Handler panics, no values are reported. -// -// See the example for InstrumentHandlerDuration for example usage. 
-func InstrumentHandlerResponseSize(obs prometheus.ObserverVec, next http.Handler) http.Handler { - code, method := checkLabels(obs) - return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - d := newDelegator(w, nil) - next.ServeHTTP(d, r) - obs.With(labels(code, method, r.Method, d.Status())).Observe(float64(d.Written())) - }) -} - -func checkLabels(c prometheus.Collector) (code bool, method bool) { - // TODO(beorn7): Remove this hacky way to check for instance labels - // once Descriptors can have their dimensionality queried. - var ( - desc *prometheus.Desc - m prometheus.Metric - pm dto.Metric - lvs []string - ) - - // Get the Desc from the Collector. - descc := make(chan *prometheus.Desc, 1) - c.Describe(descc) - - select { - case desc = <-descc: - default: - panic("no description provided by collector") - } - select { - case <-descc: - panic("more than one description provided by collector") - default: - } - - close(descc) - - // Create a ConstMetric with the Desc. Since we don't know how many - // variable labels there are, try for as long as it needs. - for err := errors.New("dummy"); err != nil; lvs = append(lvs, magicString) { - m, err = prometheus.NewConstMetric(desc, prometheus.UntypedValue, 0, lvs...) - } - - // Write out the metric into a proto message and look at the labels. - // If the value is not the magicString, it is a constLabel, which doesn't interest us. - // If the label is curried, it doesn't interest us. - // In all other cases, only "code" or "method" is allowed. 
- if err := m.Write(&pm); err != nil { - panic("error checking metric for labels") - } - for _, label := range pm.Label { - name, value := label.GetName(), label.GetValue() - if value != magicString || isLabelCurried(c, name) { - continue - } - switch name { - case "code": - code = true - case "method": - method = true - default: - panic("metric partitioned with non-supported labels") - } - } - return -} - -func isLabelCurried(c prometheus.Collector, label string) bool { - // This is even hackier than the label test above. - // We essentially try to curry again and see if it works. - // But for that, we need to type-convert to the two - // types we use here, ObserverVec or *CounterVec. - switch v := c.(type) { - case *prometheus.CounterVec: - if _, err := v.CurryWith(prometheus.Labels{label: "dummy"}); err == nil { - return false - } - case prometheus.ObserverVec: - if _, err := v.CurryWith(prometheus.Labels{label: "dummy"}); err == nil { - return false - } - default: - panic("unsupported metric vec type") - } - return true -} - -// emptyLabels is a one-time allocation for non-partitioned metrics to avoid -// unnecessary allocations on each request. -var emptyLabels = prometheus.Labels{} - -func labels(code, method bool, reqMethod string, status int) prometheus.Labels { - if !(code || method) { - return emptyLabels - } - labels := prometheus.Labels{} - - if code { - labels["code"] = sanitizeCode(status) - } - if method { - labels["method"] = sanitizeMethod(reqMethod) - } - - return labels -} - -func computeApproximateRequestSize(r *http.Request) int { - s := 0 - if r.URL != nil { - s += len(r.URL.String()) - } - - s += len(r.Method) - s += len(r.Proto) - for name, values := range r.Header { - s += len(name) - for _, value := range values { - s += len(value) - } - } - s += len(r.Host) - - // N.B. r.Form and r.MultipartForm are assumed to be included in r.URL. 
- - if r.ContentLength != -1 { - s += int(r.ContentLength) - } - return s -} - -func sanitizeMethod(m string) string { - switch m { - case "GET", "get": - return "get" - case "PUT", "put": - return "put" - case "HEAD", "head": - return "head" - case "POST", "post": - return "post" - case "DELETE", "delete": - return "delete" - case "CONNECT", "connect": - return "connect" - case "OPTIONS", "options": - return "options" - case "NOTIFY", "notify": - return "notify" - default: - return strings.ToLower(m) - } -} - -// If the wrapped http.Handler has not set a status code, i.e. the value is -// currently 0, santizeCode will return 200, for consistency with behavior in -// the stdlib. -func sanitizeCode(s int) string { - switch s { - case 100: - return "100" - case 101: - return "101" - - case 200, 0: - return "200" - case 201: - return "201" - case 202: - return "202" - case 203: - return "203" - case 204: - return "204" - case 205: - return "205" - case 206: - return "206" - - case 300: - return "300" - case 301: - return "301" - case 302: - return "302" - case 304: - return "304" - case 305: - return "305" - case 307: - return "307" - - case 400: - return "400" - case 401: - return "401" - case 402: - return "402" - case 403: - return "403" - case 404: - return "404" - case 405: - return "405" - case 406: - return "406" - case 407: - return "407" - case 408: - return "408" - case 409: - return "409" - case 410: - return "410" - case 411: - return "411" - case 412: - return "412" - case 413: - return "413" - case 414: - return "414" - case 415: - return "415" - case 416: - return "416" - case 417: - return "417" - case 418: - return "418" - - case 500: - return "500" - case 501: - return "501" - case 502: - return "502" - case 503: - return "503" - case 504: - return "504" - case 505: - return "505" - - case 428: - return "428" - case 429: - return "429" - case 431: - return "431" - case 511: - return "511" - - default: - return strconv.Itoa(s) - } -} diff --git 
a/vendor/github.com/prometheus/client_golang/prometheus/registry.go b/vendor/github.com/prometheus/client_golang/prometheus/registry.go deleted file mode 100644 index f98c81a..0000000 --- a/vendor/github.com/prometheus/client_golang/prometheus/registry.go +++ /dev/null @@ -1,931 +0,0 @@ -// Copyright 2014 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package prometheus - -import ( - "bytes" - "fmt" - "io/ioutil" - "os" - "path/filepath" - "runtime" - "sort" - "strings" - "sync" - "unicode/utf8" - - "github.com/golang/protobuf/proto" - "github.com/prometheus/common/expfmt" - - dto "github.com/prometheus/client_model/go" - - "github.com/prometheus/client_golang/prometheus/internal" -) - -const ( - // Capacity for the channel to collect metrics and descriptors. - capMetricChan = 1000 - capDescChan = 10 -) - -// DefaultRegisterer and DefaultGatherer are the implementations of the -// Registerer and Gatherer interface a number of convenience functions in this -// package act on. Initially, both variables point to the same Registry, which -// has a process collector (currently on Linux only, see NewProcessCollector) -// and a Go collector (see NewGoCollector, in particular the note about -// stop-the-world implication with Go versions older than 1.9) already -// registered. This approach to keep default instances as global state mirrors -// the approach of other packages in the Go standard library. Note that there -// are caveats. 
Change the variables with caution and only if you understand the -// consequences. Users who want to avoid global state altogether should not use -// the convenience functions and act on custom instances instead. -var ( - defaultRegistry = NewRegistry() - DefaultRegisterer Registerer = defaultRegistry - DefaultGatherer Gatherer = defaultRegistry -) - -func init() { - MustRegister(NewProcessCollector(ProcessCollectorOpts{})) - MustRegister(NewGoCollector()) -} - -// NewRegistry creates a new vanilla Registry without any Collectors -// pre-registered. -func NewRegistry() *Registry { - return &Registry{ - collectorsByID: map[uint64]Collector{}, - descIDs: map[uint64]struct{}{}, - dimHashesByName: map[string]uint64{}, - } -} - -// NewPedanticRegistry returns a registry that checks during collection if each -// collected Metric is consistent with its reported Desc, and if the Desc has -// actually been registered with the registry. Unchecked Collectors (those whose -// Describe methed does not yield any descriptors) are excluded from the check. -// -// Usually, a Registry will be happy as long as the union of all collected -// Metrics is consistent and valid even if some metrics are not consistent with -// their own Desc or a Desc provided by their registered Collector. Well-behaved -// Collectors and Metrics will only provide consistent Descs. This Registry is -// useful to test the implementation of Collectors and Metrics. -func NewPedanticRegistry() *Registry { - r := NewRegistry() - r.pedanticChecksEnabled = true - return r -} - -// Registerer is the interface for the part of a registry in charge of -// registering and unregistering. Users of custom registries should use -// Registerer as type for registration purposes (rather than the Registry type -// directly). In that way, they are free to use custom Registerer implementation -// (e.g. for testing purposes). 
-type Registerer interface { - // Register registers a new Collector to be included in metrics - // collection. It returns an error if the descriptors provided by the - // Collector are invalid or if they — in combination with descriptors of - // already registered Collectors — do not fulfill the consistency and - // uniqueness criteria described in the documentation of metric.Desc. - // - // If the provided Collector is equal to a Collector already registered - // (which includes the case of re-registering the same Collector), the - // returned error is an instance of AlreadyRegisteredError, which - // contains the previously registered Collector. - // - // A Collector whose Describe method does not yield any Desc is treated - // as unchecked. Registration will always succeed. No check for - // re-registering (see previous paragraph) is performed. Thus, the - // caller is responsible for not double-registering the same unchecked - // Collector, and for providing a Collector that will not cause - // inconsistent metrics on collection. (This would lead to scrape - // errors.) - Register(Collector) error - // MustRegister works like Register but registers any number of - // Collectors and panics upon the first registration that causes an - // error. - MustRegister(...Collector) - // Unregister unregisters the Collector that equals the Collector passed - // in as an argument. (Two Collectors are considered equal if their - // Describe method yields the same set of descriptors.) The function - // returns whether a Collector was unregistered. Note that an unchecked - // Collector cannot be unregistered (as its Describe method does not - // yield any descriptor). - // - // Note that even after unregistering, it will not be possible to - // register a new Collector that is inconsistent with the unregistered - // Collector, e.g. a Collector collecting metrics with the same name but - // a different help string. 
The rationale here is that the same registry - // instance must only collect consistent metrics throughout its - // lifetime. - Unregister(Collector) bool -} - -// Gatherer is the interface for the part of a registry in charge of gathering -// the collected metrics into a number of MetricFamilies. The Gatherer interface -// comes with the same general implication as described for the Registerer -// interface. -type Gatherer interface { - // Gather calls the Collect method of the registered Collectors and then - // gathers the collected metrics into a lexicographically sorted slice - // of uniquely named MetricFamily protobufs. Gather ensures that the - // returned slice is valid and self-consistent so that it can be used - // for valid exposition. As an exception to the strict consistency - // requirements described for metric.Desc, Gather will tolerate - // different sets of label names for metrics of the same metric family. - // - // Even if an error occurs, Gather attempts to gather as many metrics as - // possible. Hence, if a non-nil error is returned, the returned - // MetricFamily slice could be nil (in case of a fatal error that - // prevented any meaningful metric collection) or contain a number of - // MetricFamily protobufs, some of which might be incomplete, and some - // might be missing altogether. The returned error (which might be a - // MultiError) explains the details. Note that this is mostly useful for - // debugging purposes. If the gathered protobufs are to be used for - // exposition in actual monitoring, it is almost always better to not - // expose an incomplete result and instead disregard the returned - // MetricFamily protobufs in case the returned error is non-nil. - Gather() ([]*dto.MetricFamily, error) -} - -// Register registers the provided Collector with the DefaultRegisterer. -// -// Register is a shortcut for DefaultRegisterer.Register(c). See there for more -// details. 
-func Register(c Collector) error { - return DefaultRegisterer.Register(c) -} - -// MustRegister registers the provided Collectors with the DefaultRegisterer and -// panics if any error occurs. -// -// MustRegister is a shortcut for DefaultRegisterer.MustRegister(cs...). See -// there for more details. -func MustRegister(cs ...Collector) { - DefaultRegisterer.MustRegister(cs...) -} - -// Unregister removes the registration of the provided Collector from the -// DefaultRegisterer. -// -// Unregister is a shortcut for DefaultRegisterer.Unregister(c). See there for -// more details. -func Unregister(c Collector) bool { - return DefaultRegisterer.Unregister(c) -} - -// GathererFunc turns a function into a Gatherer. -type GathererFunc func() ([]*dto.MetricFamily, error) - -// Gather implements Gatherer. -func (gf GathererFunc) Gather() ([]*dto.MetricFamily, error) { - return gf() -} - -// AlreadyRegisteredError is returned by the Register method if the Collector to -// be registered has already been registered before, or a different Collector -// that collects the same metrics has been registered before. Registration fails -// in that case, but you can detect from the kind of error what has -// happened. The error contains fields for the existing Collector and the -// (rejected) new Collector that equals the existing one. This can be used to -// find out if an equal Collector has been registered before and switch over to -// using the old one, as demonstrated in the example. -type AlreadyRegisteredError struct { - ExistingCollector, NewCollector Collector -} - -func (err AlreadyRegisteredError) Error() string { - return "duplicate metrics collector registration attempted" -} - -// MultiError is a slice of errors implementing the error interface. It is used -// by a Gatherer to report multiple errors during MetricFamily gathering. 
-type MultiError []error - -func (errs MultiError) Error() string { - if len(errs) == 0 { - return "" - } - buf := &bytes.Buffer{} - fmt.Fprintf(buf, "%d error(s) occurred:", len(errs)) - for _, err := range errs { - fmt.Fprintf(buf, "\n* %s", err) - } - return buf.String() -} - -// Append appends the provided error if it is not nil. -func (errs *MultiError) Append(err error) { - if err != nil { - *errs = append(*errs, err) - } -} - -// MaybeUnwrap returns nil if len(errs) is 0. It returns the first and only -// contained error as error if len(errs is 1). In all other cases, it returns -// the MultiError directly. This is helpful for returning a MultiError in a way -// that only uses the MultiError if needed. -func (errs MultiError) MaybeUnwrap() error { - switch len(errs) { - case 0: - return nil - case 1: - return errs[0] - default: - return errs - } -} - -// Registry registers Prometheus collectors, collects their metrics, and gathers -// them into MetricFamilies for exposition. It implements both Registerer and -// Gatherer. The zero value is not usable. Create instances with NewRegistry or -// NewPedanticRegistry. -type Registry struct { - mtx sync.RWMutex - collectorsByID map[uint64]Collector // ID is a hash of the descIDs. - descIDs map[uint64]struct{} - dimHashesByName map[string]uint64 - uncheckedCollectors []Collector - pedanticChecksEnabled bool -} - -// Register implements Registerer. -func (r *Registry) Register(c Collector) error { - var ( - descChan = make(chan *Desc, capDescChan) - newDescIDs = map[uint64]struct{}{} - newDimHashesByName = map[string]uint64{} - collectorID uint64 // Just a sum of all desc IDs. - duplicateDescErr error - ) - go func() { - c.Describe(descChan) - close(descChan) - }() - r.mtx.Lock() - defer func() { - // Drain channel in case of premature return to not leak a goroutine. - for range descChan { - } - r.mtx.Unlock() - }() - // Conduct various tests... - for desc := range descChan { - - // Is the descriptor valid at all? 
- if desc.err != nil { - return fmt.Errorf("descriptor %s is invalid: %s", desc, desc.err) - } - - // Is the descID unique? - // (In other words: Is the fqName + constLabel combination unique?) - if _, exists := r.descIDs[desc.id]; exists { - duplicateDescErr = fmt.Errorf("descriptor %s already exists with the same fully-qualified name and const label values", desc) - } - // If it is not a duplicate desc in this collector, add it to - // the collectorID. (We allow duplicate descs within the same - // collector, but their existence must be a no-op.) - if _, exists := newDescIDs[desc.id]; !exists { - newDescIDs[desc.id] = struct{}{} - collectorID += desc.id - } - - // Are all the label names and the help string consistent with - // previous descriptors of the same name? - // First check existing descriptors... - if dimHash, exists := r.dimHashesByName[desc.fqName]; exists { - if dimHash != desc.dimHash { - return fmt.Errorf("a previously registered descriptor with the same fully-qualified name as %s has different label names or a different help string", desc) - } - } else { - // ...then check the new descriptors already seen. - if dimHash, exists := newDimHashesByName[desc.fqName]; exists { - if dimHash != desc.dimHash { - return fmt.Errorf("descriptors reported by collector have inconsistent label names or help strings for the same fully-qualified name, offender is %s", desc) - } - } else { - newDimHashesByName[desc.fqName] = desc.dimHash - } - } - } - // A Collector yielding no Desc at all is considered unchecked. - if len(newDescIDs) == 0 { - r.uncheckedCollectors = append(r.uncheckedCollectors, c) - return nil - } - if existing, exists := r.collectorsByID[collectorID]; exists { - return AlreadyRegisteredError{ - ExistingCollector: existing, - NewCollector: c, - } - } - // If the collectorID is new, but at least one of the descs existed - // before, we are in trouble. 
- if duplicateDescErr != nil { - return duplicateDescErr - } - - // Only after all tests have passed, actually register. - r.collectorsByID[collectorID] = c - for hash := range newDescIDs { - r.descIDs[hash] = struct{}{} - } - for name, dimHash := range newDimHashesByName { - r.dimHashesByName[name] = dimHash - } - return nil -} - -// Unregister implements Registerer. -func (r *Registry) Unregister(c Collector) bool { - var ( - descChan = make(chan *Desc, capDescChan) - descIDs = map[uint64]struct{}{} - collectorID uint64 // Just a sum of the desc IDs. - ) - go func() { - c.Describe(descChan) - close(descChan) - }() - for desc := range descChan { - if _, exists := descIDs[desc.id]; !exists { - collectorID += desc.id - descIDs[desc.id] = struct{}{} - } - } - - r.mtx.RLock() - if _, exists := r.collectorsByID[collectorID]; !exists { - r.mtx.RUnlock() - return false - } - r.mtx.RUnlock() - - r.mtx.Lock() - defer r.mtx.Unlock() - - delete(r.collectorsByID, collectorID) - for id := range descIDs { - delete(r.descIDs, id) - } - // dimHashesByName is left untouched as those must be consistent - // throughout the lifetime of a program. - return true -} - -// MustRegister implements Registerer. -func (r *Registry) MustRegister(cs ...Collector) { - for _, c := range cs { - if err := r.Register(c); err != nil { - panic(err) - } - } -} - -// Gather implements Gatherer. -func (r *Registry) Gather() ([]*dto.MetricFamily, error) { - var ( - checkedMetricChan = make(chan Metric, capMetricChan) - uncheckedMetricChan = make(chan Metric, capMetricChan) - metricHashes = map[uint64]struct{}{} - wg sync.WaitGroup - errs MultiError // The collected errors to return in the end. 
- registeredDescIDs map[uint64]struct{} // Only used for pedantic checks - ) - - r.mtx.RLock() - goroutineBudget := len(r.collectorsByID) + len(r.uncheckedCollectors) - metricFamiliesByName := make(map[string]*dto.MetricFamily, len(r.dimHashesByName)) - checkedCollectors := make(chan Collector, len(r.collectorsByID)) - uncheckedCollectors := make(chan Collector, len(r.uncheckedCollectors)) - for _, collector := range r.collectorsByID { - checkedCollectors <- collector - } - for _, collector := range r.uncheckedCollectors { - uncheckedCollectors <- collector - } - // In case pedantic checks are enabled, we have to copy the map before - // giving up the RLock. - if r.pedanticChecksEnabled { - registeredDescIDs = make(map[uint64]struct{}, len(r.descIDs)) - for id := range r.descIDs { - registeredDescIDs[id] = struct{}{} - } - } - r.mtx.RUnlock() - - wg.Add(goroutineBudget) - - collectWorker := func() { - for { - select { - case collector := <-checkedCollectors: - collector.Collect(checkedMetricChan) - case collector := <-uncheckedCollectors: - collector.Collect(uncheckedMetricChan) - default: - return - } - wg.Done() - } - } - - // Start the first worker now to make sure at least one is running. - go collectWorker() - goroutineBudget-- - - // Close checkedMetricChan and uncheckedMetricChan once all collectors - // are collected. - go func() { - wg.Wait() - close(checkedMetricChan) - close(uncheckedMetricChan) - }() - - // Drain checkedMetricChan and uncheckedMetricChan in case of premature return. - defer func() { - if checkedMetricChan != nil { - for range checkedMetricChan { - } - } - if uncheckedMetricChan != nil { - for range uncheckedMetricChan { - } - } - }() - - // Copy the channel references so we can nil them out later to remove - // them from the select statements below. 
- cmc := checkedMetricChan - umc := uncheckedMetricChan - - for { - select { - case metric, ok := <-cmc: - if !ok { - cmc = nil - break - } - errs.Append(processMetric( - metric, metricFamiliesByName, - metricHashes, - registeredDescIDs, - )) - case metric, ok := <-umc: - if !ok { - umc = nil - break - } - errs.Append(processMetric( - metric, metricFamiliesByName, - metricHashes, - nil, - )) - default: - if goroutineBudget <= 0 || len(checkedCollectors)+len(uncheckedCollectors) == 0 { - // All collectors are already being worked on or - // we have already as many goroutines started as - // there are collectors. Do the same as above, - // just without the default. - select { - case metric, ok := <-cmc: - if !ok { - cmc = nil - break - } - errs.Append(processMetric( - metric, metricFamiliesByName, - metricHashes, - registeredDescIDs, - )) - case metric, ok := <-umc: - if !ok { - umc = nil - break - } - errs.Append(processMetric( - metric, metricFamiliesByName, - metricHashes, - nil, - )) - } - break - } - // Start more workers. - go collectWorker() - goroutineBudget-- - runtime.Gosched() - } - // Once both checkedMetricChan and uncheckdMetricChan are closed - // and drained, the contraption above will nil out cmc and umc, - // and then we can leave the collect loop here. - if cmc == nil && umc == nil { - break - } - } - return internal.NormalizeMetricFamilies(metricFamiliesByName), errs.MaybeUnwrap() -} - -// WriteToTextfile calls Gather on the provided Gatherer, encodes the result in the -// Prometheus text format, and writes it to a temporary file. Upon success, the -// temporary file is renamed to the provided filename. -// -// This is intended for use with the textfile collector of the node exporter. -// Note that the node exporter expects the filename to be suffixed with ".prom". 
-func WriteToTextfile(filename string, g Gatherer) error { - tmp, err := ioutil.TempFile(filepath.Dir(filename), filepath.Base(filename)) - if err != nil { - return err - } - defer os.Remove(tmp.Name()) - - mfs, err := g.Gather() - if err != nil { - return err - } - for _, mf := range mfs { - if _, err := expfmt.MetricFamilyToText(tmp, mf); err != nil { - return err - } - } - if err := tmp.Close(); err != nil { - return err - } - - if err := os.Chmod(tmp.Name(), 0644); err != nil { - return err - } - return os.Rename(tmp.Name(), filename) -} - -// processMetric is an internal helper method only used by the Gather method. -func processMetric( - metric Metric, - metricFamiliesByName map[string]*dto.MetricFamily, - metricHashes map[uint64]struct{}, - registeredDescIDs map[uint64]struct{}, -) error { - desc := metric.Desc() - // Wrapped metrics collected by an unchecked Collector can have an - // invalid Desc. - if desc.err != nil { - return desc.err - } - dtoMetric := &dto.Metric{} - if err := metric.Write(dtoMetric); err != nil { - return fmt.Errorf("error collecting metric %v: %s", desc, err) - } - metricFamily, ok := metricFamiliesByName[desc.fqName] - if ok { // Existing name. - if metricFamily.GetHelp() != desc.help { - return fmt.Errorf( - "collected metric %s %s has help %q but should have %q", - desc.fqName, dtoMetric, desc.help, metricFamily.GetHelp(), - ) - } - // TODO(beorn7): Simplify switch once Desc has type. 
- switch metricFamily.GetType() { - case dto.MetricType_COUNTER: - if dtoMetric.Counter == nil { - return fmt.Errorf( - "collected metric %s %s should be a Counter", - desc.fqName, dtoMetric, - ) - } - case dto.MetricType_GAUGE: - if dtoMetric.Gauge == nil { - return fmt.Errorf( - "collected metric %s %s should be a Gauge", - desc.fqName, dtoMetric, - ) - } - case dto.MetricType_SUMMARY: - if dtoMetric.Summary == nil { - return fmt.Errorf( - "collected metric %s %s should be a Summary", - desc.fqName, dtoMetric, - ) - } - case dto.MetricType_UNTYPED: - if dtoMetric.Untyped == nil { - return fmt.Errorf( - "collected metric %s %s should be Untyped", - desc.fqName, dtoMetric, - ) - } - case dto.MetricType_HISTOGRAM: - if dtoMetric.Histogram == nil { - return fmt.Errorf( - "collected metric %s %s should be a Histogram", - desc.fqName, dtoMetric, - ) - } - default: - panic("encountered MetricFamily with invalid type") - } - } else { // New name. - metricFamily = &dto.MetricFamily{} - metricFamily.Name = proto.String(desc.fqName) - metricFamily.Help = proto.String(desc.help) - // TODO(beorn7): Simplify switch once Desc has type. - switch { - case dtoMetric.Gauge != nil: - metricFamily.Type = dto.MetricType_GAUGE.Enum() - case dtoMetric.Counter != nil: - metricFamily.Type = dto.MetricType_COUNTER.Enum() - case dtoMetric.Summary != nil: - metricFamily.Type = dto.MetricType_SUMMARY.Enum() - case dtoMetric.Untyped != nil: - metricFamily.Type = dto.MetricType_UNTYPED.Enum() - case dtoMetric.Histogram != nil: - metricFamily.Type = dto.MetricType_HISTOGRAM.Enum() - default: - return fmt.Errorf("empty metric collected: %s", dtoMetric) - } - if err := checkSuffixCollisions(metricFamily, metricFamiliesByName); err != nil { - return err - } - metricFamiliesByName[desc.fqName] = metricFamily - } - if err := checkMetricConsistency(metricFamily, dtoMetric, metricHashes); err != nil { - return err - } - if registeredDescIDs != nil { - // Is the desc registered at all? 
- if _, exist := registeredDescIDs[desc.id]; !exist { - return fmt.Errorf( - "collected metric %s %s with unregistered descriptor %s", - metricFamily.GetName(), dtoMetric, desc, - ) - } - if err := checkDescConsistency(metricFamily, dtoMetric, desc); err != nil { - return err - } - } - metricFamily.Metric = append(metricFamily.Metric, dtoMetric) - return nil -} - -// Gatherers is a slice of Gatherer instances that implements the Gatherer -// interface itself. Its Gather method calls Gather on all Gatherers in the -// slice in order and returns the merged results. Errors returned from the -// Gather calles are all returned in a flattened MultiError. Duplicate and -// inconsistent Metrics are skipped (first occurrence in slice order wins) and -// reported in the returned error. -// -// Gatherers can be used to merge the Gather results from multiple -// Registries. It also provides a way to directly inject existing MetricFamily -// protobufs into the gathering by creating a custom Gatherer with a Gather -// method that simply returns the existing MetricFamily protobufs. Note that no -// registration is involved (in contrast to Collector registration), so -// obviously registration-time checks cannot happen. Any inconsistencies between -// the gathered MetricFamilies are reported as errors by the Gather method, and -// inconsistent Metrics are dropped. Invalid parts of the MetricFamilies -// (e.g. syntactically invalid metric or label names) will go undetected. -type Gatherers []Gatherer - -// Gather implements Gatherer. -func (gs Gatherers) Gather() ([]*dto.MetricFamily, error) { - var ( - metricFamiliesByName = map[string]*dto.MetricFamily{} - metricHashes = map[uint64]struct{}{} - errs MultiError // The collected errors to return in the end. 
- ) - - for i, g := range gs { - mfs, err := g.Gather() - if err != nil { - if multiErr, ok := err.(MultiError); ok { - for _, err := range multiErr { - errs = append(errs, fmt.Errorf("[from Gatherer #%d] %s", i+1, err)) - } - } else { - errs = append(errs, fmt.Errorf("[from Gatherer #%d] %s", i+1, err)) - } - } - for _, mf := range mfs { - existingMF, exists := metricFamiliesByName[mf.GetName()] - if exists { - if existingMF.GetHelp() != mf.GetHelp() { - errs = append(errs, fmt.Errorf( - "gathered metric family %s has help %q but should have %q", - mf.GetName(), mf.GetHelp(), existingMF.GetHelp(), - )) - continue - } - if existingMF.GetType() != mf.GetType() { - errs = append(errs, fmt.Errorf( - "gathered metric family %s has type %s but should have %s", - mf.GetName(), mf.GetType(), existingMF.GetType(), - )) - continue - } - } else { - existingMF = &dto.MetricFamily{} - existingMF.Name = mf.Name - existingMF.Help = mf.Help - existingMF.Type = mf.Type - if err := checkSuffixCollisions(existingMF, metricFamiliesByName); err != nil { - errs = append(errs, err) - continue - } - metricFamiliesByName[mf.GetName()] = existingMF - } - for _, m := range mf.Metric { - if err := checkMetricConsistency(existingMF, m, metricHashes); err != nil { - errs = append(errs, err) - continue - } - existingMF.Metric = append(existingMF.Metric, m) - } - } - } - return internal.NormalizeMetricFamilies(metricFamiliesByName), errs.MaybeUnwrap() -} - -// checkSuffixCollisions checks for collisions with the “magic” suffixes the -// Prometheus text format and the internal metric representation of the -// Prometheus server add while flattening Summaries and Histograms. 
-func checkSuffixCollisions(mf *dto.MetricFamily, mfs map[string]*dto.MetricFamily) error { - var ( - newName = mf.GetName() - newType = mf.GetType() - newNameWithoutSuffix = "" - ) - switch { - case strings.HasSuffix(newName, "_count"): - newNameWithoutSuffix = newName[:len(newName)-6] - case strings.HasSuffix(newName, "_sum"): - newNameWithoutSuffix = newName[:len(newName)-4] - case strings.HasSuffix(newName, "_bucket"): - newNameWithoutSuffix = newName[:len(newName)-7] - } - if newNameWithoutSuffix != "" { - if existingMF, ok := mfs[newNameWithoutSuffix]; ok { - switch existingMF.GetType() { - case dto.MetricType_SUMMARY: - if !strings.HasSuffix(newName, "_bucket") { - return fmt.Errorf( - "collected metric named %q collides with previously collected summary named %q", - newName, newNameWithoutSuffix, - ) - } - case dto.MetricType_HISTOGRAM: - return fmt.Errorf( - "collected metric named %q collides with previously collected histogram named %q", - newName, newNameWithoutSuffix, - ) - } - } - } - if newType == dto.MetricType_SUMMARY || newType == dto.MetricType_HISTOGRAM { - if _, ok := mfs[newName+"_count"]; ok { - return fmt.Errorf( - "collected histogram or summary named %q collides with previously collected metric named %q", - newName, newName+"_count", - ) - } - if _, ok := mfs[newName+"_sum"]; ok { - return fmt.Errorf( - "collected histogram or summary named %q collides with previously collected metric named %q", - newName, newName+"_sum", - ) - } - } - if newType == dto.MetricType_HISTOGRAM { - if _, ok := mfs[newName+"_bucket"]; ok { - return fmt.Errorf( - "collected histogram named %q collides with previously collected metric named %q", - newName, newName+"_bucket", - ) - } - } - return nil -} - -// checkMetricConsistency checks if the provided Metric is consistent with the -// provided MetricFamily. It also hashes the Metric labels and the MetricFamily -// name. If the resulting hash is already in the provided metricHashes, an error -// is returned. 
If not, it is added to metricHashes. -func checkMetricConsistency( - metricFamily *dto.MetricFamily, - dtoMetric *dto.Metric, - metricHashes map[uint64]struct{}, -) error { - name := metricFamily.GetName() - - // Type consistency with metric family. - if metricFamily.GetType() == dto.MetricType_GAUGE && dtoMetric.Gauge == nil || - metricFamily.GetType() == dto.MetricType_COUNTER && dtoMetric.Counter == nil || - metricFamily.GetType() == dto.MetricType_SUMMARY && dtoMetric.Summary == nil || - metricFamily.GetType() == dto.MetricType_HISTOGRAM && dtoMetric.Histogram == nil || - metricFamily.GetType() == dto.MetricType_UNTYPED && dtoMetric.Untyped == nil { - return fmt.Errorf( - "collected metric %q { %s} is not a %s", - name, dtoMetric, metricFamily.GetType(), - ) - } - - previousLabelName := "" - for _, labelPair := range dtoMetric.GetLabel() { - labelName := labelPair.GetName() - if labelName == previousLabelName { - return fmt.Errorf( - "collected metric %q { %s} has two or more labels with the same name: %s", - name, dtoMetric, labelName, - ) - } - if !checkLabelName(labelName) { - return fmt.Errorf( - "collected metric %q { %s} has a label with an invalid name: %s", - name, dtoMetric, labelName, - ) - } - if dtoMetric.Summary != nil && labelName == quantileLabel { - return fmt.Errorf( - "collected metric %q { %s} must not have an explicit %q label", - name, dtoMetric, quantileLabel, - ) - } - if !utf8.ValidString(labelPair.GetValue()) { - return fmt.Errorf( - "collected metric %q { %s} has a label named %q whose value is not utf8: %#v", - name, dtoMetric, labelName, labelPair.GetValue()) - } - previousLabelName = labelName - } - - // Is the metric unique (i.e. no other metric with the same name and the same labels)? - h := hashNew() - h = hashAdd(h, name) - h = hashAddByte(h, separatorByte) - // Make sure label pairs are sorted. We depend on it for the consistency - // check. 
- sort.Sort(labelPairSorter(dtoMetric.Label)) - for _, lp := range dtoMetric.Label { - h = hashAdd(h, lp.GetName()) - h = hashAddByte(h, separatorByte) - h = hashAdd(h, lp.GetValue()) - h = hashAddByte(h, separatorByte) - } - if _, exists := metricHashes[h]; exists { - return fmt.Errorf( - "collected metric %q { %s} was collected before with the same name and label values", - name, dtoMetric, - ) - } - metricHashes[h] = struct{}{} - return nil -} - -func checkDescConsistency( - metricFamily *dto.MetricFamily, - dtoMetric *dto.Metric, - desc *Desc, -) error { - // Desc help consistency with metric family help. - if metricFamily.GetHelp() != desc.help { - return fmt.Errorf( - "collected metric %s %s has help %q but should have %q", - metricFamily.GetName(), dtoMetric, metricFamily.GetHelp(), desc.help, - ) - } - - // Is the desc consistent with the content of the metric? - lpsFromDesc := make([]*dto.LabelPair, 0, len(dtoMetric.Label)) - lpsFromDesc = append(lpsFromDesc, desc.constLabelPairs...) 
- for _, l := range desc.variableLabels { - lpsFromDesc = append(lpsFromDesc, &dto.LabelPair{ - Name: proto.String(l), - }) - } - if len(lpsFromDesc) != len(dtoMetric.Label) { - return fmt.Errorf( - "labels in collected metric %s %s are inconsistent with descriptor %s", - metricFamily.GetName(), dtoMetric, desc, - ) - } - sort.Sort(labelPairSorter(lpsFromDesc)) - for i, lpFromDesc := range lpsFromDesc { - lpFromMetric := dtoMetric.Label[i] - if lpFromDesc.GetName() != lpFromMetric.GetName() || - lpFromDesc.Value != nil && lpFromDesc.GetValue() != lpFromMetric.GetValue() { - return fmt.Errorf( - "labels in collected metric %s %s are inconsistent with descriptor %s", - metricFamily.GetName(), dtoMetric, desc, - ) - } - } - return nil -} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/summary.go b/vendor/github.com/prometheus/client_golang/prometheus/summary.go deleted file mode 100644 index 2980614..0000000 --- a/vendor/github.com/prometheus/client_golang/prometheus/summary.go +++ /dev/null @@ -1,626 +0,0 @@ -// Copyright 2014 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package prometheus - -import ( - "fmt" - "math" - "sort" - "sync" - "time" - - "github.com/beorn7/perks/quantile" - "github.com/golang/protobuf/proto" - - dto "github.com/prometheus/client_model/go" -) - -// quantileLabel is used for the label that defines the quantile in a -// summary. 
-const quantileLabel = "quantile" - -// A Summary captures individual observations from an event or sample stream and -// summarizes them in a manner similar to traditional summary statistics: 1. sum -// of observations, 2. observation count, 3. rank estimations. -// -// A typical use-case is the observation of request latencies. By default, a -// Summary provides the median, the 90th and the 99th percentile of the latency -// as rank estimations. However, the default behavior will change in the -// upcoming v0.10 of the library. There will be no rank estimations at all by -// default. For a sane transition, it is recommended to set the desired rank -// estimations explicitly. -// -// Note that the rank estimations cannot be aggregated in a meaningful way with -// the Prometheus query language (i.e. you cannot average or add them). If you -// need aggregatable quantiles (e.g. you want the 99th percentile latency of all -// queries served across all instances of a service), consider the Histogram -// metric type. See the Prometheus documentation for more details. -// -// To create Summary instances, use NewSummary. -type Summary interface { - Metric - Collector - - // Observe adds a single observation to the summary. - Observe(float64) -} - -// DefObjectives are the default Summary quantile values. -// -// Deprecated: DefObjectives will not be used as the default objectives in -// v0.10 of the library. The default Summary will have no quantiles then. -var ( - DefObjectives = map[float64]float64{0.5: 0.05, 0.9: 0.01, 0.99: 0.001} - - errQuantileLabelNotAllowed = fmt.Errorf( - "%q is not allowed as label name in summaries", quantileLabel, - ) -) - -// Default values for SummaryOpts. -const ( - // DefMaxAge is the default duration for which observations stay - // relevant. - DefMaxAge time.Duration = 10 * time.Minute - // DefAgeBuckets is the default number of buckets used to calculate the - // age of observations. 
- DefAgeBuckets = 5 - // DefBufCap is the standard buffer size for collecting Summary observations. - DefBufCap = 500 -) - -// SummaryOpts bundles the options for creating a Summary metric. It is -// mandatory to set Name to a non-empty string. While all other fields are -// optional and can safely be left at their zero value, it is recommended to set -// a help string and to explicitly set the Objectives field to the desired value -// as the default value will change in the upcoming v0.10 of the library. -type SummaryOpts struct { - // Namespace, Subsystem, and Name are components of the fully-qualified - // name of the Summary (created by joining these components with - // "_"). Only Name is mandatory, the others merely help structuring the - // name. Note that the fully-qualified name of the Summary must be a - // valid Prometheus metric name. - Namespace string - Subsystem string - Name string - - // Help provides information about this Summary. - // - // Metrics with the same fully-qualified name must have the same Help - // string. - Help string - - // ConstLabels are used to attach fixed labels to this metric. Metrics - // with the same fully-qualified name must have the same label names in - // their ConstLabels. - // - // Due to the way a Summary is represented in the Prometheus text format - // and how it is handled by the Prometheus server internally, “quantile” - // is an illegal label name. Construction of a Summary or SummaryVec - // will panic if this label name is used in ConstLabels. - // - // ConstLabels are only used rarely. In particular, do not use them to - // attach the same labels to all your metrics. Those use cases are - // better covered by target labels set by the scraping Prometheus - // server, or by one specific metric (e.g. a build_info or a - // machine_role metric). 
See also - // https://prometheus.io/docs/instrumenting/writing_exporters/#target-labels,-not-static-scraped-labels - ConstLabels Labels - - // Objectives defines the quantile rank estimates with their respective - // absolute error. If Objectives[q] = e, then the value reported for q - // will be the φ-quantile value for some φ between q-e and q+e. The - // default value is DefObjectives. It is used if Objectives is left at - // its zero value (i.e. nil). To create a Summary without Objectives, - // set it to an empty map (i.e. map[float64]float64{}). - // - // Deprecated: Note that the current value of DefObjectives is - // deprecated. It will be replaced by an empty map in v0.10 of the - // library. Please explicitly set Objectives to the desired value. - Objectives map[float64]float64 - - // MaxAge defines the duration for which an observation stays relevant - // for the summary. Must be positive. The default value is DefMaxAge. - MaxAge time.Duration - - // AgeBuckets is the number of buckets used to exclude observations that - // are older than MaxAge from the summary. A higher number has a - // resource penalty, so only increase it if the higher resolution is - // really required. For very high observation rates, you might want to - // reduce the number of age buckets. With only one age bucket, you will - // effectively see a complete reset of the summary each time MaxAge has - // passed. The default value is DefAgeBuckets. - AgeBuckets uint32 - - // BufCap defines the default sample stream buffer size. The default - // value of DefBufCap should suffice for most uses. If there is a need - // to increase the value, a multiple of 500 is recommended (because that - // is the internal buffer size of the underlying package - // "github.com/bmizerany/perks/quantile"). - BufCap uint32 -} - -// Great fuck-up with the sliding-window decay algorithm... 
The Merge method of -// perk/quantile is actually not working as advertised - and it might be -// unfixable, as the underlying algorithm is apparently not capable of merging -// summaries in the first place. To avoid using Merge, we are currently adding -// observations to _each_ age bucket, i.e. the effort to add a sample is -// essentially multiplied by the number of age buckets. When rotating age -// buckets, we empty the previous head stream. On scrape time, we simply take -// the quantiles from the head stream (no merging required). Result: More effort -// on observation time, less effort on scrape time, which is exactly the -// opposite of what we try to accomplish, but at least the results are correct. -// -// The quite elegant previous contraption to merge the age buckets efficiently -// on scrape time (see code up commit 6b9530d72ea715f0ba612c0120e6e09fbf1d49d0) -// can't be used anymore. - -// NewSummary creates a new Summary based on the provided SummaryOpts. -func NewSummary(opts SummaryOpts) Summary { - return newSummary( - NewDesc( - BuildFQName(opts.Namespace, opts.Subsystem, opts.Name), - opts.Help, - nil, - opts.ConstLabels, - ), - opts, - ) -} - -func newSummary(desc *Desc, opts SummaryOpts, labelValues ...string) Summary { - if len(desc.variableLabels) != len(labelValues) { - panic(makeInconsistentCardinalityError(desc.fqName, desc.variableLabels, labelValues)) - } - - for _, n := range desc.variableLabels { - if n == quantileLabel { - panic(errQuantileLabelNotAllowed) - } - } - for _, lp := range desc.constLabelPairs { - if lp.GetName() == quantileLabel { - panic(errQuantileLabelNotAllowed) - } - } - - if opts.Objectives == nil { - opts.Objectives = DefObjectives - } - - if opts.MaxAge < 0 { - panic(fmt.Errorf("illegal max age MaxAge=%v", opts.MaxAge)) - } - if opts.MaxAge == 0 { - opts.MaxAge = DefMaxAge - } - - if opts.AgeBuckets == 0 { - opts.AgeBuckets = DefAgeBuckets - } - - if opts.BufCap == 0 { - opts.BufCap = DefBufCap - } - - s := 
&summary{ - desc: desc, - - objectives: opts.Objectives, - sortedObjectives: make([]float64, 0, len(opts.Objectives)), - - labelPairs: makeLabelPairs(desc, labelValues), - - hotBuf: make([]float64, 0, opts.BufCap), - coldBuf: make([]float64, 0, opts.BufCap), - streamDuration: opts.MaxAge / time.Duration(opts.AgeBuckets), - } - s.headStreamExpTime = time.Now().Add(s.streamDuration) - s.hotBufExpTime = s.headStreamExpTime - - for i := uint32(0); i < opts.AgeBuckets; i++ { - s.streams = append(s.streams, s.newStream()) - } - s.headStream = s.streams[0] - - for qu := range s.objectives { - s.sortedObjectives = append(s.sortedObjectives, qu) - } - sort.Float64s(s.sortedObjectives) - - s.init(s) // Init self-collection. - return s -} - -type summary struct { - selfCollector - - bufMtx sync.Mutex // Protects hotBuf and hotBufExpTime. - mtx sync.Mutex // Protects every other moving part. - // Lock bufMtx before mtx if both are needed. - - desc *Desc - - objectives map[float64]float64 - sortedObjectives []float64 - - labelPairs []*dto.LabelPair - - sum float64 - cnt uint64 - - hotBuf, coldBuf []float64 - - streams []*quantile.Stream - streamDuration time.Duration - headStream *quantile.Stream - headStreamIdx int - headStreamExpTime, hotBufExpTime time.Time -} - -func (s *summary) Desc() *Desc { - return s.desc -} - -func (s *summary) Observe(v float64) { - s.bufMtx.Lock() - defer s.bufMtx.Unlock() - - now := time.Now() - if now.After(s.hotBufExpTime) { - s.asyncFlush(now) - } - s.hotBuf = append(s.hotBuf, v) - if len(s.hotBuf) == cap(s.hotBuf) { - s.asyncFlush(now) - } -} - -func (s *summary) Write(out *dto.Metric) error { - sum := &dto.Summary{} - qs := make([]*dto.Quantile, 0, len(s.objectives)) - - s.bufMtx.Lock() - s.mtx.Lock() - // Swap bufs even if hotBuf is empty to set new hotBufExpTime. 
- s.swapBufs(time.Now()) - s.bufMtx.Unlock() - - s.flushColdBuf() - sum.SampleCount = proto.Uint64(s.cnt) - sum.SampleSum = proto.Float64(s.sum) - - for _, rank := range s.sortedObjectives { - var q float64 - if s.headStream.Count() == 0 { - q = math.NaN() - } else { - q = s.headStream.Query(rank) - } - qs = append(qs, &dto.Quantile{ - Quantile: proto.Float64(rank), - Value: proto.Float64(q), - }) - } - - s.mtx.Unlock() - - if len(qs) > 0 { - sort.Sort(quantSort(qs)) - } - sum.Quantile = qs - - out.Summary = sum - out.Label = s.labelPairs - return nil -} - -func (s *summary) newStream() *quantile.Stream { - return quantile.NewTargeted(s.objectives) -} - -// asyncFlush needs bufMtx locked. -func (s *summary) asyncFlush(now time.Time) { - s.mtx.Lock() - s.swapBufs(now) - - // Unblock the original goroutine that was responsible for the mutation - // that triggered the compaction. But hold onto the global non-buffer - // state mutex until the operation finishes. - go func() { - s.flushColdBuf() - s.mtx.Unlock() - }() -} - -// rotateStreams needs mtx AND bufMtx locked. -func (s *summary) maybeRotateStreams() { - for !s.hotBufExpTime.Equal(s.headStreamExpTime) { - s.headStream.Reset() - s.headStreamIdx++ - if s.headStreamIdx >= len(s.streams) { - s.headStreamIdx = 0 - } - s.headStream = s.streams[s.headStreamIdx] - s.headStreamExpTime = s.headStreamExpTime.Add(s.streamDuration) - } -} - -// flushColdBuf needs mtx locked. -func (s *summary) flushColdBuf() { - for _, v := range s.coldBuf { - for _, stream := range s.streams { - stream.Insert(v) - } - s.cnt++ - s.sum += v - } - s.coldBuf = s.coldBuf[0:0] - s.maybeRotateStreams() -} - -// swapBufs needs mtx AND bufMtx locked, coldBuf must be empty. -func (s *summary) swapBufs(now time.Time) { - if len(s.coldBuf) != 0 { - panic("coldBuf is not empty") - } - s.hotBuf, s.coldBuf = s.coldBuf, s.hotBuf - // hotBuf is now empty and gets new expiration set. 
- for now.After(s.hotBufExpTime) { - s.hotBufExpTime = s.hotBufExpTime.Add(s.streamDuration) - } -} - -type quantSort []*dto.Quantile - -func (s quantSort) Len() int { - return len(s) -} - -func (s quantSort) Swap(i, j int) { - s[i], s[j] = s[j], s[i] -} - -func (s quantSort) Less(i, j int) bool { - return s[i].GetQuantile() < s[j].GetQuantile() -} - -// SummaryVec is a Collector that bundles a set of Summaries that all share the -// same Desc, but have different values for their variable labels. This is used -// if you want to count the same thing partitioned by various dimensions -// (e.g. HTTP request latencies, partitioned by status code and method). Create -// instances with NewSummaryVec. -type SummaryVec struct { - *metricVec -} - -// NewSummaryVec creates a new SummaryVec based on the provided SummaryOpts and -// partitioned by the given label names. -// -// Due to the way a Summary is represented in the Prometheus text format and how -// it is handled by the Prometheus server internally, “quantile” is an illegal -// label name. NewSummaryVec will panic if this label name is used. -func NewSummaryVec(opts SummaryOpts, labelNames []string) *SummaryVec { - for _, ln := range labelNames { - if ln == quantileLabel { - panic(errQuantileLabelNotAllowed) - } - } - desc := NewDesc( - BuildFQName(opts.Namespace, opts.Subsystem, opts.Name), - opts.Help, - labelNames, - opts.ConstLabels, - ) - return &SummaryVec{ - metricVec: newMetricVec(desc, func(lvs ...string) Metric { - return newSummary(desc, opts, lvs...) - }), - } -} - -// GetMetricWithLabelValues returns the Summary for the given slice of label -// values (same order as the VariableLabels in Desc). If that combination of -// label values is accessed for the first time, a new Summary is created. -// -// It is possible to call this method without using the returned Summary to only -// create the new Summary but leave it at its starting value, a Summary without -// any observations. 
-// -// Keeping the Summary for later use is possible (and should be considered if -// performance is critical), but keep in mind that Reset, DeleteLabelValues and -// Delete can be used to delete the Summary from the SummaryVec. In that case, -// the Summary will still exist, but it will not be exported anymore, even if a -// Summary with the same label values is created later. See also the CounterVec -// example. -// -// An error is returned if the number of label values is not the same as the -// number of VariableLabels in Desc (minus any curried labels). -// -// Note that for more than one label value, this method is prone to mistakes -// caused by an incorrect order of arguments. Consider GetMetricWith(Labels) as -// an alternative to avoid that type of mistake. For higher label numbers, the -// latter has a much more readable (albeit more verbose) syntax, but it comes -// with a performance overhead (for creating and processing the Labels map). -// See also the GaugeVec example. -func (v *SummaryVec) GetMetricWithLabelValues(lvs ...string) (Observer, error) { - metric, err := v.metricVec.getMetricWithLabelValues(lvs...) - if metric != nil { - return metric.(Observer), err - } - return nil, err -} - -// GetMetricWith returns the Summary for the given Labels map (the label names -// must match those of the VariableLabels in Desc). If that label map is -// accessed for the first time, a new Summary is created. Implications of -// creating a Summary without using it and keeping the Summary for later use are -// the same as for GetMetricWithLabelValues. -// -// An error is returned if the number and names of the Labels are inconsistent -// with those of the VariableLabels in Desc (minus any curried labels). -// -// This method is used for the same purpose as -// GetMetricWithLabelValues(...string). See there for pros and cons of the two -// methods. 
-func (v *SummaryVec) GetMetricWith(labels Labels) (Observer, error) { - metric, err := v.metricVec.getMetricWith(labels) - if metric != nil { - return metric.(Observer), err - } - return nil, err -} - -// WithLabelValues works as GetMetricWithLabelValues, but panics where -// GetMetricWithLabelValues would have returned an error. Not returning an -// error allows shortcuts like -// myVec.WithLabelValues("404", "GET").Observe(42.21) -func (v *SummaryVec) WithLabelValues(lvs ...string) Observer { - s, err := v.GetMetricWithLabelValues(lvs...) - if err != nil { - panic(err) - } - return s -} - -// With works as GetMetricWith, but panics where GetMetricWithLabels would have -// returned an error. Not returning an error allows shortcuts like -// myVec.With(prometheus.Labels{"code": "404", "method": "GET"}).Observe(42.21) -func (v *SummaryVec) With(labels Labels) Observer { - s, err := v.GetMetricWith(labels) - if err != nil { - panic(err) - } - return s -} - -// CurryWith returns a vector curried with the provided labels, i.e. the -// returned vector has those labels pre-set for all labeled operations performed -// on it. The cardinality of the curried vector is reduced accordingly. The -// order of the remaining labels stays the same (just with the curried labels -// taken out of the sequence – which is relevant for the -// (GetMetric)WithLabelValues methods). It is possible to curry a curried -// vector, but only with labels not yet used for currying before. -// -// The metrics contained in the SummaryVec are shared between the curried and -// uncurried vectors. They are just accessed differently. Curried and uncurried -// vectors behave identically in terms of collection. Only one must be -// registered with a given registry (usually the uncurried version). The Reset -// method deletes all metrics, even if called on a curried vector. 
-func (v *SummaryVec) CurryWith(labels Labels) (ObserverVec, error) { - vec, err := v.curryWith(labels) - if vec != nil { - return &SummaryVec{vec}, err - } - return nil, err -} - -// MustCurryWith works as CurryWith but panics where CurryWith would have -// returned an error. -func (v *SummaryVec) MustCurryWith(labels Labels) ObserverVec { - vec, err := v.CurryWith(labels) - if err != nil { - panic(err) - } - return vec -} - -type constSummary struct { - desc *Desc - count uint64 - sum float64 - quantiles map[float64]float64 - labelPairs []*dto.LabelPair -} - -func (s *constSummary) Desc() *Desc { - return s.desc -} - -func (s *constSummary) Write(out *dto.Metric) error { - sum := &dto.Summary{} - qs := make([]*dto.Quantile, 0, len(s.quantiles)) - - sum.SampleCount = proto.Uint64(s.count) - sum.SampleSum = proto.Float64(s.sum) - - for rank, q := range s.quantiles { - qs = append(qs, &dto.Quantile{ - Quantile: proto.Float64(rank), - Value: proto.Float64(q), - }) - } - - if len(qs) > 0 { - sort.Sort(quantSort(qs)) - } - sum.Quantile = qs - - out.Summary = sum - out.Label = s.labelPairs - - return nil -} - -// NewConstSummary returns a metric representing a Prometheus summary with fixed -// values for the count, sum, and quantiles. As those parameters cannot be -// changed, the returned value does not implement the Summary interface (but -// only the Metric interface). Users of this package will not have much use for -// it in regular operations. However, when implementing custom Collectors, it is -// useful as a throw-away metric that is generated on the fly to send it to -// Prometheus in the Collect method. -// -// quantiles maps ranks to quantile values. For example, a median latency of -// 0.23s and a 99th percentile latency of 0.56s would be expressed as: -// map[float64]float64{0.5: 0.23, 0.99: 0.56} -// -// NewConstSummary returns an error if the length of labelValues is not -// consistent with the variable labels in Desc or if Desc is invalid. 
-func NewConstSummary( - desc *Desc, - count uint64, - sum float64, - quantiles map[float64]float64, - labelValues ...string, -) (Metric, error) { - if desc.err != nil { - return nil, desc.err - } - if err := validateLabelValues(labelValues, len(desc.variableLabels)); err != nil { - return nil, err - } - return &constSummary{ - desc: desc, - count: count, - sum: sum, - quantiles: quantiles, - labelPairs: makeLabelPairs(desc, labelValues), - }, nil -} - -// MustNewConstSummary is a version of NewConstSummary that panics where -// NewConstMetric would have returned an error. -func MustNewConstSummary( - desc *Desc, - count uint64, - sum float64, - quantiles map[float64]float64, - labelValues ...string, -) Metric { - m, err := NewConstSummary(desc, count, sum, quantiles, labelValues...) - if err != nil { - panic(err) - } - return m -} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/timer.go b/vendor/github.com/prometheus/client_golang/prometheus/timer.go deleted file mode 100644 index b8fc5f1..0000000 --- a/vendor/github.com/prometheus/client_golang/prometheus/timer.go +++ /dev/null @@ -1,51 +0,0 @@ -// Copyright 2016 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package prometheus - -import "time" - -// Timer is a helper type to time functions. Use NewTimer to create new -// instances. -type Timer struct { - begin time.Time - observer Observer -} - -// NewTimer creates a new Timer. 
The provided Observer is used to observe a -// duration in seconds. Timer is usually used to time a function call in the -// following way: -// func TimeMe() { -// timer := NewTimer(myHistogram) -// defer timer.ObserveDuration() -// // Do actual work. -// } -func NewTimer(o Observer) *Timer { - return &Timer{ - begin: time.Now(), - observer: o, - } -} - -// ObserveDuration records the duration passed since the Timer was created with -// NewTimer. It calls the Observe method of the Observer provided during -// construction with the duration in seconds as an argument. ObserveDuration is -// usually called with a defer statement. -// -// Note that this method is only guaranteed to never observe negative durations -// if used with Go1.9+. -func (t *Timer) ObserveDuration() { - if t.observer != nil { - t.observer.Observe(time.Since(t.begin).Seconds()) - } -} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/untyped.go b/vendor/github.com/prometheus/client_golang/prometheus/untyped.go deleted file mode 100644 index 0f9ce63..0000000 --- a/vendor/github.com/prometheus/client_golang/prometheus/untyped.go +++ /dev/null @@ -1,42 +0,0 @@ -// Copyright 2014 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package prometheus - -// UntypedOpts is an alias for Opts. See there for doc comments. -type UntypedOpts Opts - -// UntypedFunc works like GaugeFunc but the collected metric is of type -// "Untyped". 
UntypedFunc is useful to mirror an external metric of unknown -// type. -// -// To create UntypedFunc instances, use NewUntypedFunc. -type UntypedFunc interface { - Metric - Collector -} - -// NewUntypedFunc creates a new UntypedFunc based on the provided -// UntypedOpts. The value reported is determined by calling the given function -// from within the Write method. Take into account that metric collection may -// happen concurrently. If that results in concurrent calls to Write, like in -// the case where an UntypedFunc is directly registered with Prometheus, the -// provided function must be concurrency-safe. -func NewUntypedFunc(opts UntypedOpts, function func() float64) UntypedFunc { - return newValueFunc(NewDesc( - BuildFQName(opts.Namespace, opts.Subsystem, opts.Name), - opts.Help, - nil, - opts.ConstLabels, - ), UntypedValue, function) -} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/value.go b/vendor/github.com/prometheus/client_golang/prometheus/value.go deleted file mode 100644 index eb248f1..0000000 --- a/vendor/github.com/prometheus/client_golang/prometheus/value.go +++ /dev/null @@ -1,162 +0,0 @@ -// Copyright 2014 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package prometheus - -import ( - "fmt" - "sort" - - "github.com/golang/protobuf/proto" - - dto "github.com/prometheus/client_model/go" -) - -// ValueType is an enumeration of metric types that represent a simple value. 
-type ValueType int - -// Possible values for the ValueType enum. -const ( - _ ValueType = iota - CounterValue - GaugeValue - UntypedValue -) - -// valueFunc is a generic metric for simple values retrieved on collect time -// from a function. It implements Metric and Collector. Its effective type is -// determined by ValueType. This is a low-level building block used by the -// library to back the implementations of CounterFunc, GaugeFunc, and -// UntypedFunc. -type valueFunc struct { - selfCollector - - desc *Desc - valType ValueType - function func() float64 - labelPairs []*dto.LabelPair -} - -// newValueFunc returns a newly allocated valueFunc with the given Desc and -// ValueType. The value reported is determined by calling the given function -// from within the Write method. Take into account that metric collection may -// happen concurrently. If that results in concurrent calls to Write, like in -// the case where a valueFunc is directly registered with Prometheus, the -// provided function must be concurrency-safe. -func newValueFunc(desc *Desc, valueType ValueType, function func() float64) *valueFunc { - result := &valueFunc{ - desc: desc, - valType: valueType, - function: function, - labelPairs: makeLabelPairs(desc, nil), - } - result.init(result) - return result -} - -func (v *valueFunc) Desc() *Desc { - return v.desc -} - -func (v *valueFunc) Write(out *dto.Metric) error { - return populateMetric(v.valType, v.function(), v.labelPairs, out) -} - -// NewConstMetric returns a metric with one fixed value that cannot be -// changed. Users of this package will not have much use for it in regular -// operations. However, when implementing custom Collectors, it is useful as a -// throw-away metric that is generated on the fly to send it to Prometheus in -// the Collect method. NewConstMetric returns an error if the length of -// labelValues is not consistent with the variable labels in Desc or if Desc is -// invalid. 
-func NewConstMetric(desc *Desc, valueType ValueType, value float64, labelValues ...string) (Metric, error) { - if desc.err != nil { - return nil, desc.err - } - if err := validateLabelValues(labelValues, len(desc.variableLabels)); err != nil { - return nil, err - } - return &constMetric{ - desc: desc, - valType: valueType, - val: value, - labelPairs: makeLabelPairs(desc, labelValues), - }, nil -} - -// MustNewConstMetric is a version of NewConstMetric that panics where -// NewConstMetric would have returned an error. -func MustNewConstMetric(desc *Desc, valueType ValueType, value float64, labelValues ...string) Metric { - m, err := NewConstMetric(desc, valueType, value, labelValues...) - if err != nil { - panic(err) - } - return m -} - -type constMetric struct { - desc *Desc - valType ValueType - val float64 - labelPairs []*dto.LabelPair -} - -func (m *constMetric) Desc() *Desc { - return m.desc -} - -func (m *constMetric) Write(out *dto.Metric) error { - return populateMetric(m.valType, m.val, m.labelPairs, out) -} - -func populateMetric( - t ValueType, - v float64, - labelPairs []*dto.LabelPair, - m *dto.Metric, -) error { - m.Label = labelPairs - switch t { - case CounterValue: - m.Counter = &dto.Counter{Value: proto.Float64(v)} - case GaugeValue: - m.Gauge = &dto.Gauge{Value: proto.Float64(v)} - case UntypedValue: - m.Untyped = &dto.Untyped{Value: proto.Float64(v)} - default: - return fmt.Errorf("encountered unknown type %v", t) - } - return nil -} - -func makeLabelPairs(desc *Desc, labelValues []string) []*dto.LabelPair { - totalLen := len(desc.variableLabels) + len(desc.constLabelPairs) - if totalLen == 0 { - // Super fast path. - return nil - } - if len(desc.variableLabels) == 0 { - // Moderately fast path. 
- return desc.constLabelPairs - } - labelPairs := make([]*dto.LabelPair, 0, totalLen) - for i, n := range desc.variableLabels { - labelPairs = append(labelPairs, &dto.LabelPair{ - Name: proto.String(n), - Value: proto.String(labelValues[i]), - }) - } - labelPairs = append(labelPairs, desc.constLabelPairs...) - sort.Sort(labelPairSorter(labelPairs)) - return labelPairs -} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/vec.go b/vendor/github.com/prometheus/client_golang/prometheus/vec.go deleted file mode 100644 index 14ed9e8..0000000 --- a/vendor/github.com/prometheus/client_golang/prometheus/vec.go +++ /dev/null @@ -1,472 +0,0 @@ -// Copyright 2014 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package prometheus - -import ( - "fmt" - "sync" - - "github.com/prometheus/common/model" -) - -// metricVec is a Collector to bundle metrics of the same name that differ in -// their label values. metricVec is not used directly (and therefore -// unexported). It is used as a building block for implementations of vectors of -// a given metric type, like GaugeVec, CounterVec, SummaryVec, and HistogramVec. -// It also handles label currying. It uses basicMetricVec internally. -type metricVec struct { - *metricMap - - curry []curriedLabelValue - - // hashAdd and hashAddByte can be replaced for testing collision handling. 
- hashAdd func(h uint64, s string) uint64 - hashAddByte func(h uint64, b byte) uint64 -} - -// newMetricVec returns an initialized metricVec. -func newMetricVec(desc *Desc, newMetric func(lvs ...string) Metric) *metricVec { - return &metricVec{ - metricMap: &metricMap{ - metrics: map[uint64][]metricWithLabelValues{}, - desc: desc, - newMetric: newMetric, - }, - hashAdd: hashAdd, - hashAddByte: hashAddByte, - } -} - -// DeleteLabelValues removes the metric where the variable labels are the same -// as those passed in as labels (same order as the VariableLabels in Desc). It -// returns true if a metric was deleted. -// -// It is not an error if the number of label values is not the same as the -// number of VariableLabels in Desc. However, such inconsistent label count can -// never match an actual metric, so the method will always return false in that -// case. -// -// Note that for more than one label value, this method is prone to mistakes -// caused by an incorrect order of arguments. Consider Delete(Labels) as an -// alternative to avoid that type of mistake. For higher label numbers, the -// latter has a much more readable (albeit more verbose) syntax, but it comes -// with a performance overhead (for creating and processing the Labels map). -// See also the CounterVec example. -func (m *metricVec) DeleteLabelValues(lvs ...string) bool { - h, err := m.hashLabelValues(lvs) - if err != nil { - return false - } - - return m.metricMap.deleteByHashWithLabelValues(h, lvs, m.curry) -} - -// Delete deletes the metric where the variable labels are the same as those -// passed in as labels. It returns true if a metric was deleted. -// -// It is not an error if the number and names of the Labels are inconsistent -// with those of the VariableLabels in Desc. However, such inconsistent Labels -// can never match an actual metric, so the method will always return false in -// that case. -// -// This method is used for the same purpose as DeleteLabelValues(...string). 
See -// there for pros and cons of the two methods. -func (m *metricVec) Delete(labels Labels) bool { - h, err := m.hashLabels(labels) - if err != nil { - return false - } - - return m.metricMap.deleteByHashWithLabels(h, labels, m.curry) -} - -func (m *metricVec) curryWith(labels Labels) (*metricVec, error) { - var ( - newCurry []curriedLabelValue - oldCurry = m.curry - iCurry int - ) - for i, label := range m.desc.variableLabels { - val, ok := labels[label] - if iCurry < len(oldCurry) && oldCurry[iCurry].index == i { - if ok { - return nil, fmt.Errorf("label name %q is already curried", label) - } - newCurry = append(newCurry, oldCurry[iCurry]) - iCurry++ - } else { - if !ok { - continue // Label stays uncurried. - } - newCurry = append(newCurry, curriedLabelValue{i, val}) - } - } - if l := len(oldCurry) + len(labels) - len(newCurry); l > 0 { - return nil, fmt.Errorf("%d unknown label(s) found during currying", l) - } - - return &metricVec{ - metricMap: m.metricMap, - curry: newCurry, - hashAdd: m.hashAdd, - hashAddByte: m.hashAddByte, - }, nil -} - -func (m *metricVec) getMetricWithLabelValues(lvs ...string) (Metric, error) { - h, err := m.hashLabelValues(lvs) - if err != nil { - return nil, err - } - - return m.metricMap.getOrCreateMetricWithLabelValues(h, lvs, m.curry), nil -} - -func (m *metricVec) getMetricWith(labels Labels) (Metric, error) { - h, err := m.hashLabels(labels) - if err != nil { - return nil, err - } - - return m.metricMap.getOrCreateMetricWithLabels(h, labels, m.curry), nil -} - -func (m *metricVec) hashLabelValues(vals []string) (uint64, error) { - if err := validateLabelValues(vals, len(m.desc.variableLabels)-len(m.curry)); err != nil { - return 0, err - } - - var ( - h = hashNew() - curry = m.curry - iVals, iCurry int - ) - for i := 0; i < len(m.desc.variableLabels); i++ { - if iCurry < len(curry) && curry[iCurry].index == i { - h = m.hashAdd(h, curry[iCurry].value) - iCurry++ - } else { - h = m.hashAdd(h, vals[iVals]) - iVals++ - } - h = 
m.hashAddByte(h, model.SeparatorByte) - } - return h, nil -} - -func (m *metricVec) hashLabels(labels Labels) (uint64, error) { - if err := validateValuesInLabels(labels, len(m.desc.variableLabels)-len(m.curry)); err != nil { - return 0, err - } - - var ( - h = hashNew() - curry = m.curry - iCurry int - ) - for i, label := range m.desc.variableLabels { - val, ok := labels[label] - if iCurry < len(curry) && curry[iCurry].index == i { - if ok { - return 0, fmt.Errorf("label name %q is already curried", label) - } - h = m.hashAdd(h, curry[iCurry].value) - iCurry++ - } else { - if !ok { - return 0, fmt.Errorf("label name %q missing in label map", label) - } - h = m.hashAdd(h, val) - } - h = m.hashAddByte(h, model.SeparatorByte) - } - return h, nil -} - -// metricWithLabelValues provides the metric and its label values for -// disambiguation on hash collision. -type metricWithLabelValues struct { - values []string - metric Metric -} - -// curriedLabelValue sets the curried value for a label at the given index. -type curriedLabelValue struct { - index int - value string -} - -// metricMap is a helper for metricVec and shared between differently curried -// metricVecs. -type metricMap struct { - mtx sync.RWMutex // Protects metrics. - metrics map[uint64][]metricWithLabelValues - desc *Desc - newMetric func(labelValues ...string) Metric -} - -// Describe implements Collector. It will send exactly one Desc to the provided -// channel. -func (m *metricMap) Describe(ch chan<- *Desc) { - ch <- m.desc -} - -// Collect implements Collector. -func (m *metricMap) Collect(ch chan<- Metric) { - m.mtx.RLock() - defer m.mtx.RUnlock() - - for _, metrics := range m.metrics { - for _, metric := range metrics { - ch <- metric.metric - } - } -} - -// Reset deletes all metrics in this vector. 
-func (m *metricMap) Reset() { - m.mtx.Lock() - defer m.mtx.Unlock() - - for h := range m.metrics { - delete(m.metrics, h) - } -} - -// deleteByHashWithLabelValues removes the metric from the hash bucket h. If -// there are multiple matches in the bucket, use lvs to select a metric and -// remove only that metric. -func (m *metricMap) deleteByHashWithLabelValues( - h uint64, lvs []string, curry []curriedLabelValue, -) bool { - m.mtx.Lock() - defer m.mtx.Unlock() - - metrics, ok := m.metrics[h] - if !ok { - return false - } - - i := findMetricWithLabelValues(metrics, lvs, curry) - if i >= len(metrics) { - return false - } - - if len(metrics) > 1 { - m.metrics[h] = append(metrics[:i], metrics[i+1:]...) - } else { - delete(m.metrics, h) - } - return true -} - -// deleteByHashWithLabels removes the metric from the hash bucket h. If there -// are multiple matches in the bucket, use lvs to select a metric and remove -// only that metric. -func (m *metricMap) deleteByHashWithLabels( - h uint64, labels Labels, curry []curriedLabelValue, -) bool { - m.mtx.Lock() - defer m.mtx.Unlock() - - metrics, ok := m.metrics[h] - if !ok { - return false - } - i := findMetricWithLabels(m.desc, metrics, labels, curry) - if i >= len(metrics) { - return false - } - - if len(metrics) > 1 { - m.metrics[h] = append(metrics[:i], metrics[i+1:]...) - } else { - delete(m.metrics, h) - } - return true -} - -// getOrCreateMetricWithLabelValues retrieves the metric by hash and label value -// or creates it and returns the new one. -// -// This function holds the mutex. 
-func (m *metricMap) getOrCreateMetricWithLabelValues( - hash uint64, lvs []string, curry []curriedLabelValue, -) Metric { - m.mtx.RLock() - metric, ok := m.getMetricWithHashAndLabelValues(hash, lvs, curry) - m.mtx.RUnlock() - if ok { - return metric - } - - m.mtx.Lock() - defer m.mtx.Unlock() - metric, ok = m.getMetricWithHashAndLabelValues(hash, lvs, curry) - if !ok { - inlinedLVs := inlineLabelValues(lvs, curry) - metric = m.newMetric(inlinedLVs...) - m.metrics[hash] = append(m.metrics[hash], metricWithLabelValues{values: inlinedLVs, metric: metric}) - } - return metric -} - -// getOrCreateMetricWithLabelValues retrieves the metric by hash and label value -// or creates it and returns the new one. -// -// This function holds the mutex. -func (m *metricMap) getOrCreateMetricWithLabels( - hash uint64, labels Labels, curry []curriedLabelValue, -) Metric { - m.mtx.RLock() - metric, ok := m.getMetricWithHashAndLabels(hash, labels, curry) - m.mtx.RUnlock() - if ok { - return metric - } - - m.mtx.Lock() - defer m.mtx.Unlock() - metric, ok = m.getMetricWithHashAndLabels(hash, labels, curry) - if !ok { - lvs := extractLabelValues(m.desc, labels, curry) - metric = m.newMetric(lvs...) - m.metrics[hash] = append(m.metrics[hash], metricWithLabelValues{values: lvs, metric: metric}) - } - return metric -} - -// getMetricWithHashAndLabelValues gets a metric while handling possible -// collisions in the hash space. Must be called while holding the read mutex. -func (m *metricMap) getMetricWithHashAndLabelValues( - h uint64, lvs []string, curry []curriedLabelValue, -) (Metric, bool) { - metrics, ok := m.metrics[h] - if ok { - if i := findMetricWithLabelValues(metrics, lvs, curry); i < len(metrics) { - return metrics[i].metric, true - } - } - return nil, false -} - -// getMetricWithHashAndLabels gets a metric while handling possible collisions in -// the hash space. Must be called while holding read mutex. 
-func (m *metricMap) getMetricWithHashAndLabels( - h uint64, labels Labels, curry []curriedLabelValue, -) (Metric, bool) { - metrics, ok := m.metrics[h] - if ok { - if i := findMetricWithLabels(m.desc, metrics, labels, curry); i < len(metrics) { - return metrics[i].metric, true - } - } - return nil, false -} - -// findMetricWithLabelValues returns the index of the matching metric or -// len(metrics) if not found. -func findMetricWithLabelValues( - metrics []metricWithLabelValues, lvs []string, curry []curriedLabelValue, -) int { - for i, metric := range metrics { - if matchLabelValues(metric.values, lvs, curry) { - return i - } - } - return len(metrics) -} - -// findMetricWithLabels returns the index of the matching metric or len(metrics) -// if not found. -func findMetricWithLabels( - desc *Desc, metrics []metricWithLabelValues, labels Labels, curry []curriedLabelValue, -) int { - for i, metric := range metrics { - if matchLabels(desc, metric.values, labels, curry) { - return i - } - } - return len(metrics) -} - -func matchLabelValues(values []string, lvs []string, curry []curriedLabelValue) bool { - if len(values) != len(lvs)+len(curry) { - return false - } - var iLVs, iCurry int - for i, v := range values { - if iCurry < len(curry) && curry[iCurry].index == i { - if v != curry[iCurry].value { - return false - } - iCurry++ - continue - } - if v != lvs[iLVs] { - return false - } - iLVs++ - } - return true -} - -func matchLabels(desc *Desc, values []string, labels Labels, curry []curriedLabelValue) bool { - if len(values) != len(labels)+len(curry) { - return false - } - iCurry := 0 - for i, k := range desc.variableLabels { - if iCurry < len(curry) && curry[iCurry].index == i { - if values[i] != curry[iCurry].value { - return false - } - iCurry++ - continue - } - if values[i] != labels[k] { - return false - } - } - return true -} - -func extractLabelValues(desc *Desc, labels Labels, curry []curriedLabelValue) []string { - labelValues := make([]string, 
len(labels)+len(curry)) - iCurry := 0 - for i, k := range desc.variableLabels { - if iCurry < len(curry) && curry[iCurry].index == i { - labelValues[i] = curry[iCurry].value - iCurry++ - continue - } - labelValues[i] = labels[k] - } - return labelValues -} - -func inlineLabelValues(lvs []string, curry []curriedLabelValue) []string { - labelValues := make([]string, len(lvs)+len(curry)) - var iCurry, iLVs int - for i := range labelValues { - if iCurry < len(curry) && curry[iCurry].index == i { - labelValues[i] = curry[iCurry].value - iCurry++ - continue - } - labelValues[i] = lvs[iLVs] - iLVs++ - } - return labelValues -} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/wrap.go b/vendor/github.com/prometheus/client_golang/prometheus/wrap.go deleted file mode 100644 index 49159bf..0000000 --- a/vendor/github.com/prometheus/client_golang/prometheus/wrap.go +++ /dev/null @@ -1,179 +0,0 @@ -// Copyright 2018 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package prometheus - -import ( - "fmt" - "sort" - - "github.com/golang/protobuf/proto" - - dto "github.com/prometheus/client_model/go" -) - -// WrapRegistererWith returns a Registerer wrapping the provided -// Registerer. Collectors registered with the returned Registerer will be -// registered with the wrapped Registerer in a modified way. The modified -// Collector adds the provided Labels to all Metrics it collects (as -// ConstLabels). 
The Metrics collected by the unmodified Collector must not -// duplicate any of those labels. -// -// WrapRegistererWith provides a way to add fixed labels to a subset of -// Collectors. It should not be used to add fixed labels to all metrics exposed. -// -// The Collector example demonstrates a use of WrapRegistererWith. -func WrapRegistererWith(labels Labels, reg Registerer) Registerer { - return &wrappingRegisterer{ - wrappedRegisterer: reg, - labels: labels, - } -} - -// WrapRegistererWithPrefix returns a Registerer wrapping the provided -// Registerer. Collectors registered with the returned Registerer will be -// registered with the wrapped Registerer in a modified way. The modified -// Collector adds the provided prefix to the name of all Metrics it collects. -// -// WrapRegistererWithPrefix is useful to have one place to prefix all metrics of -// a sub-system. To make this work, register metrics of the sub-system with the -// wrapping Registerer returned by WrapRegistererWithPrefix. It is rarely useful -// to use the same prefix for all metrics exposed. In particular, do not prefix -// metric names that are standardized across applications, as that would break -// horizontal monitoring, for example the metrics provided by the Go collector -// (see NewGoCollector) and the process collector (see NewProcessCollector). (In -// fact, those metrics are already prefixed with “go_” or “process_”, -// respectively.) 
-func WrapRegistererWithPrefix(prefix string, reg Registerer) Registerer { - return &wrappingRegisterer{ - wrappedRegisterer: reg, - prefix: prefix, - } -} - -type wrappingRegisterer struct { - wrappedRegisterer Registerer - prefix string - labels Labels -} - -func (r *wrappingRegisterer) Register(c Collector) error { - return r.wrappedRegisterer.Register(&wrappingCollector{ - wrappedCollector: c, - prefix: r.prefix, - labels: r.labels, - }) -} - -func (r *wrappingRegisterer) MustRegister(cs ...Collector) { - for _, c := range cs { - if err := r.Register(c); err != nil { - panic(err) - } - } -} - -func (r *wrappingRegisterer) Unregister(c Collector) bool { - return r.wrappedRegisterer.Unregister(&wrappingCollector{ - wrappedCollector: c, - prefix: r.prefix, - labels: r.labels, - }) -} - -type wrappingCollector struct { - wrappedCollector Collector - prefix string - labels Labels -} - -func (c *wrappingCollector) Collect(ch chan<- Metric) { - wrappedCh := make(chan Metric) - go func() { - c.wrappedCollector.Collect(wrappedCh) - close(wrappedCh) - }() - for m := range wrappedCh { - ch <- &wrappingMetric{ - wrappedMetric: m, - prefix: c.prefix, - labels: c.labels, - } - } -} - -func (c *wrappingCollector) Describe(ch chan<- *Desc) { - wrappedCh := make(chan *Desc) - go func() { - c.wrappedCollector.Describe(wrappedCh) - close(wrappedCh) - }() - for desc := range wrappedCh { - ch <- wrapDesc(desc, c.prefix, c.labels) - } -} - -type wrappingMetric struct { - wrappedMetric Metric - prefix string - labels Labels -} - -func (m *wrappingMetric) Desc() *Desc { - return wrapDesc(m.wrappedMetric.Desc(), m.prefix, m.labels) -} - -func (m *wrappingMetric) Write(out *dto.Metric) error { - if err := m.wrappedMetric.Write(out); err != nil { - return err - } - if len(m.labels) == 0 { - // No wrapping labels. 
- return nil - } - for ln, lv := range m.labels { - out.Label = append(out.Label, &dto.LabelPair{ - Name: proto.String(ln), - Value: proto.String(lv), - }) - } - sort.Sort(labelPairSorter(out.Label)) - return nil -} - -func wrapDesc(desc *Desc, prefix string, labels Labels) *Desc { - constLabels := Labels{} - for _, lp := range desc.constLabelPairs { - constLabels[*lp.Name] = *lp.Value - } - for ln, lv := range labels { - if _, alreadyUsed := constLabels[ln]; alreadyUsed { - return &Desc{ - fqName: desc.fqName, - help: desc.help, - variableLabels: desc.variableLabels, - constLabelPairs: desc.constLabelPairs, - err: fmt.Errorf("attempted wrapping with already existing label name %q", ln), - } - } - constLabels[ln] = lv - } - // NewDesc will do remaining validations. - newDesc := NewDesc(prefix+desc.fqName, desc.help, desc.variableLabels, constLabels) - // Propagate errors if there was any. This will override any errer - // created by NewDesc above, i.e. earlier errors get precedence. - if desc.err != nil { - newDesc.err = desc.err - } - return newDesc -} diff --git a/vendor/github.com/prometheus/client_model/LICENSE b/vendor/github.com/prometheus/client_model/LICENSE deleted file mode 100644 index 261eeb9..0000000 --- a/vendor/github.com/prometheus/client_model/LICENSE +++ /dev/null @@ -1,201 +0,0 @@ - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. 
For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. 
For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. 
If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. 
You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. 
Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. 
- - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. diff --git a/vendor/github.com/prometheus/client_model/NOTICE b/vendor/github.com/prometheus/client_model/NOTICE deleted file mode 100644 index 20110e4..0000000 --- a/vendor/github.com/prometheus/client_model/NOTICE +++ /dev/null @@ -1,5 +0,0 @@ -Data model artifacts for Prometheus. -Copyright 2012-2015 The Prometheus Authors - -This product includes software developed at -SoundCloud Ltd. (http://soundcloud.com/). diff --git a/vendor/github.com/prometheus/client_model/go/metrics.pb.go b/vendor/github.com/prometheus/client_model/go/metrics.pb.go deleted file mode 100644 index 961f1fb..0000000 --- a/vendor/github.com/prometheus/client_model/go/metrics.pb.go +++ /dev/null @@ -1,633 +0,0 @@ -// Code generated by protoc-gen-go. DO NOT EDIT. -// source: metrics.proto - -package io_prometheus_client // import "github.com/prometheus/client_model/go" - -import ( - fmt "fmt" - - proto "github.com/golang/protobuf/proto" - - math "math" -) - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. 
-const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package - -type MetricType int32 - -const ( - MetricType_COUNTER MetricType = 0 - MetricType_GAUGE MetricType = 1 - MetricType_SUMMARY MetricType = 2 - MetricType_UNTYPED MetricType = 3 - MetricType_HISTOGRAM MetricType = 4 -) - -var MetricType_name = map[int32]string{ - 0: "COUNTER", - 1: "GAUGE", - 2: "SUMMARY", - 3: "UNTYPED", - 4: "HISTOGRAM", -} -var MetricType_value = map[string]int32{ - "COUNTER": 0, - "GAUGE": 1, - "SUMMARY": 2, - "UNTYPED": 3, - "HISTOGRAM": 4, -} - -func (x MetricType) Enum() *MetricType { - p := new(MetricType) - *p = x - return p -} -func (x MetricType) String() string { - return proto.EnumName(MetricType_name, int32(x)) -} -func (x *MetricType) UnmarshalJSON(data []byte) error { - value, err := proto.UnmarshalJSONEnum(MetricType_value, data, "MetricType") - if err != nil { - return err - } - *x = MetricType(value) - return nil -} -func (MetricType) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_metrics_c97c9a2b9560cb8f, []int{0} -} - -type LabelPair struct { - Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` - Value *string `protobuf:"bytes,2,opt,name=value" json:"value,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *LabelPair) Reset() { *m = LabelPair{} } -func (m *LabelPair) String() string { return proto.CompactTextString(m) } -func (*LabelPair) ProtoMessage() {} -func (*LabelPair) Descriptor() ([]byte, []int) { - return fileDescriptor_metrics_c97c9a2b9560cb8f, []int{0} -} -func (m *LabelPair) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_LabelPair.Unmarshal(m, b) -} -func (m *LabelPair) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_LabelPair.Marshal(b, m, deterministic) -} -func (dst *LabelPair) XXX_Merge(src proto.Message) { - xxx_messageInfo_LabelPair.Merge(dst, src) -} -func (m *LabelPair) 
XXX_Size() int { - return xxx_messageInfo_LabelPair.Size(m) -} -func (m *LabelPair) XXX_DiscardUnknown() { - xxx_messageInfo_LabelPair.DiscardUnknown(m) -} - -var xxx_messageInfo_LabelPair proto.InternalMessageInfo - -func (m *LabelPair) GetName() string { - if m != nil && m.Name != nil { - return *m.Name - } - return "" -} - -func (m *LabelPair) GetValue() string { - if m != nil && m.Value != nil { - return *m.Value - } - return "" -} - -type Gauge struct { - Value *float64 `protobuf:"fixed64,1,opt,name=value" json:"value,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *Gauge) Reset() { *m = Gauge{} } -func (m *Gauge) String() string { return proto.CompactTextString(m) } -func (*Gauge) ProtoMessage() {} -func (*Gauge) Descriptor() ([]byte, []int) { - return fileDescriptor_metrics_c97c9a2b9560cb8f, []int{1} -} -func (m *Gauge) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_Gauge.Unmarshal(m, b) -} -func (m *Gauge) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_Gauge.Marshal(b, m, deterministic) -} -func (dst *Gauge) XXX_Merge(src proto.Message) { - xxx_messageInfo_Gauge.Merge(dst, src) -} -func (m *Gauge) XXX_Size() int { - return xxx_messageInfo_Gauge.Size(m) -} -func (m *Gauge) XXX_DiscardUnknown() { - xxx_messageInfo_Gauge.DiscardUnknown(m) -} - -var xxx_messageInfo_Gauge proto.InternalMessageInfo - -func (m *Gauge) GetValue() float64 { - if m != nil && m.Value != nil { - return *m.Value - } - return 0 -} - -type Counter struct { - Value *float64 `protobuf:"fixed64,1,opt,name=value" json:"value,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *Counter) Reset() { *m = Counter{} } -func (m *Counter) String() string { return proto.CompactTextString(m) } -func (*Counter) ProtoMessage() {} -func (*Counter) Descriptor() ([]byte, []int) { - 
return fileDescriptor_metrics_c97c9a2b9560cb8f, []int{2} -} -func (m *Counter) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_Counter.Unmarshal(m, b) -} -func (m *Counter) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_Counter.Marshal(b, m, deterministic) -} -func (dst *Counter) XXX_Merge(src proto.Message) { - xxx_messageInfo_Counter.Merge(dst, src) -} -func (m *Counter) XXX_Size() int { - return xxx_messageInfo_Counter.Size(m) -} -func (m *Counter) XXX_DiscardUnknown() { - xxx_messageInfo_Counter.DiscardUnknown(m) -} - -var xxx_messageInfo_Counter proto.InternalMessageInfo - -func (m *Counter) GetValue() float64 { - if m != nil && m.Value != nil { - return *m.Value - } - return 0 -} - -type Quantile struct { - Quantile *float64 `protobuf:"fixed64,1,opt,name=quantile" json:"quantile,omitempty"` - Value *float64 `protobuf:"fixed64,2,opt,name=value" json:"value,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *Quantile) Reset() { *m = Quantile{} } -func (m *Quantile) String() string { return proto.CompactTextString(m) } -func (*Quantile) ProtoMessage() {} -func (*Quantile) Descriptor() ([]byte, []int) { - return fileDescriptor_metrics_c97c9a2b9560cb8f, []int{3} -} -func (m *Quantile) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_Quantile.Unmarshal(m, b) -} -func (m *Quantile) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_Quantile.Marshal(b, m, deterministic) -} -func (dst *Quantile) XXX_Merge(src proto.Message) { - xxx_messageInfo_Quantile.Merge(dst, src) -} -func (m *Quantile) XXX_Size() int { - return xxx_messageInfo_Quantile.Size(m) -} -func (m *Quantile) XXX_DiscardUnknown() { - xxx_messageInfo_Quantile.DiscardUnknown(m) -} - -var xxx_messageInfo_Quantile proto.InternalMessageInfo - -func (m *Quantile) GetQuantile() float64 { - if m != nil && m.Quantile != nil { - return 
*m.Quantile - } - return 0 -} - -func (m *Quantile) GetValue() float64 { - if m != nil && m.Value != nil { - return *m.Value - } - return 0 -} - -type Summary struct { - SampleCount *uint64 `protobuf:"varint,1,opt,name=sample_count,json=sampleCount" json:"sample_count,omitempty"` - SampleSum *float64 `protobuf:"fixed64,2,opt,name=sample_sum,json=sampleSum" json:"sample_sum,omitempty"` - Quantile []*Quantile `protobuf:"bytes,3,rep,name=quantile" json:"quantile,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *Summary) Reset() { *m = Summary{} } -func (m *Summary) String() string { return proto.CompactTextString(m) } -func (*Summary) ProtoMessage() {} -func (*Summary) Descriptor() ([]byte, []int) { - return fileDescriptor_metrics_c97c9a2b9560cb8f, []int{4} -} -func (m *Summary) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_Summary.Unmarshal(m, b) -} -func (m *Summary) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_Summary.Marshal(b, m, deterministic) -} -func (dst *Summary) XXX_Merge(src proto.Message) { - xxx_messageInfo_Summary.Merge(dst, src) -} -func (m *Summary) XXX_Size() int { - return xxx_messageInfo_Summary.Size(m) -} -func (m *Summary) XXX_DiscardUnknown() { - xxx_messageInfo_Summary.DiscardUnknown(m) -} - -var xxx_messageInfo_Summary proto.InternalMessageInfo - -func (m *Summary) GetSampleCount() uint64 { - if m != nil && m.SampleCount != nil { - return *m.SampleCount - } - return 0 -} - -func (m *Summary) GetSampleSum() float64 { - if m != nil && m.SampleSum != nil { - return *m.SampleSum - } - return 0 -} - -func (m *Summary) GetQuantile() []*Quantile { - if m != nil { - return m.Quantile - } - return nil -} - -type Untyped struct { - Value *float64 `protobuf:"fixed64,1,opt,name=value" json:"value,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 
`json:"-"` -} - -func (m *Untyped) Reset() { *m = Untyped{} } -func (m *Untyped) String() string { return proto.CompactTextString(m) } -func (*Untyped) ProtoMessage() {} -func (*Untyped) Descriptor() ([]byte, []int) { - return fileDescriptor_metrics_c97c9a2b9560cb8f, []int{5} -} -func (m *Untyped) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_Untyped.Unmarshal(m, b) -} -func (m *Untyped) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_Untyped.Marshal(b, m, deterministic) -} -func (dst *Untyped) XXX_Merge(src proto.Message) { - xxx_messageInfo_Untyped.Merge(dst, src) -} -func (m *Untyped) XXX_Size() int { - return xxx_messageInfo_Untyped.Size(m) -} -func (m *Untyped) XXX_DiscardUnknown() { - xxx_messageInfo_Untyped.DiscardUnknown(m) -} - -var xxx_messageInfo_Untyped proto.InternalMessageInfo - -func (m *Untyped) GetValue() float64 { - if m != nil && m.Value != nil { - return *m.Value - } - return 0 -} - -type Histogram struct { - SampleCount *uint64 `protobuf:"varint,1,opt,name=sample_count,json=sampleCount" json:"sample_count,omitempty"` - SampleSum *float64 `protobuf:"fixed64,2,opt,name=sample_sum,json=sampleSum" json:"sample_sum,omitempty"` - Bucket []*Bucket `protobuf:"bytes,3,rep,name=bucket" json:"bucket,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *Histogram) Reset() { *m = Histogram{} } -func (m *Histogram) String() string { return proto.CompactTextString(m) } -func (*Histogram) ProtoMessage() {} -func (*Histogram) Descriptor() ([]byte, []int) { - return fileDescriptor_metrics_c97c9a2b9560cb8f, []int{6} -} -func (m *Histogram) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_Histogram.Unmarshal(m, b) -} -func (m *Histogram) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_Histogram.Marshal(b, m, deterministic) -} -func (dst *Histogram) XXX_Merge(src proto.Message) { - 
xxx_messageInfo_Histogram.Merge(dst, src) -} -func (m *Histogram) XXX_Size() int { - return xxx_messageInfo_Histogram.Size(m) -} -func (m *Histogram) XXX_DiscardUnknown() { - xxx_messageInfo_Histogram.DiscardUnknown(m) -} - -var xxx_messageInfo_Histogram proto.InternalMessageInfo - -func (m *Histogram) GetSampleCount() uint64 { - if m != nil && m.SampleCount != nil { - return *m.SampleCount - } - return 0 -} - -func (m *Histogram) GetSampleSum() float64 { - if m != nil && m.SampleSum != nil { - return *m.SampleSum - } - return 0 -} - -func (m *Histogram) GetBucket() []*Bucket { - if m != nil { - return m.Bucket - } - return nil -} - -type Bucket struct { - CumulativeCount *uint64 `protobuf:"varint,1,opt,name=cumulative_count,json=cumulativeCount" json:"cumulative_count,omitempty"` - UpperBound *float64 `protobuf:"fixed64,2,opt,name=upper_bound,json=upperBound" json:"upper_bound,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *Bucket) Reset() { *m = Bucket{} } -func (m *Bucket) String() string { return proto.CompactTextString(m) } -func (*Bucket) ProtoMessage() {} -func (*Bucket) Descriptor() ([]byte, []int) { - return fileDescriptor_metrics_c97c9a2b9560cb8f, []int{7} -} -func (m *Bucket) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_Bucket.Unmarshal(m, b) -} -func (m *Bucket) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_Bucket.Marshal(b, m, deterministic) -} -func (dst *Bucket) XXX_Merge(src proto.Message) { - xxx_messageInfo_Bucket.Merge(dst, src) -} -func (m *Bucket) XXX_Size() int { - return xxx_messageInfo_Bucket.Size(m) -} -func (m *Bucket) XXX_DiscardUnknown() { - xxx_messageInfo_Bucket.DiscardUnknown(m) -} - -var xxx_messageInfo_Bucket proto.InternalMessageInfo - -func (m *Bucket) GetCumulativeCount() uint64 { - if m != nil && m.CumulativeCount != nil { - return *m.CumulativeCount - } - return 0 -} - -func (m *Bucket) 
GetUpperBound() float64 { - if m != nil && m.UpperBound != nil { - return *m.UpperBound - } - return 0 -} - -type Metric struct { - Label []*LabelPair `protobuf:"bytes,1,rep,name=label" json:"label,omitempty"` - Gauge *Gauge `protobuf:"bytes,2,opt,name=gauge" json:"gauge,omitempty"` - Counter *Counter `protobuf:"bytes,3,opt,name=counter" json:"counter,omitempty"` - Summary *Summary `protobuf:"bytes,4,opt,name=summary" json:"summary,omitempty"` - Untyped *Untyped `protobuf:"bytes,5,opt,name=untyped" json:"untyped,omitempty"` - Histogram *Histogram `protobuf:"bytes,7,opt,name=histogram" json:"histogram,omitempty"` - TimestampMs *int64 `protobuf:"varint,6,opt,name=timestamp_ms,json=timestampMs" json:"timestamp_ms,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *Metric) Reset() { *m = Metric{} } -func (m *Metric) String() string { return proto.CompactTextString(m) } -func (*Metric) ProtoMessage() {} -func (*Metric) Descriptor() ([]byte, []int) { - return fileDescriptor_metrics_c97c9a2b9560cb8f, []int{8} -} -func (m *Metric) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_Metric.Unmarshal(m, b) -} -func (m *Metric) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_Metric.Marshal(b, m, deterministic) -} -func (dst *Metric) XXX_Merge(src proto.Message) { - xxx_messageInfo_Metric.Merge(dst, src) -} -func (m *Metric) XXX_Size() int { - return xxx_messageInfo_Metric.Size(m) -} -func (m *Metric) XXX_DiscardUnknown() { - xxx_messageInfo_Metric.DiscardUnknown(m) -} - -var xxx_messageInfo_Metric proto.InternalMessageInfo - -func (m *Metric) GetLabel() []*LabelPair { - if m != nil { - return m.Label - } - return nil -} - -func (m *Metric) GetGauge() *Gauge { - if m != nil { - return m.Gauge - } - return nil -} - -func (m *Metric) GetCounter() *Counter { - if m != nil { - return m.Counter - } - return nil -} - -func (m *Metric) GetSummary() 
*Summary { - if m != nil { - return m.Summary - } - return nil -} - -func (m *Metric) GetUntyped() *Untyped { - if m != nil { - return m.Untyped - } - return nil -} - -func (m *Metric) GetHistogram() *Histogram { - if m != nil { - return m.Histogram - } - return nil -} - -func (m *Metric) GetTimestampMs() int64 { - if m != nil && m.TimestampMs != nil { - return *m.TimestampMs - } - return 0 -} - -type MetricFamily struct { - Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` - Help *string `protobuf:"bytes,2,opt,name=help" json:"help,omitempty"` - Type *MetricType `protobuf:"varint,3,opt,name=type,enum=io.prometheus.client.MetricType" json:"type,omitempty"` - Metric []*Metric `protobuf:"bytes,4,rep,name=metric" json:"metric,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *MetricFamily) Reset() { *m = MetricFamily{} } -func (m *MetricFamily) String() string { return proto.CompactTextString(m) } -func (*MetricFamily) ProtoMessage() {} -func (*MetricFamily) Descriptor() ([]byte, []int) { - return fileDescriptor_metrics_c97c9a2b9560cb8f, []int{9} -} -func (m *MetricFamily) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_MetricFamily.Unmarshal(m, b) -} -func (m *MetricFamily) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_MetricFamily.Marshal(b, m, deterministic) -} -func (dst *MetricFamily) XXX_Merge(src proto.Message) { - xxx_messageInfo_MetricFamily.Merge(dst, src) -} -func (m *MetricFamily) XXX_Size() int { - return xxx_messageInfo_MetricFamily.Size(m) -} -func (m *MetricFamily) XXX_DiscardUnknown() { - xxx_messageInfo_MetricFamily.DiscardUnknown(m) -} - -var xxx_messageInfo_MetricFamily proto.InternalMessageInfo - -func (m *MetricFamily) GetName() string { - if m != nil && m.Name != nil { - return *m.Name - } - return "" -} - -func (m *MetricFamily) GetHelp() string { - if m != nil && m.Help != nil { - return 
*m.Help - } - return "" -} - -func (m *MetricFamily) GetType() MetricType { - if m != nil && m.Type != nil { - return *m.Type - } - return MetricType_COUNTER -} - -func (m *MetricFamily) GetMetric() []*Metric { - if m != nil { - return m.Metric - } - return nil -} - -func init() { - proto.RegisterType((*LabelPair)(nil), "io.prometheus.client.LabelPair") - proto.RegisterType((*Gauge)(nil), "io.prometheus.client.Gauge") - proto.RegisterType((*Counter)(nil), "io.prometheus.client.Counter") - proto.RegisterType((*Quantile)(nil), "io.prometheus.client.Quantile") - proto.RegisterType((*Summary)(nil), "io.prometheus.client.Summary") - proto.RegisterType((*Untyped)(nil), "io.prometheus.client.Untyped") - proto.RegisterType((*Histogram)(nil), "io.prometheus.client.Histogram") - proto.RegisterType((*Bucket)(nil), "io.prometheus.client.Bucket") - proto.RegisterType((*Metric)(nil), "io.prometheus.client.Metric") - proto.RegisterType((*MetricFamily)(nil), "io.prometheus.client.MetricFamily") - proto.RegisterEnum("io.prometheus.client.MetricType", MetricType_name, MetricType_value) -} - -func init() { proto.RegisterFile("metrics.proto", fileDescriptor_metrics_c97c9a2b9560cb8f) } - -var fileDescriptor_metrics_c97c9a2b9560cb8f = []byte{ - // 591 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xac, 0x54, 0x4f, 0x4f, 0xdb, 0x4e, - 0x14, 0xfc, 0x99, 0xd8, 0x09, 0x7e, 0x86, 0x5f, 0xad, 0x15, 0x07, 0xab, 0x2d, 0x25, 0xcd, 0x89, - 0xf6, 0x10, 0x54, 0x04, 0xaa, 0x44, 0xdb, 0x03, 0x50, 0x1a, 0x2a, 0xd5, 0x40, 0x37, 0xc9, 0x81, - 0x5e, 0xac, 0x8d, 0x59, 0x25, 0x56, 0xbd, 0xb6, 0x6b, 0xef, 0x22, 0xe5, 0xdc, 0x43, 0xbf, 0x47, - 0xbf, 0x68, 0xab, 0xfd, 0xe3, 0x18, 0x24, 0xc3, 0xa9, 0xb7, 0xb7, 0xf3, 0x66, 0xde, 0x8e, 0x77, - 0xc7, 0x0b, 0x9b, 0x8c, 0xf2, 0x32, 0x89, 0xab, 0x61, 0x51, 0xe6, 0x3c, 0x47, 0x5b, 0x49, 0x2e, - 0x2b, 0x46, 0xf9, 0x82, 0x8a, 0x6a, 0x18, 0xa7, 0x09, 0xcd, 0xf8, 0xe0, 0x10, 0xdc, 0x2f, 0x64, - 0x46, 0xd3, 0x2b, 
0x92, 0x94, 0x08, 0x81, 0x9d, 0x11, 0x46, 0x03, 0xab, 0x6f, 0xed, 0xba, 0x58, - 0xd5, 0x68, 0x0b, 0x9c, 0x5b, 0x92, 0x0a, 0x1a, 0xac, 0x29, 0x50, 0x2f, 0x06, 0xdb, 0xe0, 0x8c, - 0x88, 0x98, 0xdf, 0x69, 0x4b, 0x8d, 0x55, 0xb7, 0x77, 0xa0, 0x77, 0x9a, 0x8b, 0x8c, 0xd3, 0xf2, - 0x01, 0xc2, 0x7b, 0x58, 0xff, 0x2a, 0x48, 0xc6, 0x93, 0x94, 0xa2, 0xa7, 0xb0, 0xfe, 0xc3, 0xd4, - 0x86, 0xb4, 0x5a, 0xdf, 0xdf, 0x7d, 0xa5, 0xfe, 0x65, 0x41, 0x6f, 0x2c, 0x18, 0x23, 0xe5, 0x12, - 0xbd, 0x84, 0x8d, 0x8a, 0xb0, 0x22, 0xa5, 0x51, 0x2c, 0x77, 0x54, 0x13, 0x6c, 0xec, 0x69, 0x4c, - 0x99, 0x40, 0xdb, 0x00, 0x86, 0x52, 0x09, 0x66, 0x26, 0xb9, 0x1a, 0x19, 0x0b, 0x86, 0x8e, 0xee, - 0xec, 0xdf, 0xe9, 0x77, 0x76, 0xbd, 0xfd, 0x17, 0xc3, 0xb6, 0xb3, 0x1a, 0xd6, 0x8e, 0x1b, 0x7f, - 0xf2, 0x43, 0xa7, 0x19, 0x5f, 0x16, 0xf4, 0xe6, 0x81, 0x0f, 0xfd, 0x69, 0x81, 0x7b, 0x9e, 0x54, - 0x3c, 0x9f, 0x97, 0x84, 0xfd, 0x03, 0xb3, 0x07, 0xd0, 0x9d, 0x89, 0xf8, 0x3b, 0xe5, 0xc6, 0xea, - 0xf3, 0x76, 0xab, 0x27, 0x8a, 0x83, 0x0d, 0x77, 0x30, 0x81, 0xae, 0x46, 0xd0, 0x2b, 0xf0, 0x63, - 0xc1, 0x44, 0x4a, 0x78, 0x72, 0x7b, 0xdf, 0xc5, 0x93, 0x06, 0xd7, 0x4e, 0x76, 0xc0, 0x13, 0x45, - 0x41, 0xcb, 0x68, 0x96, 0x8b, 0xec, 0xc6, 0x58, 0x01, 0x05, 0x9d, 0x48, 0x64, 0xf0, 0x67, 0x0d, - 0xba, 0xa1, 0xca, 0x18, 0x3a, 0x04, 0x27, 0x95, 0x31, 0x0a, 0x2c, 0xe5, 0x6a, 0xa7, 0xdd, 0xd5, - 0x2a, 0x69, 0x58, 0xb3, 0xd1, 0x1b, 0x70, 0xe6, 0x32, 0x46, 0x6a, 0xb8, 0xb7, 0xff, 0xac, 0x5d, - 0xa6, 0x92, 0x86, 0x35, 0x13, 0xbd, 0x85, 0x5e, 0xac, 0xa3, 0x15, 0x74, 0x94, 0x68, 0xbb, 0x5d, - 0x64, 0xf2, 0x87, 0x6b, 0xb6, 0x14, 0x56, 0x3a, 0x33, 0x81, 0xfd, 0x98, 0xd0, 0x04, 0x0b, 0xd7, - 0x6c, 0x29, 0x14, 0xfa, 0x8e, 0x03, 0xe7, 0x31, 0xa1, 0x09, 0x02, 0xae, 0xd9, 0xe8, 0x03, 0xb8, - 0x8b, 0xfa, 0xea, 0x83, 0x9e, 0x92, 0x3e, 0x70, 0x30, 0xab, 0x84, 0xe0, 0x46, 0x21, 0xc3, 0xc2, - 0x13, 0x46, 0x2b, 0x4e, 0x58, 0x11, 0xb1, 0x2a, 0xe8, 0xf6, 0xad, 0xdd, 0x0e, 0xf6, 0x56, 0x58, - 0x58, 0x0d, 0x7e, 0x5b, 0xb0, 0xa1, 0x6f, 0xe0, 0x13, 
0x61, 0x49, 0xba, 0x6c, 0xfd, 0x83, 0x11, - 0xd8, 0x0b, 0x9a, 0x16, 0xe6, 0x07, 0x56, 0x35, 0x3a, 0x00, 0x5b, 0x7a, 0x54, 0x47, 0xf8, 0xff, - 0x7e, 0xbf, 0xdd, 0x95, 0x9e, 0x3c, 0x59, 0x16, 0x14, 0x2b, 0xb6, 0x0c, 0x9f, 0x7e, 0x53, 0x02, - 0xfb, 0xb1, 0xf0, 0x69, 0x1d, 0x36, 0xdc, 0xd7, 0x21, 0x40, 0x33, 0x09, 0x79, 0xd0, 0x3b, 0xbd, - 0x9c, 0x5e, 0x4c, 0xce, 0xb0, 0xff, 0x1f, 0x72, 0xc1, 0x19, 0x1d, 0x4f, 0x47, 0x67, 0xbe, 0x25, - 0xf1, 0xf1, 0x34, 0x0c, 0x8f, 0xf1, 0xb5, 0xbf, 0x26, 0x17, 0xd3, 0x8b, 0xc9, 0xf5, 0xd5, 0xd9, - 0x47, 0xbf, 0x83, 0x36, 0xc1, 0x3d, 0xff, 0x3c, 0x9e, 0x5c, 0x8e, 0xf0, 0x71, 0xe8, 0xdb, 0x27, - 0x18, 0x5a, 0x5f, 0xb2, 0x6f, 0x47, 0xf3, 0x84, 0x2f, 0xc4, 0x6c, 0x18, 0xe7, 0x6c, 0xaf, 0xe9, - 0xee, 0xe9, 0x6e, 0xc4, 0xf2, 0x1b, 0x9a, 0xee, 0xcd, 0xf3, 0x77, 0x49, 0x1e, 0x35, 0xdd, 0x48, - 0x77, 0xff, 0x06, 0x00, 0x00, 0xff, 0xff, 0x45, 0x21, 0x7f, 0x64, 0x2b, 0x05, 0x00, 0x00, -} diff --git a/vendor/github.com/prometheus/common/LICENSE b/vendor/github.com/prometheus/common/LICENSE deleted file mode 100644 index 261eeb9..0000000 --- a/vendor/github.com/prometheus/common/LICENSE +++ /dev/null @@ -1,201 +0,0 @@ - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. 
For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. 
For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. 
If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. 
You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. 
Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. 
- - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. diff --git a/vendor/github.com/prometheus/common/NOTICE b/vendor/github.com/prometheus/common/NOTICE deleted file mode 100644 index 636a2c1..0000000 --- a/vendor/github.com/prometheus/common/NOTICE +++ /dev/null @@ -1,5 +0,0 @@ -Common libraries shared by Prometheus Go components. -Copyright 2015 The Prometheus Authors - -This product includes software developed at -SoundCloud Ltd. (http://soundcloud.com/). diff --git a/vendor/github.com/prometheus/common/expfmt/decode.go b/vendor/github.com/prometheus/common/expfmt/decode.go deleted file mode 100644 index c092723..0000000 --- a/vendor/github.com/prometheus/common/expfmt/decode.go +++ /dev/null @@ -1,429 +0,0 @@ -// Copyright 2015 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package expfmt - -import ( - "fmt" - "io" - "math" - "mime" - "net/http" - - dto "github.com/prometheus/client_model/go" - - "github.com/matttproud/golang_protobuf_extensions/pbutil" - "github.com/prometheus/common/model" -) - -// Decoder types decode an input stream into metric families. -type Decoder interface { - Decode(*dto.MetricFamily) error -} - -// DecodeOptions contains options used by the Decoder and in sample extraction. -type DecodeOptions struct { - // Timestamp is added to each value from the stream that has no explicit timestamp set. - Timestamp model.Time -} - -// ResponseFormat extracts the correct format from a HTTP response header. -// If no matching format can be found FormatUnknown is returned. -func ResponseFormat(h http.Header) Format { - ct := h.Get(hdrContentType) - - mediatype, params, err := mime.ParseMediaType(ct) - if err != nil { - return FmtUnknown - } - - const textType = "text/plain" - - switch mediatype { - case ProtoType: - if p, ok := params["proto"]; ok && p != ProtoProtocol { - return FmtUnknown - } - if e, ok := params["encoding"]; ok && e != "delimited" { - return FmtUnknown - } - return FmtProtoDelim - - case textType: - if v, ok := params["version"]; ok && v != TextVersion { - return FmtUnknown - } - return FmtText - } - - return FmtUnknown -} - -// NewDecoder returns a new decoder based on the given input format. -// If the input format does not imply otherwise, a text format decoder is returned. -func NewDecoder(r io.Reader, format Format) Decoder { - switch format { - case FmtProtoDelim: - return &protoDecoder{r: r} - } - return &textDecoder{r: r} -} - -// protoDecoder implements the Decoder interface for protocol buffers. -type protoDecoder struct { - r io.Reader -} - -// Decode implements the Decoder interface. 
-func (d *protoDecoder) Decode(v *dto.MetricFamily) error { - _, err := pbutil.ReadDelimited(d.r, v) - if err != nil { - return err - } - if !model.IsValidMetricName(model.LabelValue(v.GetName())) { - return fmt.Errorf("invalid metric name %q", v.GetName()) - } - for _, m := range v.GetMetric() { - if m == nil { - continue - } - for _, l := range m.GetLabel() { - if l == nil { - continue - } - if !model.LabelValue(l.GetValue()).IsValid() { - return fmt.Errorf("invalid label value %q", l.GetValue()) - } - if !model.LabelName(l.GetName()).IsValid() { - return fmt.Errorf("invalid label name %q", l.GetName()) - } - } - } - return nil -} - -// textDecoder implements the Decoder interface for the text protocol. -type textDecoder struct { - r io.Reader - p TextParser - fams []*dto.MetricFamily -} - -// Decode implements the Decoder interface. -func (d *textDecoder) Decode(v *dto.MetricFamily) error { - // TODO(fabxc): Wrap this as a line reader to make streaming safer. - if len(d.fams) == 0 { - // No cached metric families, read everything and parse metrics. - fams, err := d.p.TextToMetricFamilies(d.r) - if err != nil { - return err - } - if len(fams) == 0 { - return io.EOF - } - d.fams = make([]*dto.MetricFamily, 0, len(fams)) - for _, f := range fams { - d.fams = append(d.fams, f) - } - } - - *v = *d.fams[0] - d.fams = d.fams[1:] - - return nil -} - -// SampleDecoder wraps a Decoder to extract samples from the metric families -// decoded by the wrapped Decoder. -type SampleDecoder struct { - Dec Decoder - Opts *DecodeOptions - - f dto.MetricFamily -} - -// Decode calls the Decode method of the wrapped Decoder and then extracts the -// samples from the decoded MetricFamily into the provided model.Vector. -func (sd *SampleDecoder) Decode(s *model.Vector) error { - err := sd.Dec.Decode(&sd.f) - if err != nil { - return err - } - *s, err = extractSamples(&sd.f, sd.Opts) - return err -} - -// ExtractSamples builds a slice of samples from the provided metric -// families. 
If an error occurrs during sample extraction, it continues to -// extract from the remaining metric families. The returned error is the last -// error that has occurred. -func ExtractSamples(o *DecodeOptions, fams ...*dto.MetricFamily) (model.Vector, error) { - var ( - all model.Vector - lastErr error - ) - for _, f := range fams { - some, err := extractSamples(f, o) - if err != nil { - lastErr = err - continue - } - all = append(all, some...) - } - return all, lastErr -} - -func extractSamples(f *dto.MetricFamily, o *DecodeOptions) (model.Vector, error) { - switch f.GetType() { - case dto.MetricType_COUNTER: - return extractCounter(o, f), nil - case dto.MetricType_GAUGE: - return extractGauge(o, f), nil - case dto.MetricType_SUMMARY: - return extractSummary(o, f), nil - case dto.MetricType_UNTYPED: - return extractUntyped(o, f), nil - case dto.MetricType_HISTOGRAM: - return extractHistogram(o, f), nil - } - return nil, fmt.Errorf("expfmt.extractSamples: unknown metric family type %v", f.GetType()) -} - -func extractCounter(o *DecodeOptions, f *dto.MetricFamily) model.Vector { - samples := make(model.Vector, 0, len(f.Metric)) - - for _, m := range f.Metric { - if m.Counter == nil { - continue - } - - lset := make(model.LabelSet, len(m.Label)+1) - for _, p := range m.Label { - lset[model.LabelName(p.GetName())] = model.LabelValue(p.GetValue()) - } - lset[model.MetricNameLabel] = model.LabelValue(f.GetName()) - - smpl := &model.Sample{ - Metric: model.Metric(lset), - Value: model.SampleValue(m.Counter.GetValue()), - } - - if m.TimestampMs != nil { - smpl.Timestamp = model.TimeFromUnixNano(*m.TimestampMs * 1000000) - } else { - smpl.Timestamp = o.Timestamp - } - - samples = append(samples, smpl) - } - - return samples -} - -func extractGauge(o *DecodeOptions, f *dto.MetricFamily) model.Vector { - samples := make(model.Vector, 0, len(f.Metric)) - - for _, m := range f.Metric { - if m.Gauge == nil { - continue - } - - lset := make(model.LabelSet, len(m.Label)+1) - for 
_, p := range m.Label { - lset[model.LabelName(p.GetName())] = model.LabelValue(p.GetValue()) - } - lset[model.MetricNameLabel] = model.LabelValue(f.GetName()) - - smpl := &model.Sample{ - Metric: model.Metric(lset), - Value: model.SampleValue(m.Gauge.GetValue()), - } - - if m.TimestampMs != nil { - smpl.Timestamp = model.TimeFromUnixNano(*m.TimestampMs * 1000000) - } else { - smpl.Timestamp = o.Timestamp - } - - samples = append(samples, smpl) - } - - return samples -} - -func extractUntyped(o *DecodeOptions, f *dto.MetricFamily) model.Vector { - samples := make(model.Vector, 0, len(f.Metric)) - - for _, m := range f.Metric { - if m.Untyped == nil { - continue - } - - lset := make(model.LabelSet, len(m.Label)+1) - for _, p := range m.Label { - lset[model.LabelName(p.GetName())] = model.LabelValue(p.GetValue()) - } - lset[model.MetricNameLabel] = model.LabelValue(f.GetName()) - - smpl := &model.Sample{ - Metric: model.Metric(lset), - Value: model.SampleValue(m.Untyped.GetValue()), - } - - if m.TimestampMs != nil { - smpl.Timestamp = model.TimeFromUnixNano(*m.TimestampMs * 1000000) - } else { - smpl.Timestamp = o.Timestamp - } - - samples = append(samples, smpl) - } - - return samples -} - -func extractSummary(o *DecodeOptions, f *dto.MetricFamily) model.Vector { - samples := make(model.Vector, 0, len(f.Metric)) - - for _, m := range f.Metric { - if m.Summary == nil { - continue - } - - timestamp := o.Timestamp - if m.TimestampMs != nil { - timestamp = model.TimeFromUnixNano(*m.TimestampMs * 1000000) - } - - for _, q := range m.Summary.Quantile { - lset := make(model.LabelSet, len(m.Label)+2) - for _, p := range m.Label { - lset[model.LabelName(p.GetName())] = model.LabelValue(p.GetValue()) - } - // BUG(matt): Update other names to "quantile". 
- lset[model.LabelName(model.QuantileLabel)] = model.LabelValue(fmt.Sprint(q.GetQuantile())) - lset[model.MetricNameLabel] = model.LabelValue(f.GetName()) - - samples = append(samples, &model.Sample{ - Metric: model.Metric(lset), - Value: model.SampleValue(q.GetValue()), - Timestamp: timestamp, - }) - } - - lset := make(model.LabelSet, len(m.Label)+1) - for _, p := range m.Label { - lset[model.LabelName(p.GetName())] = model.LabelValue(p.GetValue()) - } - lset[model.MetricNameLabel] = model.LabelValue(f.GetName() + "_sum") - - samples = append(samples, &model.Sample{ - Metric: model.Metric(lset), - Value: model.SampleValue(m.Summary.GetSampleSum()), - Timestamp: timestamp, - }) - - lset = make(model.LabelSet, len(m.Label)+1) - for _, p := range m.Label { - lset[model.LabelName(p.GetName())] = model.LabelValue(p.GetValue()) - } - lset[model.MetricNameLabel] = model.LabelValue(f.GetName() + "_count") - - samples = append(samples, &model.Sample{ - Metric: model.Metric(lset), - Value: model.SampleValue(m.Summary.GetSampleCount()), - Timestamp: timestamp, - }) - } - - return samples -} - -func extractHistogram(o *DecodeOptions, f *dto.MetricFamily) model.Vector { - samples := make(model.Vector, 0, len(f.Metric)) - - for _, m := range f.Metric { - if m.Histogram == nil { - continue - } - - timestamp := o.Timestamp - if m.TimestampMs != nil { - timestamp = model.TimeFromUnixNano(*m.TimestampMs * 1000000) - } - - infSeen := false - - for _, q := range m.Histogram.Bucket { - lset := make(model.LabelSet, len(m.Label)+2) - for _, p := range m.Label { - lset[model.LabelName(p.GetName())] = model.LabelValue(p.GetValue()) - } - lset[model.LabelName(model.BucketLabel)] = model.LabelValue(fmt.Sprint(q.GetUpperBound())) - lset[model.MetricNameLabel] = model.LabelValue(f.GetName() + "_bucket") - - if math.IsInf(q.GetUpperBound(), +1) { - infSeen = true - } - - samples = append(samples, &model.Sample{ - Metric: model.Metric(lset), - Value: model.SampleValue(q.GetCumulativeCount()), - 
Timestamp: timestamp, - }) - } - - lset := make(model.LabelSet, len(m.Label)+1) - for _, p := range m.Label { - lset[model.LabelName(p.GetName())] = model.LabelValue(p.GetValue()) - } - lset[model.MetricNameLabel] = model.LabelValue(f.GetName() + "_sum") - - samples = append(samples, &model.Sample{ - Metric: model.Metric(lset), - Value: model.SampleValue(m.Histogram.GetSampleSum()), - Timestamp: timestamp, - }) - - lset = make(model.LabelSet, len(m.Label)+1) - for _, p := range m.Label { - lset[model.LabelName(p.GetName())] = model.LabelValue(p.GetValue()) - } - lset[model.MetricNameLabel] = model.LabelValue(f.GetName() + "_count") - - count := &model.Sample{ - Metric: model.Metric(lset), - Value: model.SampleValue(m.Histogram.GetSampleCount()), - Timestamp: timestamp, - } - samples = append(samples, count) - - if !infSeen { - // Append an infinity bucket sample. - lset := make(model.LabelSet, len(m.Label)+2) - for _, p := range m.Label { - lset[model.LabelName(p.GetName())] = model.LabelValue(p.GetValue()) - } - lset[model.LabelName(model.BucketLabel)] = model.LabelValue("+Inf") - lset[model.MetricNameLabel] = model.LabelValue(f.GetName() + "_bucket") - - samples = append(samples, &model.Sample{ - Metric: model.Metric(lset), - Value: count.Value, - Timestamp: timestamp, - }) - } - } - - return samples -} diff --git a/vendor/github.com/prometheus/common/expfmt/encode.go b/vendor/github.com/prometheus/common/expfmt/encode.go deleted file mode 100644 index 11839ed..0000000 --- a/vendor/github.com/prometheus/common/expfmt/encode.go +++ /dev/null @@ -1,88 +0,0 @@ -// Copyright 2015 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package expfmt - -import ( - "fmt" - "io" - "net/http" - - "github.com/golang/protobuf/proto" - "github.com/matttproud/golang_protobuf_extensions/pbutil" - "github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg" - - dto "github.com/prometheus/client_model/go" -) - -// Encoder types encode metric families into an underlying wire protocol. -type Encoder interface { - Encode(*dto.MetricFamily) error -} - -type encoder func(*dto.MetricFamily) error - -func (e encoder) Encode(v *dto.MetricFamily) error { - return e(v) -} - -// Negotiate returns the Content-Type based on the given Accept header. -// If no appropriate accepted type is found, FmtText is returned. -func Negotiate(h http.Header) Format { - for _, ac := range goautoneg.ParseAccept(h.Get(hdrAccept)) { - // Check for protocol buffer - if ac.Type+"/"+ac.SubType == ProtoType && ac.Params["proto"] == ProtoProtocol { - switch ac.Params["encoding"] { - case "delimited": - return FmtProtoDelim - case "text": - return FmtProtoText - case "compact-text": - return FmtProtoCompact - } - } - // Check for text format. - ver := ac.Params["version"] - if ac.Type == "text" && ac.SubType == "plain" && (ver == TextVersion || ver == "") { - return FmtText - } - } - return FmtText -} - -// NewEncoder returns a new encoder based on content type negotiation. 
-func NewEncoder(w io.Writer, format Format) Encoder { - switch format { - case FmtProtoDelim: - return encoder(func(v *dto.MetricFamily) error { - _, err := pbutil.WriteDelimited(w, v) - return err - }) - case FmtProtoCompact: - return encoder(func(v *dto.MetricFamily) error { - _, err := fmt.Fprintln(w, v.String()) - return err - }) - case FmtProtoText: - return encoder(func(v *dto.MetricFamily) error { - _, err := fmt.Fprintln(w, proto.MarshalTextString(v)) - return err - }) - case FmtText: - return encoder(func(v *dto.MetricFamily) error { - _, err := MetricFamilyToText(w, v) - return err - }) - } - panic("expfmt.NewEncoder: unknown format") -} diff --git a/vendor/github.com/prometheus/common/expfmt/expfmt.go b/vendor/github.com/prometheus/common/expfmt/expfmt.go deleted file mode 100644 index c71bcb9..0000000 --- a/vendor/github.com/prometheus/common/expfmt/expfmt.go +++ /dev/null @@ -1,38 +0,0 @@ -// Copyright 2015 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Package expfmt contains tools for reading and writing Prometheus metrics. -package expfmt - -// Format specifies the HTTP content type of the different wire protocols. -type Format string - -// Constants to assemble the Content-Type values for the different wire protocols. 
-const ( - TextVersion = "0.0.4" - ProtoType = `application/vnd.google.protobuf` - ProtoProtocol = `io.prometheus.client.MetricFamily` - ProtoFmt = ProtoType + "; proto=" + ProtoProtocol + ";" - - // The Content-Type values for the different wire protocols. - FmtUnknown Format = `` - FmtText Format = `text/plain; version=` + TextVersion + `; charset=utf-8` - FmtProtoDelim Format = ProtoFmt + ` encoding=delimited` - FmtProtoText Format = ProtoFmt + ` encoding=text` - FmtProtoCompact Format = ProtoFmt + ` encoding=compact-text` -) - -const ( - hdrContentType = "Content-Type" - hdrAccept = "Accept" -) diff --git a/vendor/github.com/prometheus/common/expfmt/fuzz.go b/vendor/github.com/prometheus/common/expfmt/fuzz.go deleted file mode 100644 index dc2eede..0000000 --- a/vendor/github.com/prometheus/common/expfmt/fuzz.go +++ /dev/null @@ -1,36 +0,0 @@ -// Copyright 2014 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Build only when actually fuzzing -// +build gofuzz - -package expfmt - -import "bytes" - -// Fuzz text metric parser with with github.com/dvyukov/go-fuzz: -// -// go-fuzz-build github.com/prometheus/common/expfmt -// go-fuzz -bin expfmt-fuzz.zip -workdir fuzz -// -// Further input samples should go in the folder fuzz/corpus. 
-func Fuzz(in []byte) int { - parser := TextParser{} - _, err := parser.TextToMetricFamilies(bytes.NewReader(in)) - - if err != nil { - return 0 - } - - return 1 -} diff --git a/vendor/github.com/prometheus/common/expfmt/text_create.go b/vendor/github.com/prometheus/common/expfmt/text_create.go deleted file mode 100644 index 8e473d0..0000000 --- a/vendor/github.com/prometheus/common/expfmt/text_create.go +++ /dev/null @@ -1,468 +0,0 @@ -// Copyright 2014 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package expfmt - -import ( - "bytes" - "fmt" - "io" - "math" - "strconv" - "strings" - "sync" - - "github.com/prometheus/common/model" - - dto "github.com/prometheus/client_model/go" -) - -// enhancedWriter has all the enhanced write functions needed here. bytes.Buffer -// implements it. -type enhancedWriter interface { - io.Writer - WriteRune(r rune) (n int, err error) - WriteString(s string) (n int, err error) - WriteByte(c byte) error -} - -const ( - initialBufSize = 512 - initialNumBufSize = 24 -) - -var ( - bufPool = sync.Pool{ - New: func() interface{} { - return bytes.NewBuffer(make([]byte, 0, initialBufSize)) - }, - } - numBufPool = sync.Pool{ - New: func() interface{} { - b := make([]byte, 0, initialNumBufSize) - return &b - }, - } -) - -// MetricFamilyToText converts a MetricFamily proto message into text format and -// writes the resulting lines to 'out'. It returns the number of bytes written -// and any error encountered. 
The output will have the same order as the input, -// no further sorting is performed. Furthermore, this function assumes the input -// is already sanitized and does not perform any sanity checks. If the input -// contains duplicate metrics or invalid metric or label names, the conversion -// will result in invalid text format output. -// -// This method fulfills the type 'prometheus.encoder'. -func MetricFamilyToText(out io.Writer, in *dto.MetricFamily) (written int, err error) { - // Fail-fast checks. - if len(in.Metric) == 0 { - return 0, fmt.Errorf("MetricFamily has no metrics: %s", in) - } - name := in.GetName() - if name == "" { - return 0, fmt.Errorf("MetricFamily has no name: %s", in) - } - - // Try the interface upgrade. If it doesn't work, we'll use a - // bytes.Buffer from the sync.Pool and write out its content to out in a - // single go in the end. - w, ok := out.(enhancedWriter) - if !ok { - b := bufPool.Get().(*bytes.Buffer) - b.Reset() - w = b - defer func() { - bWritten, bErr := out.Write(b.Bytes()) - written = bWritten - if err == nil { - err = bErr - } - bufPool.Put(b) - }() - } - - var n int - - // Comments, first HELP, then TYPE. 
- if in.Help != nil { - n, err = w.WriteString("# HELP ") - written += n - if err != nil { - return - } - n, err = w.WriteString(name) - written += n - if err != nil { - return - } - err = w.WriteByte(' ') - written++ - if err != nil { - return - } - n, err = writeEscapedString(w, *in.Help, false) - written += n - if err != nil { - return - } - err = w.WriteByte('\n') - written++ - if err != nil { - return - } - } - n, err = w.WriteString("# TYPE ") - written += n - if err != nil { - return - } - n, err = w.WriteString(name) - written += n - if err != nil { - return - } - metricType := in.GetType() - switch metricType { - case dto.MetricType_COUNTER: - n, err = w.WriteString(" counter\n") - case dto.MetricType_GAUGE: - n, err = w.WriteString(" gauge\n") - case dto.MetricType_SUMMARY: - n, err = w.WriteString(" summary\n") - case dto.MetricType_UNTYPED: - n, err = w.WriteString(" untyped\n") - case dto.MetricType_HISTOGRAM: - n, err = w.WriteString(" histogram\n") - default: - return written, fmt.Errorf("unknown metric type %s", metricType.String()) - } - written += n - if err != nil { - return - } - - // Finally the samples, one line for each. 
- for _, metric := range in.Metric { - switch metricType { - case dto.MetricType_COUNTER: - if metric.Counter == nil { - return written, fmt.Errorf( - "expected counter in metric %s %s", name, metric, - ) - } - n, err = writeSample( - w, name, "", metric, "", 0, - metric.Counter.GetValue(), - ) - case dto.MetricType_GAUGE: - if metric.Gauge == nil { - return written, fmt.Errorf( - "expected gauge in metric %s %s", name, metric, - ) - } - n, err = writeSample( - w, name, "", metric, "", 0, - metric.Gauge.GetValue(), - ) - case dto.MetricType_UNTYPED: - if metric.Untyped == nil { - return written, fmt.Errorf( - "expected untyped in metric %s %s", name, metric, - ) - } - n, err = writeSample( - w, name, "", metric, "", 0, - metric.Untyped.GetValue(), - ) - case dto.MetricType_SUMMARY: - if metric.Summary == nil { - return written, fmt.Errorf( - "expected summary in metric %s %s", name, metric, - ) - } - for _, q := range metric.Summary.Quantile { - n, err = writeSample( - w, name, "", metric, - model.QuantileLabel, q.GetQuantile(), - q.GetValue(), - ) - written += n - if err != nil { - return - } - } - n, err = writeSample( - w, name, "_sum", metric, "", 0, - metric.Summary.GetSampleSum(), - ) - written += n - if err != nil { - return - } - n, err = writeSample( - w, name, "_count", metric, "", 0, - float64(metric.Summary.GetSampleCount()), - ) - case dto.MetricType_HISTOGRAM: - if metric.Histogram == nil { - return written, fmt.Errorf( - "expected histogram in metric %s %s", name, metric, - ) - } - infSeen := false - for _, b := range metric.Histogram.Bucket { - n, err = writeSample( - w, name, "_bucket", metric, - model.BucketLabel, b.GetUpperBound(), - float64(b.GetCumulativeCount()), - ) - written += n - if err != nil { - return - } - if math.IsInf(b.GetUpperBound(), +1) { - infSeen = true - } - } - if !infSeen { - n, err = writeSample( - w, name, "_bucket", metric, - model.BucketLabel, math.Inf(+1), - float64(metric.Histogram.GetSampleCount()), - ) - written += n 
- if err != nil { - return - } - } - n, err = writeSample( - w, name, "_sum", metric, "", 0, - metric.Histogram.GetSampleSum(), - ) - written += n - if err != nil { - return - } - n, err = writeSample( - w, name, "_count", metric, "", 0, - float64(metric.Histogram.GetSampleCount()), - ) - default: - return written, fmt.Errorf( - "unexpected type in metric %s %s", name, metric, - ) - } - written += n - if err != nil { - return - } - } - return -} - -// writeSample writes a single sample in text format to w, given the metric -// name, the metric proto message itself, optionally an additional label name -// with a float64 value (use empty string as label name if not required), and -// the value. The function returns the number of bytes written and any error -// encountered. -func writeSample( - w enhancedWriter, - name, suffix string, - metric *dto.Metric, - additionalLabelName string, additionalLabelValue float64, - value float64, -) (int, error) { - var written int - n, err := w.WriteString(name) - written += n - if err != nil { - return written, err - } - if suffix != "" { - n, err = w.WriteString(suffix) - written += n - if err != nil { - return written, err - } - } - n, err = writeLabelPairs( - w, metric.Label, additionalLabelName, additionalLabelValue, - ) - written += n - if err != nil { - return written, err - } - err = w.WriteByte(' ') - written++ - if err != nil { - return written, err - } - n, err = writeFloat(w, value) - written += n - if err != nil { - return written, err - } - if metric.TimestampMs != nil { - err = w.WriteByte(' ') - written++ - if err != nil { - return written, err - } - n, err = writeInt(w, *metric.TimestampMs) - written += n - if err != nil { - return written, err - } - } - err = w.WriteByte('\n') - written++ - if err != nil { - return written, err - } - return written, nil -} - -// writeLabelPairs converts a slice of LabelPair proto messages plus the -// explicitly given additional label pair into text formatted as required by the 
-// text format and writes it to 'w'. An empty slice in combination with an empty -// string 'additionalLabelName' results in nothing being written. Otherwise, the -// label pairs are written, escaped as required by the text format, and enclosed -// in '{...}'. The function returns the number of bytes written and any error -// encountered. -func writeLabelPairs( - w enhancedWriter, - in []*dto.LabelPair, - additionalLabelName string, additionalLabelValue float64, -) (int, error) { - if len(in) == 0 && additionalLabelName == "" { - return 0, nil - } - var ( - written int - separator byte = '{' - ) - for _, lp := range in { - err := w.WriteByte(separator) - written++ - if err != nil { - return written, err - } - n, err := w.WriteString(lp.GetName()) - written += n - if err != nil { - return written, err - } - n, err = w.WriteString(`="`) - written += n - if err != nil { - return written, err - } - n, err = writeEscapedString(w, lp.GetValue(), true) - written += n - if err != nil { - return written, err - } - err = w.WriteByte('"') - written++ - if err != nil { - return written, err - } - separator = ',' - } - if additionalLabelName != "" { - err := w.WriteByte(separator) - written++ - if err != nil { - return written, err - } - n, err := w.WriteString(additionalLabelName) - written += n - if err != nil { - return written, err - } - n, err = w.WriteString(`="`) - written += n - if err != nil { - return written, err - } - n, err = writeFloat(w, additionalLabelValue) - written += n - if err != nil { - return written, err - } - err = w.WriteByte('"') - written++ - if err != nil { - return written, err - } - } - err := w.WriteByte('}') - written++ - if err != nil { - return written, err - } - return written, nil -} - -// writeEscapedString replaces '\' by '\\', new line character by '\n', and - if -// includeDoubleQuote is true - '"' by '\"'. 
-var ( - escaper = strings.NewReplacer("\\", `\\`, "\n", `\n`) - quotedEscaper = strings.NewReplacer("\\", `\\`, "\n", `\n`, "\"", `\"`) -) - -func writeEscapedString(w enhancedWriter, v string, includeDoubleQuote bool) (int, error) { - if includeDoubleQuote { - return quotedEscaper.WriteString(w, v) - } else { - return escaper.WriteString(w, v) - } -} - -// writeFloat is equivalent to fmt.Fprint with a float64 argument but hardcodes -// a few common cases for increased efficiency. For non-hardcoded cases, it uses -// strconv.AppendFloat to avoid allocations, similar to writeInt. -func writeFloat(w enhancedWriter, f float64) (int, error) { - switch { - case f == 1: - return 1, w.WriteByte('1') - case f == 0: - return 1, w.WriteByte('0') - case f == -1: - return w.WriteString("-1") - case math.IsNaN(f): - return w.WriteString("NaN") - case math.IsInf(f, +1): - return w.WriteString("+Inf") - case math.IsInf(f, -1): - return w.WriteString("-Inf") - default: - bp := numBufPool.Get().(*[]byte) - *bp = strconv.AppendFloat((*bp)[:0], f, 'g', -1, 64) - written, err := w.Write(*bp) - numBufPool.Put(bp) - return written, err - } -} - -// writeInt is equivalent to fmt.Fprint with an int64 argument but uses -// strconv.AppendInt with a byte slice taken from a sync.Pool to avoid -// allocations. -func writeInt(w enhancedWriter, i int64) (int, error) { - bp := numBufPool.Get().(*[]byte) - *bp = strconv.AppendInt((*bp)[:0], i, 10) - written, err := w.Write(*bp) - numBufPool.Put(bp) - return written, err -} diff --git a/vendor/github.com/prometheus/common/expfmt/text_parse.go b/vendor/github.com/prometheus/common/expfmt/text_parse.go deleted file mode 100644 index ec3d86b..0000000 --- a/vendor/github.com/prometheus/common/expfmt/text_parse.go +++ /dev/null @@ -1,757 +0,0 @@ -// Copyright 2014 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package expfmt - -import ( - "bufio" - "bytes" - "fmt" - "io" - "math" - "strconv" - "strings" - - dto "github.com/prometheus/client_model/go" - - "github.com/golang/protobuf/proto" - "github.com/prometheus/common/model" -) - -// A stateFn is a function that represents a state in a state machine. By -// executing it, the state is progressed to the next state. The stateFn returns -// another stateFn, which represents the new state. The end state is represented -// by nil. -type stateFn func() stateFn - -// ParseError signals errors while parsing the simple and flat text-based -// exchange format. -type ParseError struct { - Line int - Msg string -} - -// Error implements the error interface. -func (e ParseError) Error() string { - return fmt.Sprintf("text format parsing error in line %d: %s", e.Line, e.Msg) -} - -// TextParser is used to parse the simple and flat text-based exchange format. Its -// zero value is ready to use. -type TextParser struct { - metricFamiliesByName map[string]*dto.MetricFamily - buf *bufio.Reader // Where the parsed input is read through. - err error // Most recent error. - lineCount int // Tracks the line count for error messages. - currentByte byte // The most recent byte read. - currentToken bytes.Buffer // Re-used each time a token has to be gathered from multiple bytes. - currentMF *dto.MetricFamily - currentMetric *dto.Metric - currentLabelPair *dto.LabelPair - - // The remaining member variables are only used for summaries/histograms. 
- currentLabels map[string]string // All labels including '__name__' but excluding 'quantile'/'le' - // Summary specific. - summaries map[uint64]*dto.Metric // Key is created with LabelsToSignature. - currentQuantile float64 - // Histogram specific. - histograms map[uint64]*dto.Metric // Key is created with LabelsToSignature. - currentBucket float64 - // These tell us if the currently processed line ends on '_count' or - // '_sum' respectively and belong to a summary/histogram, representing the sample - // count and sum of that summary/histogram. - currentIsSummaryCount, currentIsSummarySum bool - currentIsHistogramCount, currentIsHistogramSum bool -} - -// TextToMetricFamilies reads 'in' as the simple and flat text-based exchange -// format and creates MetricFamily proto messages. It returns the MetricFamily -// proto messages in a map where the metric names are the keys, along with any -// error encountered. -// -// If the input contains duplicate metrics (i.e. lines with the same metric name -// and exactly the same label set), the resulting MetricFamily will contain -// duplicate Metric proto messages. Similar is true for duplicate label -// names. Checks for duplicates have to be performed separately, if required. -// Also note that neither the metrics within each MetricFamily are sorted nor -// the label pairs within each Metric. Sorting is not required for the most -// frequent use of this method, which is sample ingestion in the Prometheus -// server. However, for presentation purposes, you might want to sort the -// metrics, and in some cases, you must sort the labels, e.g. for consumption by -// the metric family injection hook of the Prometheus registry. -// -// Summaries and histograms are rather special beasts. You would probably not -// use them in the simple text format anyway. This method can deal with -// summaries and histograms if they are presented in exactly the way the -// text.Create function creates them. 
-// -// This method must not be called concurrently. If you want to parse different -// input concurrently, instantiate a separate Parser for each goroutine. -func (p *TextParser) TextToMetricFamilies(in io.Reader) (map[string]*dto.MetricFamily, error) { - p.reset(in) - for nextState := p.startOfLine; nextState != nil; nextState = nextState() { - // Magic happens here... - } - // Get rid of empty metric families. - for k, mf := range p.metricFamiliesByName { - if len(mf.GetMetric()) == 0 { - delete(p.metricFamiliesByName, k) - } - } - // If p.err is io.EOF now, we have run into a premature end of the input - // stream. Turn this error into something nicer and more - // meaningful. (io.EOF is often used as a signal for the legitimate end - // of an input stream.) - if p.err == io.EOF { - p.parseError("unexpected end of input stream") - } - return p.metricFamiliesByName, p.err -} - -func (p *TextParser) reset(in io.Reader) { - p.metricFamiliesByName = map[string]*dto.MetricFamily{} - if p.buf == nil { - p.buf = bufio.NewReader(in) - } else { - p.buf.Reset(in) - } - p.err = nil - p.lineCount = 0 - if p.summaries == nil || len(p.summaries) > 0 { - p.summaries = map[uint64]*dto.Metric{} - } - if p.histograms == nil || len(p.histograms) > 0 { - p.histograms = map[uint64]*dto.Metric{} - } - p.currentQuantile = math.NaN() - p.currentBucket = math.NaN() -} - -// startOfLine represents the state where the next byte read from p.buf is the -// start of a line (or whitespace leading up to it). -func (p *TextParser) startOfLine() stateFn { - p.lineCount++ - if p.skipBlankTab(); p.err != nil { - // End of input reached. This is the only case where - // that is not an error but a signal that we are done. - p.err = nil - return nil - } - switch p.currentByte { - case '#': - return p.startComment - case '\n': - return p.startOfLine // Empty line, start the next one. 
- } - return p.readingMetricName -} - -// startComment represents the state where the next byte read from p.buf is the -// start of a comment (or whitespace leading up to it). -func (p *TextParser) startComment() stateFn { - if p.skipBlankTab(); p.err != nil { - return nil // Unexpected end of input. - } - if p.currentByte == '\n' { - return p.startOfLine - } - if p.readTokenUntilWhitespace(); p.err != nil { - return nil // Unexpected end of input. - } - // If we have hit the end of line already, there is nothing left - // to do. This is not considered a syntax error. - if p.currentByte == '\n' { - return p.startOfLine - } - keyword := p.currentToken.String() - if keyword != "HELP" && keyword != "TYPE" { - // Generic comment, ignore by fast forwarding to end of line. - for p.currentByte != '\n' { - if p.currentByte, p.err = p.buf.ReadByte(); p.err != nil { - return nil // Unexpected end of input. - } - } - return p.startOfLine - } - // There is something. Next has to be a metric name. - if p.skipBlankTab(); p.err != nil { - return nil // Unexpected end of input. - } - if p.readTokenAsMetricName(); p.err != nil { - return nil // Unexpected end of input. - } - if p.currentByte == '\n' { - // At the end of the line already. - // Again, this is not considered a syntax error. - return p.startOfLine - } - if !isBlankOrTab(p.currentByte) { - p.parseError("invalid metric name in comment") - return nil - } - p.setOrCreateCurrentMF() - if p.skipBlankTab(); p.err != nil { - return nil // Unexpected end of input. - } - if p.currentByte == '\n' { - // At the end of the line already. - // Again, this is not considered a syntax error. - return p.startOfLine - } - switch keyword { - case "HELP": - return p.readingHelp - case "TYPE": - return p.readingType - } - panic(fmt.Sprintf("code error: unexpected keyword %q", keyword)) -} - -// readingMetricName represents the state where the last byte read (now in -// p.currentByte) is the first byte of a metric name. 
-func (p *TextParser) readingMetricName() stateFn { - if p.readTokenAsMetricName(); p.err != nil { - return nil - } - if p.currentToken.Len() == 0 { - p.parseError("invalid metric name") - return nil - } - p.setOrCreateCurrentMF() - // Now is the time to fix the type if it hasn't happened yet. - if p.currentMF.Type == nil { - p.currentMF.Type = dto.MetricType_UNTYPED.Enum() - } - p.currentMetric = &dto.Metric{} - // Do not append the newly created currentMetric to - // currentMF.Metric right now. First wait if this is a summary, - // and the metric exists already, which we can only know after - // having read all the labels. - if p.skipBlankTabIfCurrentBlankTab(); p.err != nil { - return nil // Unexpected end of input. - } - return p.readingLabels -} - -// readingLabels represents the state where the last byte read (now in -// p.currentByte) is either the first byte of the label set (i.e. a '{'), or the -// first byte of the value (otherwise). -func (p *TextParser) readingLabels() stateFn { - // Summaries/histograms are special. We have to reset the - // currentLabels map, currentQuantile and currentBucket before starting to - // read labels. - if p.currentMF.GetType() == dto.MetricType_SUMMARY || p.currentMF.GetType() == dto.MetricType_HISTOGRAM { - p.currentLabels = map[string]string{} - p.currentLabels[string(model.MetricNameLabel)] = p.currentMF.GetName() - p.currentQuantile = math.NaN() - p.currentBucket = math.NaN() - } - if p.currentByte != '{' { - return p.readingValue - } - return p.startLabelName -} - -// startLabelName represents the state where the next byte read from p.buf is -// the start of a label name (or whitespace leading up to it). -func (p *TextParser) startLabelName() stateFn { - if p.skipBlankTab(); p.err != nil { - return nil // Unexpected end of input. - } - if p.currentByte == '}' { - if p.skipBlankTab(); p.err != nil { - return nil // Unexpected end of input. 
- } - return p.readingValue - } - if p.readTokenAsLabelName(); p.err != nil { - return nil // Unexpected end of input. - } - if p.currentToken.Len() == 0 { - p.parseError(fmt.Sprintf("invalid label name for metric %q", p.currentMF.GetName())) - return nil - } - p.currentLabelPair = &dto.LabelPair{Name: proto.String(p.currentToken.String())} - if p.currentLabelPair.GetName() == string(model.MetricNameLabel) { - p.parseError(fmt.Sprintf("label name %q is reserved", model.MetricNameLabel)) - return nil - } - // Special summary/histogram treatment. Don't add 'quantile' and 'le' - // labels to 'real' labels. - if !(p.currentMF.GetType() == dto.MetricType_SUMMARY && p.currentLabelPair.GetName() == model.QuantileLabel) && - !(p.currentMF.GetType() == dto.MetricType_HISTOGRAM && p.currentLabelPair.GetName() == model.BucketLabel) { - p.currentMetric.Label = append(p.currentMetric.Label, p.currentLabelPair) - } - if p.skipBlankTabIfCurrentBlankTab(); p.err != nil { - return nil // Unexpected end of input. - } - if p.currentByte != '=' { - p.parseError(fmt.Sprintf("expected '=' after label name, found %q", p.currentByte)) - return nil - } - return p.startLabelValue -} - -// startLabelValue represents the state where the next byte read from p.buf is -// the start of a (quoted) label value (or whitespace leading up to it). -func (p *TextParser) startLabelValue() stateFn { - if p.skipBlankTab(); p.err != nil { - return nil // Unexpected end of input. - } - if p.currentByte != '"' { - p.parseError(fmt.Sprintf("expected '\"' at start of label value, found %q", p.currentByte)) - return nil - } - if p.readTokenAsLabelValue(); p.err != nil { - return nil - } - if !model.LabelValue(p.currentToken.String()).IsValid() { - p.parseError(fmt.Sprintf("invalid label value %q", p.currentToken.String())) - return nil - } - p.currentLabelPair.Value = proto.String(p.currentToken.String()) - // Special treatment of summaries: - // - Quantile labels are special, will result in dto.Quantile later. 
- // - Other labels have to be added to currentLabels for signature calculation. - if p.currentMF.GetType() == dto.MetricType_SUMMARY { - if p.currentLabelPair.GetName() == model.QuantileLabel { - if p.currentQuantile, p.err = strconv.ParseFloat(p.currentLabelPair.GetValue(), 64); p.err != nil { - // Create a more helpful error message. - p.parseError(fmt.Sprintf("expected float as value for 'quantile' label, got %q", p.currentLabelPair.GetValue())) - return nil - } - } else { - p.currentLabels[p.currentLabelPair.GetName()] = p.currentLabelPair.GetValue() - } - } - // Similar special treatment of histograms. - if p.currentMF.GetType() == dto.MetricType_HISTOGRAM { - if p.currentLabelPair.GetName() == model.BucketLabel { - if p.currentBucket, p.err = strconv.ParseFloat(p.currentLabelPair.GetValue(), 64); p.err != nil { - // Create a more helpful error message. - p.parseError(fmt.Sprintf("expected float as value for 'le' label, got %q", p.currentLabelPair.GetValue())) - return nil - } - } else { - p.currentLabels[p.currentLabelPair.GetName()] = p.currentLabelPair.GetValue() - } - } - if p.skipBlankTab(); p.err != nil { - return nil // Unexpected end of input. - } - switch p.currentByte { - case ',': - return p.startLabelName - - case '}': - if p.skipBlankTab(); p.err != nil { - return nil // Unexpected end of input. - } - return p.readingValue - default: - p.parseError(fmt.Sprintf("unexpected end of label value %q", p.currentLabelPair.GetValue())) - return nil - } -} - -// readingValue represents the state where the last byte read (now in -// p.currentByte) is the first byte of the sample value (i.e. a float). -func (p *TextParser) readingValue() stateFn { - // When we are here, we have read all the labels, so for the - // special case of a summary/histogram, we can finally find out - // if the metric already exists. 
- if p.currentMF.GetType() == dto.MetricType_SUMMARY { - signature := model.LabelsToSignature(p.currentLabels) - if summary := p.summaries[signature]; summary != nil { - p.currentMetric = summary - } else { - p.summaries[signature] = p.currentMetric - p.currentMF.Metric = append(p.currentMF.Metric, p.currentMetric) - } - } else if p.currentMF.GetType() == dto.MetricType_HISTOGRAM { - signature := model.LabelsToSignature(p.currentLabels) - if histogram := p.histograms[signature]; histogram != nil { - p.currentMetric = histogram - } else { - p.histograms[signature] = p.currentMetric - p.currentMF.Metric = append(p.currentMF.Metric, p.currentMetric) - } - } else { - p.currentMF.Metric = append(p.currentMF.Metric, p.currentMetric) - } - if p.readTokenUntilWhitespace(); p.err != nil { - return nil // Unexpected end of input. - } - value, err := strconv.ParseFloat(p.currentToken.String(), 64) - if err != nil { - // Create a more helpful error message. - p.parseError(fmt.Sprintf("expected float as value, got %q", p.currentToken.String())) - return nil - } - switch p.currentMF.GetType() { - case dto.MetricType_COUNTER: - p.currentMetric.Counter = &dto.Counter{Value: proto.Float64(value)} - case dto.MetricType_GAUGE: - p.currentMetric.Gauge = &dto.Gauge{Value: proto.Float64(value)} - case dto.MetricType_UNTYPED: - p.currentMetric.Untyped = &dto.Untyped{Value: proto.Float64(value)} - case dto.MetricType_SUMMARY: - // *sigh* - if p.currentMetric.Summary == nil { - p.currentMetric.Summary = &dto.Summary{} - } - switch { - case p.currentIsSummaryCount: - p.currentMetric.Summary.SampleCount = proto.Uint64(uint64(value)) - case p.currentIsSummarySum: - p.currentMetric.Summary.SampleSum = proto.Float64(value) - case !math.IsNaN(p.currentQuantile): - p.currentMetric.Summary.Quantile = append( - p.currentMetric.Summary.Quantile, - &dto.Quantile{ - Quantile: proto.Float64(p.currentQuantile), - Value: proto.Float64(value), - }, - ) - } - case dto.MetricType_HISTOGRAM: - // *sigh* - if 
p.currentMetric.Histogram == nil { - p.currentMetric.Histogram = &dto.Histogram{} - } - switch { - case p.currentIsHistogramCount: - p.currentMetric.Histogram.SampleCount = proto.Uint64(uint64(value)) - case p.currentIsHistogramSum: - p.currentMetric.Histogram.SampleSum = proto.Float64(value) - case !math.IsNaN(p.currentBucket): - p.currentMetric.Histogram.Bucket = append( - p.currentMetric.Histogram.Bucket, - &dto.Bucket{ - UpperBound: proto.Float64(p.currentBucket), - CumulativeCount: proto.Uint64(uint64(value)), - }, - ) - } - default: - p.err = fmt.Errorf("unexpected type for metric name %q", p.currentMF.GetName()) - } - if p.currentByte == '\n' { - return p.startOfLine - } - return p.startTimestamp -} - -// startTimestamp represents the state where the next byte read from p.buf is -// the start of the timestamp (or whitespace leading up to it). -func (p *TextParser) startTimestamp() stateFn { - if p.skipBlankTab(); p.err != nil { - return nil // Unexpected end of input. - } - if p.readTokenUntilWhitespace(); p.err != nil { - return nil // Unexpected end of input. - } - timestamp, err := strconv.ParseInt(p.currentToken.String(), 10, 64) - if err != nil { - // Create a more helpful error message. - p.parseError(fmt.Sprintf("expected integer as timestamp, got %q", p.currentToken.String())) - return nil - } - p.currentMetric.TimestampMs = proto.Int64(timestamp) - if p.readTokenUntilNewline(false); p.err != nil { - return nil // Unexpected end of input. - } - if p.currentToken.Len() > 0 { - p.parseError(fmt.Sprintf("spurious string after timestamp: %q", p.currentToken.String())) - return nil - } - return p.startOfLine -} - -// readingHelp represents the state where the last byte read (now in -// p.currentByte) is the first byte of the docstring after 'HELP'. 
-func (p *TextParser) readingHelp() stateFn { - if p.currentMF.Help != nil { - p.parseError(fmt.Sprintf("second HELP line for metric name %q", p.currentMF.GetName())) - return nil - } - // Rest of line is the docstring. - if p.readTokenUntilNewline(true); p.err != nil { - return nil // Unexpected end of input. - } - p.currentMF.Help = proto.String(p.currentToken.String()) - return p.startOfLine -} - -// readingType represents the state where the last byte read (now in -// p.currentByte) is the first byte of the type hint after 'HELP'. -func (p *TextParser) readingType() stateFn { - if p.currentMF.Type != nil { - p.parseError(fmt.Sprintf("second TYPE line for metric name %q, or TYPE reported after samples", p.currentMF.GetName())) - return nil - } - // Rest of line is the type. - if p.readTokenUntilNewline(false); p.err != nil { - return nil // Unexpected end of input. - } - metricType, ok := dto.MetricType_value[strings.ToUpper(p.currentToken.String())] - if !ok { - p.parseError(fmt.Sprintf("unknown metric type %q", p.currentToken.String())) - return nil - } - p.currentMF.Type = dto.MetricType(metricType).Enum() - return p.startOfLine -} - -// parseError sets p.err to a ParseError at the current line with the given -// message. -func (p *TextParser) parseError(msg string) { - p.err = ParseError{ - Line: p.lineCount, - Msg: msg, - } -} - -// skipBlankTab reads (and discards) bytes from p.buf until it encounters a byte -// that is neither ' ' nor '\t'. That byte is left in p.currentByte. -func (p *TextParser) skipBlankTab() { - for { - if p.currentByte, p.err = p.buf.ReadByte(); p.err != nil || !isBlankOrTab(p.currentByte) { - return - } - } -} - -// skipBlankTabIfCurrentBlankTab works exactly as skipBlankTab but doesn't do -// anything if p.currentByte is neither ' ' nor '\t'. -func (p *TextParser) skipBlankTabIfCurrentBlankTab() { - if isBlankOrTab(p.currentByte) { - p.skipBlankTab() - } -} - -// readTokenUntilWhitespace copies bytes from p.buf into p.currentToken. 
The -// first byte considered is the byte already read (now in p.currentByte). The -// first whitespace byte encountered is still copied into p.currentByte, but not -// into p.currentToken. -func (p *TextParser) readTokenUntilWhitespace() { - p.currentToken.Reset() - for p.err == nil && !isBlankOrTab(p.currentByte) && p.currentByte != '\n' { - p.currentToken.WriteByte(p.currentByte) - p.currentByte, p.err = p.buf.ReadByte() - } -} - -// readTokenUntilNewline copies bytes from p.buf into p.currentToken. The first -// byte considered is the byte already read (now in p.currentByte). The first -// newline byte encountered is still copied into p.currentByte, but not into -// p.currentToken. If recognizeEscapeSequence is true, two escape sequences are -// recognized: '\\' translates into '\', and '\n' into a line-feed character. -// All other escape sequences are invalid and cause an error. -func (p *TextParser) readTokenUntilNewline(recognizeEscapeSequence bool) { - p.currentToken.Reset() - escaped := false - for p.err == nil { - if recognizeEscapeSequence && escaped { - switch p.currentByte { - case '\\': - p.currentToken.WriteByte(p.currentByte) - case 'n': - p.currentToken.WriteByte('\n') - default: - p.parseError(fmt.Sprintf("invalid escape sequence '\\%c'", p.currentByte)) - return - } - escaped = false - } else { - switch p.currentByte { - case '\n': - return - case '\\': - escaped = true - default: - p.currentToken.WriteByte(p.currentByte) - } - } - p.currentByte, p.err = p.buf.ReadByte() - } -} - -// readTokenAsMetricName copies a metric name from p.buf into p.currentToken. -// The first byte considered is the byte already read (now in p.currentByte). -// The first byte not part of a metric name is still copied into p.currentByte, -// but not into p.currentToken. 
-func (p *TextParser) readTokenAsMetricName() { - p.currentToken.Reset() - if !isValidMetricNameStart(p.currentByte) { - return - } - for { - p.currentToken.WriteByte(p.currentByte) - p.currentByte, p.err = p.buf.ReadByte() - if p.err != nil || !isValidMetricNameContinuation(p.currentByte) { - return - } - } -} - -// readTokenAsLabelName copies a label name from p.buf into p.currentToken. -// The first byte considered is the byte already read (now in p.currentByte). -// The first byte not part of a label name is still copied into p.currentByte, -// but not into p.currentToken. -func (p *TextParser) readTokenAsLabelName() { - p.currentToken.Reset() - if !isValidLabelNameStart(p.currentByte) { - return - } - for { - p.currentToken.WriteByte(p.currentByte) - p.currentByte, p.err = p.buf.ReadByte() - if p.err != nil || !isValidLabelNameContinuation(p.currentByte) { - return - } - } -} - -// readTokenAsLabelValue copies a label value from p.buf into p.currentToken. -// In contrast to the other 'readTokenAs...' functions, which start with the -// last read byte in p.currentByte, this method ignores p.currentByte and starts -// with reading a new byte from p.buf. The first byte not part of a label value -// is still copied into p.currentByte, but not into p.currentToken. 
-func (p *TextParser) readTokenAsLabelValue() { - p.currentToken.Reset() - escaped := false - for { - if p.currentByte, p.err = p.buf.ReadByte(); p.err != nil { - return - } - if escaped { - switch p.currentByte { - case '"', '\\': - p.currentToken.WriteByte(p.currentByte) - case 'n': - p.currentToken.WriteByte('\n') - default: - p.parseError(fmt.Sprintf("invalid escape sequence '\\%c'", p.currentByte)) - return - } - escaped = false - continue - } - switch p.currentByte { - case '"': - return - case '\n': - p.parseError(fmt.Sprintf("label value %q contains unescaped new-line", p.currentToken.String())) - return - case '\\': - escaped = true - default: - p.currentToken.WriteByte(p.currentByte) - } - } -} - -func (p *TextParser) setOrCreateCurrentMF() { - p.currentIsSummaryCount = false - p.currentIsSummarySum = false - p.currentIsHistogramCount = false - p.currentIsHistogramSum = false - name := p.currentToken.String() - if p.currentMF = p.metricFamiliesByName[name]; p.currentMF != nil { - return - } - // Try out if this is a _sum or _count for a summary/histogram. 
- summaryName := summaryMetricName(name) - if p.currentMF = p.metricFamiliesByName[summaryName]; p.currentMF != nil { - if p.currentMF.GetType() == dto.MetricType_SUMMARY { - if isCount(name) { - p.currentIsSummaryCount = true - } - if isSum(name) { - p.currentIsSummarySum = true - } - return - } - } - histogramName := histogramMetricName(name) - if p.currentMF = p.metricFamiliesByName[histogramName]; p.currentMF != nil { - if p.currentMF.GetType() == dto.MetricType_HISTOGRAM { - if isCount(name) { - p.currentIsHistogramCount = true - } - if isSum(name) { - p.currentIsHistogramSum = true - } - return - } - } - p.currentMF = &dto.MetricFamily{Name: proto.String(name)} - p.metricFamiliesByName[name] = p.currentMF -} - -func isValidLabelNameStart(b byte) bool { - return (b >= 'a' && b <= 'z') || (b >= 'A' && b <= 'Z') || b == '_' -} - -func isValidLabelNameContinuation(b byte) bool { - return isValidLabelNameStart(b) || (b >= '0' && b <= '9') -} - -func isValidMetricNameStart(b byte) bool { - return isValidLabelNameStart(b) || b == ':' -} - -func isValidMetricNameContinuation(b byte) bool { - return isValidLabelNameContinuation(b) || b == ':' -} - -func isBlankOrTab(b byte) bool { - return b == ' ' || b == '\t' -} - -func isCount(name string) bool { - return len(name) > 6 && name[len(name)-6:] == "_count" -} - -func isSum(name string) bool { - return len(name) > 4 && name[len(name)-4:] == "_sum" -} - -func isBucket(name string) bool { - return len(name) > 7 && name[len(name)-7:] == "_bucket" -} - -func summaryMetricName(name string) string { - switch { - case isCount(name): - return name[:len(name)-6] - case isSum(name): - return name[:len(name)-4] - default: - return name - } -} - -func histogramMetricName(name string) string { - switch { - case isCount(name): - return name[:len(name)-6] - case isSum(name): - return name[:len(name)-4] - case isBucket(name): - return name[:len(name)-7] - default: - return name - } -} diff --git 
a/vendor/github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg/README.txt b/vendor/github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg/README.txt deleted file mode 100644 index 7723656..0000000 --- a/vendor/github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg/README.txt +++ /dev/null @@ -1,67 +0,0 @@ -PACKAGE - -package goautoneg -import "bitbucket.org/ww/goautoneg" - -HTTP Content-Type Autonegotiation. - -The functions in this package implement the behaviour specified in -http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html - -Copyright (c) 2011, Open Knowledge Foundation Ltd. -All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. - - Redistributions in binary form must reproduce the above copyright - notice, this list of conditions and the following disclaimer in - the documentation and/or other materials provided with the - distribution. - - Neither the name of the Open Knowledge Foundation Ltd. nor the - names of its contributors may be used to endorse or promote - products derived from this software without specific prior written - permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - - -FUNCTIONS - -func Negotiate(header string, alternatives []string) (content_type string) -Negotiate the most appropriate content_type given the accept header -and a list of alternatives. - -func ParseAccept(header string) (accept []Accept) -Parse an Accept Header string returning a sorted list -of clauses - - -TYPES - -type Accept struct { - Type, SubType string - Q float32 - Params map[string]string -} -Structure to represent a clause in an HTTP Accept Header - - -SUBDIRECTORIES - - .hg diff --git a/vendor/github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg/autoneg.go b/vendor/github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg/autoneg.go deleted file mode 100644 index 648b38c..0000000 --- a/vendor/github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg/autoneg.go +++ /dev/null @@ -1,162 +0,0 @@ -/* -HTTP Content-Type Autonegotiation. - -The functions in this package implement the behaviour specified in -http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html - -Copyright (c) 2011, Open Knowledge Foundation Ltd. -All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. 
- - Redistributions in binary form must reproduce the above copyright - notice, this list of conditions and the following disclaimer in - the documentation and/or other materials provided with the - distribution. - - Neither the name of the Open Knowledge Foundation Ltd. nor the - names of its contributors may be used to endorse or promote - products derived from this software without specific prior written - permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
- - -*/ -package goautoneg - -import ( - "sort" - "strconv" - "strings" -) - -// Structure to represent a clause in an HTTP Accept Header -type Accept struct { - Type, SubType string - Q float64 - Params map[string]string -} - -// For internal use, so that we can use the sort interface -type accept_slice []Accept - -func (accept accept_slice) Len() int { - slice := []Accept(accept) - return len(slice) -} - -func (accept accept_slice) Less(i, j int) bool { - slice := []Accept(accept) - ai, aj := slice[i], slice[j] - if ai.Q > aj.Q { - return true - } - if ai.Type != "*" && aj.Type == "*" { - return true - } - if ai.SubType != "*" && aj.SubType == "*" { - return true - } - return false -} - -func (accept accept_slice) Swap(i, j int) { - slice := []Accept(accept) - slice[i], slice[j] = slice[j], slice[i] -} - -// Parse an Accept Header string returning a sorted list -// of clauses -func ParseAccept(header string) (accept []Accept) { - parts := strings.Split(header, ",") - accept = make([]Accept, 0, len(parts)) - for _, part := range parts { - part := strings.Trim(part, " ") - - a := Accept{} - a.Params = make(map[string]string) - a.Q = 1.0 - - mrp := strings.Split(part, ";") - - media_range := mrp[0] - sp := strings.Split(media_range, "/") - a.Type = strings.Trim(sp[0], " ") - - switch { - case len(sp) == 1 && a.Type == "*": - a.SubType = "*" - case len(sp) == 2: - a.SubType = strings.Trim(sp[1], " ") - default: - continue - } - - if len(mrp) == 1 { - accept = append(accept, a) - continue - } - - for _, param := range mrp[1:] { - sp := strings.SplitN(param, "=", 2) - if len(sp) != 2 { - continue - } - token := strings.Trim(sp[0], " ") - if token == "q" { - a.Q, _ = strconv.ParseFloat(sp[1], 32) - } else { - a.Params[token] = strings.Trim(sp[1], " ") - } - } - - accept = append(accept, a) - } - - slice := accept_slice(accept) - sort.Sort(slice) - - return -} - -// Negotiate the most appropriate content_type given the accept header -// and a list of alternatives. 
-func Negotiate(header string, alternatives []string) (content_type string) { - asp := make([][]string, 0, len(alternatives)) - for _, ctype := range alternatives { - asp = append(asp, strings.SplitN(ctype, "/", 2)) - } - for _, clause := range ParseAccept(header) { - for i, ctsp := range asp { - if clause.Type == ctsp[0] && clause.SubType == ctsp[1] { - content_type = alternatives[i] - return - } - if clause.Type == ctsp[0] && clause.SubType == "*" { - content_type = alternatives[i] - return - } - if clause.Type == "*" && clause.SubType == "*" { - content_type = alternatives[i] - return - } - } - } - return -} diff --git a/vendor/github.com/prometheus/common/model/alert.go b/vendor/github.com/prometheus/common/model/alert.go deleted file mode 100644 index 35e739c..0000000 --- a/vendor/github.com/prometheus/common/model/alert.go +++ /dev/null @@ -1,136 +0,0 @@ -// Copyright 2013 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package model - -import ( - "fmt" - "time" -) - -type AlertStatus string - -const ( - AlertFiring AlertStatus = "firing" - AlertResolved AlertStatus = "resolved" -) - -// Alert is a generic representation of an alert in the Prometheus eco-system. -type Alert struct { - // Label value pairs for purpose of aggregation, matching, and disposition - // dispatching. This must minimally include an "alertname" label. - Labels LabelSet `json:"labels"` - - // Extra key/value information which does not define alert identity. 
- Annotations LabelSet `json:"annotations"` - - // The known time range for this alert. Both ends are optional. - StartsAt time.Time `json:"startsAt,omitempty"` - EndsAt time.Time `json:"endsAt,omitempty"` - GeneratorURL string `json:"generatorURL"` -} - -// Name returns the name of the alert. It is equivalent to the "alertname" label. -func (a *Alert) Name() string { - return string(a.Labels[AlertNameLabel]) -} - -// Fingerprint returns a unique hash for the alert. It is equivalent to -// the fingerprint of the alert's label set. -func (a *Alert) Fingerprint() Fingerprint { - return a.Labels.Fingerprint() -} - -func (a *Alert) String() string { - s := fmt.Sprintf("%s[%s]", a.Name(), a.Fingerprint().String()[:7]) - if a.Resolved() { - return s + "[resolved]" - } - return s + "[active]" -} - -// Resolved returns true iff the activity interval ended in the past. -func (a *Alert) Resolved() bool { - return a.ResolvedAt(time.Now()) -} - -// ResolvedAt returns true off the activity interval ended before -// the given timestamp. -func (a *Alert) ResolvedAt(ts time.Time) bool { - if a.EndsAt.IsZero() { - return false - } - return !a.EndsAt.After(ts) -} - -// Status returns the status of the alert. -func (a *Alert) Status() AlertStatus { - if a.Resolved() { - return AlertResolved - } - return AlertFiring -} - -// Validate checks whether the alert data is inconsistent. -func (a *Alert) Validate() error { - if a.StartsAt.IsZero() { - return fmt.Errorf("start time missing") - } - if !a.EndsAt.IsZero() && a.EndsAt.Before(a.StartsAt) { - return fmt.Errorf("start time must be before end time") - } - if err := a.Labels.Validate(); err != nil { - return fmt.Errorf("invalid label set: %s", err) - } - if len(a.Labels) == 0 { - return fmt.Errorf("at least one label pair required") - } - if err := a.Annotations.Validate(); err != nil { - return fmt.Errorf("invalid annotations: %s", err) - } - return nil -} - -// Alert is a list of alerts that can be sorted in chronological order. 
-type Alerts []*Alert - -func (as Alerts) Len() int { return len(as) } -func (as Alerts) Swap(i, j int) { as[i], as[j] = as[j], as[i] } - -func (as Alerts) Less(i, j int) bool { - if as[i].StartsAt.Before(as[j].StartsAt) { - return true - } - if as[i].EndsAt.Before(as[j].EndsAt) { - return true - } - return as[i].Fingerprint() < as[j].Fingerprint() -} - -// HasFiring returns true iff one of the alerts is not resolved. -func (as Alerts) HasFiring() bool { - for _, a := range as { - if !a.Resolved() { - return true - } - } - return false -} - -// Status returns StatusFiring iff at least one of the alerts is firing. -func (as Alerts) Status() AlertStatus { - if as.HasFiring() { - return AlertFiring - } - return AlertResolved -} diff --git a/vendor/github.com/prometheus/common/model/fingerprinting.go b/vendor/github.com/prometheus/common/model/fingerprinting.go deleted file mode 100644 index fc4de41..0000000 --- a/vendor/github.com/prometheus/common/model/fingerprinting.go +++ /dev/null @@ -1,105 +0,0 @@ -// Copyright 2013 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package model - -import ( - "fmt" - "strconv" -) - -// Fingerprint provides a hash-capable representation of a Metric. -// For our purposes, FNV-1A 64-bit is used. -type Fingerprint uint64 - -// FingerprintFromString transforms a string representation into a Fingerprint. 
-func FingerprintFromString(s string) (Fingerprint, error) { - num, err := strconv.ParseUint(s, 16, 64) - return Fingerprint(num), err -} - -// ParseFingerprint parses the input string into a fingerprint. -func ParseFingerprint(s string) (Fingerprint, error) { - num, err := strconv.ParseUint(s, 16, 64) - if err != nil { - return 0, err - } - return Fingerprint(num), nil -} - -func (f Fingerprint) String() string { - return fmt.Sprintf("%016x", uint64(f)) -} - -// Fingerprints represents a collection of Fingerprint subject to a given -// natural sorting scheme. It implements sort.Interface. -type Fingerprints []Fingerprint - -// Len implements sort.Interface. -func (f Fingerprints) Len() int { - return len(f) -} - -// Less implements sort.Interface. -func (f Fingerprints) Less(i, j int) bool { - return f[i] < f[j] -} - -// Swap implements sort.Interface. -func (f Fingerprints) Swap(i, j int) { - f[i], f[j] = f[j], f[i] -} - -// FingerprintSet is a set of Fingerprints. -type FingerprintSet map[Fingerprint]struct{} - -// Equal returns true if both sets contain the same elements (and not more). -func (s FingerprintSet) Equal(o FingerprintSet) bool { - if len(s) != len(o) { - return false - } - - for k := range s { - if _, ok := o[k]; !ok { - return false - } - } - - return true -} - -// Intersection returns the elements contained in both sets. 
-func (s FingerprintSet) Intersection(o FingerprintSet) FingerprintSet { - myLength, otherLength := len(s), len(o) - if myLength == 0 || otherLength == 0 { - return FingerprintSet{} - } - - subSet := s - superSet := o - - if otherLength < myLength { - subSet = o - superSet = s - } - - out := FingerprintSet{} - - for k := range subSet { - if _, ok := superSet[k]; ok { - out[k] = struct{}{} - } - } - - return out -} diff --git a/vendor/github.com/prometheus/common/model/fnv.go b/vendor/github.com/prometheus/common/model/fnv.go deleted file mode 100644 index 038fc1c..0000000 --- a/vendor/github.com/prometheus/common/model/fnv.go +++ /dev/null @@ -1,42 +0,0 @@ -// Copyright 2015 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package model - -// Inline and byte-free variant of hash/fnv's fnv64a. - -const ( - offset64 = 14695981039346656037 - prime64 = 1099511628211 -) - -// hashNew initializies a new fnv64a hash value. -func hashNew() uint64 { - return offset64 -} - -// hashAdd adds a string to a fnv64a hash value, returning the updated hash. -func hashAdd(h uint64, s string) uint64 { - for i := 0; i < len(s); i++ { - h ^= uint64(s[i]) - h *= prime64 - } - return h -} - -// hashAddByte adds a byte to a fnv64a hash value, returning the updated hash. 
-func hashAddByte(h uint64, b byte) uint64 { - h ^= uint64(b) - h *= prime64 - return h -} diff --git a/vendor/github.com/prometheus/common/model/labels.go b/vendor/github.com/prometheus/common/model/labels.go deleted file mode 100644 index 41051a0..0000000 --- a/vendor/github.com/prometheus/common/model/labels.go +++ /dev/null @@ -1,210 +0,0 @@ -// Copyright 2013 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package model - -import ( - "encoding/json" - "fmt" - "regexp" - "strings" - "unicode/utf8" -) - -const ( - // AlertNameLabel is the name of the label containing the an alert's name. - AlertNameLabel = "alertname" - - // ExportedLabelPrefix is the prefix to prepend to the label names present in - // exported metrics if a label of the same name is added by the server. - ExportedLabelPrefix = "exported_" - - // MetricNameLabel is the label name indicating the metric name of a - // timeseries. - MetricNameLabel = "__name__" - - // SchemeLabel is the name of the label that holds the scheme on which to - // scrape a target. - SchemeLabel = "__scheme__" - - // AddressLabel is the name of the label that holds the address of - // a scrape target. - AddressLabel = "__address__" - - // MetricsPathLabel is the name of the label that holds the path on which to - // scrape a target. - MetricsPathLabel = "__metrics_path__" - - // ReservedLabelPrefix is a prefix which is not legal in user-supplied - // label names. 
- ReservedLabelPrefix = "__" - - // MetaLabelPrefix is a prefix for labels that provide meta information. - // Labels with this prefix are used for intermediate label processing and - // will not be attached to time series. - MetaLabelPrefix = "__meta_" - - // TmpLabelPrefix is a prefix for temporary labels as part of relabelling. - // Labels with this prefix are used for intermediate label processing and - // will not be attached to time series. This is reserved for use in - // Prometheus configuration files by users. - TmpLabelPrefix = "__tmp_" - - // ParamLabelPrefix is a prefix for labels that provide URL parameters - // used to scrape a target. - ParamLabelPrefix = "__param_" - - // JobLabel is the label name indicating the job from which a timeseries - // was scraped. - JobLabel = "job" - - // InstanceLabel is the label name used for the instance label. - InstanceLabel = "instance" - - // BucketLabel is used for the label that defines the upper bound of a - // bucket of a histogram ("le" -> "less or equal"). - BucketLabel = "le" - - // QuantileLabel is used for the label that defines the quantile in a - // summary. - QuantileLabel = "quantile" -) - -// LabelNameRE is a regular expression matching valid label names. Note that the -// IsValid method of LabelName performs the same check but faster than a match -// with this regular expression. -var LabelNameRE = regexp.MustCompile("^[a-zA-Z_][a-zA-Z0-9_]*$") - -// A LabelName is a key for a LabelSet or Metric. It has a value associated -// therewith. -type LabelName string - -// IsValid is true iff the label name matches the pattern of LabelNameRE. This -// method, however, does not use LabelNameRE for the check but a much faster -// hardcoded implementation. 
-func (ln LabelName) IsValid() bool { - if len(ln) == 0 { - return false - } - for i, b := range ln { - if !((b >= 'a' && b <= 'z') || (b >= 'A' && b <= 'Z') || b == '_' || (b >= '0' && b <= '9' && i > 0)) { - return false - } - } - return true -} - -// UnmarshalYAML implements the yaml.Unmarshaler interface. -func (ln *LabelName) UnmarshalYAML(unmarshal func(interface{}) error) error { - var s string - if err := unmarshal(&s); err != nil { - return err - } - if !LabelName(s).IsValid() { - return fmt.Errorf("%q is not a valid label name", s) - } - *ln = LabelName(s) - return nil -} - -// UnmarshalJSON implements the json.Unmarshaler interface. -func (ln *LabelName) UnmarshalJSON(b []byte) error { - var s string - if err := json.Unmarshal(b, &s); err != nil { - return err - } - if !LabelName(s).IsValid() { - return fmt.Errorf("%q is not a valid label name", s) - } - *ln = LabelName(s) - return nil -} - -// LabelNames is a sortable LabelName slice. In implements sort.Interface. -type LabelNames []LabelName - -func (l LabelNames) Len() int { - return len(l) -} - -func (l LabelNames) Less(i, j int) bool { - return l[i] < l[j] -} - -func (l LabelNames) Swap(i, j int) { - l[i], l[j] = l[j], l[i] -} - -func (l LabelNames) String() string { - labelStrings := make([]string, 0, len(l)) - for _, label := range l { - labelStrings = append(labelStrings, string(label)) - } - return strings.Join(labelStrings, ", ") -} - -// A LabelValue is an associated value for a LabelName. -type LabelValue string - -// IsValid returns true iff the string is a valid UTF8. -func (lv LabelValue) IsValid() bool { - return utf8.ValidString(string(lv)) -} - -// LabelValues is a sortable LabelValue slice. It implements sort.Interface. 
-type LabelValues []LabelValue - -func (l LabelValues) Len() int { - return len(l) -} - -func (l LabelValues) Less(i, j int) bool { - return string(l[i]) < string(l[j]) -} - -func (l LabelValues) Swap(i, j int) { - l[i], l[j] = l[j], l[i] -} - -// LabelPair pairs a name with a value. -type LabelPair struct { - Name LabelName - Value LabelValue -} - -// LabelPairs is a sortable slice of LabelPair pointers. It implements -// sort.Interface. -type LabelPairs []*LabelPair - -func (l LabelPairs) Len() int { - return len(l) -} - -func (l LabelPairs) Less(i, j int) bool { - switch { - case l[i].Name > l[j].Name: - return false - case l[i].Name < l[j].Name: - return true - case l[i].Value > l[j].Value: - return false - case l[i].Value < l[j].Value: - return true - default: - return false - } -} - -func (l LabelPairs) Swap(i, j int) { - l[i], l[j] = l[j], l[i] -} diff --git a/vendor/github.com/prometheus/common/model/labelset.go b/vendor/github.com/prometheus/common/model/labelset.go deleted file mode 100644 index 6eda08a..0000000 --- a/vendor/github.com/prometheus/common/model/labelset.go +++ /dev/null @@ -1,169 +0,0 @@ -// Copyright 2013 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package model - -import ( - "encoding/json" - "fmt" - "sort" - "strings" -) - -// A LabelSet is a collection of LabelName and LabelValue pairs. 
The LabelSet -// may be fully-qualified down to the point where it may resolve to a single -// Metric in the data store or not. All operations that occur within the realm -// of a LabelSet can emit a vector of Metric entities to which the LabelSet may -// match. -type LabelSet map[LabelName]LabelValue - -// Validate checks whether all names and values in the label set -// are valid. -func (ls LabelSet) Validate() error { - for ln, lv := range ls { - if !ln.IsValid() { - return fmt.Errorf("invalid name %q", ln) - } - if !lv.IsValid() { - return fmt.Errorf("invalid value %q", lv) - } - } - return nil -} - -// Equal returns true iff both label sets have exactly the same key/value pairs. -func (ls LabelSet) Equal(o LabelSet) bool { - if len(ls) != len(o) { - return false - } - for ln, lv := range ls { - olv, ok := o[ln] - if !ok { - return false - } - if olv != lv { - return false - } - } - return true -} - -// Before compares the metrics, using the following criteria: -// -// If m has fewer labels than o, it is before o. If it has more, it is not. -// -// If the number of labels is the same, the superset of all label names is -// sorted alphanumerically. The first differing label pair found in that order -// determines the outcome: If the label does not exist at all in m, then m is -// before o, and vice versa. Otherwise the label value is compared -// alphanumerically. -// -// If m and o are equal, the method returns false. -func (ls LabelSet) Before(o LabelSet) bool { - if len(ls) < len(o) { - return true - } - if len(ls) > len(o) { - return false - } - - lns := make(LabelNames, 0, len(ls)+len(o)) - for ln := range ls { - lns = append(lns, ln) - } - for ln := range o { - lns = append(lns, ln) - } - // It's probably not worth it to de-dup lns. 
- sort.Sort(lns) - for _, ln := range lns { - mlv, ok := ls[ln] - if !ok { - return true - } - olv, ok := o[ln] - if !ok { - return false - } - if mlv < olv { - return true - } - if mlv > olv { - return false - } - } - return false -} - -// Clone returns a copy of the label set. -func (ls LabelSet) Clone() LabelSet { - lsn := make(LabelSet, len(ls)) - for ln, lv := range ls { - lsn[ln] = lv - } - return lsn -} - -// Merge is a helper function to non-destructively merge two label sets. -func (l LabelSet) Merge(other LabelSet) LabelSet { - result := make(LabelSet, len(l)) - - for k, v := range l { - result[k] = v - } - - for k, v := range other { - result[k] = v - } - - return result -} - -func (l LabelSet) String() string { - lstrs := make([]string, 0, len(l)) - for l, v := range l { - lstrs = append(lstrs, fmt.Sprintf("%s=%q", l, v)) - } - - sort.Strings(lstrs) - return fmt.Sprintf("{%s}", strings.Join(lstrs, ", ")) -} - -// Fingerprint returns the LabelSet's fingerprint. -func (ls LabelSet) Fingerprint() Fingerprint { - return labelSetToFingerprint(ls) -} - -// FastFingerprint returns the LabelSet's Fingerprint calculated by a faster hashing -// algorithm, which is, however, more susceptible to hash collisions. -func (ls LabelSet) FastFingerprint() Fingerprint { - return labelSetToFastFingerprint(ls) -} - -// UnmarshalJSON implements the json.Unmarshaler interface. -func (l *LabelSet) UnmarshalJSON(b []byte) error { - var m map[LabelName]LabelValue - if err := json.Unmarshal(b, &m); err != nil { - return err - } - // encoding/json only unmarshals maps of the form map[string]T. It treats - // LabelName as a string and does not call its UnmarshalJSON method. - // Thus, we have to replicate the behavior here. 
- for ln := range m { - if !ln.IsValid() { - return fmt.Errorf("%q is not a valid label name", ln) - } - } - *l = LabelSet(m) - return nil -} diff --git a/vendor/github.com/prometheus/common/model/metric.go b/vendor/github.com/prometheus/common/model/metric.go deleted file mode 100644 index f725090..0000000 --- a/vendor/github.com/prometheus/common/model/metric.go +++ /dev/null @@ -1,103 +0,0 @@ -// Copyright 2013 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package model - -import ( - "fmt" - "regexp" - "sort" - "strings" -) - -var ( - separator = []byte{0} - // MetricNameRE is a regular expression matching valid metric - // names. Note that the IsValidMetricName function performs the same - // check but faster than a match with this regular expression. - MetricNameRE = regexp.MustCompile(`^[a-zA-Z_:][a-zA-Z0-9_:]*$`) -) - -// A Metric is similar to a LabelSet, but the key difference is that a Metric is -// a singleton and refers to one and only one stream of samples. -type Metric LabelSet - -// Equal compares the metrics. -func (m Metric) Equal(o Metric) bool { - return LabelSet(m).Equal(LabelSet(o)) -} - -// Before compares the metrics' underlying label sets. -func (m Metric) Before(o Metric) bool { - return LabelSet(m).Before(LabelSet(o)) -} - -// Clone returns a copy of the Metric. 
-func (m Metric) Clone() Metric { - clone := make(Metric, len(m)) - for k, v := range m { - clone[k] = v - } - return clone -} - -func (m Metric) String() string { - metricName, hasName := m[MetricNameLabel] - numLabels := len(m) - 1 - if !hasName { - numLabels = len(m) - } - labelStrings := make([]string, 0, numLabels) - for label, value := range m { - if label != MetricNameLabel { - labelStrings = append(labelStrings, fmt.Sprintf("%s=%q", label, value)) - } - } - - switch numLabels { - case 0: - if hasName { - return string(metricName) - } - return "{}" - default: - sort.Strings(labelStrings) - return fmt.Sprintf("%s{%s}", metricName, strings.Join(labelStrings, ", ")) - } -} - -// Fingerprint returns a Metric's Fingerprint. -func (m Metric) Fingerprint() Fingerprint { - return LabelSet(m).Fingerprint() -} - -// FastFingerprint returns a Metric's Fingerprint calculated by a faster hashing -// algorithm, which is, however, more susceptible to hash collisions. -func (m Metric) FastFingerprint() Fingerprint { - return LabelSet(m).FastFingerprint() -} - -// IsValidMetricName returns true iff name matches the pattern of MetricNameRE. -// This function, however, does not use MetricNameRE for the check but a much -// faster hardcoded implementation. -func IsValidMetricName(n LabelValue) bool { - if len(n) == 0 { - return false - } - for i, b := range n { - if !((b >= 'a' && b <= 'z') || (b >= 'A' && b <= 'Z') || b == '_' || b == ':' || (b >= '0' && b <= '9' && i > 0)) { - return false - } - } - return true -} diff --git a/vendor/github.com/prometheus/common/model/model.go b/vendor/github.com/prometheus/common/model/model.go deleted file mode 100644 index a7b9691..0000000 --- a/vendor/github.com/prometheus/common/model/model.go +++ /dev/null @@ -1,16 +0,0 @@ -// Copyright 2013 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Package model contains common data structures that are shared across -// Prometheus components and libraries. -package model diff --git a/vendor/github.com/prometheus/common/model/signature.go b/vendor/github.com/prometheus/common/model/signature.go deleted file mode 100644 index 8762b13..0000000 --- a/vendor/github.com/prometheus/common/model/signature.go +++ /dev/null @@ -1,144 +0,0 @@ -// Copyright 2014 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package model - -import ( - "sort" -) - -// SeparatorByte is a byte that cannot occur in valid UTF-8 sequences and is -// used to separate label names, label values, and other strings from each other -// when calculating their combined hash value (aka signature aka fingerprint). -const SeparatorByte byte = 255 - -var ( - // cache the signature of an empty label set. - emptyLabelSignature = hashNew() -) - -// LabelsToSignature returns a quasi-unique signature (i.e., fingerprint) for a -// given label set. 
(Collisions are possible but unlikely if the number of label -// sets the function is applied to is small.) -func LabelsToSignature(labels map[string]string) uint64 { - if len(labels) == 0 { - return emptyLabelSignature - } - - labelNames := make([]string, 0, len(labels)) - for labelName := range labels { - labelNames = append(labelNames, labelName) - } - sort.Strings(labelNames) - - sum := hashNew() - for _, labelName := range labelNames { - sum = hashAdd(sum, labelName) - sum = hashAddByte(sum, SeparatorByte) - sum = hashAdd(sum, labels[labelName]) - sum = hashAddByte(sum, SeparatorByte) - } - return sum -} - -// labelSetToFingerprint works exactly as LabelsToSignature but takes a LabelSet as -// parameter (rather than a label map) and returns a Fingerprint. -func labelSetToFingerprint(ls LabelSet) Fingerprint { - if len(ls) == 0 { - return Fingerprint(emptyLabelSignature) - } - - labelNames := make(LabelNames, 0, len(ls)) - for labelName := range ls { - labelNames = append(labelNames, labelName) - } - sort.Sort(labelNames) - - sum := hashNew() - for _, labelName := range labelNames { - sum = hashAdd(sum, string(labelName)) - sum = hashAddByte(sum, SeparatorByte) - sum = hashAdd(sum, string(ls[labelName])) - sum = hashAddByte(sum, SeparatorByte) - } - return Fingerprint(sum) -} - -// labelSetToFastFingerprint works similar to labelSetToFingerprint but uses a -// faster and less allocation-heavy hash function, which is more susceptible to -// create hash collisions. Therefore, collision detection should be applied. 
-func labelSetToFastFingerprint(ls LabelSet) Fingerprint { - if len(ls) == 0 { - return Fingerprint(emptyLabelSignature) - } - - var result uint64 - for labelName, labelValue := range ls { - sum := hashNew() - sum = hashAdd(sum, string(labelName)) - sum = hashAddByte(sum, SeparatorByte) - sum = hashAdd(sum, string(labelValue)) - result ^= sum - } - return Fingerprint(result) -} - -// SignatureForLabels works like LabelsToSignature but takes a Metric as -// parameter (rather than a label map) and only includes the labels with the -// specified LabelNames into the signature calculation. The labels passed in -// will be sorted by this function. -func SignatureForLabels(m Metric, labels ...LabelName) uint64 { - if len(labels) == 0 { - return emptyLabelSignature - } - - sort.Sort(LabelNames(labels)) - - sum := hashNew() - for _, label := range labels { - sum = hashAdd(sum, string(label)) - sum = hashAddByte(sum, SeparatorByte) - sum = hashAdd(sum, string(m[label])) - sum = hashAddByte(sum, SeparatorByte) - } - return sum -} - -// SignatureWithoutLabels works like LabelsToSignature but takes a Metric as -// parameter (rather than a label map) and excludes the labels with any of the -// specified LabelNames from the signature calculation. 
-func SignatureWithoutLabels(m Metric, labels map[LabelName]struct{}) uint64 { - if len(m) == 0 { - return emptyLabelSignature - } - - labelNames := make(LabelNames, 0, len(m)) - for labelName := range m { - if _, exclude := labels[labelName]; !exclude { - labelNames = append(labelNames, labelName) - } - } - if len(labelNames) == 0 { - return emptyLabelSignature - } - sort.Sort(labelNames) - - sum := hashNew() - for _, labelName := range labelNames { - sum = hashAdd(sum, string(labelName)) - sum = hashAddByte(sum, SeparatorByte) - sum = hashAdd(sum, string(m[labelName])) - sum = hashAddByte(sum, SeparatorByte) - } - return sum -} diff --git a/vendor/github.com/prometheus/common/model/silence.go b/vendor/github.com/prometheus/common/model/silence.go deleted file mode 100644 index bb99889..0000000 --- a/vendor/github.com/prometheus/common/model/silence.go +++ /dev/null @@ -1,106 +0,0 @@ -// Copyright 2015 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package model - -import ( - "encoding/json" - "fmt" - "regexp" - "time" -) - -// Matcher describes a matches the value of a given label. 
-type Matcher struct { - Name LabelName `json:"name"` - Value string `json:"value"` - IsRegex bool `json:"isRegex"` -} - -func (m *Matcher) UnmarshalJSON(b []byte) error { - type plain Matcher - if err := json.Unmarshal(b, (*plain)(m)); err != nil { - return err - } - - if len(m.Name) == 0 { - return fmt.Errorf("label name in matcher must not be empty") - } - if m.IsRegex { - if _, err := regexp.Compile(m.Value); err != nil { - return err - } - } - return nil -} - -// Validate returns true iff all fields of the matcher have valid values. -func (m *Matcher) Validate() error { - if !m.Name.IsValid() { - return fmt.Errorf("invalid name %q", m.Name) - } - if m.IsRegex { - if _, err := regexp.Compile(m.Value); err != nil { - return fmt.Errorf("invalid regular expression %q", m.Value) - } - } else if !LabelValue(m.Value).IsValid() || len(m.Value) == 0 { - return fmt.Errorf("invalid value %q", m.Value) - } - return nil -} - -// Silence defines the representation of a silence definition in the Prometheus -// eco-system. -type Silence struct { - ID uint64 `json:"id,omitempty"` - - Matchers []*Matcher `json:"matchers"` - - StartsAt time.Time `json:"startsAt"` - EndsAt time.Time `json:"endsAt"` - - CreatedAt time.Time `json:"createdAt,omitempty"` - CreatedBy string `json:"createdBy"` - Comment string `json:"comment,omitempty"` -} - -// Validate returns true iff all fields of the silence have valid values. 
-func (s *Silence) Validate() error { - if len(s.Matchers) == 0 { - return fmt.Errorf("at least one matcher required") - } - for _, m := range s.Matchers { - if err := m.Validate(); err != nil { - return fmt.Errorf("invalid matcher: %s", err) - } - } - if s.StartsAt.IsZero() { - return fmt.Errorf("start time missing") - } - if s.EndsAt.IsZero() { - return fmt.Errorf("end time missing") - } - if s.EndsAt.Before(s.StartsAt) { - return fmt.Errorf("start time must be before end time") - } - if s.CreatedBy == "" { - return fmt.Errorf("creator information missing") - } - if s.Comment == "" { - return fmt.Errorf("comment missing") - } - if s.CreatedAt.IsZero() { - return fmt.Errorf("creation timestamp missing") - } - return nil -} diff --git a/vendor/github.com/prometheus/common/model/time.go b/vendor/github.com/prometheus/common/model/time.go deleted file mode 100644 index 46259b1..0000000 --- a/vendor/github.com/prometheus/common/model/time.go +++ /dev/null @@ -1,264 +0,0 @@ -// Copyright 2013 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package model - -import ( - "fmt" - "math" - "regexp" - "strconv" - "strings" - "time" -) - -const ( - // MinimumTick is the minimum supported time resolution. This has to be - // at least time.Second in order for the code below to work. - minimumTick = time.Millisecond - // second is the Time duration equivalent to one second. 
- second = int64(time.Second / minimumTick) - // The number of nanoseconds per minimum tick. - nanosPerTick = int64(minimumTick / time.Nanosecond) - - // Earliest is the earliest Time representable. Handy for - // initializing a high watermark. - Earliest = Time(math.MinInt64) - // Latest is the latest Time representable. Handy for initializing - // a low watermark. - Latest = Time(math.MaxInt64) -) - -// Time is the number of milliseconds since the epoch -// (1970-01-01 00:00 UTC) excluding leap seconds. -type Time int64 - -// Interval describes an interval between two timestamps. -type Interval struct { - Start, End Time -} - -// Now returns the current time as a Time. -func Now() Time { - return TimeFromUnixNano(time.Now().UnixNano()) -} - -// TimeFromUnix returns the Time equivalent to the Unix Time t -// provided in seconds. -func TimeFromUnix(t int64) Time { - return Time(t * second) -} - -// TimeFromUnixNano returns the Time equivalent to the Unix Time -// t provided in nanoseconds. -func TimeFromUnixNano(t int64) Time { - return Time(t / nanosPerTick) -} - -// Equal reports whether two Times represent the same instant. -func (t Time) Equal(o Time) bool { - return t == o -} - -// Before reports whether the Time t is before o. -func (t Time) Before(o Time) bool { - return t < o -} - -// After reports whether the Time t is after o. -func (t Time) After(o Time) bool { - return t > o -} - -// Add returns the Time t + d. -func (t Time) Add(d time.Duration) Time { - return t + Time(d/minimumTick) -} - -// Sub returns the Duration t - o. -func (t Time) Sub(o Time) time.Duration { - return time.Duration(t-o) * minimumTick -} - -// Time returns the time.Time representation of t. -func (t Time) Time() time.Time { - return time.Unix(int64(t)/second, (int64(t)%second)*nanosPerTick) -} - -// Unix returns t as a Unix time, the number of seconds elapsed -// since January 1, 1970 UTC. 
-func (t Time) Unix() int64 { - return int64(t) / second -} - -// UnixNano returns t as a Unix time, the number of nanoseconds elapsed -// since January 1, 1970 UTC. -func (t Time) UnixNano() int64 { - return int64(t) * nanosPerTick -} - -// The number of digits after the dot. -var dotPrecision = int(math.Log10(float64(second))) - -// String returns a string representation of the Time. -func (t Time) String() string { - return strconv.FormatFloat(float64(t)/float64(second), 'f', -1, 64) -} - -// MarshalJSON implements the json.Marshaler interface. -func (t Time) MarshalJSON() ([]byte, error) { - return []byte(t.String()), nil -} - -// UnmarshalJSON implements the json.Unmarshaler interface. -func (t *Time) UnmarshalJSON(b []byte) error { - p := strings.Split(string(b), ".") - switch len(p) { - case 1: - v, err := strconv.ParseInt(string(p[0]), 10, 64) - if err != nil { - return err - } - *t = Time(v * second) - - case 2: - v, err := strconv.ParseInt(string(p[0]), 10, 64) - if err != nil { - return err - } - v *= second - - prec := dotPrecision - len(p[1]) - if prec < 0 { - p[1] = p[1][:dotPrecision] - } else if prec > 0 { - p[1] = p[1] + strings.Repeat("0", prec) - } - - va, err := strconv.ParseInt(p[1], 10, 32) - if err != nil { - return err - } - - *t = Time(v + va) - - default: - return fmt.Errorf("invalid time %q", string(b)) - } - return nil -} - -// Duration wraps time.Duration. It is used to parse the custom duration format -// from YAML. -// This type should not propagate beyond the scope of input/output processing. 
-type Duration time.Duration - -// Set implements pflag/flag.Value -func (d *Duration) Set(s string) error { - var err error - *d, err = ParseDuration(s) - return err -} - -// Type implements pflag.Value -func (d *Duration) Type() string { - return "duration" -} - -var durationRE = regexp.MustCompile("^([0-9]+)(y|w|d|h|m|s|ms)$") - -// ParseDuration parses a string into a time.Duration, assuming that a year -// always has 365d, a week always has 7d, and a day always has 24h. -func ParseDuration(durationStr string) (Duration, error) { - matches := durationRE.FindStringSubmatch(durationStr) - if len(matches) != 3 { - return 0, fmt.Errorf("not a valid duration string: %q", durationStr) - } - var ( - n, _ = strconv.Atoi(matches[1]) - dur = time.Duration(n) * time.Millisecond - ) - switch unit := matches[2]; unit { - case "y": - dur *= 1000 * 60 * 60 * 24 * 365 - case "w": - dur *= 1000 * 60 * 60 * 24 * 7 - case "d": - dur *= 1000 * 60 * 60 * 24 - case "h": - dur *= 1000 * 60 * 60 - case "m": - dur *= 1000 * 60 - case "s": - dur *= 1000 - case "ms": - // Value already correct - default: - return 0, fmt.Errorf("invalid time unit in duration string: %q", unit) - } - return Duration(dur), nil -} - -func (d Duration) String() string { - var ( - ms = int64(time.Duration(d) / time.Millisecond) - unit = "ms" - ) - if ms == 0 { - return "0s" - } - factors := map[string]int64{ - "y": 1000 * 60 * 60 * 24 * 365, - "w": 1000 * 60 * 60 * 24 * 7, - "d": 1000 * 60 * 60 * 24, - "h": 1000 * 60 * 60, - "m": 1000 * 60, - "s": 1000, - "ms": 1, - } - - switch int64(0) { - case ms % factors["y"]: - unit = "y" - case ms % factors["w"]: - unit = "w" - case ms % factors["d"]: - unit = "d" - case ms % factors["h"]: - unit = "h" - case ms % factors["m"]: - unit = "m" - case ms % factors["s"]: - unit = "s" - } - return fmt.Sprintf("%v%v", ms/factors[unit], unit) -} - -// MarshalYAML implements the yaml.Marshaler interface. 
-func (d Duration) MarshalYAML() (interface{}, error) { - return d.String(), nil -} - -// UnmarshalYAML implements the yaml.Unmarshaler interface. -func (d *Duration) UnmarshalYAML(unmarshal func(interface{}) error) error { - var s string - if err := unmarshal(&s); err != nil { - return err - } - dur, err := ParseDuration(s) - if err != nil { - return err - } - *d = dur - return nil -} diff --git a/vendor/github.com/prometheus/common/model/value.go b/vendor/github.com/prometheus/common/model/value.go deleted file mode 100644 index c9d8fb1..0000000 --- a/vendor/github.com/prometheus/common/model/value.go +++ /dev/null @@ -1,416 +0,0 @@ -// Copyright 2013 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package model - -import ( - "encoding/json" - "fmt" - "math" - "sort" - "strconv" - "strings" -) - -var ( - // ZeroSamplePair is the pseudo zero-value of SamplePair used to signal a - // non-existing sample pair. It is a SamplePair with timestamp Earliest and - // value 0.0. Note that the natural zero value of SamplePair has a timestamp - // of 0, which is possible to appear in a real SamplePair and thus not - // suitable to signal a non-existing SamplePair. - ZeroSamplePair = SamplePair{Timestamp: Earliest} - - // ZeroSample is the pseudo zero-value of Sample used to signal a - // non-existing sample. It is a Sample with timestamp Earliest, value 0.0, - // and metric nil. 
Note that the natural zero value of Sample has a timestamp - // of 0, which is possible to appear in a real Sample and thus not suitable - // to signal a non-existing Sample. - ZeroSample = Sample{Timestamp: Earliest} -) - -// A SampleValue is a representation of a value for a given sample at a given -// time. -type SampleValue float64 - -// MarshalJSON implements json.Marshaler. -func (v SampleValue) MarshalJSON() ([]byte, error) { - return json.Marshal(v.String()) -} - -// UnmarshalJSON implements json.Unmarshaler. -func (v *SampleValue) UnmarshalJSON(b []byte) error { - if len(b) < 2 || b[0] != '"' || b[len(b)-1] != '"' { - return fmt.Errorf("sample value must be a quoted string") - } - f, err := strconv.ParseFloat(string(b[1:len(b)-1]), 64) - if err != nil { - return err - } - *v = SampleValue(f) - return nil -} - -// Equal returns true if the value of v and o is equal or if both are NaN. Note -// that v==o is false if both are NaN. If you want the conventional float -// behavior, use == to compare two SampleValues. -func (v SampleValue) Equal(o SampleValue) bool { - if v == o { - return true - } - return math.IsNaN(float64(v)) && math.IsNaN(float64(o)) -} - -func (v SampleValue) String() string { - return strconv.FormatFloat(float64(v), 'f', -1, 64) -} - -// SamplePair pairs a SampleValue with a Timestamp. -type SamplePair struct { - Timestamp Time - Value SampleValue -} - -// MarshalJSON implements json.Marshaler. -func (s SamplePair) MarshalJSON() ([]byte, error) { - t, err := json.Marshal(s.Timestamp) - if err != nil { - return nil, err - } - v, err := json.Marshal(s.Value) - if err != nil { - return nil, err - } - return []byte(fmt.Sprintf("[%s,%s]", t, v)), nil -} - -// UnmarshalJSON implements json.Unmarshaler. -func (s *SamplePair) UnmarshalJSON(b []byte) error { - v := [...]json.Unmarshaler{&s.Timestamp, &s.Value} - return json.Unmarshal(b, &v) -} - -// Equal returns true if this SamplePair and o have equal Values and equal -// Timestamps. 
The semantics of Value equality is defined by SampleValue.Equal. -func (s *SamplePair) Equal(o *SamplePair) bool { - return s == o || (s.Value.Equal(o.Value) && s.Timestamp.Equal(o.Timestamp)) -} - -func (s SamplePair) String() string { - return fmt.Sprintf("%s @[%s]", s.Value, s.Timestamp) -} - -// Sample is a sample pair associated with a metric. -type Sample struct { - Metric Metric `json:"metric"` - Value SampleValue `json:"value"` - Timestamp Time `json:"timestamp"` -} - -// Equal compares first the metrics, then the timestamp, then the value. The -// semantics of value equality is defined by SampleValue.Equal. -func (s *Sample) Equal(o *Sample) bool { - if s == o { - return true - } - - if !s.Metric.Equal(o.Metric) { - return false - } - if !s.Timestamp.Equal(o.Timestamp) { - return false - } - - return s.Value.Equal(o.Value) -} - -func (s Sample) String() string { - return fmt.Sprintf("%s => %s", s.Metric, SamplePair{ - Timestamp: s.Timestamp, - Value: s.Value, - }) -} - -// MarshalJSON implements json.Marshaler. -func (s Sample) MarshalJSON() ([]byte, error) { - v := struct { - Metric Metric `json:"metric"` - Value SamplePair `json:"value"` - }{ - Metric: s.Metric, - Value: SamplePair{ - Timestamp: s.Timestamp, - Value: s.Value, - }, - } - - return json.Marshal(&v) -} - -// UnmarshalJSON implements json.Unmarshaler. -func (s *Sample) UnmarshalJSON(b []byte) error { - v := struct { - Metric Metric `json:"metric"` - Value SamplePair `json:"value"` - }{ - Metric: s.Metric, - Value: SamplePair{ - Timestamp: s.Timestamp, - Value: s.Value, - }, - } - - if err := json.Unmarshal(b, &v); err != nil { - return err - } - - s.Metric = v.Metric - s.Timestamp = v.Value.Timestamp - s.Value = v.Value.Value - - return nil -} - -// Samples is a sortable Sample slice. It implements sort.Interface. -type Samples []*Sample - -func (s Samples) Len() int { - return len(s) -} - -// Less compares first the metrics, then the timestamp. 
-func (s Samples) Less(i, j int) bool { - switch { - case s[i].Metric.Before(s[j].Metric): - return true - case s[j].Metric.Before(s[i].Metric): - return false - case s[i].Timestamp.Before(s[j].Timestamp): - return true - default: - return false - } -} - -func (s Samples) Swap(i, j int) { - s[i], s[j] = s[j], s[i] -} - -// Equal compares two sets of samples and returns true if they are equal. -func (s Samples) Equal(o Samples) bool { - if len(s) != len(o) { - return false - } - - for i, sample := range s { - if !sample.Equal(o[i]) { - return false - } - } - return true -} - -// SampleStream is a stream of Values belonging to an attached COWMetric. -type SampleStream struct { - Metric Metric `json:"metric"` - Values []SamplePair `json:"values"` -} - -func (ss SampleStream) String() string { - vals := make([]string, len(ss.Values)) - for i, v := range ss.Values { - vals[i] = v.String() - } - return fmt.Sprintf("%s =>\n%s", ss.Metric, strings.Join(vals, "\n")) -} - -// Value is a generic interface for values resulting from a query evaluation. -type Value interface { - Type() ValueType - String() string -} - -func (Matrix) Type() ValueType { return ValMatrix } -func (Vector) Type() ValueType { return ValVector } -func (*Scalar) Type() ValueType { return ValScalar } -func (*String) Type() ValueType { return ValString } - -type ValueType int - -const ( - ValNone ValueType = iota - ValScalar - ValVector - ValMatrix - ValString -) - -// MarshalJSON implements json.Marshaler. 
-func (et ValueType) MarshalJSON() ([]byte, error) { - return json.Marshal(et.String()) -} - -func (et *ValueType) UnmarshalJSON(b []byte) error { - var s string - if err := json.Unmarshal(b, &s); err != nil { - return err - } - switch s { - case "": - *et = ValNone - case "scalar": - *et = ValScalar - case "vector": - *et = ValVector - case "matrix": - *et = ValMatrix - case "string": - *et = ValString - default: - return fmt.Errorf("unknown value type %q", s) - } - return nil -} - -func (e ValueType) String() string { - switch e { - case ValNone: - return "" - case ValScalar: - return "scalar" - case ValVector: - return "vector" - case ValMatrix: - return "matrix" - case ValString: - return "string" - } - panic("ValueType.String: unhandled value type") -} - -// Scalar is a scalar value evaluated at the set timestamp. -type Scalar struct { - Value SampleValue `json:"value"` - Timestamp Time `json:"timestamp"` -} - -func (s Scalar) String() string { - return fmt.Sprintf("scalar: %v @[%v]", s.Value, s.Timestamp) -} - -// MarshalJSON implements json.Marshaler. -func (s Scalar) MarshalJSON() ([]byte, error) { - v := strconv.FormatFloat(float64(s.Value), 'f', -1, 64) - return json.Marshal([...]interface{}{s.Timestamp, string(v)}) -} - -// UnmarshalJSON implements json.Unmarshaler. -func (s *Scalar) UnmarshalJSON(b []byte) error { - var f string - v := [...]interface{}{&s.Timestamp, &f} - - if err := json.Unmarshal(b, &v); err != nil { - return err - } - - value, err := strconv.ParseFloat(f, 64) - if err != nil { - return fmt.Errorf("error parsing sample value: %s", err) - } - s.Value = SampleValue(value) - return nil -} - -// String is a string value evaluated at the set timestamp. -type String struct { - Value string `json:"value"` - Timestamp Time `json:"timestamp"` -} - -func (s *String) String() string { - return s.Value -} - -// MarshalJSON implements json.Marshaler. 
-func (s String) MarshalJSON() ([]byte, error) { - return json.Marshal([]interface{}{s.Timestamp, s.Value}) -} - -// UnmarshalJSON implements json.Unmarshaler. -func (s *String) UnmarshalJSON(b []byte) error { - v := [...]interface{}{&s.Timestamp, &s.Value} - return json.Unmarshal(b, &v) -} - -// Vector is basically only an alias for Samples, but the -// contract is that in a Vector, all Samples have the same timestamp. -type Vector []*Sample - -func (vec Vector) String() string { - entries := make([]string, len(vec)) - for i, s := range vec { - entries[i] = s.String() - } - return strings.Join(entries, "\n") -} - -func (vec Vector) Len() int { return len(vec) } -func (vec Vector) Swap(i, j int) { vec[i], vec[j] = vec[j], vec[i] } - -// Less compares first the metrics, then the timestamp. -func (vec Vector) Less(i, j int) bool { - switch { - case vec[i].Metric.Before(vec[j].Metric): - return true - case vec[j].Metric.Before(vec[i].Metric): - return false - case vec[i].Timestamp.Before(vec[j].Timestamp): - return true - default: - return false - } -} - -// Equal compares two sets of samples and returns true if they are equal. -func (vec Vector) Equal(o Vector) bool { - if len(vec) != len(o) { - return false - } - - for i, sample := range vec { - if !sample.Equal(o[i]) { - return false - } - } - return true -} - -// Matrix is a list of time series. 
-type Matrix []*SampleStream - -func (m Matrix) Len() int { return len(m) } -func (m Matrix) Less(i, j int) bool { return m[i].Metric.Before(m[j].Metric) } -func (m Matrix) Swap(i, j int) { m[i], m[j] = m[j], m[i] } - -func (mat Matrix) String() string { - matCp := make(Matrix, len(mat)) - copy(matCp, mat) - sort.Sort(matCp) - - strs := make([]string, len(matCp)) - - for i, ss := range matCp { - strs[i] = ss.String() - } - - return strings.Join(strs, "\n") -} diff --git a/vendor/github.com/prometheus/procfs/.gitignore b/vendor/github.com/prometheus/procfs/.gitignore deleted file mode 100644 index 25e3659..0000000 --- a/vendor/github.com/prometheus/procfs/.gitignore +++ /dev/null @@ -1 +0,0 @@ -/fixtures/ diff --git a/vendor/github.com/prometheus/procfs/CONTRIBUTING.md b/vendor/github.com/prometheus/procfs/CONTRIBUTING.md deleted file mode 100644 index 40503ed..0000000 --- a/vendor/github.com/prometheus/procfs/CONTRIBUTING.md +++ /dev/null @@ -1,18 +0,0 @@ -# Contributing - -Prometheus uses GitHub to manage reviews of pull requests. - -* If you have a trivial fix or improvement, go ahead and create a pull request, - addressing (with `@...`) the maintainer of this repository (see - [MAINTAINERS.md](MAINTAINERS.md)) in the description of the pull request. - -* If you plan to do something more involved, first discuss your ideas - on our [mailing list](https://groups.google.com/forum/?fromgroups#!forum/prometheus-developers). - This will avoid unnecessary work and surely give you and us a good deal - of inspiration. - -* Relevant coding style guidelines are the [Go Code Review - Comments](https://code.google.com/p/go-wiki/wiki/CodeReviewComments) - and the _Formatting and style_ section of Peter Bourgon's [Go: Best - Practices for Production - Environments](http://peter.bourgon.org/go-in-production/#formatting-and-style). 
diff --git a/vendor/github.com/prometheus/procfs/LICENSE b/vendor/github.com/prometheus/procfs/LICENSE deleted file mode 100644 index 261eeb9..0000000 --- a/vendor/github.com/prometheus/procfs/LICENSE +++ /dev/null @@ -1,201 +0,0 @@ - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). 
- - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. 
Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative 
Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. 
Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. 
- - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. diff --git a/vendor/github.com/prometheus/procfs/MAINTAINERS.md b/vendor/github.com/prometheus/procfs/MAINTAINERS.md deleted file mode 100644 index 35993c4..0000000 --- a/vendor/github.com/prometheus/procfs/MAINTAINERS.md +++ /dev/null @@ -1 +0,0 @@ -* Tobias Schmidt diff --git a/vendor/github.com/prometheus/procfs/Makefile b/vendor/github.com/prometheus/procfs/Makefile deleted file mode 100644 index 4d10983..0000000 --- a/vendor/github.com/prometheus/procfs/Makefile +++ /dev/null @@ -1,77 +0,0 @@ -# Copyright 2018 The Prometheus Authors -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -# Ensure GOBIN is not set during build so that promu is installed to the correct path -unexport GOBIN - -GO ?= go -GOFMT ?= $(GO)fmt -FIRST_GOPATH := $(firstword $(subst :, ,$(shell $(GO) env GOPATH))) -STATICCHECK := $(FIRST_GOPATH)/bin/staticcheck -pkgs = $(shell $(GO) list ./... | grep -v /vendor/) - -PREFIX ?= $(shell pwd) -BIN_DIR ?= $(shell pwd) - -ifdef DEBUG - bindata_flags = -debug -endif - -STATICCHECK_IGNORE = - -all: format staticcheck build test - -style: - @echo ">> checking code style" - @! $(GOFMT) -d $(shell find . -path ./vendor -prune -o -name '*.go' -print) | grep '^' - -check_license: - @echo ">> checking license header" - @./scripts/check_license.sh - -test: fixtures/.unpacked sysfs/fixtures/.unpacked - @echo ">> running all tests" - @$(GO) test -race $(shell $(GO) list ./... 
| grep -v /vendor/ | grep -v examples) - -format: - @echo ">> formatting code" - @$(GO) fmt $(pkgs) - -vet: - @echo ">> vetting code" - @$(GO) vet $(pkgs) - -staticcheck: $(STATICCHECK) - @echo ">> running staticcheck" - @$(STATICCHECK) -ignore "$(STATICCHECK_IGNORE)" $(pkgs) - -%/.unpacked: %.ttar - ./ttar -C $(dir $*) -x -f $*.ttar - touch $@ - -update_fixtures: fixtures.ttar sysfs/fixtures.ttar - -%fixtures.ttar: %/fixtures - rm -v $(dir $*)fixtures/.unpacked - ./ttar -C $(dir $*) -c -f $*fixtures.ttar fixtures/ - -$(FIRST_GOPATH)/bin/staticcheck: - @GOOS= GOARCH= $(GO) get -u honnef.co/go/tools/cmd/staticcheck - -.PHONY: all style check_license format test vet staticcheck - -# Declaring the binaries at their default locations as PHONY targets is a hack -# to ensure the latest version is downloaded on every make execution. -# If this is not desired, copy/symlink these binaries to a different path and -# set the respective environment variables. -.PHONY: $(GOPATH)/bin/staticcheck diff --git a/vendor/github.com/prometheus/procfs/NOTICE b/vendor/github.com/prometheus/procfs/NOTICE deleted file mode 100644 index 53c5e9a..0000000 --- a/vendor/github.com/prometheus/procfs/NOTICE +++ /dev/null @@ -1,7 +0,0 @@ -procfs provides functions to retrieve system, kernel and process -metrics from the pseudo-filesystem proc. - -Copyright 2014-2015 The Prometheus Authors - -This product includes software developed at -SoundCloud Ltd. (http://soundcloud.com/). diff --git a/vendor/github.com/prometheus/procfs/README.md b/vendor/github.com/prometheus/procfs/README.md deleted file mode 100644 index 2095494..0000000 --- a/vendor/github.com/prometheus/procfs/README.md +++ /dev/null @@ -1,11 +0,0 @@ -# procfs - -This procfs package provides functions to retrieve system, kernel and process -metrics from the pseudo-filesystem proc. - -*WARNING*: This package is a work in progress. Its API may still break in -backwards-incompatible ways without warnings. Use it at your own risk. 
- -[![GoDoc](https://godoc.org/github.com/prometheus/procfs?status.png)](https://godoc.org/github.com/prometheus/procfs) -[![Build Status](https://travis-ci.org/prometheus/procfs.svg?branch=master)](https://travis-ci.org/prometheus/procfs) -[![Go Report Card](https://goreportcard.com/badge/github.com/prometheus/procfs)](https://goreportcard.com/report/github.com/prometheus/procfs) diff --git a/vendor/github.com/prometheus/procfs/buddyinfo.go b/vendor/github.com/prometheus/procfs/buddyinfo.go deleted file mode 100644 index d3a8268..0000000 --- a/vendor/github.com/prometheus/procfs/buddyinfo.go +++ /dev/null @@ -1,95 +0,0 @@ -// Copyright 2017 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package procfs - -import ( - "bufio" - "fmt" - "io" - "os" - "strconv" - "strings" -) - -// A BuddyInfo is the details parsed from /proc/buddyinfo. -// The data is comprised of an array of free fragments of each size. -// The sizes are 2^n*PAGE_SIZE, where n is the array index. -type BuddyInfo struct { - Node string - Zone string - Sizes []float64 -} - -// NewBuddyInfo reads the buddyinfo statistics. -func NewBuddyInfo() ([]BuddyInfo, error) { - fs, err := NewFS(DefaultMountPoint) - if err != nil { - return nil, err - } - - return fs.NewBuddyInfo() -} - -// NewBuddyInfo reads the buddyinfo statistics from the specified `proc` filesystem. 
-func (fs FS) NewBuddyInfo() ([]BuddyInfo, error) { - file, err := os.Open(fs.Path("buddyinfo")) - if err != nil { - return nil, err - } - defer file.Close() - - return parseBuddyInfo(file) -} - -func parseBuddyInfo(r io.Reader) ([]BuddyInfo, error) { - var ( - buddyInfo = []BuddyInfo{} - scanner = bufio.NewScanner(r) - bucketCount = -1 - ) - - for scanner.Scan() { - var err error - line := scanner.Text() - parts := strings.Fields(line) - - if len(parts) < 4 { - return nil, fmt.Errorf("invalid number of fields when parsing buddyinfo") - } - - node := strings.TrimRight(parts[1], ",") - zone := strings.TrimRight(parts[3], ",") - arraySize := len(parts[4:]) - - if bucketCount == -1 { - bucketCount = arraySize - } else { - if bucketCount != arraySize { - return nil, fmt.Errorf("mismatch in number of buddyinfo buckets, previous count %d, new count %d", bucketCount, arraySize) - } - } - - sizes := make([]float64, arraySize) - for i := 0; i < arraySize; i++ { - sizes[i], err = strconv.ParseFloat(parts[i+4], 64) - if err != nil { - return nil, fmt.Errorf("invalid value in buddyinfo: %s", err) - } - } - - buddyInfo = append(buddyInfo, BuddyInfo{node, zone, sizes}) - } - - return buddyInfo, scanner.Err() -} diff --git a/vendor/github.com/prometheus/procfs/doc.go b/vendor/github.com/prometheus/procfs/doc.go deleted file mode 100644 index e2acd6d..0000000 --- a/vendor/github.com/prometheus/procfs/doc.go +++ /dev/null @@ -1,45 +0,0 @@ -// Copyright 2014 Prometheus Team -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-// See the License for the specific language governing permissions and -// limitations under the License. - -// Package procfs provides functions to retrieve system, kernel and process -// metrics from the pseudo-filesystem proc. -// -// Example: -// -// package main -// -// import ( -// "fmt" -// "log" -// -// "github.com/prometheus/procfs" -// ) -// -// func main() { -// p, err := procfs.Self() -// if err != nil { -// log.Fatalf("could not get process: %s", err) -// } -// -// stat, err := p.NewStat() -// if err != nil { -// log.Fatalf("could not get process stat: %s", err) -// } -// -// fmt.Printf("command: %s\n", stat.Comm) -// fmt.Printf("cpu time: %fs\n", stat.CPUTime()) -// fmt.Printf("vsize: %dB\n", stat.VirtualMemory()) -// fmt.Printf("rss: %dB\n", stat.ResidentMemory()) -// } -// -package procfs diff --git a/vendor/github.com/prometheus/procfs/fixtures.ttar b/vendor/github.com/prometheus/procfs/fixtures.ttar deleted file mode 100644 index 13c831e..0000000 --- a/vendor/github.com/prometheus/procfs/fixtures.ttar +++ /dev/null @@ -1,462 +0,0 @@ -# Archive created by ttar -c -f fixtures.ttar fixtures/ -Directory: fixtures -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/26231 -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/26231/cmdline -Lines: 1 -vimNULLBYTEtest.goNULLBYTE+10NULLBYTEEOF -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/26231/comm -Lines: 1 -vim -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/26231/cwd -SymlinkTo: /usr/bin -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/26231/exe -SymlinkTo: /usr/bin/vim -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/26231/fd -Mode: 755 -# ttar - - - - - - - - - - - - - - - - 
- - - - - - - - - - - - - - - - - - - - -Path: fixtures/26231/fd/0 -SymlinkTo: ../../symlinktargets/abc -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/26231/fd/1 -SymlinkTo: ../../symlinktargets/def -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/26231/fd/10 -SymlinkTo: ../../symlinktargets/xyz -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/26231/fd/2 -SymlinkTo: ../../symlinktargets/ghi -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/26231/fd/3 -SymlinkTo: ../../symlinktargets/uvw -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/26231/io -Lines: 7 -rchar: 750339 -wchar: 818609 -syscr: 7405 -syscw: 5245 -read_bytes: 1024 -write_bytes: 2048 -cancelled_write_bytes: -1024 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/26231/limits -Lines: 17 -Limit Soft Limit Hard Limit Units -Max cpu time unlimited unlimited seconds -Max file size unlimited unlimited bytes -Max data size unlimited unlimited bytes -Max stack size 8388608 unlimited bytes -Max core file size 0 unlimited bytes -Max resident set unlimited unlimited bytes -Max processes 62898 62898 processes -Max open files 2048 4096 files -Max locked memory 65536 65536 bytes -Max address space 8589934592 unlimited bytes -Max file locks unlimited unlimited locks -Max pending signals 62898 62898 signals -Max msgqueue size 819200 819200 bytes -Max nice priority 0 0 -Max realtime priority 0 0 -Max realtime timeout unlimited unlimited us -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/26231/mountstats -Lines: 19 -device rootfs mounted on / with fstype rootfs -device sysfs mounted on /sys with fstype sysfs -device proc mounted on /proc with fstype proc -device /dev/sda1 
mounted on / with fstype ext4 -device 192.168.1.1:/srv/test mounted on /mnt/nfs/test with fstype nfs4 statvers=1.1 - opts: rw,vers=4.0,rsize=1048576,wsize=1048576,namlen=255,acregmin=3,acregmax=60,acdirmin=30,acdirmax=60,hard,proto=tcp,port=0,timeo=600,retrans=2,sec=sys,clientaddr=192.168.1.5,local_lock=none - age: 13968 - caps: caps=0xfff7,wtmult=512,dtsize=32768,bsize=0,namlen=255 - nfsv4: bm0=0xfdffafff,bm1=0xf9be3e,bm2=0x0,acl=0x0,pnfs=not configured - sec: flavor=1,pseudoflavor=1 - events: 52 226 0 0 1 13 398 0 0 331 0 47 0 0 77 0 0 77 0 0 0 0 0 0 0 0 0 - bytes: 1207640230 0 0 0 1210214218 0 295483 0 - RPC iostats version: 1.0 p/v: 100003/4 (nfs) - xprt: tcp 832 0 1 0 11 6428 6428 0 12154 0 24 26 5726 - per-op statistics - NULL: 0 0 0 0 0 0 0 0 - READ: 1298 1298 0 207680 1210292152 6 79386 79407 - WRITE: 0 0 0 0 0 0 0 0 - -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/26231/net -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/26231/net/dev -Lines: 4 -Inter-| Receive | Transmit - face |bytes packets errs drop fifo frame compressed multicast|bytes packets errs drop fifo colls carrier compressed - lo: 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 - eth0: 438 5 0 0 0 0 0 0 648 8 0 0 0 0 0 0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/26231/ns -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/26231/ns/mnt -SymlinkTo: mnt:[4026531840] -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/26231/ns/net -SymlinkTo: net:[4026531993] -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/26231/root -SymlinkTo: / -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/26231/stat -Lines: 1 -26231 (vim) R 5392 7446 5392 
34835 7446 4218880 32533 309516 26 82 1677 44 158 99 20 0 1 0 82375 56274944 1981 18446744073709551615 4194304 6294284 140736914091744 140736914087944 139965136429984 0 0 12288 1870679807 0 0 0 17 0 0 0 31 0 0 8391624 8481048 16420864 140736914093252 140736914093279 140736914093279 140736914096107 0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/26232 -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/26232/cmdline -Lines: 0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/26232/comm -Lines: 1 -ata_sff -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/26232/cwd -SymlinkTo: /does/not/exist -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/26232/fd -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/26232/fd/0 -SymlinkTo: ../../symlinktargets/abc -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/26232/fd/1 -SymlinkTo: ../../symlinktargets/def -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/26232/fd/2 -SymlinkTo: ../../symlinktargets/ghi -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/26232/fd/3 -SymlinkTo: ../../symlinktargets/uvw -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/26232/fd/4 -SymlinkTo: ../../symlinktargets/xyz -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/26232/limits -Lines: 17 -Limit Soft Limit Hard Limit Units -Max cpu time unlimited unlimited seconds -Max file size unlimited unlimited bytes -Max data size unlimited unlimited bytes -Max stack size 8388608 unlimited bytes -Max 
core file size 0 unlimited bytes -Max resident set unlimited unlimited bytes -Max processes 29436 29436 processes -Max open files 1024 4096 files -Max locked memory 65536 65536 bytes -Max address space unlimited unlimited bytes -Max file locks unlimited unlimited locks -Max pending signals 29436 29436 signals -Max msgqueue size 819200 819200 bytes -Max nice priority 0 0 -Max realtime priority 0 0 -Max realtime timeout unlimited unlimited us -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/26232/root -SymlinkTo: /does/not/exist -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/26232/stat -Lines: 1 -33 (ata_sff) S 2 0 0 0 -1 69238880 0 0 0 0 0 0 0 0 0 -20 1 0 5 0 0 18446744073709551615 0 0 0 0 0 0 0 2147483647 0 18446744073709551615 0 0 17 1 0 0 0 0 0 0 0 0 0 0 0 0 0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/26233 -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/26233/cmdline -Lines: 1 -com.github.uiautomatorNULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTEEOF -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/584 -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: 
fixtures/584/stat -Lines: 2 -1020 ((a b ) ( c d) ) R 28378 1020 28378 34842 1020 4218880 286 0 0 0 0 0 0 0 20 0 1 0 10839175 10395648 155 18446744073709551615 4194304 4238788 140736466511168 140736466511168 140609271124624 0 0 0 0 0 0 0 17 5 0 0 0 0 0 6336016 6337300 25579520 140736466515030 140736466515061 140736466515061 140736466518002 0 -#!/bin/cat /proc/self/stat -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/buddyinfo -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/buddyinfo/short -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/buddyinfo/short/buddyinfo -Lines: 3 -Node 0, zone -Node 0, zone -Node 0, zone -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/buddyinfo/sizemismatch -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/buddyinfo/sizemismatch/buddyinfo -Lines: 3 -Node 0, zone DMA 1 0 1 0 2 1 1 0 1 1 3 -Node 0, zone DMA32 759 572 791 475 194 45 12 0 0 0 0 0 -Node 0, zone Normal 4381 1093 185 1530 567 102 4 0 0 0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/buddyinfo/valid -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/buddyinfo/valid/buddyinfo -Lines: 3 -Node 0, zone DMA 1 0 1 0 2 1 1 0 1 1 3 -Node 0, zone DMA32 759 572 791 475 194 45 12 0 0 0 0 -Node 0, zone Normal 4381 1093 185 1530 567 102 4 0 0 0 0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/fs -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/fs/xfs -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: 
fixtures/fs/xfs/stat -Lines: 23 -extent_alloc 92447 97589 92448 93751 -abt 0 0 0 0 -blk_map 1767055 188820 184891 92447 92448 2140766 0 -bmbt 0 0 0 0 -dir 185039 92447 92444 136422 -trans 706 944304 0 -ig 185045 58807 0 126238 0 33637 22 -log 2883 113448 9 17360 739 -push_ail 945014 0 134260 15483 0 3940 464 159985 0 40 -xstrat 92447 0 -rw 107739 94045 -attr 4 0 0 0 -icluster 8677 7849 135802 -vnodes 92601 0 0 0 92444 92444 92444 0 -buf 2666287 7122 2659202 3599 2 7085 0 10297 7085 -abtb2 184941 1277345 13257 13278 0 0 0 0 0 0 0 0 0 0 2746147 -abtc2 345295 2416764 172637 172658 0 0 0 0 0 0 0 0 0 0 21406023 -bmbt2 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 -ibt2 343004 1358467 0 0 0 0 0 0 0 0 0 0 0 0 0 -fibt2 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 -qm 0 0 0 0 0 0 0 0 -xpc 399724544 92823103 86219234 -debug 0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/mdstat -Lines: 26 -Personalities : [linear] [multipath] [raid0] [raid1] [raid6] [raid5] [raid4] [raid10] -md3 : active raid6 sda1[8] sdh1[7] sdg1[6] sdf1[5] sde1[11] sdd1[3] sdc1[10] sdb1[9] - 5853468288 blocks super 1.2 level 6, 64k chunk, algorithm 2 [8/8] [UUUUUUUU] - -md127 : active raid1 sdi2[0] sdj2[1] - 312319552 blocks [2/2] [UU] - -md0 : active raid1 sdk[2](S) sdi1[0] sdj1[1] - 248896 blocks [2/2] [UU] - -md4 : inactive raid1 sda3[0] sdb3[1] - 4883648 blocks [2/2] [UU] - -md6 : active raid1 sdb2[2] sda2[0] - 195310144 blocks [2/1] [U_] - [=>...................] recovery = 8.5% (16775552/195310144) finish=17.0min speed=259783K/sec - -md8 : active raid1 sdb1[1] sda1[0] - 195310144 blocks [2/2] [UU] - [=>...................] 
resync = 8.5% (16775552/195310144) finish=17.0min speed=259783K/sec - -md7 : active raid6 sdb1[0] sde1[3] sdd1[2] sdc1[1] - 7813735424 blocks super 1.2 level 6, 512k chunk, algorithm 2 [4/3] [U_UU] - bitmap: 0/30 pages [0KB], 65536KB chunk - -unused devices: -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/net -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/net/dev -Lines: 6 -Inter-| Receive | Transmit - face |bytes packets errs drop fifo frame compressed multicast|bytes packets errs drop fifo colls carrier compressed -vethf345468: 648 8 0 0 0 0 0 0 438 5 0 0 0 0 0 0 - lo: 1664039048 1566805 0 0 0 0 0 0 1664039048 1566805 0 0 0 0 0 0 -docker0: 2568 38 0 0 0 0 0 0 438 5 0 0 0 0 0 0 - eth0: 874354587 1036395 0 0 0 0 0 0 563352563 732147 0 0 0 0 0 0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/net/ip_vs -Lines: 21 -IP Virtual Server version 1.2.1 (size=4096) -Prot LocalAddress:Port Scheduler Flags - -> RemoteAddress:Port Forward Weight ActiveConn InActConn -TCP C0A80016:0CEA wlc - -> C0A85216:0CEA Tunnel 100 248 2 - -> C0A85318:0CEA Tunnel 100 248 2 - -> C0A85315:0CEA Tunnel 100 248 1 -TCP C0A80039:0CEA wlc - -> C0A85416:0CEA Tunnel 0 0 0 - -> C0A85215:0CEA Tunnel 100 1499 0 - -> C0A83215:0CEA Tunnel 100 1498 0 -TCP C0A80037:0CEA wlc - -> C0A8321A:0CEA Tunnel 0 0 0 - -> C0A83120:0CEA Tunnel 100 0 0 -TCP [2620:0000:0000:0000:0000:0000:0000:0001]:0050 sh - -> [2620:0000:0000:0000:0000:0000:0000:0002]:0050 Route 1 0 0 - -> [2620:0000:0000:0000:0000:0000:0000:0003]:0050 Route 1 0 0 - -> [2620:0000:0000:0000:0000:0000:0000:0004]:0050 Route 1 1 1 -FWM 10001000 wlc - -> C0A8321A:0CEA Route 0 0 1 - -> C0A83215:0CEA Route 0 0 2 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/net/ip_vs_stats -Lines: 6 - Total Incoming Outgoing Incoming 
Outgoing - Conns Packets Packets Bytes Bytes - 16AA370 E33656E5 0 51D8C8883AB3 0 - - Conns/s Pkts/s Pkts/s Bytes/s Bytes/s - 4 1FB3C 0 1282A8F 0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/net/rpc -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/net/rpc/nfs -Lines: 5 -net 18628 0 18628 6 -rpc 4329785 0 4338291 -proc2 18 2 69 0 0 4410 0 0 0 0 0 0 0 0 0 0 0 99 2 -proc3 22 1 4084749 29200 94754 32580 186 47747 7981 8639 0 6356 0 6962 0 7958 0 0 241 4 4 2 39 -proc4 61 1 0 0 0 0 0 0 0 0 0 0 0 1 1 0 0 0 0 0 0 0 2 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/net/rpc/nfsd -Lines: 11 -rc 0 6 18622 -fh 0 0 0 0 0 -io 157286400 0 -th 8 0 0.000 0.000 0.000 0.000 0.000 0.000 0.000 0.000 0.000 0.000 -ra 32 0 0 0 0 0 0 0 0 0 0 0 -net 18628 0 18628 6 -rpc 18628 0 0 0 0 -proc2 18 2 69 0 0 4410 0 0 0 0 0 0 0 0 0 0 0 99 2 -proc3 22 2 112 0 2719 111 0 0 0 0 0 0 0 0 0 0 0 27 216 0 2 1 0 -proc4 2 2 10853 -proc4ops 72 0 0 0 1098 2 0 0 0 0 8179 5896 0 0 0 0 5900 0 0 2 0 2 0 9609 0 2 150 1272 0 0 0 1236 0 0 0 0 3 3 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/net/xfrm_stat -Lines: 28 -XfrmInError 1 -XfrmInBufferError 2 -XfrmInHdrError 4 -XfrmInNoStates 3 -XfrmInStateProtoError 40 -XfrmInStateModeError 100 -XfrmInStateSeqError 6000 -XfrmInStateExpired 4 -XfrmInStateMismatch 23451 -XfrmInStateInvalid 55555 -XfrmInTmplMismatch 51 -XfrmInNoPols 65432 -XfrmInPolBlock 100 -XfrmInPolError 10000 -XfrmOutError 1000000 -XfrmOutBundleGenError 43321 -XfrmOutBundleCheckError 555 -XfrmOutNoStates 869 -XfrmOutStateProtoError 4542 -XfrmOutStateModeError 4 -XfrmOutStateSeqError 543 -XfrmOutStateExpired 565 
-XfrmOutPolBlock 43456 -XfrmOutPolDead 7656 -XfrmOutPolError 1454 -XfrmFwdHdrError 6654 -XfrmOutStateInvalid 28765 -XfrmAcquireError 24532 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/self -SymlinkTo: 26231 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/stat -Lines: 16 -cpu 301854 612 111922 8979004 3552 2 3944 0 0 0 -cpu0 44490 19 21045 1087069 220 1 3410 0 0 0 -cpu1 47869 23 16474 1110787 591 0 46 0 0 0 -cpu2 46504 36 15916 1112321 441 0 326 0 0 0 -cpu3 47054 102 15683 1113230 533 0 60 0 0 0 -cpu4 28413 25 10776 1140321 217 0 8 0 0 0 -cpu5 29271 101 11586 1136270 672 0 30 0 0 0 -cpu6 29152 36 10276 1139721 319 0 29 0 0 0 -cpu7 29098 268 10164 1139282 555 0 31 0 0 0 -intr 8885917 17 0 0 0 0 0 0 0 1 79281 0 0 0 0 0 0 0 231237 0 0 0 0 250586 103 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 223424 190745 13 906 1283803 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 
0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 -ctxt 38014093 -btime 1418183276 -processes 26442 -procs_running 2 -procs_blocked 1 -softirq 5057579 250191 1481983 1647 211099 186066 0 1783454 622196 12499 508444 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/symlinktargets -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/symlinktargets/README -Lines: 2 -This directory contains some empty files that are the symlinks the files in the "fd" directory point to. -They are otherwise ignored by the tests -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/symlinktargets/abc -Lines: 0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/symlinktargets/def -Lines: 0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/symlinktargets/ghi -Lines: 0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/symlinktargets/uvw -Lines: 0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/symlinktargets/xyz -Lines: 0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/.unpacked -Lines: 0 -Mode: 664 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - diff --git a/vendor/github.com/prometheus/procfs/fs.go b/vendor/github.com/prometheus/procfs/fs.go deleted file mode 100644 index b6c6b2c..0000000 --- a/vendor/github.com/prometheus/procfs/fs.go 
+++ /dev/null @@ -1,82 +0,0 @@ -// Copyright 2018 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package procfs - -import ( - "fmt" - "os" - "path" - - "github.com/prometheus/procfs/nfs" - "github.com/prometheus/procfs/xfs" -) - -// FS represents the pseudo-filesystem proc, which provides an interface to -// kernel data structures. -type FS string - -// DefaultMountPoint is the common mount point of the proc filesystem. -const DefaultMountPoint = "/proc" - -// NewFS returns a new FS mounted under the given mountPoint. It will error -// if the mount point can't be read. -func NewFS(mountPoint string) (FS, error) { - info, err := os.Stat(mountPoint) - if err != nil { - return "", fmt.Errorf("could not read %s: %s", mountPoint, err) - } - if !info.IsDir() { - return "", fmt.Errorf("mount point %s is not a directory", mountPoint) - } - - return FS(mountPoint), nil -} - -// Path returns the path of the given subsystem relative to the procfs root. -func (fs FS) Path(p ...string) string { - return path.Join(append([]string{string(fs)}, p...)...) -} - -// XFSStats retrieves XFS filesystem runtime statistics. -func (fs FS) XFSStats() (*xfs.Stats, error) { - f, err := os.Open(fs.Path("fs/xfs/stat")) - if err != nil { - return nil, err - } - defer f.Close() - - return xfs.ParseStats(f) -} - -// NFSClientRPCStats retrieves NFS client RPC statistics. 
-func (fs FS) NFSClientRPCStats() (*nfs.ClientRPCStats, error) { - f, err := os.Open(fs.Path("net/rpc/nfs")) - if err != nil { - return nil, err - } - defer f.Close() - - return nfs.ParseClientRPCStats(f) -} - -// NFSdServerRPCStats retrieves NFS daemon RPC statistics. -func (fs FS) NFSdServerRPCStats() (*nfs.ServerRPCStats, error) { - f, err := os.Open(fs.Path("net/rpc/nfsd")) - if err != nil { - return nil, err - } - defer f.Close() - - return nfs.ParseServerRPCStats(f) -} diff --git a/vendor/github.com/prometheus/procfs/internal/util/parse.go b/vendor/github.com/prometheus/procfs/internal/util/parse.go deleted file mode 100644 index 2ff228e..0000000 --- a/vendor/github.com/prometheus/procfs/internal/util/parse.go +++ /dev/null @@ -1,59 +0,0 @@ -// Copyright 2018 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package util - -import ( - "io/ioutil" - "strconv" - "strings" -) - -// ParseUint32s parses a slice of strings into a slice of uint32s. -func ParseUint32s(ss []string) ([]uint32, error) { - us := make([]uint32, 0, len(ss)) - for _, s := range ss { - u, err := strconv.ParseUint(s, 10, 32) - if err != nil { - return nil, err - } - - us = append(us, uint32(u)) - } - - return us, nil -} - -// ParseUint64s parses a slice of strings into a slice of uint64s. 
-func ParseUint64s(ss []string) ([]uint64, error) { - us := make([]uint64, 0, len(ss)) - for _, s := range ss { - u, err := strconv.ParseUint(s, 10, 64) - if err != nil { - return nil, err - } - - us = append(us, u) - } - - return us, nil -} - -// ReadUintFromFile reads a file and attempts to parse a uint64 from it. -func ReadUintFromFile(path string) (uint64, error) { - data, err := ioutil.ReadFile(path) - if err != nil { - return 0, err - } - return strconv.ParseUint(strings.TrimSpace(string(data)), 10, 64) -} diff --git a/vendor/github.com/prometheus/procfs/internal/util/sysreadfile_linux.go b/vendor/github.com/prometheus/procfs/internal/util/sysreadfile_linux.go deleted file mode 100644 index df0d567..0000000 --- a/vendor/github.com/prometheus/procfs/internal/util/sysreadfile_linux.go +++ /dev/null @@ -1,45 +0,0 @@ -// Copyright 2018 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// +build !windows - -package util - -import ( - "bytes" - "os" - "syscall" -) - -// SysReadFile is a simplified ioutil.ReadFile that invokes syscall.Read directly. -// https://github.com/prometheus/node_exporter/pull/728/files -func SysReadFile(file string) (string, error) { - f, err := os.Open(file) - if err != nil { - return "", err - } - defer f.Close() - - // On some machines, hwmon drivers are broken and return EAGAIN. This causes - // Go's ioutil.ReadFile implementation to poll forever. 
- // - // Since we either want to read data or bail immediately, do the simplest - // possible read using syscall directly. - b := make([]byte, 128) - n, err := syscall.Read(int(f.Fd()), b) - if err != nil { - return "", err - } - - return string(bytes.TrimSpace(b[:n])), nil -} diff --git a/vendor/github.com/prometheus/procfs/ipvs.go b/vendor/github.com/prometheus/procfs/ipvs.go deleted file mode 100644 index e36d4a3..0000000 --- a/vendor/github.com/prometheus/procfs/ipvs.go +++ /dev/null @@ -1,259 +0,0 @@ -// Copyright 2018 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package procfs - -import ( - "bufio" - "encoding/hex" - "errors" - "fmt" - "io" - "io/ioutil" - "net" - "os" - "strconv" - "strings" -) - -// IPVSStats holds IPVS statistics, as exposed by the kernel in `/proc/net/ip_vs_stats`. -type IPVSStats struct { - // Total count of connections. - Connections uint64 - // Total incoming packages processed. - IncomingPackets uint64 - // Total outgoing packages processed. - OutgoingPackets uint64 - // Total incoming traffic. - IncomingBytes uint64 - // Total outgoing traffic. - OutgoingBytes uint64 -} - -// IPVSBackendStatus holds current metrics of one virtual / real address pair. -type IPVSBackendStatus struct { - // The local (virtual) IP address. - LocalAddress net.IP - // The remote (real) IP address. - RemoteAddress net.IP - // The local (virtual) port. - LocalPort uint16 - // The remote (real) port. 
- RemotePort uint16 - // The local firewall mark - LocalMark string - // The transport protocol (TCP, UDP). - Proto string - // The current number of active connections for this virtual/real address pair. - ActiveConn uint64 - // The current number of inactive connections for this virtual/real address pair. - InactConn uint64 - // The current weight of this virtual/real address pair. - Weight uint64 -} - -// NewIPVSStats reads the IPVS statistics. -func NewIPVSStats() (IPVSStats, error) { - fs, err := NewFS(DefaultMountPoint) - if err != nil { - return IPVSStats{}, err - } - - return fs.NewIPVSStats() -} - -// NewIPVSStats reads the IPVS statistics from the specified `proc` filesystem. -func (fs FS) NewIPVSStats() (IPVSStats, error) { - file, err := os.Open(fs.Path("net/ip_vs_stats")) - if err != nil { - return IPVSStats{}, err - } - defer file.Close() - - return parseIPVSStats(file) -} - -// parseIPVSStats performs the actual parsing of `ip_vs_stats`. -func parseIPVSStats(file io.Reader) (IPVSStats, error) { - var ( - statContent []byte - statLines []string - statFields []string - stats IPVSStats - ) - - statContent, err := ioutil.ReadAll(file) - if err != nil { - return IPVSStats{}, err - } - - statLines = strings.SplitN(string(statContent), "\n", 4) - if len(statLines) != 4 { - return IPVSStats{}, errors.New("ip_vs_stats corrupt: too short") - } - - statFields = strings.Fields(statLines[2]) - if len(statFields) != 5 { - return IPVSStats{}, errors.New("ip_vs_stats corrupt: unexpected number of fields") - } - - stats.Connections, err = strconv.ParseUint(statFields[0], 16, 64) - if err != nil { - return IPVSStats{}, err - } - stats.IncomingPackets, err = strconv.ParseUint(statFields[1], 16, 64) - if err != nil { - return IPVSStats{}, err - } - stats.OutgoingPackets, err = strconv.ParseUint(statFields[2], 16, 64) - if err != nil { - return IPVSStats{}, err - } - stats.IncomingBytes, err = strconv.ParseUint(statFields[3], 16, 64) - if err != nil { - return 
IPVSStats{}, err - } - stats.OutgoingBytes, err = strconv.ParseUint(statFields[4], 16, 64) - if err != nil { - return IPVSStats{}, err - } - - return stats, nil -} - -// NewIPVSBackendStatus reads and returns the status of all (virtual,real) server pairs. -func NewIPVSBackendStatus() ([]IPVSBackendStatus, error) { - fs, err := NewFS(DefaultMountPoint) - if err != nil { - return []IPVSBackendStatus{}, err - } - - return fs.NewIPVSBackendStatus() -} - -// NewIPVSBackendStatus reads and returns the status of all (virtual,real) server pairs from the specified `proc` filesystem. -func (fs FS) NewIPVSBackendStatus() ([]IPVSBackendStatus, error) { - file, err := os.Open(fs.Path("net/ip_vs")) - if err != nil { - return nil, err - } - defer file.Close() - - return parseIPVSBackendStatus(file) -} - -func parseIPVSBackendStatus(file io.Reader) ([]IPVSBackendStatus, error) { - var ( - status []IPVSBackendStatus - scanner = bufio.NewScanner(file) - proto string - localMark string - localAddress net.IP - localPort uint16 - err error - ) - - for scanner.Scan() { - fields := strings.Fields(scanner.Text()) - if len(fields) == 0 { - continue - } - switch { - case fields[0] == "IP" || fields[0] == "Prot" || fields[1] == "RemoteAddress:Port": - continue - case fields[0] == "TCP" || fields[0] == "UDP": - if len(fields) < 2 { - continue - } - proto = fields[0] - localMark = "" - localAddress, localPort, err = parseIPPort(fields[1]) - if err != nil { - return nil, err - } - case fields[0] == "FWM": - if len(fields) < 2 { - continue - } - proto = fields[0] - localMark = fields[1] - localAddress = nil - localPort = 0 - case fields[0] == "->": - if len(fields) < 6 { - continue - } - remoteAddress, remotePort, err := parseIPPort(fields[1]) - if err != nil { - return nil, err - } - weight, err := strconv.ParseUint(fields[3], 10, 64) - if err != nil { - return nil, err - } - activeConn, err := strconv.ParseUint(fields[4], 10, 64) - if err != nil { - return nil, err - } - inactConn, err := 
strconv.ParseUint(fields[5], 10, 64) - if err != nil { - return nil, err - } - status = append(status, IPVSBackendStatus{ - LocalAddress: localAddress, - LocalPort: localPort, - LocalMark: localMark, - RemoteAddress: remoteAddress, - RemotePort: remotePort, - Proto: proto, - Weight: weight, - ActiveConn: activeConn, - InactConn: inactConn, - }) - } - } - return status, nil -} - -func parseIPPort(s string) (net.IP, uint16, error) { - var ( - ip net.IP - err error - ) - - switch len(s) { - case 13: - ip, err = hex.DecodeString(s[0:8]) - if err != nil { - return nil, 0, err - } - case 46: - ip = net.ParseIP(s[1:40]) - if ip == nil { - return nil, 0, fmt.Errorf("invalid IPv6 address: %s", s[1:40]) - } - default: - return nil, 0, fmt.Errorf("unexpected IP:Port: %s", s) - } - - portString := s[len(s)-4:] - if len(portString) != 4 { - return nil, 0, fmt.Errorf("unexpected port string format: %s", portString) - } - port, err := strconv.ParseUint(portString, 16, 16) - if err != nil { - return nil, 0, err - } - - return ip, uint16(port), nil -} diff --git a/vendor/github.com/prometheus/procfs/mdstat.go b/vendor/github.com/prometheus/procfs/mdstat.go deleted file mode 100644 index 9dc1958..0000000 --- a/vendor/github.com/prometheus/procfs/mdstat.go +++ /dev/null @@ -1,151 +0,0 @@ -// Copyright 2018 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package procfs - -import ( - "fmt" - "io/ioutil" - "regexp" - "strconv" - "strings" -) - -var ( - statuslineRE = regexp.MustCompile(`(\d+) blocks .*\[(\d+)/(\d+)\] \[[U_]+\]`) - buildlineRE = regexp.MustCompile(`\((\d+)/\d+\)`) -) - -// MDStat holds info parsed from /proc/mdstat. -type MDStat struct { - // Name of the device. - Name string - // activity-state of the device. - ActivityState string - // Number of active disks. - DisksActive int64 - // Total number of disks the device consists of. - DisksTotal int64 - // Number of blocks the device holds. - BlocksTotal int64 - // Number of blocks on the device that are in sync. - BlocksSynced int64 -} - -// ParseMDStat parses an mdstat-file and returns a struct with the relevant infos. -func (fs FS) ParseMDStat() (mdstates []MDStat, err error) { - mdStatusFilePath := fs.Path("mdstat") - content, err := ioutil.ReadFile(mdStatusFilePath) - if err != nil { - return []MDStat{}, fmt.Errorf("error parsing %s: %s", mdStatusFilePath, err) - } - - mdStates := []MDStat{} - lines := strings.Split(string(content), "\n") - for i, l := range lines { - if l == "" { - continue - } - if l[0] == ' ' { - continue - } - if strings.HasPrefix(l, "Personalities") || strings.HasPrefix(l, "unused") { - continue - } - - mainLine := strings.Split(l, " ") - if len(mainLine) < 3 { - return mdStates, fmt.Errorf("error parsing mdline: %s", l) - } - mdName := mainLine[0] - activityState := mainLine[2] - - if len(lines) <= i+3 { - return mdStates, fmt.Errorf( - "error parsing %s: too few lines for md device %s", - mdStatusFilePath, - mdName, - ) - } - - active, total, size, err := evalStatusline(lines[i+1]) - if err != nil { - return mdStates, fmt.Errorf("error parsing %s: %s", mdStatusFilePath, err) - } - - // j is the line number of the syncing-line. 
- j := i + 2 - if strings.Contains(lines[i+2], "bitmap") { // skip bitmap line - j = i + 3 - } - - // If device is syncing at the moment, get the number of currently - // synced bytes, otherwise that number equals the size of the device. - syncedBlocks := size - if strings.Contains(lines[j], "recovery") || strings.Contains(lines[j], "resync") { - syncedBlocks, err = evalBuildline(lines[j]) - if err != nil { - return mdStates, fmt.Errorf("error parsing %s: %s", mdStatusFilePath, err) - } - } - - mdStates = append(mdStates, MDStat{ - Name: mdName, - ActivityState: activityState, - DisksActive: active, - DisksTotal: total, - BlocksTotal: size, - BlocksSynced: syncedBlocks, - }) - } - - return mdStates, nil -} - -func evalStatusline(statusline string) (active, total, size int64, err error) { - matches := statuslineRE.FindStringSubmatch(statusline) - if len(matches) != 4 { - return 0, 0, 0, fmt.Errorf("unexpected statusline: %s", statusline) - } - - size, err = strconv.ParseInt(matches[1], 10, 64) - if err != nil { - return 0, 0, 0, fmt.Errorf("unexpected statusline %s: %s", statusline, err) - } - - total, err = strconv.ParseInt(matches[2], 10, 64) - if err != nil { - return 0, 0, 0, fmt.Errorf("unexpected statusline %s: %s", statusline, err) - } - - active, err = strconv.ParseInt(matches[3], 10, 64) - if err != nil { - return 0, 0, 0, fmt.Errorf("unexpected statusline %s: %s", statusline, err) - } - - return active, total, size, nil -} - -func evalBuildline(buildline string) (syncedBlocks int64, err error) { - matches := buildlineRE.FindStringSubmatch(buildline) - if len(matches) != 2 { - return 0, fmt.Errorf("unexpected buildline: %s", buildline) - } - - syncedBlocks, err = strconv.ParseInt(matches[1], 10, 64) - if err != nil { - return 0, fmt.Errorf("%s in buildline: %s", err, buildline) - } - - return syncedBlocks, nil -} diff --git a/vendor/github.com/prometheus/procfs/mountstats.go b/vendor/github.com/prometheus/procfs/mountstats.go deleted file mode 100644 index 
7a8a1e0..0000000 --- a/vendor/github.com/prometheus/procfs/mountstats.go +++ /dev/null @@ -1,606 +0,0 @@ -// Copyright 2018 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package procfs - -// While implementing parsing of /proc/[pid]/mountstats, this blog was used -// heavily as a reference: -// https://utcc.utoronto.ca/~cks/space/blog/linux/NFSMountstatsIndex -// -// Special thanks to Chris Siebenmann for all of his posts explaining the -// various statistics available for NFS. - -import ( - "bufio" - "fmt" - "io" - "strconv" - "strings" - "time" -) - -// Constants shared between multiple functions. -const ( - deviceEntryLen = 8 - - fieldBytesLen = 8 - fieldEventsLen = 27 - - statVersion10 = "1.0" - statVersion11 = "1.1" - - fieldTransport10TCPLen = 10 - fieldTransport10UDPLen = 7 - - fieldTransport11TCPLen = 13 - fieldTransport11UDPLen = 10 -) - -// A Mount is a device mount parsed from /proc/[pid]/mountstats. -type Mount struct { - // Name of the device. - Device string - // The mount point of the device. - Mount string - // The filesystem type used by the device. - Type string - // If available additional statistics related to this Mount. - // Use a type assertion to determine if additional statistics are available. - Stats MountStats -} - -// A MountStats is a type which contains detailed statistics for a specific -// type of Mount. 
-type MountStats interface { - mountStats() -} - -// A MountStatsNFS is a MountStats implementation for NFSv3 and v4 mounts. -type MountStatsNFS struct { - // The version of statistics provided. - StatVersion string - // The age of the NFS mount. - Age time.Duration - // Statistics related to byte counters for various operations. - Bytes NFSBytesStats - // Statistics related to various NFS event occurrences. - Events NFSEventsStats - // Statistics broken down by filesystem operation. - Operations []NFSOperationStats - // Statistics about the NFS RPC transport. - Transport NFSTransportStats -} - -// mountStats implements MountStats. -func (m MountStatsNFS) mountStats() {} - -// A NFSBytesStats contains statistics about the number of bytes read and written -// by an NFS client to and from an NFS server. -type NFSBytesStats struct { - // Number of bytes read using the read() syscall. - Read uint64 - // Number of bytes written using the write() syscall. - Write uint64 - // Number of bytes read using the read() syscall in O_DIRECT mode. - DirectRead uint64 - // Number of bytes written using the write() syscall in O_DIRECT mode. - DirectWrite uint64 - // Number of bytes read from the NFS server, in total. - ReadTotal uint64 - // Number of bytes written to the NFS server, in total. - WriteTotal uint64 - // Number of pages read directly via mmap()'d files. - ReadPages uint64 - // Number of pages written directly via mmap()'d files. - WritePages uint64 -} - -// A NFSEventsStats contains statistics about NFS event occurrences. -type NFSEventsStats struct { - // Number of times cached inode attributes are re-validated from the server. - InodeRevalidate uint64 - // Number of times cached dentry nodes are re-validated from the server. - DnodeRevalidate uint64 - // Number of times an inode cache is cleared. - DataInvalidate uint64 - // Number of times cached inode attributes are invalidated. 
- AttributeInvalidate uint64 - // Number of times files or directories have been open()'d. - VFSOpen uint64 - // Number of times a directory lookup has occurred. - VFSLookup uint64 - // Number of times permissions have been checked. - VFSAccess uint64 - // Number of updates (and potential writes) to pages. - VFSUpdatePage uint64 - // Number of pages read directly via mmap()'d files. - VFSReadPage uint64 - // Number of times a group of pages have been read. - VFSReadPages uint64 - // Number of pages written directly via mmap()'d files. - VFSWritePage uint64 - // Number of times a group of pages have been written. - VFSWritePages uint64 - // Number of times directory entries have been read with getdents(). - VFSGetdents uint64 - // Number of times attributes have been set on inodes. - VFSSetattr uint64 - // Number of pending writes that have been forcefully flushed to the server. - VFSFlush uint64 - // Number of times fsync() has been called on directories and files. - VFSFsync uint64 - // Number of times locking has been attempted on a file. - VFSLock uint64 - // Number of times files have been closed and released. - VFSFileRelease uint64 - // Unknown. Possibly unused. - CongestionWait uint64 - // Number of times files have been truncated. - Truncation uint64 - // Number of times a file has been grown due to writes beyond its existing end. - WriteExtension uint64 - // Number of times a file was removed while still open by another process. - SillyRename uint64 - // Number of times the NFS server gave less data than expected while reading. - ShortRead uint64 - // Number of times the NFS server wrote less data than expected while writing. - ShortWrite uint64 - // Number of times the NFS server indicated EJUKEBOX; retrieving data from - // offline storage. - JukeboxDelay uint64 - // Number of NFS v4.1+ pNFS reads. - PNFSRead uint64 - // Number of NFS v4.1+ pNFS writes. - PNFSWrite uint64 -} - -// A NFSOperationStats contains statistics for a single operation. 
-type NFSOperationStats struct { - // The name of the operation. - Operation string - // Number of requests performed for this operation. - Requests uint64 - // Number of times an actual RPC request has been transmitted for this operation. - Transmissions uint64 - // Number of times a request has had a major timeout. - MajorTimeouts uint64 - // Number of bytes sent for this operation, including RPC headers and payload. - BytesSent uint64 - // Number of bytes received for this operation, including RPC headers and payload. - BytesReceived uint64 - // Duration all requests spent queued for transmission before they were sent. - CumulativeQueueTime time.Duration - // Duration it took to get a reply back after the request was transmitted. - CumulativeTotalResponseTime time.Duration - // Duration from when a request was enqueued to when it was completely handled. - CumulativeTotalRequestTime time.Duration -} - -// A NFSTransportStats contains statistics for the NFS mount RPC requests and -// responses. -type NFSTransportStats struct { - // The transport protocol used for the NFS mount. - Protocol string - // The local port used for the NFS mount. - Port uint64 - // Number of times the client has had to establish a connection from scratch - // to the NFS server. - Bind uint64 - // Number of times the client has made a TCP connection to the NFS server. - Connect uint64 - // Duration (in jiffies, a kernel internal unit of time) the NFS mount has - // spent waiting for connections to the server to be established. - ConnectIdleTime uint64 - // Duration since the NFS mount last saw any RPC traffic. - IdleTime time.Duration - // Number of RPC requests for this mount sent to the NFS server. - Sends uint64 - // Number of RPC responses for this mount received from the NFS server. - Receives uint64 - // Number of times the NFS server sent a response with a transaction ID - // unknown to this client. 
- BadTransactionIDs uint64 - // A running counter, incremented on each request as the current difference - // ebetween sends and receives. - CumulativeActiveRequests uint64 - // A running counter, incremented on each request by the current backlog - // queue size. - CumulativeBacklog uint64 - - // Stats below only available with stat version 1.1. - - // Maximum number of simultaneously active RPC requests ever used. - MaximumRPCSlotsUsed uint64 - // A running counter, incremented on each request as the current size of the - // sending queue. - CumulativeSendingQueue uint64 - // A running counter, incremented on each request as the current size of the - // pending queue. - CumulativePendingQueue uint64 -} - -// parseMountStats parses a /proc/[pid]/mountstats file and returns a slice -// of Mount structures containing detailed information about each mount. -// If available, statistics for each mount are parsed as well. -func parseMountStats(r io.Reader) ([]*Mount, error) { - const ( - device = "device" - statVersionPrefix = "statvers=" - - nfs3Type = "nfs" - nfs4Type = "nfs4" - ) - - var mounts []*Mount - - s := bufio.NewScanner(r) - for s.Scan() { - // Only look for device entries in this function - ss := strings.Fields(string(s.Bytes())) - if len(ss) == 0 || ss[0] != device { - continue - } - - m, err := parseMount(ss) - if err != nil { - return nil, err - } - - // Does this mount also possess statistics information? 
- if len(ss) > deviceEntryLen { - // Only NFSv3 and v4 are supported for parsing statistics - if m.Type != nfs3Type && m.Type != nfs4Type { - return nil, fmt.Errorf("cannot parse MountStats for fstype %q", m.Type) - } - - statVersion := strings.TrimPrefix(ss[8], statVersionPrefix) - - stats, err := parseMountStatsNFS(s, statVersion) - if err != nil { - return nil, err - } - - m.Stats = stats - } - - mounts = append(mounts, m) - } - - return mounts, s.Err() -} - -// parseMount parses an entry in /proc/[pid]/mountstats in the format: -// device [device] mounted on [mount] with fstype [type] -func parseMount(ss []string) (*Mount, error) { - if len(ss) < deviceEntryLen { - return nil, fmt.Errorf("invalid device entry: %v", ss) - } - - // Check for specific words appearing at specific indices to ensure - // the format is consistent with what we expect - format := []struct { - i int - s string - }{ - {i: 0, s: "device"}, - {i: 2, s: "mounted"}, - {i: 3, s: "on"}, - {i: 5, s: "with"}, - {i: 6, s: "fstype"}, - } - - for _, f := range format { - if ss[f.i] != f.s { - return nil, fmt.Errorf("invalid device entry: %v", ss) - } - } - - return &Mount{ - Device: ss[1], - Mount: ss[4], - Type: ss[7], - }, nil -} - -// parseMountStatsNFS parses a MountStatsNFS by scanning additional information -// related to NFS statistics. 
-func parseMountStatsNFS(s *bufio.Scanner, statVersion string) (*MountStatsNFS, error) { - // Field indicators for parsing specific types of data - const ( - fieldAge = "age:" - fieldBytes = "bytes:" - fieldEvents = "events:" - fieldPerOpStats = "per-op" - fieldTransport = "xprt:" - ) - - stats := &MountStatsNFS{ - StatVersion: statVersion, - } - - for s.Scan() { - ss := strings.Fields(string(s.Bytes())) - if len(ss) == 0 { - break - } - if len(ss) < 2 { - return nil, fmt.Errorf("not enough information for NFS stats: %v", ss) - } - - switch ss[0] { - case fieldAge: - // Age integer is in seconds - d, err := time.ParseDuration(ss[1] + "s") - if err != nil { - return nil, err - } - - stats.Age = d - case fieldBytes: - bstats, err := parseNFSBytesStats(ss[1:]) - if err != nil { - return nil, err - } - - stats.Bytes = *bstats - case fieldEvents: - estats, err := parseNFSEventsStats(ss[1:]) - if err != nil { - return nil, err - } - - stats.Events = *estats - case fieldTransport: - if len(ss) < 3 { - return nil, fmt.Errorf("not enough information for NFS transport stats: %v", ss) - } - - tstats, err := parseNFSTransportStats(ss[1:], statVersion) - if err != nil { - return nil, err - } - - stats.Transport = *tstats - } - - // When encountering "per-operation statistics", we must break this - // loop and parse them separately to ensure we can terminate parsing - // before reaching another device entry; hence why this 'if' statement - // is not just another switch case - if ss[0] == fieldPerOpStats { - break - } - } - - if err := s.Err(); err != nil { - return nil, err - } - - // NFS per-operation stats appear last before the next device entry - perOpStats, err := parseNFSOperationStats(s) - if err != nil { - return nil, err - } - - stats.Operations = perOpStats - - return stats, nil -} - -// parseNFSBytesStats parses a NFSBytesStats line using an input set of -// integer fields. 
-func parseNFSBytesStats(ss []string) (*NFSBytesStats, error) { - if len(ss) != fieldBytesLen { - return nil, fmt.Errorf("invalid NFS bytes stats: %v", ss) - } - - ns := make([]uint64, 0, fieldBytesLen) - for _, s := range ss { - n, err := strconv.ParseUint(s, 10, 64) - if err != nil { - return nil, err - } - - ns = append(ns, n) - } - - return &NFSBytesStats{ - Read: ns[0], - Write: ns[1], - DirectRead: ns[2], - DirectWrite: ns[3], - ReadTotal: ns[4], - WriteTotal: ns[5], - ReadPages: ns[6], - WritePages: ns[7], - }, nil -} - -// parseNFSEventsStats parses a NFSEventsStats line using an input set of -// integer fields. -func parseNFSEventsStats(ss []string) (*NFSEventsStats, error) { - if len(ss) != fieldEventsLen { - return nil, fmt.Errorf("invalid NFS events stats: %v", ss) - } - - ns := make([]uint64, 0, fieldEventsLen) - for _, s := range ss { - n, err := strconv.ParseUint(s, 10, 64) - if err != nil { - return nil, err - } - - ns = append(ns, n) - } - - return &NFSEventsStats{ - InodeRevalidate: ns[0], - DnodeRevalidate: ns[1], - DataInvalidate: ns[2], - AttributeInvalidate: ns[3], - VFSOpen: ns[4], - VFSLookup: ns[5], - VFSAccess: ns[6], - VFSUpdatePage: ns[7], - VFSReadPage: ns[8], - VFSReadPages: ns[9], - VFSWritePage: ns[10], - VFSWritePages: ns[11], - VFSGetdents: ns[12], - VFSSetattr: ns[13], - VFSFlush: ns[14], - VFSFsync: ns[15], - VFSLock: ns[16], - VFSFileRelease: ns[17], - CongestionWait: ns[18], - Truncation: ns[19], - WriteExtension: ns[20], - SillyRename: ns[21], - ShortRead: ns[22], - ShortWrite: ns[23], - JukeboxDelay: ns[24], - PNFSRead: ns[25], - PNFSWrite: ns[26], - }, nil -} - -// parseNFSOperationStats parses a slice of NFSOperationStats by scanning -// additional information about per-operation statistics until an empty -// line is reached. 
-func parseNFSOperationStats(s *bufio.Scanner) ([]NFSOperationStats, error) { - const ( - // Number of expected fields in each per-operation statistics set - numFields = 9 - ) - - var ops []NFSOperationStats - - for s.Scan() { - ss := strings.Fields(string(s.Bytes())) - if len(ss) == 0 { - // Must break when reading a blank line after per-operation stats to - // enable top-level function to parse the next device entry - break - } - - if len(ss) != numFields { - return nil, fmt.Errorf("invalid NFS per-operations stats: %v", ss) - } - - // Skip string operation name for integers - ns := make([]uint64, 0, numFields-1) - for _, st := range ss[1:] { - n, err := strconv.ParseUint(st, 10, 64) - if err != nil { - return nil, err - } - - ns = append(ns, n) - } - - ops = append(ops, NFSOperationStats{ - Operation: strings.TrimSuffix(ss[0], ":"), - Requests: ns[0], - Transmissions: ns[1], - MajorTimeouts: ns[2], - BytesSent: ns[3], - BytesReceived: ns[4], - CumulativeQueueTime: time.Duration(ns[5]) * time.Millisecond, - CumulativeTotalResponseTime: time.Duration(ns[6]) * time.Millisecond, - CumulativeTotalRequestTime: time.Duration(ns[7]) * time.Millisecond, - }) - } - - return ops, s.Err() -} - -// parseNFSTransportStats parses a NFSTransportStats line using an input set of -// integer fields matched to a specific stats version. -func parseNFSTransportStats(ss []string, statVersion string) (*NFSTransportStats, error) { - // Extract the protocol field. 
It is the only string value in the line - protocol := ss[0] - ss = ss[1:] - - switch statVersion { - case statVersion10: - var expectedLength int - if protocol == "tcp" { - expectedLength = fieldTransport10TCPLen - } else if protocol == "udp" { - expectedLength = fieldTransport10UDPLen - } else { - return nil, fmt.Errorf("invalid NFS protocol \"%s\" in stats 1.0 statement: %v", protocol, ss) - } - if len(ss) != expectedLength { - return nil, fmt.Errorf("invalid NFS transport stats 1.0 statement: %v", ss) - } - case statVersion11: - var expectedLength int - if protocol == "tcp" { - expectedLength = fieldTransport11TCPLen - } else if protocol == "udp" { - expectedLength = fieldTransport11UDPLen - } else { - return nil, fmt.Errorf("invalid NFS protocol \"%s\" in stats 1.1 statement: %v", protocol, ss) - } - if len(ss) != expectedLength { - return nil, fmt.Errorf("invalid NFS transport stats 1.1 statement: %v", ss) - } - default: - return nil, fmt.Errorf("unrecognized NFS transport stats version: %q", statVersion) - } - - // Allocate enough for v1.1 stats since zero value for v1.1 stats will be okay - // in a v1.0 response. Since the stat length is bigger for TCP stats, we use - // the TCP length here. - // - // Note: slice length must be set to length of v1.1 stats to avoid a panic when - // only v1.0 stats are present. - // See: https://github.com/prometheus/node_exporter/issues/571. - ns := make([]uint64, fieldTransport11TCPLen) - for i, s := range ss { - n, err := strconv.ParseUint(s, 10, 64) - if err != nil { - return nil, err - } - - ns[i] = n - } - - // The fields differ depending on the transport protocol (TCP or UDP) - // From https://utcc.utoronto.ca/%7Ecks/space/blog/linux/NFSMountstatsXprt - // - // For the udp RPC transport there is no connection count, connect idle time, - // or idle time (fields #3, #4, and #5); all other fields are the same. So - // we set them to 0 here. 
- if protocol == "udp" { - ns = append(ns[:2], append(make([]uint64, 3), ns[2:]...)...) - } - - return &NFSTransportStats{ - Protocol: protocol, - Port: ns[0], - Bind: ns[1], - Connect: ns[2], - ConnectIdleTime: ns[3], - IdleTime: time.Duration(ns[4]) * time.Second, - Sends: ns[5], - Receives: ns[6], - BadTransactionIDs: ns[7], - CumulativeActiveRequests: ns[8], - CumulativeBacklog: ns[9], - MaximumRPCSlotsUsed: ns[10], - CumulativeSendingQueue: ns[11], - CumulativePendingQueue: ns[12], - }, nil -} diff --git a/vendor/github.com/prometheus/procfs/net_dev.go b/vendor/github.com/prometheus/procfs/net_dev.go deleted file mode 100644 index 3f25233..0000000 --- a/vendor/github.com/prometheus/procfs/net_dev.go +++ /dev/null @@ -1,216 +0,0 @@ -// Copyright 2018 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package procfs - -import ( - "bufio" - "errors" - "os" - "sort" - "strconv" - "strings" -) - -// NetDevLine is single line parsed from /proc/net/dev or /proc/[pid]/net/dev. -type NetDevLine struct { - Name string `json:"name"` // The name of the interface. - RxBytes uint64 `json:"rx_bytes"` // Cumulative count of bytes received. - RxPackets uint64 `json:"rx_packets"` // Cumulative count of packets received. - RxErrors uint64 `json:"rx_errors"` // Cumulative count of receive errors encountered. - RxDropped uint64 `json:"rx_dropped"` // Cumulative count of packets dropped while receiving. 
- RxFIFO uint64 `json:"rx_fifo"` // Cumulative count of FIFO buffer errors. - RxFrame uint64 `json:"rx_frame"` // Cumulative count of packet framing errors. - RxCompressed uint64 `json:"rx_compressed"` // Cumulative count of compressed packets received by the device driver. - RxMulticast uint64 `json:"rx_multicast"` // Cumulative count of multicast frames received by the device driver. - TxBytes uint64 `json:"tx_bytes"` // Cumulative count of bytes transmitted. - TxPackets uint64 `json:"tx_packets"` // Cumulative count of packets transmitted. - TxErrors uint64 `json:"tx_errors"` // Cumulative count of transmit errors encountered. - TxDropped uint64 `json:"tx_dropped"` // Cumulative count of packets dropped while transmitting. - TxFIFO uint64 `json:"tx_fifo"` // Cumulative count of FIFO buffer errors. - TxCollisions uint64 `json:"tx_collisions"` // Cumulative count of collisions detected on the interface. - TxCarrier uint64 `json:"tx_carrier"` // Cumulative count of carrier losses detected by the device driver. - TxCompressed uint64 `json:"tx_compressed"` // Cumulative count of compressed packets transmitted by the device driver. -} - -// NetDev is parsed from /proc/net/dev or /proc/[pid]/net/dev. The map keys -// are interface names. -type NetDev map[string]NetDevLine - -// NewNetDev returns kernel/system statistics read from /proc/net/dev. -func NewNetDev() (NetDev, error) { - fs, err := NewFS(DefaultMountPoint) - if err != nil { - return nil, err - } - - return fs.NewNetDev() -} - -// NewNetDev returns kernel/system statistics read from /proc/net/dev. -func (fs FS) NewNetDev() (NetDev, error) { - return newNetDev(fs.Path("net/dev")) -} - -// NewNetDev returns kernel/system statistics read from /proc/[pid]/net/dev. -func (p Proc) NewNetDev() (NetDev, error) { - return newNetDev(p.path("net/dev")) -} - -// newNetDev creates a new NetDev from the contents of the given file. 
-func newNetDev(file string) (NetDev, error) { - f, err := os.Open(file) - if err != nil { - return NetDev{}, err - } - defer f.Close() - - nd := NetDev{} - s := bufio.NewScanner(f) - for n := 0; s.Scan(); n++ { - // Skip the 2 header lines. - if n < 2 { - continue - } - - line, err := nd.parseLine(s.Text()) - if err != nil { - return nd, err - } - - nd[line.Name] = *line - } - - return nd, s.Err() -} - -// parseLine parses a single line from the /proc/net/dev file. Header lines -// must be filtered prior to calling this method. -func (nd NetDev) parseLine(rawLine string) (*NetDevLine, error) { - parts := strings.SplitN(rawLine, ":", 2) - if len(parts) != 2 { - return nil, errors.New("invalid net/dev line, missing colon") - } - fields := strings.Fields(strings.TrimSpace(parts[1])) - - var err error - line := &NetDevLine{} - - // Interface Name - line.Name = strings.TrimSpace(parts[0]) - if line.Name == "" { - return nil, errors.New("invalid net/dev line, empty interface name") - } - - // RX - line.RxBytes, err = strconv.ParseUint(fields[0], 10, 64) - if err != nil { - return nil, err - } - line.RxPackets, err = strconv.ParseUint(fields[1], 10, 64) - if err != nil { - return nil, err - } - line.RxErrors, err = strconv.ParseUint(fields[2], 10, 64) - if err != nil { - return nil, err - } - line.RxDropped, err = strconv.ParseUint(fields[3], 10, 64) - if err != nil { - return nil, err - } - line.RxFIFO, err = strconv.ParseUint(fields[4], 10, 64) - if err != nil { - return nil, err - } - line.RxFrame, err = strconv.ParseUint(fields[5], 10, 64) - if err != nil { - return nil, err - } - line.RxCompressed, err = strconv.ParseUint(fields[6], 10, 64) - if err != nil { - return nil, err - } - line.RxMulticast, err = strconv.ParseUint(fields[7], 10, 64) - if err != nil { - return nil, err - } - - // TX - line.TxBytes, err = strconv.ParseUint(fields[8], 10, 64) - if err != nil { - return nil, err - } - line.TxPackets, err = strconv.ParseUint(fields[9], 10, 64) - if err != nil { 
- return nil, err - } - line.TxErrors, err = strconv.ParseUint(fields[10], 10, 64) - if err != nil { - return nil, err - } - line.TxDropped, err = strconv.ParseUint(fields[11], 10, 64) - if err != nil { - return nil, err - } - line.TxFIFO, err = strconv.ParseUint(fields[12], 10, 64) - if err != nil { - return nil, err - } - line.TxCollisions, err = strconv.ParseUint(fields[13], 10, 64) - if err != nil { - return nil, err - } - line.TxCarrier, err = strconv.ParseUint(fields[14], 10, 64) - if err != nil { - return nil, err - } - line.TxCompressed, err = strconv.ParseUint(fields[15], 10, 64) - if err != nil { - return nil, err - } - - return line, nil -} - -// Total aggregates the values across interfaces and returns a new NetDevLine. -// The Name field will be a sorted comma separated list of interface names. -func (nd NetDev) Total() NetDevLine { - total := NetDevLine{} - - names := make([]string, 0, len(nd)) - for _, ifc := range nd { - names = append(names, ifc.Name) - total.RxBytes += ifc.RxBytes - total.RxPackets += ifc.RxPackets - total.RxPackets += ifc.RxPackets - total.RxErrors += ifc.RxErrors - total.RxDropped += ifc.RxDropped - total.RxFIFO += ifc.RxFIFO - total.RxFrame += ifc.RxFrame - total.RxCompressed += ifc.RxCompressed - total.RxMulticast += ifc.RxMulticast - total.TxBytes += ifc.TxBytes - total.TxPackets += ifc.TxPackets - total.TxErrors += ifc.TxErrors - total.TxDropped += ifc.TxDropped - total.TxFIFO += ifc.TxFIFO - total.TxCollisions += ifc.TxCollisions - total.TxCarrier += ifc.TxCarrier - total.TxCompressed += ifc.TxCompressed - } - sort.Strings(names) - total.Name = strings.Join(names, ", ") - - return total -} diff --git a/vendor/github.com/prometheus/procfs/nfs/nfs.go b/vendor/github.com/prometheus/procfs/nfs/nfs.go deleted file mode 100644 index 651bf68..0000000 --- a/vendor/github.com/prometheus/procfs/nfs/nfs.go +++ /dev/null @@ -1,263 +0,0 @@ -// Copyright 2018 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the 
"License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Package nfs implements parsing of /proc/net/rpc/nfsd. -// Fields are documented in https://www.svennd.be/nfsd-stats-explained-procnetrpcnfsd/ -package nfs - -// ReplyCache models the "rc" line. -type ReplyCache struct { - Hits uint64 - Misses uint64 - NoCache uint64 -} - -// FileHandles models the "fh" line. -type FileHandles struct { - Stale uint64 - TotalLookups uint64 - AnonLookups uint64 - DirNoCache uint64 - NoDirNoCache uint64 -} - -// InputOutput models the "io" line. -type InputOutput struct { - Read uint64 - Write uint64 -} - -// Threads models the "th" line. -type Threads struct { - Threads uint64 - FullCnt uint64 -} - -// ReadAheadCache models the "ra" line. -type ReadAheadCache struct { - CacheSize uint64 - CacheHistogram []uint64 - NotFound uint64 -} - -// Network models the "net" line. -type Network struct { - NetCount uint64 - UDPCount uint64 - TCPCount uint64 - TCPConnect uint64 -} - -// ClientRPC models the nfs "rpc" line. -type ClientRPC struct { - RPCCount uint64 - Retransmissions uint64 - AuthRefreshes uint64 -} - -// ServerRPC models the nfsd "rpc" line. -type ServerRPC struct { - RPCCount uint64 - BadCnt uint64 - BadFmt uint64 - BadAuth uint64 - BadcInt uint64 -} - -// V2Stats models the "proc2" line. 
-type V2Stats struct { - Null uint64 - GetAttr uint64 - SetAttr uint64 - Root uint64 - Lookup uint64 - ReadLink uint64 - Read uint64 - WrCache uint64 - Write uint64 - Create uint64 - Remove uint64 - Rename uint64 - Link uint64 - SymLink uint64 - MkDir uint64 - RmDir uint64 - ReadDir uint64 - FsStat uint64 -} - -// V3Stats models the "proc3" line. -type V3Stats struct { - Null uint64 - GetAttr uint64 - SetAttr uint64 - Lookup uint64 - Access uint64 - ReadLink uint64 - Read uint64 - Write uint64 - Create uint64 - MkDir uint64 - SymLink uint64 - MkNod uint64 - Remove uint64 - RmDir uint64 - Rename uint64 - Link uint64 - ReadDir uint64 - ReadDirPlus uint64 - FsStat uint64 - FsInfo uint64 - PathConf uint64 - Commit uint64 -} - -// ClientV4Stats models the nfs "proc4" line. -type ClientV4Stats struct { - Null uint64 - Read uint64 - Write uint64 - Commit uint64 - Open uint64 - OpenConfirm uint64 - OpenNoattr uint64 - OpenDowngrade uint64 - Close uint64 - Setattr uint64 - FsInfo uint64 - Renew uint64 - SetClientID uint64 - SetClientIDConfirm uint64 - Lock uint64 - Lockt uint64 - Locku uint64 - Access uint64 - Getattr uint64 - Lookup uint64 - LookupRoot uint64 - Remove uint64 - Rename uint64 - Link uint64 - Symlink uint64 - Create uint64 - Pathconf uint64 - StatFs uint64 - ReadLink uint64 - ReadDir uint64 - ServerCaps uint64 - DelegReturn uint64 - GetACL uint64 - SetACL uint64 - FsLocations uint64 - ReleaseLockowner uint64 - Secinfo uint64 - FsidPresent uint64 - ExchangeID uint64 - CreateSession uint64 - DestroySession uint64 - Sequence uint64 - GetLeaseTime uint64 - ReclaimComplete uint64 - LayoutGet uint64 - GetDeviceInfo uint64 - LayoutCommit uint64 - LayoutReturn uint64 - SecinfoNoName uint64 - TestStateID uint64 - FreeStateID uint64 - GetDeviceList uint64 - BindConnToSession uint64 - DestroyClientID uint64 - Seek uint64 - Allocate uint64 - DeAllocate uint64 - LayoutStats uint64 - Clone uint64 -} - -// ServerV4Stats models the nfsd "proc4" line. 
-type ServerV4Stats struct { - Null uint64 - Compound uint64 -} - -// V4Ops models the "proc4ops" line: NFSv4 operations -// Variable list, see: -// v4.0 https://tools.ietf.org/html/rfc3010 (38 operations) -// v4.1 https://tools.ietf.org/html/rfc5661 (58 operations) -// v4.2 https://tools.ietf.org/html/draft-ietf-nfsv4-minorversion2-41 (71 operations) -type V4Ops struct { - //Values uint64 // Variable depending on v4.x sub-version. TODO: Will this always at least include the fields in this struct? - Op0Unused uint64 - Op1Unused uint64 - Op2Future uint64 - Access uint64 - Close uint64 - Commit uint64 - Create uint64 - DelegPurge uint64 - DelegReturn uint64 - GetAttr uint64 - GetFH uint64 - Link uint64 - Lock uint64 - Lockt uint64 - Locku uint64 - Lookup uint64 - LookupRoot uint64 - Nverify uint64 - Open uint64 - OpenAttr uint64 - OpenConfirm uint64 - OpenDgrd uint64 - PutFH uint64 - PutPubFH uint64 - PutRootFH uint64 - Read uint64 - ReadDir uint64 - ReadLink uint64 - Remove uint64 - Rename uint64 - Renew uint64 - RestoreFH uint64 - SaveFH uint64 - SecInfo uint64 - SetAttr uint64 - Verify uint64 - Write uint64 - RelLockOwner uint64 -} - -// ClientRPCStats models all stats from /proc/net/rpc/nfs. -type ClientRPCStats struct { - Network Network - ClientRPC ClientRPC - V2Stats V2Stats - V3Stats V3Stats - ClientV4Stats ClientV4Stats -} - -// ServerRPCStats models all stats from /proc/net/rpc/nfsd. 
-type ServerRPCStats struct { - ReplyCache ReplyCache - FileHandles FileHandles - InputOutput InputOutput - Threads Threads - ReadAheadCache ReadAheadCache - Network Network - ServerRPC ServerRPC - V2Stats V2Stats - V3Stats V3Stats - ServerV4Stats ServerV4Stats - V4Ops V4Ops -} diff --git a/vendor/github.com/prometheus/procfs/nfs/parse.go b/vendor/github.com/prometheus/procfs/nfs/parse.go deleted file mode 100644 index 95a83cc..0000000 --- a/vendor/github.com/prometheus/procfs/nfs/parse.go +++ /dev/null @@ -1,317 +0,0 @@ -// Copyright 2018 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package nfs - -import ( - "fmt" -) - -func parseReplyCache(v []uint64) (ReplyCache, error) { - if len(v) != 3 { - return ReplyCache{}, fmt.Errorf("invalid ReplyCache line %q", v) - } - - return ReplyCache{ - Hits: v[0], - Misses: v[1], - NoCache: v[2], - }, nil -} - -func parseFileHandles(v []uint64) (FileHandles, error) { - if len(v) != 5 { - return FileHandles{}, fmt.Errorf("invalid FileHandles, line %q", v) - } - - return FileHandles{ - Stale: v[0], - TotalLookups: v[1], - AnonLookups: v[2], - DirNoCache: v[3], - NoDirNoCache: v[4], - }, nil -} - -func parseInputOutput(v []uint64) (InputOutput, error) { - if len(v) != 2 { - return InputOutput{}, fmt.Errorf("invalid InputOutput line %q", v) - } - - return InputOutput{ - Read: v[0], - Write: v[1], - }, nil -} - -func parseThreads(v []uint64) (Threads, error) { - if len(v) != 2 { - return Threads{}, fmt.Errorf("invalid Threads line %q", v) - } - - return Threads{ - Threads: v[0], - FullCnt: v[1], - }, nil -} - -func parseReadAheadCache(v []uint64) (ReadAheadCache, error) { - if len(v) != 12 { - return ReadAheadCache{}, fmt.Errorf("invalid ReadAheadCache line %q", v) - } - - return ReadAheadCache{ - CacheSize: v[0], - CacheHistogram: v[1:11], - NotFound: v[11], - }, nil -} - -func parseNetwork(v []uint64) (Network, error) { - if len(v) != 4 { - return Network{}, fmt.Errorf("invalid Network line %q", v) - } - - return Network{ - NetCount: v[0], - UDPCount: v[1], - TCPCount: v[2], - TCPConnect: v[3], - }, nil -} - -func parseServerRPC(v []uint64) (ServerRPC, error) { - if len(v) != 5 { - return ServerRPC{}, fmt.Errorf("invalid RPC line %q", v) - } - - return ServerRPC{ - RPCCount: v[0], - BadCnt: v[1], - BadFmt: v[2], - BadAuth: v[3], - BadcInt: v[4], - }, nil -} - -func parseClientRPC(v []uint64) (ClientRPC, error) { - if len(v) != 3 { - return ClientRPC{}, fmt.Errorf("invalid RPC line %q", v) - } - - return ClientRPC{ - RPCCount: v[0], - Retransmissions: v[1], - AuthRefreshes: v[2], - }, nil -} - -func 
parseV2Stats(v []uint64) (V2Stats, error) { - values := int(v[0]) - if len(v[1:]) != values || values != 18 { - return V2Stats{}, fmt.Errorf("invalid V2Stats line %q", v) - } - - return V2Stats{ - Null: v[1], - GetAttr: v[2], - SetAttr: v[3], - Root: v[4], - Lookup: v[5], - ReadLink: v[6], - Read: v[7], - WrCache: v[8], - Write: v[9], - Create: v[10], - Remove: v[11], - Rename: v[12], - Link: v[13], - SymLink: v[14], - MkDir: v[15], - RmDir: v[16], - ReadDir: v[17], - FsStat: v[18], - }, nil -} - -func parseV3Stats(v []uint64) (V3Stats, error) { - values := int(v[0]) - if len(v[1:]) != values || values != 22 { - return V3Stats{}, fmt.Errorf("invalid V3Stats line %q", v) - } - - return V3Stats{ - Null: v[1], - GetAttr: v[2], - SetAttr: v[3], - Lookup: v[4], - Access: v[5], - ReadLink: v[6], - Read: v[7], - Write: v[8], - Create: v[9], - MkDir: v[10], - SymLink: v[11], - MkNod: v[12], - Remove: v[13], - RmDir: v[14], - Rename: v[15], - Link: v[16], - ReadDir: v[17], - ReadDirPlus: v[18], - FsStat: v[19], - FsInfo: v[20], - PathConf: v[21], - Commit: v[22], - }, nil -} - -func parseClientV4Stats(v []uint64) (ClientV4Stats, error) { - values := int(v[0]) - if len(v[1:]) != values { - return ClientV4Stats{}, fmt.Errorf("invalid ClientV4Stats line %q", v) - } - - // This function currently supports mapping 59 NFS v4 client stats. Older - // kernels may emit fewer stats, so we must detect this and pad out the - // values to match the expected slice size. 
- if values < 59 { - newValues := make([]uint64, 60) - copy(newValues, v) - v = newValues - } - - return ClientV4Stats{ - Null: v[1], - Read: v[2], - Write: v[3], - Commit: v[4], - Open: v[5], - OpenConfirm: v[6], - OpenNoattr: v[7], - OpenDowngrade: v[8], - Close: v[9], - Setattr: v[10], - FsInfo: v[11], - Renew: v[12], - SetClientID: v[13], - SetClientIDConfirm: v[14], - Lock: v[15], - Lockt: v[16], - Locku: v[17], - Access: v[18], - Getattr: v[19], - Lookup: v[20], - LookupRoot: v[21], - Remove: v[22], - Rename: v[23], - Link: v[24], - Symlink: v[25], - Create: v[26], - Pathconf: v[27], - StatFs: v[28], - ReadLink: v[29], - ReadDir: v[30], - ServerCaps: v[31], - DelegReturn: v[32], - GetACL: v[33], - SetACL: v[34], - FsLocations: v[35], - ReleaseLockowner: v[36], - Secinfo: v[37], - FsidPresent: v[38], - ExchangeID: v[39], - CreateSession: v[40], - DestroySession: v[41], - Sequence: v[42], - GetLeaseTime: v[43], - ReclaimComplete: v[44], - LayoutGet: v[45], - GetDeviceInfo: v[46], - LayoutCommit: v[47], - LayoutReturn: v[48], - SecinfoNoName: v[49], - TestStateID: v[50], - FreeStateID: v[51], - GetDeviceList: v[52], - BindConnToSession: v[53], - DestroyClientID: v[54], - Seek: v[55], - Allocate: v[56], - DeAllocate: v[57], - LayoutStats: v[58], - Clone: v[59], - }, nil -} - -func parseServerV4Stats(v []uint64) (ServerV4Stats, error) { - values := int(v[0]) - if len(v[1:]) != values || values != 2 { - return ServerV4Stats{}, fmt.Errorf("invalid V4Stats line %q", v) - } - - return ServerV4Stats{ - Null: v[1], - Compound: v[2], - }, nil -} - -func parseV4Ops(v []uint64) (V4Ops, error) { - values := int(v[0]) - if len(v[1:]) != values || values < 39 { - return V4Ops{}, fmt.Errorf("invalid V4Ops line %q", v) - } - - stats := V4Ops{ - Op0Unused: v[1], - Op1Unused: v[2], - Op2Future: v[3], - Access: v[4], - Close: v[5], - Commit: v[6], - Create: v[7], - DelegPurge: v[8], - DelegReturn: v[9], - GetAttr: v[10], - GetFH: v[11], - Link: v[12], - Lock: v[13], - Lockt: 
v[14], - Locku: v[15], - Lookup: v[16], - LookupRoot: v[17], - Nverify: v[18], - Open: v[19], - OpenAttr: v[20], - OpenConfirm: v[21], - OpenDgrd: v[22], - PutFH: v[23], - PutPubFH: v[24], - PutRootFH: v[25], - Read: v[26], - ReadDir: v[27], - ReadLink: v[28], - Remove: v[29], - Rename: v[30], - Renew: v[31], - RestoreFH: v[32], - SaveFH: v[33], - SecInfo: v[34], - SetAttr: v[35], - Verify: v[36], - Write: v[37], - RelLockOwner: v[38], - } - - return stats, nil -} diff --git a/vendor/github.com/prometheus/procfs/nfs/parse_nfs.go b/vendor/github.com/prometheus/procfs/nfs/parse_nfs.go deleted file mode 100644 index c0d3a5a..0000000 --- a/vendor/github.com/prometheus/procfs/nfs/parse_nfs.go +++ /dev/null @@ -1,67 +0,0 @@ -// Copyright 2018 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package nfs - -import ( - "bufio" - "fmt" - "io" - "strings" - - "github.com/prometheus/procfs/internal/util" -) - -// ParseClientRPCStats returns stats read from /proc/net/rpc/nfs -func ParseClientRPCStats(r io.Reader) (*ClientRPCStats, error) { - stats := &ClientRPCStats{} - - scanner := bufio.NewScanner(r) - for scanner.Scan() { - line := scanner.Text() - parts := strings.Fields(scanner.Text()) - // require at least - if len(parts) < 2 { - return nil, fmt.Errorf("invalid NFS metric line %q", line) - } - - values, err := util.ParseUint64s(parts[1:]) - if err != nil { - return nil, fmt.Errorf("error parsing NFS metric line: %s", err) - } - - switch metricLine := parts[0]; metricLine { - case "net": - stats.Network, err = parseNetwork(values) - case "rpc": - stats.ClientRPC, err = parseClientRPC(values) - case "proc2": - stats.V2Stats, err = parseV2Stats(values) - case "proc3": - stats.V3Stats, err = parseV3Stats(values) - case "proc4": - stats.ClientV4Stats, err = parseClientV4Stats(values) - default: - return nil, fmt.Errorf("unknown NFS metric line %q", metricLine) - } - if err != nil { - return nil, fmt.Errorf("errors parsing NFS metric line: %s", err) - } - } - - if err := scanner.Err(); err != nil { - return nil, fmt.Errorf("error scanning NFS file: %s", err) - } - - return stats, nil -} diff --git a/vendor/github.com/prometheus/procfs/nfs/parse_nfsd.go b/vendor/github.com/prometheus/procfs/nfs/parse_nfsd.go deleted file mode 100644 index 57bb4a3..0000000 --- a/vendor/github.com/prometheus/procfs/nfs/parse_nfsd.go +++ /dev/null @@ -1,89 +0,0 @@ -// Copyright 2018 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package nfs - -import ( - "bufio" - "fmt" - "io" - "strings" - - "github.com/prometheus/procfs/internal/util" -) - -// ParseServerRPCStats returns stats read from /proc/net/rpc/nfsd -func ParseServerRPCStats(r io.Reader) (*ServerRPCStats, error) { - stats := &ServerRPCStats{} - - scanner := bufio.NewScanner(r) - for scanner.Scan() { - line := scanner.Text() - parts := strings.Fields(scanner.Text()) - // require at least - if len(parts) < 2 { - return nil, fmt.Errorf("invalid NFSd metric line %q", line) - } - label := parts[0] - - var values []uint64 - var err error - if label == "th" { - if len(parts) < 3 { - return nil, fmt.Errorf("invalid NFSd th metric line %q", line) - } - values, err = util.ParseUint64s(parts[1:3]) - } else { - values, err = util.ParseUint64s(parts[1:]) - } - if err != nil { - return nil, fmt.Errorf("error parsing NFSd metric line: %s", err) - } - - switch metricLine := parts[0]; metricLine { - case "rc": - stats.ReplyCache, err = parseReplyCache(values) - case "fh": - stats.FileHandles, err = parseFileHandles(values) - case "io": - stats.InputOutput, err = parseInputOutput(values) - case "th": - stats.Threads, err = parseThreads(values) - case "ra": - stats.ReadAheadCache, err = parseReadAheadCache(values) - case "net": - stats.Network, err = parseNetwork(values) - case "rpc": - stats.ServerRPC, err = parseServerRPC(values) - case "proc2": - stats.V2Stats, err = parseV2Stats(values) - case "proc3": - stats.V3Stats, err = parseV3Stats(values) - case "proc4": - stats.ServerV4Stats, err = parseServerV4Stats(values) - case 
"proc4ops": - stats.V4Ops, err = parseV4Ops(values) - default: - return nil, fmt.Errorf("unknown NFSd metric line %q", metricLine) - } - if err != nil { - return nil, fmt.Errorf("errors parsing NFSd metric line: %s", err) - } - } - - if err := scanner.Err(); err != nil { - return nil, fmt.Errorf("error scanning NFSd file: %s", err) - } - - return stats, nil -} diff --git a/vendor/github.com/prometheus/procfs/proc.go b/vendor/github.com/prometheus/procfs/proc.go deleted file mode 100644 index 06bed0e..0000000 --- a/vendor/github.com/prometheus/procfs/proc.go +++ /dev/null @@ -1,258 +0,0 @@ -// Copyright 2018 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package procfs - -import ( - "bytes" - "fmt" - "io/ioutil" - "os" - "strconv" - "strings" -) - -// Proc provides information about a running process. -type Proc struct { - // The process ID. - PID int - - fs FS -} - -// Procs represents a list of Proc structs. -type Procs []Proc - -func (p Procs) Len() int { return len(p) } -func (p Procs) Swap(i, j int) { p[i], p[j] = p[j], p[i] } -func (p Procs) Less(i, j int) bool { return p[i].PID < p[j].PID } - -// Self returns a process for the current process read via /proc/self. -func Self() (Proc, error) { - fs, err := NewFS(DefaultMountPoint) - if err != nil { - return Proc{}, err - } - return fs.Self() -} - -// NewProc returns a process for the given pid under /proc. 
-func NewProc(pid int) (Proc, error) { - fs, err := NewFS(DefaultMountPoint) - if err != nil { - return Proc{}, err - } - return fs.NewProc(pid) -} - -// AllProcs returns a list of all currently available processes under /proc. -func AllProcs() (Procs, error) { - fs, err := NewFS(DefaultMountPoint) - if err != nil { - return Procs{}, err - } - return fs.AllProcs() -} - -// Self returns a process for the current process. -func (fs FS) Self() (Proc, error) { - p, err := os.Readlink(fs.Path("self")) - if err != nil { - return Proc{}, err - } - pid, err := strconv.Atoi(strings.Replace(p, string(fs), "", -1)) - if err != nil { - return Proc{}, err - } - return fs.NewProc(pid) -} - -// NewProc returns a process for the given pid. -func (fs FS) NewProc(pid int) (Proc, error) { - if _, err := os.Stat(fs.Path(strconv.Itoa(pid))); err != nil { - return Proc{}, err - } - return Proc{PID: pid, fs: fs}, nil -} - -// AllProcs returns a list of all currently available processes. -func (fs FS) AllProcs() (Procs, error) { - d, err := os.Open(fs.Path()) - if err != nil { - return Procs{}, err - } - defer d.Close() - - names, err := d.Readdirnames(-1) - if err != nil { - return Procs{}, fmt.Errorf("could not read %s: %s", d.Name(), err) - } - - p := Procs{} - for _, n := range names { - pid, err := strconv.ParseInt(n, 10, 64) - if err != nil { - continue - } - p = append(p, Proc{PID: int(pid), fs: fs}) - } - - return p, nil -} - -// CmdLine returns the command line of a process. -func (p Proc) CmdLine() ([]string, error) { - f, err := os.Open(p.path("cmdline")) - if err != nil { - return nil, err - } - defer f.Close() - - data, err := ioutil.ReadAll(f) - if err != nil { - return nil, err - } - - if len(data) < 1 { - return []string{}, nil - } - - return strings.Split(string(bytes.TrimRight(data, string("\x00"))), string(byte(0))), nil -} - -// Comm returns the command name of a process. 
-func (p Proc) Comm() (string, error) { - f, err := os.Open(p.path("comm")) - if err != nil { - return "", err - } - defer f.Close() - - data, err := ioutil.ReadAll(f) - if err != nil { - return "", err - } - - return strings.TrimSpace(string(data)), nil -} - -// Executable returns the absolute path of the executable command of a process. -func (p Proc) Executable() (string, error) { - exe, err := os.Readlink(p.path("exe")) - if os.IsNotExist(err) { - return "", nil - } - - return exe, err -} - -// Cwd returns the absolute path to the current working directory of the process. -func (p Proc) Cwd() (string, error) { - wd, err := os.Readlink(p.path("cwd")) - if os.IsNotExist(err) { - return "", nil - } - - return wd, err -} - -// RootDir returns the absolute path to the process's root directory (as set by chroot) -func (p Proc) RootDir() (string, error) { - rdir, err := os.Readlink(p.path("root")) - if os.IsNotExist(err) { - return "", nil - } - - return rdir, err -} - -// FileDescriptors returns the currently open file descriptors of a process. -func (p Proc) FileDescriptors() ([]uintptr, error) { - names, err := p.fileDescriptors() - if err != nil { - return nil, err - } - - fds := make([]uintptr, len(names)) - for i, n := range names { - fd, err := strconv.ParseInt(n, 10, 32) - if err != nil { - return nil, fmt.Errorf("could not parse fd %s: %s", n, err) - } - fds[i] = uintptr(fd) - } - - return fds, nil -} - -// FileDescriptorTargets returns the targets of all file descriptors of a process. -// If a file descriptor is not a symlink to a file (like a socket), that value will be the empty string. 
-func (p Proc) FileDescriptorTargets() ([]string, error) { - names, err := p.fileDescriptors() - if err != nil { - return nil, err - } - - targets := make([]string, len(names)) - - for i, name := range names { - target, err := os.Readlink(p.path("fd", name)) - if err == nil { - targets[i] = target - } - } - - return targets, nil -} - -// FileDescriptorsLen returns the number of currently open file descriptors of -// a process. -func (p Proc) FileDescriptorsLen() (int, error) { - fds, err := p.fileDescriptors() - if err != nil { - return 0, err - } - - return len(fds), nil -} - -// MountStats retrieves statistics and configuration for mount points in a -// process's namespace. -func (p Proc) MountStats() ([]*Mount, error) { - f, err := os.Open(p.path("mountstats")) - if err != nil { - return nil, err - } - defer f.Close() - - return parseMountStats(f) -} - -func (p Proc) fileDescriptors() ([]string, error) { - d, err := os.Open(p.path("fd")) - if err != nil { - return nil, err - } - defer d.Close() - - names, err := d.Readdirnames(-1) - if err != nil { - return nil, fmt.Errorf("could not read %s: %s", d.Name(), err) - } - - return names, nil -} - -func (p Proc) path(pa ...string) string { - return p.fs.Path(append([]string{strconv.Itoa(p.PID)}, pa...)...) -} diff --git a/vendor/github.com/prometheus/procfs/proc_io.go b/vendor/github.com/prometheus/procfs/proc_io.go deleted file mode 100644 index 0251c83..0000000 --- a/vendor/github.com/prometheus/procfs/proc_io.go +++ /dev/null @@ -1,65 +0,0 @@ -// Copyright 2018 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package procfs - -import ( - "fmt" - "io/ioutil" - "os" -) - -// ProcIO models the content of /proc//io. -type ProcIO struct { - // Chars read. - RChar uint64 - // Chars written. - WChar uint64 - // Read syscalls. - SyscR uint64 - // Write syscalls. - SyscW uint64 - // Bytes read. - ReadBytes uint64 - // Bytes written. - WriteBytes uint64 - // Bytes written, but taking into account truncation. See - // Documentation/filesystems/proc.txt in the kernel sources for - // detailed explanation. - CancelledWriteBytes int64 -} - -// NewIO creates a new ProcIO instance from a given Proc instance. 
-func (p Proc) NewIO() (ProcIO, error) { - pio := ProcIO{} - - f, err := os.Open(p.path("io")) - if err != nil { - return pio, err - } - defer f.Close() - - data, err := ioutil.ReadAll(f) - if err != nil { - return pio, err - } - - ioFormat := "rchar: %d\nwchar: %d\nsyscr: %d\nsyscw: %d\n" + - "read_bytes: %d\nwrite_bytes: %d\n" + - "cancelled_write_bytes: %d\n" - - _, err = fmt.Sscanf(string(data), ioFormat, &pio.RChar, &pio.WChar, &pio.SyscR, - &pio.SyscW, &pio.ReadBytes, &pio.WriteBytes, &pio.CancelledWriteBytes) - - return pio, err -} diff --git a/vendor/github.com/prometheus/procfs/proc_limits.go b/vendor/github.com/prometheus/procfs/proc_limits.go deleted file mode 100644 index f04ba6f..0000000 --- a/vendor/github.com/prometheus/procfs/proc_limits.go +++ /dev/null @@ -1,150 +0,0 @@ -// Copyright 2018 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package procfs - -import ( - "bufio" - "fmt" - "os" - "regexp" - "strconv" -) - -// ProcLimits represents the soft limits for each of the process's resource -// limits. For more information see getrlimit(2): -// http://man7.org/linux/man-pages/man2/getrlimit.2.html. -type ProcLimits struct { - // CPU time limit in seconds. - CPUTime int64 - // Maximum size of files that the process may create. - FileSize int64 - // Maximum size of the process's data segment (initialized data, - // uninitialized data, and heap). - DataSize int64 - // Maximum size of the process stack in bytes. 
- StackSize int64 - // Maximum size of a core file. - CoreFileSize int64 - // Limit of the process's resident set in pages. - ResidentSet int64 - // Maximum number of processes that can be created for the real user ID of - // the calling process. - Processes int64 - // Value one greater than the maximum file descriptor number that can be - // opened by this process. - OpenFiles int64 - // Maximum number of bytes of memory that may be locked into RAM. - LockedMemory int64 - // Maximum size of the process's virtual memory address space in bytes. - AddressSpace int64 - // Limit on the combined number of flock(2) locks and fcntl(2) leases that - // this process may establish. - FileLocks int64 - // Limit of signals that may be queued for the real user ID of the calling - // process. - PendingSignals int64 - // Limit on the number of bytes that can be allocated for POSIX message - // queues for the real user ID of the calling process. - MsqqueueSize int64 - // Limit of the nice priority set using setpriority(2) or nice(2). - NicePriority int64 - // Limit of the real-time priority set using sched_setscheduler(2) or - // sched_setparam(2). - RealtimePriority int64 - // Limit (in microseconds) on the amount of CPU time that a process - // scheduled under a real-time scheduling policy may consume without making - // a blocking system call. - RealtimeTimeout int64 -} - -const ( - limitsFields = 3 - limitsUnlimited = "unlimited" -) - -var ( - limitsDelimiter = regexp.MustCompile(" +") -) - -// NewLimits returns the current soft limits of the process. 
-func (p Proc) NewLimits() (ProcLimits, error) { - f, err := os.Open(p.path("limits")) - if err != nil { - return ProcLimits{}, err - } - defer f.Close() - - var ( - l = ProcLimits{} - s = bufio.NewScanner(f) - ) - for s.Scan() { - fields := limitsDelimiter.Split(s.Text(), limitsFields) - if len(fields) != limitsFields { - return ProcLimits{}, fmt.Errorf( - "couldn't parse %s line %s", f.Name(), s.Text()) - } - - switch fields[0] { - case "Max cpu time": - l.CPUTime, err = parseInt(fields[1]) - case "Max file size": - l.FileSize, err = parseInt(fields[1]) - case "Max data size": - l.DataSize, err = parseInt(fields[1]) - case "Max stack size": - l.StackSize, err = parseInt(fields[1]) - case "Max core file size": - l.CoreFileSize, err = parseInt(fields[1]) - case "Max resident set": - l.ResidentSet, err = parseInt(fields[1]) - case "Max processes": - l.Processes, err = parseInt(fields[1]) - case "Max open files": - l.OpenFiles, err = parseInt(fields[1]) - case "Max locked memory": - l.LockedMemory, err = parseInt(fields[1]) - case "Max address space": - l.AddressSpace, err = parseInt(fields[1]) - case "Max file locks": - l.FileLocks, err = parseInt(fields[1]) - case "Max pending signals": - l.PendingSignals, err = parseInt(fields[1]) - case "Max msgqueue size": - l.MsqqueueSize, err = parseInt(fields[1]) - case "Max nice priority": - l.NicePriority, err = parseInt(fields[1]) - case "Max realtime priority": - l.RealtimePriority, err = parseInt(fields[1]) - case "Max realtime timeout": - l.RealtimeTimeout, err = parseInt(fields[1]) - } - if err != nil { - return ProcLimits{}, err - } - } - - return l, s.Err() -} - -func parseInt(s string) (int64, error) { - if s == limitsUnlimited { - return -1, nil - } - i, err := strconv.ParseInt(s, 10, 64) - if err != nil { - return 0, fmt.Errorf("couldn't parse value %s: %s", s, err) - } - return i, nil -} diff --git a/vendor/github.com/prometheus/procfs/proc_ns.go b/vendor/github.com/prometheus/procfs/proc_ns.go deleted file mode 
100644 index d06c26e..0000000 --- a/vendor/github.com/prometheus/procfs/proc_ns.go +++ /dev/null @@ -1,68 +0,0 @@ -// Copyright 2018 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package procfs - -import ( - "fmt" - "os" - "strconv" - "strings" -) - -// Namespace represents a single namespace of a process. -type Namespace struct { - Type string // Namespace type. - Inode uint32 // Inode number of the namespace. If two processes are in the same namespace their inodes will match. -} - -// Namespaces contains all of the namespaces that the process is contained in. -type Namespaces map[string]Namespace - -// NewNamespaces reads from /proc/[pid/ns/* to get the namespaces of which the -// process is a member. 
-func (p Proc) NewNamespaces() (Namespaces, error) { - d, err := os.Open(p.path("ns")) - if err != nil { - return nil, err - } - defer d.Close() - - names, err := d.Readdirnames(-1) - if err != nil { - return nil, fmt.Errorf("failed to read contents of ns dir: %v", err) - } - - ns := make(Namespaces, len(names)) - for _, name := range names { - target, err := os.Readlink(p.path("ns", name)) - if err != nil { - return nil, err - } - - fields := strings.SplitN(target, ":", 2) - if len(fields) != 2 { - return nil, fmt.Errorf("failed to parse namespace type and inode from '%v'", target) - } - - typ := fields[0] - inode, err := strconv.ParseUint(strings.Trim(fields[1], "[]"), 10, 32) - if err != nil { - return nil, fmt.Errorf("failed to parse inode from '%v': %v", fields[1], err) - } - - ns[name] = Namespace{typ, uint32(inode)} - } - - return ns, nil -} diff --git a/vendor/github.com/prometheus/procfs/proc_stat.go b/vendor/github.com/prometheus/procfs/proc_stat.go deleted file mode 100644 index 3cf2a9f..0000000 --- a/vendor/github.com/prometheus/procfs/proc_stat.go +++ /dev/null @@ -1,188 +0,0 @@ -// Copyright 2018 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package procfs - -import ( - "bytes" - "fmt" - "io/ioutil" - "os" -) - -// Originally, this USER_HZ value was dynamically retrieved via a sysconf call -// which required cgo. However, that caused a lot of problems regarding -// cross-compilation. 
Alternatives such as running a binary to determine the -// value, or trying to derive it in some other way were all problematic. After -// much research it was determined that USER_HZ is actually hardcoded to 100 on -// all Go-supported platforms as of the time of this writing. This is why we -// decided to hardcode it here as well. It is not impossible that there could -// be systems with exceptions, but they should be very exotic edge cases, and -// in that case, the worst outcome will be two misreported metrics. -// -// See also the following discussions: -// -// - https://github.com/prometheus/node_exporter/issues/52 -// - https://github.com/prometheus/procfs/pull/2 -// - http://stackoverflow.com/questions/17410841/how-does-user-hz-solve-the-jiffy-scaling-issue -const userHZ = 100 - -// ProcStat provides status information about the process, -// read from /proc/[pid]/stat. -type ProcStat struct { - // The process ID. - PID int - // The filename of the executable. - Comm string - // The process state. - State string - // The PID of the parent of this process. - PPID int - // The process group ID of the process. - PGRP int - // The session ID of the process. - Session int - // The controlling terminal of the process. - TTY int - // The ID of the foreground process group of the controlling terminal of - // the process. - TPGID int - // The kernel flags word of the process. - Flags uint - // The number of minor faults the process has made which have not required - // loading a memory page from disk. - MinFlt uint - // The number of minor faults that the process's waited-for children have - // made. - CMinFlt uint - // The number of major faults the process has made which have required - // loading a memory page from disk. - MajFlt uint - // The number of major faults that the process's waited-for children have - // made. - CMajFlt uint - // Amount of time that this process has been scheduled in user mode, - // measured in clock ticks. 
- UTime uint - // Amount of time that this process has been scheduled in kernel mode, - // measured in clock ticks. - STime uint - // Amount of time that this process's waited-for children have been - // scheduled in user mode, measured in clock ticks. - CUTime uint - // Amount of time that this process's waited-for children have been - // scheduled in kernel mode, measured in clock ticks. - CSTime uint - // For processes running a real-time scheduling policy, this is the negated - // scheduling priority, minus one. - Priority int - // The nice value, a value in the range 19 (low priority) to -20 (high - // priority). - Nice int - // Number of threads in this process. - NumThreads int - // The time the process started after system boot, the value is expressed - // in clock ticks. - Starttime uint64 - // Virtual memory size in bytes. - VSize int - // Resident set size in pages. - RSS int - - fs FS -} - -// NewStat returns the current status information of the process. -func (p Proc) NewStat() (ProcStat, error) { - f, err := os.Open(p.path("stat")) - if err != nil { - return ProcStat{}, err - } - defer f.Close() - - data, err := ioutil.ReadAll(f) - if err != nil { - return ProcStat{}, err - } - - var ( - ignore int - - s = ProcStat{PID: p.PID, fs: p.fs} - l = bytes.Index(data, []byte("(")) - r = bytes.LastIndex(data, []byte(")")) - ) - - if l < 0 || r < 0 { - return ProcStat{}, fmt.Errorf( - "unexpected format, couldn't extract comm: %s", - data, - ) - } - - s.Comm = string(data[l+1 : r]) - _, err = fmt.Fscan( - bytes.NewBuffer(data[r+2:]), - &s.State, - &s.PPID, - &s.PGRP, - &s.Session, - &s.TTY, - &s.TPGID, - &s.Flags, - &s.MinFlt, - &s.CMinFlt, - &s.MajFlt, - &s.CMajFlt, - &s.UTime, - &s.STime, - &s.CUTime, - &s.CSTime, - &s.Priority, - &s.Nice, - &s.NumThreads, - &ignore, - &s.Starttime, - &s.VSize, - &s.RSS, - ) - if err != nil { - return ProcStat{}, err - } - - return s, nil -} - -// VirtualMemory returns the virtual memory size in bytes. 
-func (s ProcStat) VirtualMemory() int { - return s.VSize -} - -// ResidentMemory returns the resident memory size in bytes. -func (s ProcStat) ResidentMemory() int { - return s.RSS * os.Getpagesize() -} - -// StartTime returns the unix timestamp of the process in seconds. -func (s ProcStat) StartTime() (float64, error) { - stat, err := s.fs.NewStat() - if err != nil { - return 0, err - } - return float64(stat.BootTime) + (float64(s.Starttime) / userHZ), nil -} - -// CPUTime returns the total CPU user and system time in seconds. -func (s ProcStat) CPUTime() float64 { - return float64(s.UTime+s.STime) / userHZ -} diff --git a/vendor/github.com/prometheus/procfs/stat.go b/vendor/github.com/prometheus/procfs/stat.go deleted file mode 100644 index 61eb6b0..0000000 --- a/vendor/github.com/prometheus/procfs/stat.go +++ /dev/null @@ -1,232 +0,0 @@ -// Copyright 2018 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package procfs - -import ( - "bufio" - "fmt" - "io" - "os" - "strconv" - "strings" -) - -// CPUStat shows how much time the cpu spend in various stages. -type CPUStat struct { - User float64 - Nice float64 - System float64 - Idle float64 - Iowait float64 - IRQ float64 - SoftIRQ float64 - Steal float64 - Guest float64 - GuestNice float64 -} - -// SoftIRQStat represent the softirq statistics as exported in the procfs stat file. 
-// A nice introduction can be found at https://0xax.gitbooks.io/linux-insides/content/interrupts/interrupts-9.html -// It is possible to get per-cpu stats by reading /proc/softirqs -type SoftIRQStat struct { - Hi uint64 - Timer uint64 - NetTx uint64 - NetRx uint64 - Block uint64 - BlockIoPoll uint64 - Tasklet uint64 - Sched uint64 - Hrtimer uint64 - Rcu uint64 -} - -// Stat represents kernel/system statistics. -type Stat struct { - // Boot time in seconds since the Epoch. - BootTime uint64 - // Summed up cpu statistics. - CPUTotal CPUStat - // Per-CPU statistics. - CPU []CPUStat - // Number of times interrupts were handled, which contains numbered and unnumbered IRQs. - IRQTotal uint64 - // Number of times a numbered IRQ was triggered. - IRQ []uint64 - // Number of times a context switch happened. - ContextSwitches uint64 - // Number of times a process was created. - ProcessCreated uint64 - // Number of processes currently running. - ProcessesRunning uint64 - // Number of processes currently blocked (waiting for IO). - ProcessesBlocked uint64 - // Number of times a softirq was scheduled. - SoftIRQTotal uint64 - // Detailed softirq statistics. - SoftIRQ SoftIRQStat -} - -// NewStat returns kernel/system statistics read from /proc/stat. -func NewStat() (Stat, error) { - fs, err := NewFS(DefaultMountPoint) - if err != nil { - return Stat{}, err - } - - return fs.NewStat() -} - -// Parse a cpu statistics line and returns the CPUStat struct plus the cpu id (or -1 for the overall sum). 
-func parseCPUStat(line string) (CPUStat, int64, error) { - cpuStat := CPUStat{} - var cpu string - - count, err := fmt.Sscanf(line, "%s %f %f %f %f %f %f %f %f %f %f", - &cpu, - &cpuStat.User, &cpuStat.Nice, &cpuStat.System, &cpuStat.Idle, - &cpuStat.Iowait, &cpuStat.IRQ, &cpuStat.SoftIRQ, &cpuStat.Steal, - &cpuStat.Guest, &cpuStat.GuestNice) - - if err != nil && err != io.EOF { - return CPUStat{}, -1, fmt.Errorf("couldn't parse %s (cpu): %s", line, err) - } - if count == 0 { - return CPUStat{}, -1, fmt.Errorf("couldn't parse %s (cpu): 0 elements parsed", line) - } - - cpuStat.User /= userHZ - cpuStat.Nice /= userHZ - cpuStat.System /= userHZ - cpuStat.Idle /= userHZ - cpuStat.Iowait /= userHZ - cpuStat.IRQ /= userHZ - cpuStat.SoftIRQ /= userHZ - cpuStat.Steal /= userHZ - cpuStat.Guest /= userHZ - cpuStat.GuestNice /= userHZ - - if cpu == "cpu" { - return cpuStat, -1, nil - } - - cpuID, err := strconv.ParseInt(cpu[3:], 10, 64) - if err != nil { - return CPUStat{}, -1, fmt.Errorf("couldn't parse %s (cpu/cpuid): %s", line, err) - } - - return cpuStat, cpuID, nil -} - -// Parse a softirq line. -func parseSoftIRQStat(line string) (SoftIRQStat, uint64, error) { - softIRQStat := SoftIRQStat{} - var total uint64 - var prefix string - - _, err := fmt.Sscanf(line, "%s %d %d %d %d %d %d %d %d %d %d %d", - &prefix, &total, - &softIRQStat.Hi, &softIRQStat.Timer, &softIRQStat.NetTx, &softIRQStat.NetRx, - &softIRQStat.Block, &softIRQStat.BlockIoPoll, - &softIRQStat.Tasklet, &softIRQStat.Sched, - &softIRQStat.Hrtimer, &softIRQStat.Rcu) - - if err != nil { - return SoftIRQStat{}, 0, fmt.Errorf("couldn't parse %s (softirq): %s", line, err) - } - - return softIRQStat, total, nil -} - -// NewStat returns an information about current kernel/system statistics. 
-func (fs FS) NewStat() (Stat, error) { - // See https://www.kernel.org/doc/Documentation/filesystems/proc.txt - - f, err := os.Open(fs.Path("stat")) - if err != nil { - return Stat{}, err - } - defer f.Close() - - stat := Stat{} - - scanner := bufio.NewScanner(f) - for scanner.Scan() { - line := scanner.Text() - parts := strings.Fields(scanner.Text()) - // require at least - if len(parts) < 2 { - continue - } - switch { - case parts[0] == "btime": - if stat.BootTime, err = strconv.ParseUint(parts[1], 10, 64); err != nil { - return Stat{}, fmt.Errorf("couldn't parse %s (btime): %s", parts[1], err) - } - case parts[0] == "intr": - if stat.IRQTotal, err = strconv.ParseUint(parts[1], 10, 64); err != nil { - return Stat{}, fmt.Errorf("couldn't parse %s (intr): %s", parts[1], err) - } - numberedIRQs := parts[2:] - stat.IRQ = make([]uint64, len(numberedIRQs)) - for i, count := range numberedIRQs { - if stat.IRQ[i], err = strconv.ParseUint(count, 10, 64); err != nil { - return Stat{}, fmt.Errorf("couldn't parse %s (intr%d): %s", count, i, err) - } - } - case parts[0] == "ctxt": - if stat.ContextSwitches, err = strconv.ParseUint(parts[1], 10, 64); err != nil { - return Stat{}, fmt.Errorf("couldn't parse %s (ctxt): %s", parts[1], err) - } - case parts[0] == "processes": - if stat.ProcessCreated, err = strconv.ParseUint(parts[1], 10, 64); err != nil { - return Stat{}, fmt.Errorf("couldn't parse %s (processes): %s", parts[1], err) - } - case parts[0] == "procs_running": - if stat.ProcessesRunning, err = strconv.ParseUint(parts[1], 10, 64); err != nil { - return Stat{}, fmt.Errorf("couldn't parse %s (procs_running): %s", parts[1], err) - } - case parts[0] == "procs_blocked": - if stat.ProcessesBlocked, err = strconv.ParseUint(parts[1], 10, 64); err != nil { - return Stat{}, fmt.Errorf("couldn't parse %s (procs_blocked): %s", parts[1], err) - } - case parts[0] == "softirq": - softIRQStats, total, err := parseSoftIRQStat(line) - if err != nil { - return Stat{}, err - } - 
stat.SoftIRQTotal = total - stat.SoftIRQ = softIRQStats - case strings.HasPrefix(parts[0], "cpu"): - cpuStat, cpuID, err := parseCPUStat(line) - if err != nil { - return Stat{}, err - } - if cpuID == -1 { - stat.CPUTotal = cpuStat - } else { - for int64(len(stat.CPU)) <= cpuID { - stat.CPU = append(stat.CPU, CPUStat{}) - } - stat.CPU[cpuID] = cpuStat - } - } - } - - if err := scanner.Err(); err != nil { - return Stat{}, fmt.Errorf("couldn't parse %s: %s", f.Name(), err) - } - - return stat, nil -} diff --git a/vendor/github.com/prometheus/procfs/ttar b/vendor/github.com/prometheus/procfs/ttar deleted file mode 100644 index b0171a1..0000000 --- a/vendor/github.com/prometheus/procfs/ttar +++ /dev/null @@ -1,389 +0,0 @@ -#!/usr/bin/env bash - -# Purpose: plain text tar format -# Limitations: - only suitable for text files, directories, and symlinks -# - stores only filename, content, and mode -# - not designed for untrusted input -# -# Note: must work with bash version 3.2 (macOS) - -# Copyright 2017 Roger Luethi -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -set -o errexit -o nounset - -# Sanitize environment (for instance, standard sorting of glob matches) -export LC_ALL=C - -path="" -CMD="" -ARG_STRING="$*" - -#------------------------------------------------------------------------------ -# Not all sed implementations can work on null bytes. In order to make ttar -# work out of the box on macOS, use Python as a stream editor. 
- -USE_PYTHON=0 - -PYTHON_CREATE_FILTER=$(cat << 'PCF' -#!/usr/bin/env python - -import re -import sys - -for line in sys.stdin: - line = re.sub(r'EOF', r'\EOF', line) - line = re.sub(r'NULLBYTE', r'\NULLBYTE', line) - line = re.sub('\x00', r'NULLBYTE', line) - sys.stdout.write(line) -PCF -) - -PYTHON_EXTRACT_FILTER=$(cat << 'PEF' -#!/usr/bin/env python - -import re -import sys - -for line in sys.stdin: - line = re.sub(r'(?/dev/null; then - echo "ERROR Python not found. Aborting." - exit 2 - fi - USE_PYTHON=1 - fi -} - -#------------------------------------------------------------------------------ - -function usage { - bname=$(basename "$0") - cat << USAGE -Usage: $bname [-C ] -c -f (create archive) - $bname -t -f (list archive contents) - $bname [-C ] -x -f (extract archive) - -Options: - -C (change directory) - -v (verbose) - -Example: Change to sysfs directory, create ttar file from fixtures directory - $bname -C sysfs -c -f sysfs/fixtures.ttar fixtures/ -USAGE -exit "$1" -} - -function vecho { - if [ "${VERBOSE:-}" == "yes" ]; then - echo >&7 "$@" - fi -} - -function set_cmd { - if [ -n "$CMD" ]; then - echo "ERROR: more than one command given" - echo - usage 2 - fi - CMD=$1 -} - -unset VERBOSE - -while getopts :cf:htxvC: opt; do - case $opt in - c) - set_cmd "create" - ;; - f) - ARCHIVE=$OPTARG - ;; - h) - usage 0 - ;; - t) - set_cmd "list" - ;; - x) - set_cmd "extract" - ;; - v) - VERBOSE=yes - exec 7>&1 - ;; - C) - CDIR=$OPTARG - ;; - *) - echo >&2 "ERROR: invalid option -$OPTARG" - echo - usage 1 - ;; - esac -done - -# Remove processed options from arguments -shift $(( OPTIND - 1 )); - -if [ "${CMD:-}" == "" ]; then - echo >&2 "ERROR: no command given" - echo - usage 1 -elif [ "${ARCHIVE:-}" == "" ]; then - echo >&2 "ERROR: no archive name given" - echo - usage 1 -fi - -function list { - local path="" - local size=0 - local line_no=0 - local ttar_file=$1 - if [ -n "${2:-}" ]; then - echo >&2 "ERROR: too many arguments." - echo - usage 1 - fi - if [ ! 
-e "$ttar_file" ]; then - echo >&2 "ERROR: file not found ($ttar_file)" - echo - usage 1 - fi - while read -r line; do - line_no=$(( line_no + 1 )) - if [ $size -gt 0 ]; then - size=$(( size - 1 )) - continue - fi - if [[ $line =~ ^Path:\ (.*)$ ]]; then - path=${BASH_REMATCH[1]} - elif [[ $line =~ ^Lines:\ (.*)$ ]]; then - size=${BASH_REMATCH[1]} - echo "$path" - elif [[ $line =~ ^Directory:\ (.*)$ ]]; then - path=${BASH_REMATCH[1]} - echo "$path/" - elif [[ $line =~ ^SymlinkTo:\ (.*)$ ]]; then - echo "$path -> ${BASH_REMATCH[1]}" - fi - done < "$ttar_file" -} - -function extract { - local path="" - local size=0 - local line_no=0 - local ttar_file=$1 - if [ -n "${2:-}" ]; then - echo >&2 "ERROR: too many arguments." - echo - usage 1 - fi - if [ ! -e "$ttar_file" ]; then - echo >&2 "ERROR: file not found ($ttar_file)" - echo - usage 1 - fi - while IFS= read -r line; do - line_no=$(( line_no + 1 )) - local eof_without_newline - if [ "$size" -gt 0 ]; then - if [[ "$line" =~ [^\\]EOF ]]; then - # An EOF not preceeded by a backslash indicates that the line - # does not end with a newline - eof_without_newline=1 - else - eof_without_newline=0 - fi - # Replace NULLBYTE with null byte if at beginning of line - # Replace NULLBYTE with null byte unless preceeded by backslash - # Remove one backslash in front of NULLBYTE (if any) - # Remove EOF unless preceeded by backslash - # Remove one backslash in front of EOF - if [ $USE_PYTHON -eq 1 ]; then - echo -n "$line" | python -c "$PYTHON_EXTRACT_FILTER" >> "$path" - else - # The repeated pattern makes up for sed's lack of negative - # lookbehind assertions (for consecutive null bytes). 
- echo -n "$line" | \ - sed -e 's/^NULLBYTE/\x0/g; - s/\([^\\]\)NULLBYTE/\1\x0/g; - s/\([^\\]\)NULLBYTE/\1\x0/g; - s/\\NULLBYTE/NULLBYTE/g; - s/\([^\\]\)EOF/\1/g; - s/\\EOF/EOF/g; - ' >> "$path" - fi - if [[ "$eof_without_newline" -eq 0 ]]; then - echo >> "$path" - fi - size=$(( size - 1 )) - continue - fi - if [[ $line =~ ^Path:\ (.*)$ ]]; then - path=${BASH_REMATCH[1]} - if [ -e "$path" ] || [ -L "$path" ]; then - rm "$path" - fi - elif [[ $line =~ ^Lines:\ (.*)$ ]]; then - size=${BASH_REMATCH[1]} - # Create file even if it is zero-length. - touch "$path" - vecho " $path" - elif [[ $line =~ ^Mode:\ (.*)$ ]]; then - mode=${BASH_REMATCH[1]} - chmod "$mode" "$path" - vecho "$mode" - elif [[ $line =~ ^Directory:\ (.*)$ ]]; then - path=${BASH_REMATCH[1]} - mkdir -p "$path" - vecho " $path/" - elif [[ $line =~ ^SymlinkTo:\ (.*)$ ]]; then - ln -s "${BASH_REMATCH[1]}" "$path" - vecho " $path -> ${BASH_REMATCH[1]}" - elif [[ $line =~ ^# ]]; then - # Ignore comments between files - continue - else - echo >&2 "ERROR: Unknown keyword on line $line_no: $line" - exit 1 - fi - done < "$ttar_file" -} - -function div { - echo "# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -" \ - "- - - - - -" -} - -function get_mode { - local mfile=$1 - if [ -z "${STAT_OPTION:-}" ]; then - if stat -c '%a' "$mfile" >/dev/null 2>&1; then - # GNU stat - STAT_OPTION='-c' - STAT_FORMAT='%a' - else - # BSD stat - STAT_OPTION='-f' - # Octal output, user/group/other (omit file type, sticky bit) - STAT_FORMAT='%OLp' - fi - fi - stat "${STAT_OPTION}" "${STAT_FORMAT}" "$mfile" -} - -function _create { - shopt -s nullglob - local mode - local eof_without_newline - while (( "$#" )); do - file=$1 - if [ -L "$file" ]; then - echo "Path: $file" - symlinkTo=$(readlink "$file") - echo "SymlinkTo: $symlinkTo" - vecho " $file -> $symlinkTo" - div - elif [ -d "$file" ]; then - # Strip trailing slash (if there is one) - file=${file%/} - echo "Directory: $file" - mode=$(get_mode "$file") - echo 
"Mode: $mode" - vecho "$mode $file/" - div - # Find all files and dirs, including hidden/dot files - for x in "$file/"{*,.[^.]*}; do - _create "$x" - done - elif [ -f "$file" ]; then - echo "Path: $file" - lines=$(wc -l "$file"|awk '{print $1}') - eof_without_newline=0 - if [[ "$(wc -c "$file"|awk '{print $1}')" -gt 0 ]] && \ - [[ "$(tail -c 1 "$file" | wc -l)" -eq 0 ]]; then - eof_without_newline=1 - lines=$((lines+1)) - fi - echo "Lines: $lines" - # Add backslash in front of EOF - # Add backslash in front of NULLBYTE - # Replace null byte with NULLBYTE - if [ $USE_PYTHON -eq 1 ]; then - < "$file" python -c "$PYTHON_CREATE_FILTER" - else - < "$file" \ - sed 's/EOF/\\EOF/g; - s/NULLBYTE/\\NULLBYTE/g; - s/\x0/NULLBYTE/g; - ' - fi - if [[ "$eof_without_newline" -eq 1 ]]; then - # Finish line with EOF to indicate that the original line did - # not end with a linefeed - echo "EOF" - fi - mode=$(get_mode "$file") - echo "Mode: $mode" - vecho "$mode $file" - div - else - echo >&2 "ERROR: file not found ($file in $(pwd))" - exit 2 - fi - shift - done -} - -function create { - ttar_file=$1 - shift - if [ -z "${1:-}" ]; then - echo >&2 "ERROR: missing arguments." - echo - usage 1 - fi - if [ -e "$ttar_file" ]; then - rm "$ttar_file" - fi - exec > "$ttar_file" - echo "# Archive created by ttar $ARG_STRING" - _create "$@" -} - -test_environment - -if [ -n "${CDIR:-}" ]; then - if [[ "$ARCHIVE" != /* ]]; then - # Relative path: preserve the archive's location before changing - # directory - ARCHIVE="$(pwd)/$ARCHIVE" - fi - cd "$CDIR" -fi - -"$CMD" "$ARCHIVE" "$@" diff --git a/vendor/github.com/prometheus/procfs/xfrm.go b/vendor/github.com/prometheus/procfs/xfrm.go deleted file mode 100644 index 8f1508f..0000000 --- a/vendor/github.com/prometheus/procfs/xfrm.go +++ /dev/null @@ -1,187 +0,0 @@ -// Copyright 2017 Prometheus Team -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package procfs - -import ( - "bufio" - "fmt" - "os" - "strconv" - "strings" -) - -// XfrmStat models the contents of /proc/net/xfrm_stat. -type XfrmStat struct { - // All errors which are not matched by other - XfrmInError int - // No buffer is left - XfrmInBufferError int - // Header Error - XfrmInHdrError int - // No state found - // i.e. either inbound SPI, address, or IPSEC protocol at SA is wrong - XfrmInNoStates int - // Transformation protocol specific error - // e.g. SA Key is wrong - XfrmInStateProtoError int - // Transformation mode specific error - XfrmInStateModeError int - // Sequence error - // e.g. sequence number is out of window - XfrmInStateSeqError int - // State is expired - XfrmInStateExpired int - // State has mismatch option - // e.g. UDP encapsulation type is mismatched - XfrmInStateMismatch int - // State is invalid - XfrmInStateInvalid int - // No matching template for states - // e.g. Inbound SAs are correct but SP rule is wrong - XfrmInTmplMismatch int - // No policy is found for states - // e.g. 
Inbound SAs are correct but no SP is found - XfrmInNoPols int - // Policy discards - XfrmInPolBlock int - // Policy error - XfrmInPolError int - // All errors which are not matched by others - XfrmOutError int - // Bundle generation error - XfrmOutBundleGenError int - // Bundle check error - XfrmOutBundleCheckError int - // No state was found - XfrmOutNoStates int - // Transformation protocol specific error - XfrmOutStateProtoError int - // Transportation mode specific error - XfrmOutStateModeError int - // Sequence error - // i.e sequence number overflow - XfrmOutStateSeqError int - // State is expired - XfrmOutStateExpired int - // Policy discads - XfrmOutPolBlock int - // Policy is dead - XfrmOutPolDead int - // Policy Error - XfrmOutPolError int - XfrmFwdHdrError int - XfrmOutStateInvalid int - XfrmAcquireError int -} - -// NewXfrmStat reads the xfrm_stat statistics. -func NewXfrmStat() (XfrmStat, error) { - fs, err := NewFS(DefaultMountPoint) - if err != nil { - return XfrmStat{}, err - } - - return fs.NewXfrmStat() -} - -// NewXfrmStat reads the xfrm_stat statistics from the 'proc' filesystem. 
-func (fs FS) NewXfrmStat() (XfrmStat, error) { - file, err := os.Open(fs.Path("net/xfrm_stat")) - if err != nil { - return XfrmStat{}, err - } - defer file.Close() - - var ( - x = XfrmStat{} - s = bufio.NewScanner(file) - ) - - for s.Scan() { - fields := strings.Fields(s.Text()) - - if len(fields) != 2 { - return XfrmStat{}, fmt.Errorf( - "couldn't parse %s line %s", file.Name(), s.Text()) - } - - name := fields[0] - value, err := strconv.Atoi(fields[1]) - if err != nil { - return XfrmStat{}, err - } - - switch name { - case "XfrmInError": - x.XfrmInError = value - case "XfrmInBufferError": - x.XfrmInBufferError = value - case "XfrmInHdrError": - x.XfrmInHdrError = value - case "XfrmInNoStates": - x.XfrmInNoStates = value - case "XfrmInStateProtoError": - x.XfrmInStateProtoError = value - case "XfrmInStateModeError": - x.XfrmInStateModeError = value - case "XfrmInStateSeqError": - x.XfrmInStateSeqError = value - case "XfrmInStateExpired": - x.XfrmInStateExpired = value - case "XfrmInStateInvalid": - x.XfrmInStateInvalid = value - case "XfrmInTmplMismatch": - x.XfrmInTmplMismatch = value - case "XfrmInNoPols": - x.XfrmInNoPols = value - case "XfrmInPolBlock": - x.XfrmInPolBlock = value - case "XfrmInPolError": - x.XfrmInPolError = value - case "XfrmOutError": - x.XfrmOutError = value - case "XfrmInStateMismatch": - x.XfrmInStateMismatch = value - case "XfrmOutBundleGenError": - x.XfrmOutBundleGenError = value - case "XfrmOutBundleCheckError": - x.XfrmOutBundleCheckError = value - case "XfrmOutNoStates": - x.XfrmOutNoStates = value - case "XfrmOutStateProtoError": - x.XfrmOutStateProtoError = value - case "XfrmOutStateModeError": - x.XfrmOutStateModeError = value - case "XfrmOutStateSeqError": - x.XfrmOutStateSeqError = value - case "XfrmOutStateExpired": - x.XfrmOutStateExpired = value - case "XfrmOutPolBlock": - x.XfrmOutPolBlock = value - case "XfrmOutPolDead": - x.XfrmOutPolDead = value - case "XfrmOutPolError": - x.XfrmOutPolError = value - case 
"XfrmFwdHdrError": - x.XfrmFwdHdrError = value - case "XfrmOutStateInvalid": - x.XfrmOutStateInvalid = value - case "XfrmAcquireError": - x.XfrmAcquireError = value - } - - } - - return x, s.Err() -} diff --git a/vendor/github.com/prometheus/procfs/xfs/parse.go b/vendor/github.com/prometheus/procfs/xfs/parse.go deleted file mode 100644 index 2bc0ef3..0000000 --- a/vendor/github.com/prometheus/procfs/xfs/parse.go +++ /dev/null @@ -1,330 +0,0 @@ -// Copyright 2017 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package xfs - -import ( - "bufio" - "fmt" - "io" - "strings" - - "github.com/prometheus/procfs/internal/util" -) - -// ParseStats parses a Stats from an input io.Reader, using the format -// found in /proc/fs/xfs/stat. -func ParseStats(r io.Reader) (*Stats, error) { - const ( - // Fields parsed into stats structures. - fieldExtentAlloc = "extent_alloc" - fieldAbt = "abt" - fieldBlkMap = "blk_map" - fieldBmbt = "bmbt" - fieldDir = "dir" - fieldTrans = "trans" - fieldIg = "ig" - fieldLog = "log" - fieldRw = "rw" - fieldAttr = "attr" - fieldIcluster = "icluster" - fieldVnodes = "vnodes" - fieldBuf = "buf" - fieldXpc = "xpc" - - // Unimplemented at this time due to lack of documentation. 
- fieldPushAil = "push_ail" - fieldXstrat = "xstrat" - fieldAbtb2 = "abtb2" - fieldAbtc2 = "abtc2" - fieldBmbt2 = "bmbt2" - fieldIbt2 = "ibt2" - fieldFibt2 = "fibt2" - fieldQm = "qm" - fieldDebug = "debug" - ) - - var xfss Stats - - s := bufio.NewScanner(r) - for s.Scan() { - // Expect at least a string label and a single integer value, ex: - // - abt 0 - // - rw 1 2 - ss := strings.Fields(string(s.Bytes())) - if len(ss) < 2 { - continue - } - label := ss[0] - - // Extended precision counters are uint64 values. - if label == fieldXpc { - us, err := util.ParseUint64s(ss[1:]) - if err != nil { - return nil, err - } - - xfss.ExtendedPrecision, err = extendedPrecisionStats(us) - if err != nil { - return nil, err - } - - continue - } - - // All other counters are uint32 values. - us, err := util.ParseUint32s(ss[1:]) - if err != nil { - return nil, err - } - - switch label { - case fieldExtentAlloc: - xfss.ExtentAllocation, err = extentAllocationStats(us) - case fieldAbt: - xfss.AllocationBTree, err = btreeStats(us) - case fieldBlkMap: - xfss.BlockMapping, err = blockMappingStats(us) - case fieldBmbt: - xfss.BlockMapBTree, err = btreeStats(us) - case fieldDir: - xfss.DirectoryOperation, err = directoryOperationStats(us) - case fieldTrans: - xfss.Transaction, err = transactionStats(us) - case fieldIg: - xfss.InodeOperation, err = inodeOperationStats(us) - case fieldLog: - xfss.LogOperation, err = logOperationStats(us) - case fieldRw: - xfss.ReadWrite, err = readWriteStats(us) - case fieldAttr: - xfss.AttributeOperation, err = attributeOperationStats(us) - case fieldIcluster: - xfss.InodeClustering, err = inodeClusteringStats(us) - case fieldVnodes: - xfss.Vnode, err = vnodeStats(us) - case fieldBuf: - xfss.Buffer, err = bufferStats(us) - } - if err != nil { - return nil, err - } - } - - return &xfss, s.Err() -} - -// extentAllocationStats builds an ExtentAllocationStats from a slice of uint32s. 
-func extentAllocationStats(us []uint32) (ExtentAllocationStats, error) { - if l := len(us); l != 4 { - return ExtentAllocationStats{}, fmt.Errorf("incorrect number of values for XFS extent allocation stats: %d", l) - } - - return ExtentAllocationStats{ - ExtentsAllocated: us[0], - BlocksAllocated: us[1], - ExtentsFreed: us[2], - BlocksFreed: us[3], - }, nil -} - -// btreeStats builds a BTreeStats from a slice of uint32s. -func btreeStats(us []uint32) (BTreeStats, error) { - if l := len(us); l != 4 { - return BTreeStats{}, fmt.Errorf("incorrect number of values for XFS btree stats: %d", l) - } - - return BTreeStats{ - Lookups: us[0], - Compares: us[1], - RecordsInserted: us[2], - RecordsDeleted: us[3], - }, nil -} - -// BlockMappingStat builds a BlockMappingStats from a slice of uint32s. -func blockMappingStats(us []uint32) (BlockMappingStats, error) { - if l := len(us); l != 7 { - return BlockMappingStats{}, fmt.Errorf("incorrect number of values for XFS block mapping stats: %d", l) - } - - return BlockMappingStats{ - Reads: us[0], - Writes: us[1], - Unmaps: us[2], - ExtentListInsertions: us[3], - ExtentListDeletions: us[4], - ExtentListLookups: us[5], - ExtentListCompares: us[6], - }, nil -} - -// DirectoryOperationStats builds a DirectoryOperationStats from a slice of uint32s. -func directoryOperationStats(us []uint32) (DirectoryOperationStats, error) { - if l := len(us); l != 4 { - return DirectoryOperationStats{}, fmt.Errorf("incorrect number of values for XFS directory operation stats: %d", l) - } - - return DirectoryOperationStats{ - Lookups: us[0], - Creates: us[1], - Removes: us[2], - Getdents: us[3], - }, nil -} - -// TransactionStats builds a TransactionStats from a slice of uint32s. 
-func transactionStats(us []uint32) (TransactionStats, error) { - if l := len(us); l != 3 { - return TransactionStats{}, fmt.Errorf("incorrect number of values for XFS transaction stats: %d", l) - } - - return TransactionStats{ - Sync: us[0], - Async: us[1], - Empty: us[2], - }, nil -} - -// InodeOperationStats builds an InodeOperationStats from a slice of uint32s. -func inodeOperationStats(us []uint32) (InodeOperationStats, error) { - if l := len(us); l != 7 { - return InodeOperationStats{}, fmt.Errorf("incorrect number of values for XFS inode operation stats: %d", l) - } - - return InodeOperationStats{ - Attempts: us[0], - Found: us[1], - Recycle: us[2], - Missed: us[3], - Duplicate: us[4], - Reclaims: us[5], - AttributeChange: us[6], - }, nil -} - -// LogOperationStats builds a LogOperationStats from a slice of uint32s. -func logOperationStats(us []uint32) (LogOperationStats, error) { - if l := len(us); l != 5 { - return LogOperationStats{}, fmt.Errorf("incorrect number of values for XFS log operation stats: %d", l) - } - - return LogOperationStats{ - Writes: us[0], - Blocks: us[1], - NoInternalBuffers: us[2], - Force: us[3], - ForceSleep: us[4], - }, nil -} - -// ReadWriteStats builds a ReadWriteStats from a slice of uint32s. -func readWriteStats(us []uint32) (ReadWriteStats, error) { - if l := len(us); l != 2 { - return ReadWriteStats{}, fmt.Errorf("incorrect number of values for XFS read write stats: %d", l) - } - - return ReadWriteStats{ - Read: us[0], - Write: us[1], - }, nil -} - -// AttributeOperationStats builds an AttributeOperationStats from a slice of uint32s. 
-func attributeOperationStats(us []uint32) (AttributeOperationStats, error) { - if l := len(us); l != 4 { - return AttributeOperationStats{}, fmt.Errorf("incorrect number of values for XFS attribute operation stats: %d", l) - } - - return AttributeOperationStats{ - Get: us[0], - Set: us[1], - Remove: us[2], - List: us[3], - }, nil -} - -// InodeClusteringStats builds an InodeClusteringStats from a slice of uint32s. -func inodeClusteringStats(us []uint32) (InodeClusteringStats, error) { - if l := len(us); l != 3 { - return InodeClusteringStats{}, fmt.Errorf("incorrect number of values for XFS inode clustering stats: %d", l) - } - - return InodeClusteringStats{ - Iflush: us[0], - Flush: us[1], - FlushInode: us[2], - }, nil -} - -// VnodeStats builds a VnodeStats from a slice of uint32s. -func vnodeStats(us []uint32) (VnodeStats, error) { - // The attribute "Free" appears to not be available on older XFS - // stats versions. Therefore, 7 or 8 elements may appear in - // this slice. - l := len(us) - if l != 7 && l != 8 { - return VnodeStats{}, fmt.Errorf("incorrect number of values for XFS vnode stats: %d", l) - } - - s := VnodeStats{ - Active: us[0], - Allocate: us[1], - Get: us[2], - Hold: us[3], - Release: us[4], - Reclaim: us[5], - Remove: us[6], - } - - // Skip adding free, unless it is present. The zero value will - // be used in place of an actual count. - if l == 7 { - return s, nil - } - - s.Free = us[7] - return s, nil -} - -// BufferStats builds a BufferStats from a slice of uint32s. -func bufferStats(us []uint32) (BufferStats, error) { - if l := len(us); l != 9 { - return BufferStats{}, fmt.Errorf("incorrect number of values for XFS buffer stats: %d", l) - } - - return BufferStats{ - Get: us[0], - Create: us[1], - GetLocked: us[2], - GetLockedWaited: us[3], - BusyLocked: us[4], - MissLocked: us[5], - PageRetries: us[6], - PageFound: us[7], - GetRead: us[8], - }, nil -} - -// ExtendedPrecisionStats builds an ExtendedPrecisionStats from a slice of uint32s. 
-func extendedPrecisionStats(us []uint64) (ExtendedPrecisionStats, error) { - if l := len(us); l != 3 { - return ExtendedPrecisionStats{}, fmt.Errorf("incorrect number of values for XFS extended precision stats: %d", l) - } - - return ExtendedPrecisionStats{ - FlushBytes: us[0], - WriteBytes: us[1], - ReadBytes: us[2], - }, nil -} diff --git a/vendor/github.com/prometheus/procfs/xfs/xfs.go b/vendor/github.com/prometheus/procfs/xfs/xfs.go deleted file mode 100644 index d86794b..0000000 --- a/vendor/github.com/prometheus/procfs/xfs/xfs.go +++ /dev/null @@ -1,163 +0,0 @@ -// Copyright 2017 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Package xfs provides access to statistics exposed by the XFS filesystem. -package xfs - -// Stats contains XFS filesystem runtime statistics, parsed from -// /proc/fs/xfs/stat. -// -// The names and meanings of each statistic were taken from -// http://xfs.org/index.php/Runtime_Stats and xfs_stats.h in the Linux -// kernel source. Most counters are uint32s (same data types used in -// xfs_stats.h), but some of the "extended precision stats" are uint64s. -type Stats struct { - // The name of the filesystem used to source these statistics. - // If empty, this indicates aggregated statistics for all XFS - // filesystems on the host. 
- Name string - - ExtentAllocation ExtentAllocationStats - AllocationBTree BTreeStats - BlockMapping BlockMappingStats - BlockMapBTree BTreeStats - DirectoryOperation DirectoryOperationStats - Transaction TransactionStats - InodeOperation InodeOperationStats - LogOperation LogOperationStats - ReadWrite ReadWriteStats - AttributeOperation AttributeOperationStats - InodeClustering InodeClusteringStats - Vnode VnodeStats - Buffer BufferStats - ExtendedPrecision ExtendedPrecisionStats -} - -// ExtentAllocationStats contains statistics regarding XFS extent allocations. -type ExtentAllocationStats struct { - ExtentsAllocated uint32 - BlocksAllocated uint32 - ExtentsFreed uint32 - BlocksFreed uint32 -} - -// BTreeStats contains statistics regarding an XFS internal B-tree. -type BTreeStats struct { - Lookups uint32 - Compares uint32 - RecordsInserted uint32 - RecordsDeleted uint32 -} - -// BlockMappingStats contains statistics regarding XFS block maps. -type BlockMappingStats struct { - Reads uint32 - Writes uint32 - Unmaps uint32 - ExtentListInsertions uint32 - ExtentListDeletions uint32 - ExtentListLookups uint32 - ExtentListCompares uint32 -} - -// DirectoryOperationStats contains statistics regarding XFS directory entries. -type DirectoryOperationStats struct { - Lookups uint32 - Creates uint32 - Removes uint32 - Getdents uint32 -} - -// TransactionStats contains statistics regarding XFS metadata transactions. -type TransactionStats struct { - Sync uint32 - Async uint32 - Empty uint32 -} - -// InodeOperationStats contains statistics regarding XFS inode operations. -type InodeOperationStats struct { - Attempts uint32 - Found uint32 - Recycle uint32 - Missed uint32 - Duplicate uint32 - Reclaims uint32 - AttributeChange uint32 -} - -// LogOperationStats contains statistics regarding the XFS log buffer. 
-type LogOperationStats struct { - Writes uint32 - Blocks uint32 - NoInternalBuffers uint32 - Force uint32 - ForceSleep uint32 -} - -// ReadWriteStats contains statistics regarding the number of read and write -// system calls for XFS filesystems. -type ReadWriteStats struct { - Read uint32 - Write uint32 -} - -// AttributeOperationStats contains statistics regarding manipulation of -// XFS extended file attributes. -type AttributeOperationStats struct { - Get uint32 - Set uint32 - Remove uint32 - List uint32 -} - -// InodeClusteringStats contains statistics regarding XFS inode clustering -// operations. -type InodeClusteringStats struct { - Iflush uint32 - Flush uint32 - FlushInode uint32 -} - -// VnodeStats contains statistics regarding XFS vnode operations. -type VnodeStats struct { - Active uint32 - Allocate uint32 - Get uint32 - Hold uint32 - Release uint32 - Reclaim uint32 - Remove uint32 - Free uint32 -} - -// BufferStats contains statistics regarding XFS read/write I/O buffers. -type BufferStats struct { - Get uint32 - Create uint32 - GetLocked uint32 - GetLockedWaited uint32 - BusyLocked uint32 - MissLocked uint32 - PageRetries uint32 - PageFound uint32 - GetRead uint32 -} - -// ExtendedPrecisionStats contains high precision counters used to track the -// total number of bytes read, written, or flushed, during XFS operations. 
-type ExtendedPrecisionStats struct { - FlushBytes uint64 - WriteBytes uint64 - ReadBytes uint64 -} diff --git a/vendor/github.com/rs/cors/.travis.yml b/vendor/github.com/rs/cors/.travis.yml deleted file mode 100644 index bbb5185..0000000 --- a/vendor/github.com/rs/cors/.travis.yml +++ /dev/null @@ -1,4 +0,0 @@ -language: go -go: -- 1.3 -- 1.4 diff --git a/vendor/github.com/rs/cors/LICENSE b/vendor/github.com/rs/cors/LICENSE deleted file mode 100644 index d8e2df5..0000000 --- a/vendor/github.com/rs/cors/LICENSE +++ /dev/null @@ -1,19 +0,0 @@ -Copyright (c) 2014 Olivier Poitrey - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is furnished -to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -THE SOFTWARE. 
diff --git a/vendor/github.com/rs/cors/README.md b/vendor/github.com/rs/cors/README.md deleted file mode 100644 index 4bf5672..0000000 --- a/vendor/github.com/rs/cors/README.md +++ /dev/null @@ -1,99 +0,0 @@ -# Go CORS handler [![godoc](http://img.shields.io/badge/godoc-reference-blue.svg?style=flat)](https://godoc.org/github.com/rs/cors) [![license](http://img.shields.io/badge/license-MIT-red.svg?style=flat)](https://raw.githubusercontent.com/rs/cors/master/LICENSE) [![build](https://img.shields.io/travis/rs/cors.svg?style=flat)](https://travis-ci.org/rs/cors) [![Coverage](http://gocover.io/_badge/github.com/rs/cors)](http://gocover.io/github.com/rs/cors) - -CORS is a `net/http` handler implementing [Cross Origin Resource Sharing W3 specification](http://www.w3.org/TR/cors/) in Golang. - -## Getting Started - -After installing Go and setting up your [GOPATH](http://golang.org/doc/code.html#GOPATH), create your first `.go` file. We'll call it `server.go`. - -```go -package main - -import ( - "net/http" - - "github.com/rs/cors" -) - -func main() { - mux := http.NewServeMux() - mux.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) { - w.Header().Set("Content-Type", "application/json") - w.Write([]byte("{\"hello\": \"world\"}")) - }) - - // cors.Default() setup the middleware with default options being - // all origins accepted with simple methods (GET, POST). See - // documentation below for more options. 
- handler := cors.Default().Handler(mux) - http.ListenAndServe(":8080", handler) -} -``` - -Install `cors`: - - go get github.com/rs/cors - -Then run your server: - - go run server.go - -The server now runs on `localhost:8080`: - - $ curl -D - -H 'Origin: http://foo.com' http://localhost:8080/ - HTTP/1.1 200 OK - Access-Control-Allow-Origin: foo.com - Content-Type: application/json - Date: Sat, 25 Oct 2014 03:43:57 GMT - Content-Length: 18 - - {"hello": "world"} - -### More Examples - -* `net/http`: [examples/nethttp/server.go](https://github.com/rs/cors/blob/master/examples/nethttp/server.go) -* [Goji](https://goji.io): [examples/goji/server.go](https://github.com/rs/cors/blob/master/examples/goji/server.go) -* [Martini](http://martini.codegangsta.io): [examples/martini/server.go](https://github.com/rs/cors/blob/master/examples/martini/server.go) -* [Negroni](https://github.com/codegangsta/negroni): [examples/negroni/server.go](https://github.com/rs/cors/blob/master/examples/negroni/server.go) -* [Alice](https://github.com/justinas/alice): [examples/alice/server.go](https://github.com/rs/cors/blob/master/examples/alice/server.go) - -## Parameters - -Parameters are passed to the middleware thru the `cors.New` method as follow: - -```go -c := cors.New(cors.Options{ - AllowedOrigins: []string{"http://foo.com"}, - AllowCredentials: true, -}) - -// Insert the middleware -handler = c.Handler(handler) -``` - -* **AllowedOrigins** `[]string`: A list of origins a cross-domain request can be executed from. If the special `*` value is present in the list, all origins will be allowed. An origin may contain a wildcard (`*`) to replace 0 or more characters (i.e.: `http://*.domain.com`). Usage of wildcards implies a small performance penality. Only one wildcard can be used per origin. The default value is `*`. -* **AllowOriginFunc** `func (origin string) bool`: A custom function to validate the origin. 
It take the origin as argument and returns true if allowed or false otherwise. If this option is set, the content of `AllowedOrigins` is ignored -* **AllowedMethods** `[]string`: A list of methods the client is allowed to use with cross-domain requests. Default value is simple methods (`GET` and `POST`). -* **AllowedHeaders** `[]string`: A list of non simple headers the client is allowed to use with cross-domain requests. -* **ExposedHeaders** `[]string`: Indicates which headers are safe to expose to the API of a CORS API specification -* **AllowCredentials** `bool`: Indicates whether the request can include user credentials like cookies, HTTP authentication or client side SSL certificates. The default is `false`. -* **MaxAge** `int`: Indicates how long (in seconds) the results of a preflight request can be cached. The default is `0` which stands for no max age. -* **OptionsPassthrough** `bool`: Instructs preflight to let other potential next handlers to process the `OPTIONS` method. Turn this on if your application handles `OPTIONS`. -* **Debug** `bool`: Debugging flag adds additional output to debug server side CORS issues. - -See [API documentation](http://godoc.org/github.com/rs/cors) for more info. - -## Benchmarks - - BenchmarkWithout 20000000 64.6 ns/op 8 B/op 1 allocs/op - BenchmarkDefault 3000000 469 ns/op 114 B/op 2 allocs/op - BenchmarkAllowedOrigin 3000000 608 ns/op 114 B/op 2 allocs/op - BenchmarkPreflight 20000000 73.2 ns/op 0 B/op 0 allocs/op - BenchmarkPreflightHeader 20000000 73.6 ns/op 0 B/op 0 allocs/op - BenchmarkParseHeaderList 2000000 847 ns/op 184 B/op 6 allocs/op - BenchmarkParse…Single 5000000 290 ns/op 32 B/op 3 allocs/op - BenchmarkParse…Normalized 2000000 776 ns/op 160 B/op 6 allocs/op - -## Licenses - -All source code is licensed under the [MIT License](https://raw.github.com/rs/cors/master/LICENSE). 
diff --git a/vendor/github.com/rs/cors/cors.go b/vendor/github.com/rs/cors/cors.go deleted file mode 100644 index 4bb22d8..0000000 --- a/vendor/github.com/rs/cors/cors.go +++ /dev/null @@ -1,412 +0,0 @@ -/* -Package cors is net/http handler to handle CORS related requests -as defined by http://www.w3.org/TR/cors/ - -You can configure it by passing an option struct to cors.New: - - c := cors.New(cors.Options{ - AllowedOrigins: []string{"foo.com"}, - AllowedMethods: []string{"GET", "POST", "DELETE"}, - AllowCredentials: true, - }) - -Then insert the handler in the chain: - - handler = c.Handler(handler) - -See Options documentation for more options. - -The resulting handler is a standard net/http handler. -*/ -package cors - -import ( - "log" - "net/http" - "os" - "strconv" - "strings" - - "github.com/rs/xhandler" - "golang.org/x/net/context" -) - -// Options is a configuration container to setup the CORS middleware. -type Options struct { - // AllowedOrigins is a list of origins a cross-domain request can be executed from. - // If the special "*" value is present in the list, all origins will be allowed. - // An origin may contain a wildcard (*) to replace 0 or more characters - // (i.e.: http://*.domain.com). Usage of wildcards implies a small performance penality. - // Only one wildcard can be used per origin. - // Default value is ["*"] - AllowedOrigins []string - // AllowOriginFunc is a custom function to validate the origin. It take the origin - // as argument and returns true if allowed or false otherwise. If this option is - // set, the content of AllowedOrigins is ignored. - AllowOriginFunc func(origin string) bool - // AllowedMethods is a list of methods the client is allowed to use with - // cross-domain requests. Default value is simple methods (GET and POST) - AllowedMethods []string - // AllowedHeaders is list of non simple headers the client is allowed to use with - // cross-domain requests. 
- // If the special "*" value is present in the list, all headers will be allowed. - // Default value is [] but "Origin" is always appended to the list. - AllowedHeaders []string - // ExposedHeaders indicates which headers are safe to expose to the API of a CORS - // API specification - ExposedHeaders []string - // AllowCredentials indicates whether the request can include user credentials like - // cookies, HTTP authentication or client side SSL certificates. - AllowCredentials bool - // MaxAge indicates how long (in seconds) the results of a preflight request - // can be cached - MaxAge int - // OptionsPassthrough instructs preflight to let other potential next handlers to - // process the OPTIONS method. Turn this on if your application handles OPTIONS. - OptionsPassthrough bool - // Debugging flag adds additional output to debug server side CORS issues - Debug bool -} - -// Cors http handler -type Cors struct { - // Debug logger - Log *log.Logger - // Set to true when allowed origins contains a "*" - allowedOriginsAll bool - // Normalized list of plain allowed origins - allowedOrigins []string - // List of allowed origins containing wildcards - allowedWOrigins []wildcard - // Optional origin validator function - allowOriginFunc func(origin string) bool - // Set to true when allowed headers contains a "*" - allowedHeadersAll bool - // Normalized list of allowed headers - allowedHeaders []string - // Normalized list of allowed methods - allowedMethods []string - // Normalized list of exposed headers - exposedHeaders []string - allowCredentials bool - maxAge int - optionPassthrough bool -} - -// New creates a new Cors handler with the provided options. 
-func New(options Options) *Cors { - c := &Cors{ - exposedHeaders: convert(options.ExposedHeaders, http.CanonicalHeaderKey), - allowOriginFunc: options.AllowOriginFunc, - allowCredentials: options.AllowCredentials, - maxAge: options.MaxAge, - optionPassthrough: options.OptionsPassthrough, - } - if options.Debug { - c.Log = log.New(os.Stdout, "[cors] ", log.LstdFlags) - } - - // Normalize options - // Note: for origins and methods matching, the spec requires a case-sensitive matching. - // As it may error prone, we chose to ignore the spec here. - - // Allowed Origins - if len(options.AllowedOrigins) == 0 { - // Default is all origins - c.allowedOriginsAll = true - } else { - c.allowedOrigins = []string{} - c.allowedWOrigins = []wildcard{} - for _, origin := range options.AllowedOrigins { - // Normalize - origin = strings.ToLower(origin) - if origin == "*" { - // If "*" is present in the list, turn the whole list into a match all - c.allowedOriginsAll = true - c.allowedOrigins = nil - c.allowedWOrigins = nil - break - } else if i := strings.IndexByte(origin, '*'); i >= 0 { - // Split the origin in two: start and end string without the * - w := wildcard{origin[0:i], origin[i+1 : len(origin)]} - c.allowedWOrigins = append(c.allowedWOrigins, w) - } else { - c.allowedOrigins = append(c.allowedOrigins, origin) - } - } - } - - // Allowed Headers - if len(options.AllowedHeaders) == 0 { - // Use sensible defaults - c.allowedHeaders = []string{"Origin", "Accept", "Content-Type"} - } else { - // Origin is always appended as some browsers will always request for this header at preflight - c.allowedHeaders = convert(append(options.AllowedHeaders, "Origin"), http.CanonicalHeaderKey) - for _, h := range options.AllowedHeaders { - if h == "*" { - c.allowedHeadersAll = true - c.allowedHeaders = nil - break - } - } - } - - // Allowed Methods - if len(options.AllowedMethods) == 0 { - // Default is spec's "simple" methods - c.allowedMethods = []string{"GET", "POST"} - } else { - 
c.allowedMethods = convert(options.AllowedMethods, strings.ToUpper) - } - - return c -} - -// Default creates a new Cors handler with default options -func Default() *Cors { - return New(Options{}) -} - -// Handler apply the CORS specification on the request, and add relevant CORS headers -// as necessary. -func (c *Cors) Handler(h http.Handler) http.Handler { - return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - if r.Method == "OPTIONS" { - c.logf("Handler: Preflight request") - c.handlePreflight(w, r) - // Preflight requests are standalone and should stop the chain as some other - // middleware may not handle OPTIONS requests correctly. One typical example - // is authentication middleware ; OPTIONS requests won't carry authentication - // headers (see #1) - if c.optionPassthrough { - h.ServeHTTP(w, r) - } else { - w.WriteHeader(http.StatusOK) - } - } else { - c.logf("Handler: Actual request") - c.handleActualRequest(w, r) - h.ServeHTTP(w, r) - } - }) -} - -// HandlerC is net/context aware handler -func (c *Cors) HandlerC(h xhandler.HandlerC) xhandler.HandlerC { - return xhandler.HandlerFuncC(func(ctx context.Context, w http.ResponseWriter, r *http.Request) { - if r.Method == "OPTIONS" { - c.logf("Handler: Preflight request") - c.handlePreflight(w, r) - // Preflight requests are standalone and should stop the chain as some other - // middleware may not handle OPTIONS requests correctly. 
One typical example - // is authentication middleware ; OPTIONS requests won't carry authentication - // headers (see #1) - if c.optionPassthrough { - h.ServeHTTPC(ctx, w, r) - } else { - w.WriteHeader(http.StatusOK) - } - } else { - c.logf("Handler: Actual request") - c.handleActualRequest(w, r) - h.ServeHTTPC(ctx, w, r) - } - }) -} - -// HandlerFunc provides Martini compatible handler -func (c *Cors) HandlerFunc(w http.ResponseWriter, r *http.Request) { - if r.Method == "OPTIONS" { - c.logf("HandlerFunc: Preflight request") - c.handlePreflight(w, r) - } else { - c.logf("HandlerFunc: Actual request") - c.handleActualRequest(w, r) - } -} - -// Negroni compatible interface -func (c *Cors) ServeHTTP(w http.ResponseWriter, r *http.Request, next http.HandlerFunc) { - if r.Method == "OPTIONS" { - c.logf("ServeHTTP: Preflight request") - c.handlePreflight(w, r) - // Preflight requests are standalone and should stop the chain as some other - // middleware may not handle OPTIONS requests correctly. 
One typical example - // is authentication middleware ; OPTIONS requests won't carry authentication - // headers (see #1) - if c.optionPassthrough { - next(w, r) - } else { - w.WriteHeader(http.StatusOK) - } - } else { - c.logf("ServeHTTP: Actual request") - c.handleActualRequest(w, r) - next(w, r) - } -} - -// handlePreflight handles pre-flight CORS requests -func (c *Cors) handlePreflight(w http.ResponseWriter, r *http.Request) { - headers := w.Header() - origin := r.Header.Get("Origin") - - if r.Method != "OPTIONS" { - c.logf(" Preflight aborted: %s!=OPTIONS", r.Method) - return - } - // Always set Vary headers - // see https://github.com/rs/cors/issues/10, - // https://github.com/rs/cors/commit/dbdca4d95feaa7511a46e6f1efb3b3aa505bc43f#commitcomment-12352001 - headers.Add("Vary", "Origin") - headers.Add("Vary", "Access-Control-Request-Method") - headers.Add("Vary", "Access-Control-Request-Headers") - - if origin == "" { - c.logf(" Preflight aborted: empty origin") - return - } - if !c.isOriginAllowed(origin) { - c.logf(" Preflight aborted: origin '%s' not allowed", origin) - return - } - - reqMethod := r.Header.Get("Access-Control-Request-Method") - if !c.isMethodAllowed(reqMethod) { - c.logf(" Preflight aborted: method '%s' not allowed", reqMethod) - return - } - reqHeaders := parseHeaderList(r.Header.Get("Access-Control-Request-Headers")) - if !c.areHeadersAllowed(reqHeaders) { - c.logf(" Preflight aborted: headers '%v' not allowed", reqHeaders) - return - } - headers.Set("Access-Control-Allow-Origin", origin) - // Spec says: Since the list of methods can be unbounded, simply returning the method indicated - // by Access-Control-Request-Method (if supported) can be enough - headers.Set("Access-Control-Allow-Methods", strings.ToUpper(reqMethod)) - if len(reqHeaders) > 0 { - - // Spec says: Since the list of headers can be unbounded, simply returning supported headers - // from Access-Control-Request-Headers can be enough - 
headers.Set("Access-Control-Allow-Headers", strings.Join(reqHeaders, ", ")) - } - if c.allowCredentials { - headers.Set("Access-Control-Allow-Credentials", "true") - } - if c.maxAge > 0 { - headers.Set("Access-Control-Max-Age", strconv.Itoa(c.maxAge)) - } - c.logf(" Preflight response headers: %v", headers) -} - -// handleActualRequest handles simple cross-origin requests, actual request or redirects -func (c *Cors) handleActualRequest(w http.ResponseWriter, r *http.Request) { - headers := w.Header() - origin := r.Header.Get("Origin") - - if r.Method == "OPTIONS" { - c.logf(" Actual request no headers added: method == %s", r.Method) - return - } - // Always set Vary, see https://github.com/rs/cors/issues/10 - headers.Add("Vary", "Origin") - if origin == "" { - c.logf(" Actual request no headers added: missing origin") - return - } - if !c.isOriginAllowed(origin) { - c.logf(" Actual request no headers added: origin '%s' not allowed", origin) - return - } - - // Note that spec does define a way to specifically disallow a simple method like GET or - // POST. Access-Control-Allow-Methods is only used for pre-flight requests and the - // spec doesn't instruct to check the allowed methods for simple cross-origin requests. - // We think it's a nice feature to be able to have control on those methods though. - if !c.isMethodAllowed(r.Method) { - c.logf(" Actual request no headers added: method '%s' not allowed", r.Method) - - return - } - headers.Set("Access-Control-Allow-Origin", origin) - if len(c.exposedHeaders) > 0 { - headers.Set("Access-Control-Expose-Headers", strings.Join(c.exposedHeaders, ", ")) - } - if c.allowCredentials { - headers.Set("Access-Control-Allow-Credentials", "true") - } - c.logf(" Actual response added headers: %v", headers) -} - -// convenience method. checks if debugging is turned on before printing -func (c *Cors) logf(format string, a ...interface{}) { - if c.Log != nil { - c.Log.Printf(format, a...) 
- } -} - -// isOriginAllowed checks if a given origin is allowed to perform cross-domain requests -// on the endpoint -func (c *Cors) isOriginAllowed(origin string) bool { - if c.allowOriginFunc != nil { - return c.allowOriginFunc(origin) - } - if c.allowedOriginsAll { - return true - } - origin = strings.ToLower(origin) - for _, o := range c.allowedOrigins { - if o == origin { - return true - } - } - for _, w := range c.allowedWOrigins { - if w.match(origin) { - return true - } - } - return false -} - -// isMethodAllowed checks if a given method can be used as part of a cross-domain request -// on the endpoing -func (c *Cors) isMethodAllowed(method string) bool { - if len(c.allowedMethods) == 0 { - // If no method allowed, always return false, even for preflight request - return false - } - method = strings.ToUpper(method) - if method == "OPTIONS" { - // Always allow preflight requests - return true - } - for _, m := range c.allowedMethods { - if m == method { - return true - } - } - return false -} - -// areHeadersAllowed checks if a given list of headers are allowed to used within -// a cross-domain request. 
-func (c *Cors) areHeadersAllowed(requestedHeaders []string) bool { - if c.allowedHeadersAll || len(requestedHeaders) == 0 { - return true - } - for _, header := range requestedHeaders { - header = http.CanonicalHeaderKey(header) - found := false - for _, h := range c.allowedHeaders { - if h == header { - found = true - } - } - if !found { - return false - } - } - return true -} diff --git a/vendor/github.com/rs/cors/utils.go b/vendor/github.com/rs/cors/utils.go deleted file mode 100644 index c7a0aa0..0000000 --- a/vendor/github.com/rs/cors/utils.go +++ /dev/null @@ -1,70 +0,0 @@ -package cors - -import "strings" - -const toLower = 'a' - 'A' - -type converter func(string) string - -type wildcard struct { - prefix string - suffix string -} - -func (w wildcard) match(s string) bool { - return len(s) >= len(w.prefix+w.suffix) && strings.HasPrefix(s, w.prefix) && strings.HasSuffix(s, w.suffix) -} - -// convert converts a list of string using the passed converter function -func convert(s []string, c converter) []string { - out := []string{} - for _, i := range s { - out = append(out, c(i)) - } - return out -} - -// parseHeaderList tokenize + normalize a string containing a list of headers -func parseHeaderList(headerList string) []string { - l := len(headerList) - h := make([]byte, 0, l) - upper := true - // Estimate the number headers in order to allocate the right splice size - t := 0 - for i := 0; i < l; i++ { - if headerList[i] == ',' { - t++ - } - } - headers := make([]string, 0, t) - for i := 0; i < l; i++ { - b := headerList[i] - if b >= 'a' && b <= 'z' { - if upper { - h = append(h, b-toLower) - } else { - h = append(h, b) - } - } else if b >= 'A' && b <= 'Z' { - if !upper { - h = append(h, b+toLower) - } else { - h = append(h, b) - } - } else if b == '-' || b == '_' || (b >= '0' && b <= '9') { - h = append(h, b) - } - - if b == ' ' || b == ',' || i == l-1 { - if len(h) > 0 { - // Flush the found header - headers = append(headers, string(h)) - h = h[:0] - upper 
= true - } - } else { - upper = b == '-' || b == '_' - } - } - return headers -} diff --git a/vendor/github.com/rs/xhandler/.travis.yml b/vendor/github.com/rs/xhandler/.travis.yml deleted file mode 100644 index b65c7a9..0000000 --- a/vendor/github.com/rs/xhandler/.travis.yml +++ /dev/null @@ -1,7 +0,0 @@ -language: go -go: -- 1.5 -- tip -matrix: - allow_failures: - - go: tip diff --git a/vendor/github.com/rs/xhandler/LICENSE b/vendor/github.com/rs/xhandler/LICENSE deleted file mode 100644 index 47c5e9d..0000000 --- a/vendor/github.com/rs/xhandler/LICENSE +++ /dev/null @@ -1,19 +0,0 @@ -Copyright (c) 2015 Olivier Poitrey - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is furnished -to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -THE SOFTWARE. 
diff --git a/vendor/github.com/rs/xhandler/README.md b/vendor/github.com/rs/xhandler/README.md deleted file mode 100644 index 91c594b..0000000 --- a/vendor/github.com/rs/xhandler/README.md +++ /dev/null @@ -1,134 +0,0 @@ -# XHandler - -[![godoc](http://img.shields.io/badge/godoc-reference-blue.svg?style=flat)](https://godoc.org/github.com/rs/xhandler) [![license](http://img.shields.io/badge/license-MIT-red.svg?style=flat)](https://raw.githubusercontent.com/rs/xhandler/master/LICENSE) [![Build Status](https://travis-ci.org/rs/xhandler.svg?branch=master)](https://travis-ci.org/rs/xhandler) [![Coverage](http://gocover.io/_badge/github.com/rs/xhandler)](http://gocover.io/github.com/rs/xhandler) - -XHandler is a bridge between [net/context](https://godoc.org/golang.org/x/net/context) and `http.Handler`. - -It lets you enforce `net/context` in your handlers without sacrificing compatibility with existing `http.Handlers` nor imposing a specific router. - -Thanks to `net/context` deadline management, `xhandler` is able to enforce a per request deadline and will cancel the context when the client closes the connection unexpectedly. - -You may create your own `net/context` aware handler pretty much the same way as you would do with http.Handler. - -Read more about xhandler on [Dailymotion engineering blog](http://engineering.dailymotion.com/our-way-to-go/). 
- -## Installing - - go get -u github.com/rs/xhandler - -## Usage - -```go -package main - -import ( - "log" - "net/http" - "time" - - "github.com/rs/cors" - "github.com/rs/xhandler" - "golang.org/x/net/context" -) - -type myMiddleware struct { - next xhandler.HandlerC -} - -func (h myMiddleware) ServeHTTPC(ctx context.Context, w http.ResponseWriter, r *http.Request) { - ctx = context.WithValue(ctx, "test", "World") - h.next.ServeHTTPC(ctx, w, r) -} - -func main() { - c := xhandler.Chain{} - - // Add close notifier handler so context is cancelled when the client closes - // the connection - c.UseC(xhandler.CloseHandler) - - // Add timeout handler - c.UseC(xhandler.TimeoutHandler(2 * time.Second)) - - // Middleware putting something in the context - c.UseC(func(next xhandler.HandlerC) xhandler.HandlerC { - return myMiddleware{next: next} - }) - - // Mix it with a non-context-aware middleware handler - c.Use(cors.Default().Handler) - - // Final handler (using handlerFuncC), reading from the context - xh := xhandler.HandlerFuncC(func(ctx context.Context, w http.ResponseWriter, r *http.Request) { - value := ctx.Value("test").(string) - w.Write([]byte("Hello " + value)) - }) - - // Bridge context aware handlers with http.Handler using xhandler.Handle() - http.Handle("/test", c.Handler(xh)) - - if err := http.ListenAndServe(":8080", nil); err != nil { - log.Fatal(err) - } -} -``` - -### Using xmux - -Xhandler comes with an optional context aware [muxer](https://github.com/rs/xmux) forked from [httprouter](https://github.com/julienschmidt/httprouter): - -```go -package main - -import ( - "fmt" - "log" - "net/http" - "time" - - "github.com/rs/xhandler" - "github.com/rs/xmux" - "golang.org/x/net/context" -) - -func main() { - c := xhandler.Chain{} - - // Append a context-aware middleware handler - c.UseC(xhandler.CloseHandler) - - // Another context-aware middleware handler - c.UseC(xhandler.TimeoutHandler(2 * time.Second)) - - mux := xmux.New() - - // Use c.Handler to 
terminate the chain with your final handler - mux.GET("/welcome/:name", xhandler.HandlerFuncC(func(ctx context.Context, w http.ResponseWriter, req *http.Request) { - fmt.Fprintf(w, "Welcome %s!", xmux.Params(ctx).Get("name")) - })) - - if err := http.ListenAndServe(":8080", c.Handler(mux)); err != nil { - log.Fatal(err) - } -} -``` - -See [xmux](https://github.com/rs/xmux) for more examples. - -## Context Aware Middleware - -Here is a list of `net/context` aware middleware handlers implementing `xhandler.HandlerC` interface. - -Feel free to put up a PR linking your middleware if you have built one: - -| Middleware | Author | Description | -| ---------- | ------ | ----------- | -| [xmux](https://github.com/rs/xmux) | [Olivier Poitrey](https://github.com/rs) | HTTP request muxer | -| [xlog](https://github.com/rs/xlog) | [Olivier Poitrey](https://github.com/rs) | HTTP handler logger | -| [xstats](https://github.com/rs/xstats) | [Olivier Poitrey](https://github.com/rs) | A generic client for service instrumentation | -| [xaccess](https://github.com/rs/xaccess) | [Olivier Poitrey](https://github.com/rs) | HTTP handler access logger with [xlog](https://github.com/rs/xlog) and [xstats](https://github.com/rs/xstats) | -| [cors](https://github.com/rs/cors) | [Olivier Poitrey](https://github.com/rs) | [Cross Origin Resource Sharing](http://www.w3.org/TR/cors/) (CORS) support | - -## Licenses - -All source code is licensed under the [MIT License](https://raw.github.com/rs/xhandler/master/LICENSE). diff --git a/vendor/github.com/rs/xhandler/chain.go b/vendor/github.com/rs/xhandler/chain.go deleted file mode 100644 index 3e4bd35..0000000 --- a/vendor/github.com/rs/xhandler/chain.go +++ /dev/null @@ -1,121 +0,0 @@ -package xhandler - -import ( - "net/http" - - "golang.org/x/net/context" -) - -// Chain is a helper for chaining middleware handlers together for easier -// management. 
-type Chain []func(next HandlerC) HandlerC - -// Add appends a variable number of additional middleware handlers -// to the middleware chain. Middleware handlers can either be -// context-aware or non-context aware handlers with the appropriate -// function signatures. -func (c *Chain) Add(f ...interface{}) { - for _, h := range f { - switch v := h.(type) { - case func(http.Handler) http.Handler: - c.Use(v) - case func(HandlerC) HandlerC: - c.UseC(v) - default: - panic("Adding invalid handler to the middleware chain") - } - } -} - -// With creates a new middleware chain from an existing chain, -// extending it with additional middleware. Middleware handlers -// can either be context-aware or non-context aware handlers -// with the appropriate function signatures. -func (c *Chain) With(f ...interface{}) *Chain { - n := make(Chain, len(*c)) - copy(n, *c) - n.Add(f...) - return &n -} - -// UseC appends a context-aware handler to the middleware chain. -func (c *Chain) UseC(f func(next HandlerC) HandlerC) { - *c = append(*c, f) -} - -// Use appends a standard http.Handler to the middleware chain without -// losing track of the context when inserted between two context aware handlers. -// -// Caveat: the f function will be called on each request so you are better off putting -// any initialization sequence outside of this function. -func (c *Chain) Use(f func(next http.Handler) http.Handler) { - xf := func(next HandlerC) HandlerC { - return HandlerFuncC(func(ctx context.Context, w http.ResponseWriter, r *http.Request) { - n := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - next.ServeHTTPC(ctx, w, r) - }) - f(n).ServeHTTP(w, r) - }) - } - *c = append(*c, xf) -} - -// Handler wraps the provided final handler with all the middleware appended to -// the chain and returns a new standard http.Handler instance. -// The context.Background() context is injected automatically. 
-func (c Chain) Handler(xh HandlerC) http.Handler { - ctx := context.Background() - return c.HandlerCtx(ctx, xh) -} - -// HandlerFC is a helper to provide a function (HandlerFuncC) to Handler(). -// -// HandlerFC is equivalent to: -// c.Handler(xhandler.HandlerFuncC(xhc)) -func (c Chain) HandlerFC(xhf HandlerFuncC) http.Handler { - ctx := context.Background() - return c.HandlerCtx(ctx, HandlerFuncC(xhf)) -} - -// HandlerH is a helper to provide a standard http handler (http.HandlerFunc) -// to Handler(). Your final handler won't have access to the context though. -func (c Chain) HandlerH(h http.Handler) http.Handler { - ctx := context.Background() - return c.HandlerCtx(ctx, HandlerFuncC(func(ctx context.Context, w http.ResponseWriter, r *http.Request) { - h.ServeHTTP(w, r) - })) -} - -// HandlerF is a helper to provide a standard http handler function -// (http.HandlerFunc) to Handler(). Your final handler won't have access -// to the context though. -func (c Chain) HandlerF(hf http.HandlerFunc) http.Handler { - ctx := context.Background() - return c.HandlerCtx(ctx, HandlerFuncC(func(ctx context.Context, w http.ResponseWriter, r *http.Request) { - hf(w, r) - })) -} - -// HandlerCtx wraps the provided final handler with all the middleware appended to -// the chain and returns a new standard http.Handler instance. -func (c Chain) HandlerCtx(ctx context.Context, xh HandlerC) http.Handler { - return New(ctx, c.HandlerC(xh)) -} - -// HandlerC wraps the provided final handler with all the middleware appended to -// the chain and returns a HandlerC instance. -func (c Chain) HandlerC(xh HandlerC) HandlerC { - for i := len(c) - 1; i >= 0; i-- { - xh = c[i](xh) - } - return xh -} - -// HandlerCF wraps the provided final handler func with all the middleware appended to -// the chain and returns a HandlerC instance. 
-// -// HandlerCF is equivalent to: -// c.HandlerC(xhandler.HandlerFuncC(xhc)) -func (c Chain) HandlerCF(xhc HandlerFuncC) HandlerC { - return c.HandlerC(HandlerFuncC(xhc)) -} diff --git a/vendor/github.com/rs/xhandler/middleware.go b/vendor/github.com/rs/xhandler/middleware.go deleted file mode 100644 index 7ad8fba..0000000 --- a/vendor/github.com/rs/xhandler/middleware.go +++ /dev/null @@ -1,59 +0,0 @@ -package xhandler - -import ( - "net/http" - "time" - - "golang.org/x/net/context" -) - -// CloseHandler returns a Handler, cancelling the context when the client -// connection closes unexpectedly. -func CloseHandler(next HandlerC) HandlerC { - return HandlerFuncC(func(ctx context.Context, w http.ResponseWriter, r *http.Request) { - // Cancel the context if the client closes the connection - if wcn, ok := w.(http.CloseNotifier); ok { - var cancel context.CancelFunc - ctx, cancel = context.WithCancel(ctx) - defer cancel() - - notify := wcn.CloseNotify() - go func() { - select { - case <-notify: - cancel() - case <-ctx.Done(): - } - }() - } - - next.ServeHTTPC(ctx, w, r) - }) -} - -// TimeoutHandler returns a Handler which adds a timeout to the context. -// -// Child handlers have the responsability of obeying the context deadline and to return -// an appropriate error (or not) response in case of timeout. -func TimeoutHandler(timeout time.Duration) func(next HandlerC) HandlerC { - return func(next HandlerC) HandlerC { - return HandlerFuncC(func(ctx context.Context, w http.ResponseWriter, r *http.Request) { - ctx, _ = context.WithTimeout(ctx, timeout) - next.ServeHTTPC(ctx, w, r) - }) - } -} - -// If is a special handler that will skip insert the condNext handler only if a condition -// applies at runtime. 
-func If(cond func(ctx context.Context, w http.ResponseWriter, r *http.Request) bool, condNext func(next HandlerC) HandlerC) func(next HandlerC) HandlerC { - return func(next HandlerC) HandlerC { - return HandlerFuncC(func(ctx context.Context, w http.ResponseWriter, r *http.Request) { - if cond(ctx, w, r) { - condNext(next).ServeHTTPC(ctx, w, r) - } else { - next.ServeHTTPC(ctx, w, r) - } - }) - } -} diff --git a/vendor/github.com/rs/xhandler/xhandler.go b/vendor/github.com/rs/xhandler/xhandler.go deleted file mode 100644 index bc832cb..0000000 --- a/vendor/github.com/rs/xhandler/xhandler.go +++ /dev/null @@ -1,42 +0,0 @@ -// Package xhandler provides a bridge between http.Handler and net/context. -// -// xhandler enforces net/context in your handlers without sacrificing -// compatibility with existing http.Handlers nor imposing a specific router. -// -// Thanks to net/context deadline management, xhandler is able to enforce -// a per request deadline and will cancel the context in when the client close -// the connection unexpectedly. -// -// You may create net/context aware middlewares pretty much the same way as -// you would with http.Handler. -package xhandler // import "github.com/rs/xhandler" - -import ( - "net/http" - - "golang.org/x/net/context" -) - -// HandlerC is a net/context aware http.Handler -type HandlerC interface { - ServeHTTPC(context.Context, http.ResponseWriter, *http.Request) -} - -// HandlerFuncC type is an adapter to allow the use of ordinary functions -// as an xhandler.Handler. If f is a function with the appropriate signature, -// xhandler.HandlerFuncC(f) is a xhandler.Handler object that calls f. -type HandlerFuncC func(context.Context, http.ResponseWriter, *http.Request) - -// ServeHTTPC calls f(ctx, w, r). -func (f HandlerFuncC) ServeHTTPC(ctx context.Context, w http.ResponseWriter, r *http.Request) { - f(ctx, w, r) -} - -// New creates a conventional http.Handler injecting the provided root -// context to sub handlers. 
This handler is used as a bridge between conventional -// http.Handler and context aware handlers. -func New(ctx context.Context, h HandlerC) http.Handler { - return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - h.ServeHTTPC(ctx, w, r) - }) -} diff --git a/vendor/github.com/steakknife/bloomfilter/.travis.yml b/vendor/github.com/steakknife/bloomfilter/.travis.yml deleted file mode 100644 index 73d1238..0000000 --- a/vendor/github.com/steakknife/bloomfilter/.travis.yml +++ /dev/null @@ -1,14 +0,0 @@ -language: go -dist: trusty -sudo: false -go: - - "1.8.x" - - "1.9.x" - - "1.10.x" - - master -before_script: - - "go get -u gopkg.in/alecthomas/gometalinter.v2" - - "gometalinter.v2 --install" -script: - - "go test -v -cover -benchmem -bench=. $(go list ./... | grep -v /vendor/ | sed \"s&_${PWD}&.&\")" - - "gometalinter.v2 --enable-all ./..." diff --git a/vendor/github.com/steakknife/bloomfilter/MIT-LICENSE.txt b/vendor/github.com/steakknife/bloomfilter/MIT-LICENSE.txt deleted file mode 100644 index ccf77fe..0000000 --- a/vendor/github.com/steakknife/bloomfilter/MIT-LICENSE.txt +++ /dev/null @@ -1,8 +0,0 @@ -The MIT License (MIT) -Copyright © 2014, 2015 Barry Allard - -Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the “Software”), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/vendor/github.com/steakknife/bloomfilter/README.md b/vendor/github.com/steakknife/bloomfilter/README.md deleted file mode 100644 index 4587f14..0000000 --- a/vendor/github.com/steakknife/bloomfilter/README.md +++ /dev/null @@ -1,123 +0,0 @@ -**Important**: Zeroth, [consider](https://bdupras.github.io/filter-tutorial/) if a [Cuckoo filter](https://www.cs.cmu.edu/~dga/papers/cuckoo-conext2014.pdf) could be [right for your use-case](https://github.com/seiflotfy/cuckoofilter). - - -[![GoDoc](https://godoc.org/github.com/steakknife/bloomfilter?status.png)](https://godoc.org/github.com/steakknife/bloomfilter) [![travis](https://img.shields.io/travis/steakknife/bloomfilter.svg)](https://travis-ci.org/steakknife/bloomfilter) - -# Face-meltingly fast, thread-safe, marshalable, unionable, probability- and optimal-size-calculating Bloom filter in go - -Copyright © 2014-2016,2018 Barry Allard - -[MIT license](MIT-LICENSE.txt) - -## WTF is a bloom filter - -**TL;DR: **Probabilistic, extra lookup table to track a set of elements kept elsewhere to reduce expensive, unnecessary set element retrieval and/or iterator operations **when an element is not present in the set.** It's a classic time-storage tradeoff algoritm. 
- -### Properties - -#### [See wikipedia](https://en.wikipedia.org/wiki/Bloom_filter) for algorithm details - -|Impact|What|Description| -|---|---|---| -|Good|No false negatives|know for certain if a given element is definitely NOT in the set| -|Bad|False positives|uncertain if a given element is in the set| -|Bad|Theoretical potential for hash collisions|in very large systems and/or badly hash.Hash64-conforming implementations| -|Bad|Add only|Cannot remove an element, it would destroy information about other elements| -|Good|Constant storage|uses only a fixed amount of memory| - -## Naming conventions - -(Similar to algorithm) - -|Variable/function|Description|Range| -|---|---|---| -|m/M()|number of bits in the bloom filter (memory representation is about m/8 bytes in size)|>=2| -|n/N()|number of elements present|>=0| -|k/K()|number of keys to use (keys are kept private to user code but are de/serialized to Marshal and file I/O)|>=0| -|maxN|maximum capacity of intended structure|>0| -|p|maximum allowed probability of collision (for computing m and k for optimal sizing)|>0..<1| - -- Memory representation should be exactly `24 + 8*(k + (m+63)/64) + unsafe.Sizeof(RWMutex)` bytes. -- Serialized (`BinaryMarshaler`) representation should be exactly `72 + 8*(k + (m+63)/64)` bytes. (Disk format is less due to compression.) 
- -## Binary serialization format - -All values in Little-endian format - -|Offset|Offset (Hex)|Length (bytes)|Name|Type| -|---|---|---|---|---| -|0|00|8|k|`uint64`| -|8|08|8|n|`uint64`| -|16|10|8|m|`uint64`| -|24|18|k|(keys)|`[k]uint64`| -|24+8*k|...|(m+63)/64|(bloom filter)|`[(m+63)/64]uint64`| -|24+8\*k+8\*((m+63)/64)|...|48|(SHA384 of all previous fields, hashed in order)|`[48]byte`| - -- `bloomfilter.Filter` conforms to `encoding.BinaryMarshaler` and `encoding.BinaryUnmarshaler' - -## Usage - -```go - -import "github.com/steakknife/bloomfilter" - -const ( - maxElements = 100000 - probCollide = 0.0000001 -) - -bf, err := bloomfilter.NewOptimal(maxElements, probCollide) -if err != nil { - panic(err) -} - -someValue := ... // must conform to hash.Hash64 - -bf.Add(someValue) -if bf.Contains(someValue) { // probably true, could be false - // whatever -} - -anotherValue := ... // must also conform to hash.Hash64 - -if bf.Contains(anotherValue) { - panic("This should never happen") -} - -err := bf.WriteFile("1.bf.gz") // saves this BF to a file -if err != nil { - panic(err) -} - -bf2, err := bloomfilter.ReadFile("1.bf.gz") // read the BF to another var -if err != nil { - panic(err) -} -``` - - -## Design - -Where possible, branch-free operations are used to avoid deep pipeline / execution unit stalls on branch-misses. 
- -## Get - - go get -u github.com/steakknife/bloomfilter # master is always stable - -## Source - -- On the web: [https://github.com/steakknife/bloomfilter](https://github.com/steakknife/bloomfilter) - -- Git: `git clone https://github.com/steakknife/bloomfilter` - -## Contact - -- [Feedback](mailto:barry.allard@gmail.com) - -- [Issues](https://github.com/steakknife/bloomfilter/issues) - -## License - -[MIT license](MIT-LICENSE.txt) - -Copyright © 2014-2016 Barry Allard diff --git a/vendor/github.com/steakknife/bloomfilter/binarymarshaler.go b/vendor/github.com/steakknife/bloomfilter/binarymarshaler.go deleted file mode 100644 index 2fa6692..0000000 --- a/vendor/github.com/steakknife/bloomfilter/binarymarshaler.go +++ /dev/null @@ -1,87 +0,0 @@ -// Package bloomfilter is face-meltingly fast, thread-safe, -// marshalable, unionable, probability- and -// optimal-size-calculating Bloom filter in go -// -// https://github.com/steakknife/bloomfilter -// -// Copyright © 2014, 2015, 2018 Barry Allard -// -// MIT license -// -package bloomfilter - -import ( - "bytes" - "crypto/sha512" - "encoding/binary" -) - -// conforms to encoding.BinaryMarshaler - -// marshalled binary layout (Little Endian): -// -// k 1 uint64 -// n 1 uint64 -// m 1 uint64 -// keys [k]uint64 -// bits [(m+63)/64]uint64 -// hash sha384 (384 bits == 48 bytes) -// -// size = (3 + k + (m+63)/64) * 8 bytes -// - -func (f *Filter) marshal() (buf *bytes.Buffer, - hash [sha512.Size384]byte, - err error, -) { - f.lock.RLock() - defer f.lock.RUnlock() - - debug("write bf k=%d n=%d m=%d\n", f.K(), f.n, f.m) - - buf = new(bytes.Buffer) - - err = binary.Write(buf, binary.LittleEndian, f.K()) - if err != nil { - return nil, hash, err - } - - err = binary.Write(buf, binary.LittleEndian, f.n) - if err != nil { - return nil, hash, err - } - - err = binary.Write(buf, binary.LittleEndian, f.m) - if err != nil { - return nil, hash, err - } - - err = binary.Write(buf, binary.LittleEndian, f.keys) - if err != nil { - 
return nil, hash, err - } - - err = binary.Write(buf, binary.LittleEndian, f.bits) - if err != nil { - return nil, hash, err - } - - hash = sha512.Sum384(buf.Bytes()) - err = binary.Write(buf, binary.LittleEndian, hash) - return buf, hash, err -} - -// MarshalBinary converts a Filter into []bytes -func (f *Filter) MarshalBinary() (data []byte, err error) { - buf, hash, err := f.marshal() - if err != nil { - return nil, err - } - - debug( - "bloomfilter.MarshalBinary: Successfully wrote %d byte(s), sha384 %v", - buf.Len(), hash, - ) - data = buf.Bytes() - return data, nil -} diff --git a/vendor/github.com/steakknife/bloomfilter/binaryunmarshaler.go b/vendor/github.com/steakknife/bloomfilter/binaryunmarshaler.go deleted file mode 100644 index 5be1670..0000000 --- a/vendor/github.com/steakknife/bloomfilter/binaryunmarshaler.go +++ /dev/null @@ -1,111 +0,0 @@ -// Package bloomfilter is face-meltingly fast, thread-safe, -// marshalable, unionable, probability- and -// optimal-size-calculating Bloom filter in go -// -// https://github.com/steakknife/bloomfilter -// -// Copyright © 2014, 2015, 2018 Barry Allard -// -// MIT license -// -package bloomfilter - -import ( - "bytes" - "crypto/hmac" - "crypto/sha512" - "encoding/binary" - "io" -) - -func unmarshalBinaryHeader(r io.Reader) (k, n, m uint64, err error) { - err = binary.Read(r, binary.LittleEndian, &k) - if err != nil { - return k, n, m, err - } - - if k < KMin { - return k, n, m, errK() - } - - err = binary.Read(r, binary.LittleEndian, &n) - if err != nil { - return k, n, m, err - } - - err = binary.Read(r, binary.LittleEndian, &m) - if err != nil { - return k, n, m, err - } - - if m < MMin { - return k, n, m, errM() - } - - debug("read bf k=%d n=%d m=%d\n", k, n, m) - - return k, n, m, err -} - -func unmarshalBinaryBits(r io.Reader, m uint64) (bits []uint64, err error) { - bits, err = newBits(m) - if err != nil { - return bits, err - } - err = binary.Read(r, binary.LittleEndian, bits) - return bits, err - -} - 
-func unmarshalBinaryKeys(r io.Reader, k uint64) (keys []uint64, err error) { - keys = make([]uint64, k) - err = binary.Read(r, binary.LittleEndian, keys) - return keys, err -} - -func checkBinaryHash(r io.Reader, data []byte) (err error) { - expectedHash := make([]byte, sha512.Size384) - err = binary.Read(r, binary.LittleEndian, expectedHash) - if err != nil { - return err - } - - actualHash := sha512.Sum384(data[:len(data)-sha512.Size384]) - - if !hmac.Equal(expectedHash, actualHash[:]) { - debug("bloomfilter.UnmarshalBinary() sha384 hash failed:"+ - " actual %v expected %v", actualHash, expectedHash) - return errHash() - } - - debug("bloomfilter.UnmarshalBinary() successfully read"+ - " %d byte(s), sha384 %v", len(data), actualHash) - return nil -} - -// UnmarshalBinary converts []bytes into a Filter -// conforms to encoding.BinaryUnmarshaler -func (f *Filter) UnmarshalBinary(data []byte) (err error) { - f.lock.Lock() - defer f.lock.Unlock() - - buf := bytes.NewBuffer(data) - - var k uint64 - k, f.n, f.m, err = unmarshalBinaryHeader(buf) - if err != nil { - return err - } - - f.keys, err = unmarshalBinaryKeys(buf, k) - if err != nil { - return err - } - - f.bits, err = unmarshalBinaryBits(buf, f.m) - if err != nil { - return err - } - - return checkBinaryHash(buf, data) -} diff --git a/vendor/github.com/steakknife/bloomfilter/bloomfilter.go b/vendor/github.com/steakknife/bloomfilter/bloomfilter.go deleted file mode 100644 index 8225063..0000000 --- a/vendor/github.com/steakknife/bloomfilter/bloomfilter.go +++ /dev/null @@ -1,123 +0,0 @@ -// Package bloomfilter is face-meltingly fast, thread-safe, -// marshalable, unionable, probability- and -// optimal-size-calculating Bloom filter in go -// -// https://github.com/steakknife/bloomfilter -// -// Copyright © 2014, 2015, 2018 Barry Allard -// -// MIT license -// -package bloomfilter - -import ( - "hash" - "sync" -) - -// Filter is an opaque Bloom filter type -type Filter struct { - lock sync.RWMutex - bits []uint64 
- keys []uint64 - m uint64 // number of bits the "bits" field should recognize - n uint64 // number of inserted elements -} - -// Hashable -> hashes -func (f *Filter) hash(v hash.Hash64) []uint64 { - rawHash := v.Sum64() - n := len(f.keys) - hashes := make([]uint64, n) - for i := 0; i < n; i++ { - hashes[i] = rawHash ^ f.keys[i] - } - return hashes -} - -// M is the size of Bloom filter, in bits -func (f *Filter) M() uint64 { - return f.m -} - -// K is the count of keys -func (f *Filter) K() uint64 { - return uint64(len(f.keys)) -} - -// Add a hashable item, v, to the filter -func (f *Filter) Add(v hash.Hash64) { - f.lock.Lock() - defer f.lock.Unlock() - - for _, i := range f.hash(v) { - // f.setBit(i) - i %= f.m - f.bits[i>>6] |= 1 << uint(i&0x3f) - } - f.n++ -} - -// Contains tests if f contains v -// false: f definitely does not contain value v -// true: f maybe contains value v -func (f *Filter) Contains(v hash.Hash64) bool { - f.lock.RLock() - defer f.lock.RUnlock() - - r := uint64(1) - for _, i := range f.hash(v) { - // r |= f.getBit(k) - i %= f.m - r &= (f.bits[i>>6] >> uint(i&0x3f)) & 1 - } - return uint64ToBool(r) -} - -// Copy f to a new Bloom filter -func (f *Filter) Copy() (*Filter, error) { - f.lock.RLock() - defer f.lock.RUnlock() - - out, err := f.NewCompatible() - if err != nil { - return nil, err - } - copy(out.bits, f.bits) - out.n = f.n - return out, nil -} - -// UnionInPlace merges Bloom filter f2 into f -func (f *Filter) UnionInPlace(f2 *Filter) error { - if !f.IsCompatible(f2) { - return errIncompatibleBloomFilters() - } - - f.lock.Lock() - defer f.lock.Unlock() - - for i, bitword := range f2.bits { - f.bits[i] |= bitword - } - return nil -} - -// Union merges f2 and f2 into a new Filter out -func (f *Filter) Union(f2 *Filter) (out *Filter, err error) { - if !f.IsCompatible(f2) { - return nil, errIncompatibleBloomFilters() - } - - f.lock.RLock() - defer f.lock.RUnlock() - - out, err = f.NewCompatible() - if err != nil { - return nil, err - } - 
for i, bitword := range f2.bits { - out.bits[i] = f.bits[i] | bitword - } - return out, nil -} diff --git a/vendor/github.com/steakknife/bloomfilter/conformance.go b/vendor/github.com/steakknife/bloomfilter/conformance.go deleted file mode 100644 index 2963686..0000000 --- a/vendor/github.com/steakknife/bloomfilter/conformance.go +++ /dev/null @@ -1,29 +0,0 @@ -// Package bloomfilter is face-meltingly fast, thread-safe, -// marshalable, unionable, probability- and -// optimal-size-calculating Bloom filter in go -// -// https://github.com/steakknife/bloomfilter -// -// Copyright © 2014, 2015, 2018 Barry Allard -// -// MIT license -// -package bloomfilter - -import ( - "encoding" - "encoding/gob" - "io" -) - -// compile-time conformance tests -var ( - _ encoding.BinaryMarshaler = (*Filter)(nil) - _ encoding.BinaryUnmarshaler = (*Filter)(nil) - _ encoding.TextMarshaler = (*Filter)(nil) - _ encoding.TextUnmarshaler = (*Filter)(nil) - _ io.ReaderFrom = (*Filter)(nil) - _ io.WriterTo = (*Filter)(nil) - _ gob.GobDecoder = (*Filter)(nil) - _ gob.GobEncoder = (*Filter)(nil) -) diff --git a/vendor/github.com/steakknife/bloomfilter/debug.go b/vendor/github.com/steakknife/bloomfilter/debug.go deleted file mode 100644 index e88b934..0000000 --- a/vendor/github.com/steakknife/bloomfilter/debug.go +++ /dev/null @@ -1,37 +0,0 @@ -// Package bloomfilter is face-meltingly fast, thread-safe, -// marshalable, unionable, probability- and -// optimal-size-calculating Bloom filter in go -// -// https://github.com/steakknife/bloomfilter -// -// Copyright © 2014, 2015, 2018 Barry Allard -// -// MIT license -// -package bloomfilter - -import ( - "log" - "os" -) - -const debugVar = "GOLANG_STEAKKNIFE_BLOOMFILTER_DEBUG" - -// EnableDebugging permits debug() logging of details to stderr -func EnableDebugging() { - err := os.Setenv(debugVar, "1") - if err != nil { - panic("Unable to Setenv " + debugVar) - } -} - -func debugging() bool { - return os.Getenv(debugVar) != "" -} - -// debug printing 
when debugging() is true -func debug(format string, a ...interface{}) { - if debugging() { - log.Printf(format, a...) - } -} diff --git a/vendor/github.com/steakknife/bloomfilter/errors.go b/vendor/github.com/steakknife/bloomfilter/errors.go deleted file mode 100644 index b279739..0000000 --- a/vendor/github.com/steakknife/bloomfilter/errors.go +++ /dev/null @@ -1,34 +0,0 @@ -// Package bloomfilter is face-meltingly fast, thread-safe, -// marshalable, unionable, probability- and -// optimal-size-calculating Bloom filter in go -// -// https://github.com/steakknife/bloomfilter -// -// Copyright © 2014, 2015, 2018 Barry Allard -// -// MIT license -// -package bloomfilter - -import "fmt" - -func errHash() error { - return fmt.Errorf( - "Hash mismatch, the Bloom filter is probably corrupt") -} -func errK() error { - return fmt.Errorf( - "keys must have length %d or greater", KMin) -} -func errM() error { - return fmt.Errorf( - "m (number of bits in the Bloom filter) must be >= %d", MMin) -} -func errUniqueKeys() error { - return fmt.Errorf( - "Bloom filter keys must be unique") -} -func errIncompatibleBloomFilters() error { - return fmt.Errorf( - "Cannot perform union on two incompatible Bloom filters") -} diff --git a/vendor/github.com/steakknife/bloomfilter/fileio.go b/vendor/github.com/steakknife/bloomfilter/fileio.go deleted file mode 100644 index a479699..0000000 --- a/vendor/github.com/steakknife/bloomfilter/fileio.go +++ /dev/null @@ -1,105 +0,0 @@ -// Package bloomfilter is face-meltingly fast, thread-safe, -// marshalable, unionable, probability- and -// optimal-size-calculating Bloom filter in go -// -// https://github.com/steakknife/bloomfilter -// -// Copyright © 2014, 2015, 2018 Barry Allard -// -// MIT license -// -package bloomfilter - -import ( - "compress/gzip" - "io" - "io/ioutil" - "os" -) - -// ReadFrom r and overwrite f with new Bloom filter data -func (f *Filter) ReadFrom(r io.Reader) (n int64, err error) { - f2, n, err := ReadFrom(r) - if err != 
nil { - return -1, err - } - f.lock.Lock() - defer f.lock.Unlock() - f.m = f2.m - f.n = f2.n - f.bits = f2.bits - f.keys = f2.keys - return n, nil -} - -// ReadFrom Reader r into a lossless-compressed Bloom filter f -func ReadFrom(r io.Reader) (f *Filter, n int64, err error) { - rawR, err := gzip.NewReader(r) - if err != nil { - return nil, -1, err - } - defer func() { - err = rawR.Close() - }() - - content, err := ioutil.ReadAll(rawR) - if err != nil { - return nil, -1, err - } - - f = new(Filter) - n = int64(len(content)) - err = f.UnmarshalBinary(content) - if err != nil { - return nil, -1, err - } - return f, n, nil -} - -// ReadFile from filename into a lossless-compressed Bloom Filter f -// Suggested file extension: .bf.gz -func ReadFile(filename string) (f *Filter, n int64, err error) { - r, err := os.Open(filename) - if err != nil { - return nil, -1, err - } - defer func() { - err = r.Close() - }() - - return ReadFrom(r) -} - -// WriteTo a Writer w from lossless-compressed Bloom Filter f -func (f *Filter) WriteTo(w io.Writer) (n int64, err error) { - f.lock.RLock() - defer f.lock.RUnlock() - - rawW := gzip.NewWriter(w) - defer func() { - err = rawW.Close() - }() - - content, err := f.MarshalBinary() - if err != nil { - return -1, err - } - - intN, err := rawW.Write(content) - n = int64(intN) - return n, err -} - -// WriteFile filename from a a lossless-compressed Bloom Filter f -// Suggested file extension: .bf.gz -func (f *Filter) WriteFile(filename string) (n int64, err error) { - w, err := os.Create(filename) - if err != nil { - return -1, err - } - defer func() { - err = w.Close() - }() - - return f.WriteTo(w) -} diff --git a/vendor/github.com/steakknife/bloomfilter/gob.go b/vendor/github.com/steakknife/bloomfilter/gob.go deleted file mode 100644 index 0d99e55..0000000 --- a/vendor/github.com/steakknife/bloomfilter/gob.go +++ /dev/null @@ -1,23 +0,0 @@ -// Package bloomfilter is face-meltingly fast, thread-safe, -// marshalable, unionable, probability- 
and -// optimal-size-calculating Bloom filter in go -// -// https://github.com/steakknife/bloomfilter -// -// Copyright © 2014, 2015, 2018 Barry Allard -// -// MIT license -// -package bloomfilter - -import _ "encoding/gob" // make sure gob is available - -// GobDecode conforms to interface gob.GobDecoder -func (f *Filter) GobDecode(data []byte) error { - return f.UnmarshalBinary(data) -} - -// GobEncode conforms to interface gob.GobEncoder -func (f *Filter) GobEncode() ([]byte, error) { - return f.MarshalBinary() -} diff --git a/vendor/github.com/steakknife/bloomfilter/iscompatible.go b/vendor/github.com/steakknife/bloomfilter/iscompatible.go deleted file mode 100644 index 2073d80..0000000 --- a/vendor/github.com/steakknife/bloomfilter/iscompatible.go +++ /dev/null @@ -1,41 +0,0 @@ -// Package bloomfilter is face-meltingly fast, thread-safe, -// marshalable, unionable, probability- and -// optimal-size-calculating Bloom filter in go -// -// https://github.com/steakknife/bloomfilter -// -// Copyright © 2014, 2015, 2018 Barry Allard -// -// MIT license -// -package bloomfilter - -import "unsafe" - -func uint64ToBool(x uint64) bool { - return *(*bool)(unsafe.Pointer(&x)) // #nosec -} - -// returns 0 if equal, does not compare len(b0) with len(b1) -func noBranchCompareUint64s(b0, b1 []uint64) uint64 { - r := uint64(0) - for i, b0i := range b0 { - r |= b0i ^ b1[i] - } - return r -} - -// IsCompatible is true if f and f2 can be Union()ed together -func (f *Filter) IsCompatible(f2 *Filter) bool { - f.lock.RLock() - defer f.lock.RUnlock() - - f.lock.RLock() - defer f2.lock.RUnlock() - - // 0 is true, non-0 is false - compat := f.M() ^ f2.M() - compat |= f.K() ^ f2.K() - compat |= noBranchCompareUint64s(f.keys, f2.keys) - return uint64ToBool(^compat) -} diff --git a/vendor/github.com/steakknife/bloomfilter/new.go b/vendor/github.com/steakknife/bloomfilter/new.go deleted file mode 100644 index bf4323a..0000000 --- a/vendor/github.com/steakknife/bloomfilter/new.go +++ 
/dev/null @@ -1,134 +0,0 @@ -// Package bloomfilter is face-meltingly fast, thread-safe, -// marshalable, unionable, probability- and -// optimal-size-calculating Bloom filter in go -// -// https://github.com/steakknife/bloomfilter -// -// Copyright © 2014, 2015, 2018 Barry Allard -// -// MIT license -// -package bloomfilter - -import ( - "crypto/rand" - "encoding/binary" - "log" -) - -const ( - // MMin is the minimum Bloom filter bits count - MMin = 2 - // KMin is the minimum number of keys - KMin = 1 - // Uint64Bytes is the number of bytes in type uint64 - Uint64Bytes = 8 -) - -// New Filter with CSPRNG keys -// -// m is the size of the Bloom filter, in bits, >= 2 -// -// k is the number of random keys, >= 1 -func New(m, k uint64) (*Filter, error) { - return NewWithKeys(m, newRandKeys(k)) -} - -func newRandKeys(k uint64) []uint64 { - keys := make([]uint64, k) - err := binary.Read(rand.Reader, binary.LittleEndian, keys) - if err != nil { - log.Panicf( - "Cannot read %d bytes from CSRPNG crypto/rand.Read (err=%v)", - Uint64Bytes, err, - ) - } - return keys -} - -// NewCompatible Filter compatible with f -func (f *Filter) NewCompatible() (*Filter, error) { - return NewWithKeys(f.m, f.keys) -} - -// NewOptimal Bloom filter with random CSPRNG keys -func NewOptimal(maxN uint64, p float64) (*Filter, error) { - m := OptimalM(maxN, p) - k := OptimalK(m, maxN) - debug("New optimal bloom filter ::"+ - " requested max elements (n):%d,"+ - " probability of collision (p):%1.10f "+ - "-> recommends -> bits (m): %d (%f GiB), "+ - "number of keys (k): %d", - maxN, p, m, float64(m)/(gigabitsPerGiB), k) - return New(m, k) -} - -// UniqueKeys is true if all keys are unique -func UniqueKeys(keys []uint64) bool { - for j := 0; j < len(keys)-1; j++ { - elem := keys[j] - for i := 1; i < j; i++ { - if keys[i] == elem { - return false - } - } - } - return true -} - -// NewWithKeys creates a new Filter from user-supplied origKeys -func NewWithKeys(m uint64, origKeys []uint64) (f *Filter, 
err error) { - bits, err := newBits(m) - if err != nil { - return nil, err - } - keys, err := newKeysCopy(origKeys) - if err != nil { - return nil, err - } - return &Filter{ - m: m, - n: 0, - bits: bits, - keys: keys, - }, nil -} - -func newBits(m uint64) ([]uint64, error) { - if m < MMin { - return nil, errM() - } - return make([]uint64, (m+63)/64), nil -} - -func newKeysBlank(k uint64) ([]uint64, error) { - if k < KMin { - return nil, errK() - } - return make([]uint64, k), nil -} - -func newKeysCopy(origKeys []uint64) (keys []uint64, err error) { - if !UniqueKeys(origKeys) { - return nil, errUniqueKeys() - } - keys, err = newKeysBlank(uint64(len(origKeys))) - if err != nil { - return keys, err - } - copy(keys, origKeys) - return keys, err -} - -func newWithKeysAndBits(m uint64, keys []uint64, bits []uint64, n uint64) ( - f *Filter, err error, -) { - f, err = NewWithKeys(m, keys) - if err != nil { - return nil, err - } - copy(f.bits, bits) - f.n = n - return f, nil -} diff --git a/vendor/github.com/steakknife/bloomfilter/optimal.go b/vendor/github.com/steakknife/bloomfilter/optimal.go deleted file mode 100644 index a836143..0000000 --- a/vendor/github.com/steakknife/bloomfilter/optimal.go +++ /dev/null @@ -1,28 +0,0 @@ -// Package bloomfilter is face-meltingly fast, thread-safe, -// marshalable, unionable, probability- and -// optimal-size-calculating Bloom filter in go -// -// https://github.com/steakknife/bloomfilter -// -// Copyright © 2014, 2015, 2018 Barry Allard -// -// MIT license -// -package bloomfilter - -import "math" - -const gigabitsPerGiB float64 = 8.0 * 1024 * 1024 * 1024 - -// OptimalK calculates the optimal k value for creating a new Bloom filter -// maxn is the maximum anticipated number of elements -func OptimalK(m, maxN uint64) uint64 { - return uint64(math.Ceil(float64(m) * math.Ln2 / float64(maxN))) -} - -// OptimalM calculates the optimal m value for creating a new Bloom filter -// p is the desired false positive probability -// optimal m = 
ceiling( - n * ln(p) / ln(2)**2 ) -func OptimalM(maxN uint64, p float64) uint64 { - return uint64(math.Ceil(-float64(maxN) * math.Log(p) / (math.Ln2 * math.Ln2))) -} diff --git a/vendor/github.com/steakknife/bloomfilter/statistics.go b/vendor/github.com/steakknife/bloomfilter/statistics.go deleted file mode 100644 index fe50ffa..0000000 --- a/vendor/github.com/steakknife/bloomfilter/statistics.go +++ /dev/null @@ -1,43 +0,0 @@ -// Package bloomfilter is face-meltingly fast, thread-safe, -// marshalable, unionable, probability- and -// optimal-size-calculating Bloom filter in go -// -// https://github.com/steakknife/bloomfilter -// -// Copyright © 2014, 2015, 2018 Barry Allard -// -// MIT license -// -package bloomfilter - -import ( - "math" - - "github.com/steakknife/hamming" -) - -// PreciseFilledRatio is an exhaustive count # of 1's -func (f *Filter) PreciseFilledRatio() float64 { - f.lock.RLock() - defer f.lock.RUnlock() - - return float64(hamming.CountBitsUint64s(f.bits)) / float64(f.M()) -} - -// N is how many elements have been inserted -// (actually, how many Add()s have been performed?) 
-func (f *Filter) N() uint64 { - f.lock.RLock() - defer f.lock.RUnlock() - - return f.n -} - -// FalsePosititveProbability is the upper-bound probability of false positives -// (1 - exp(-k*(n+0.5)/(m-1))) ** k -func (f *Filter) FalsePosititveProbability() float64 { - k := float64(f.K()) - n := float64(f.N()) - m := float64(f.M()) - return math.Pow(1.0-math.Exp(-k)*(n+0.5)/(m-1), k) -} diff --git a/vendor/github.com/steakknife/bloomfilter/textmarshaler.go b/vendor/github.com/steakknife/bloomfilter/textmarshaler.go deleted file mode 100644 index 7ed08eb..0000000 --- a/vendor/github.com/steakknife/bloomfilter/textmarshaler.go +++ /dev/null @@ -1,49 +0,0 @@ -// Package bloomfilter is face-meltingly fast, thread-safe, -// marshalable, unionable, probability- and -// optimal-size-calculating Bloom filter in go -// -// https://github.com/steakknife/bloomfilter -// -// Copyright © 2014, 2015, 2018 Barry Allard -// -// MIT license -// -package bloomfilter - -import "fmt" - -// MarshalText conforms to encoding.TextMarshaler -func (f *Filter) MarshalText() (text []byte, err error) { - f.lock.RLock() - defer f.lock.RUnlock() - - s := fmt.Sprintln("k") - s += fmt.Sprintln(f.K()) - s += fmt.Sprintln("n") - s += fmt.Sprintln(f.n) - s += fmt.Sprintln("m") - s += fmt.Sprintln(f.m) - - s += fmt.Sprintln("keys") - for key := range f.keys { - s += fmt.Sprintf(keyFormat, key) + nl() - } - - s += fmt.Sprintln("bits") - for w := range f.bits { - s += fmt.Sprintf(bitsFormat, w) + nl() - } - - _, hash, err := f.marshal() - if err != nil { - return nil, err - } - s += fmt.Sprintln("sha384") - for b := range hash { - s += fmt.Sprintf("%02x", b) - } - s += nl() - - text = []byte(s) - return text, nil -} diff --git a/vendor/github.com/steakknife/bloomfilter/textunmarshaler.go b/vendor/github.com/steakknife/bloomfilter/textunmarshaler.go deleted file mode 100644 index 93240a1..0000000 --- a/vendor/github.com/steakknife/bloomfilter/textunmarshaler.go +++ /dev/null @@ -1,150 +0,0 @@ -// Package 
bloomfilter is face-meltingly fast, thread-safe, -// marshalable, unionable, probability- and -// optimal-size-calculating Bloom filter in go -// -// https://github.com/steakknife/bloomfilter -// -// Copyright © 2014, 2015, 2018 Barry Allard -// -// MIT license -// -package bloomfilter - -import ( - "bytes" - "crypto/hmac" - "crypto/sha512" - "fmt" - "io" -) - -const ( - keyFormat = "%016x" - bitsFormat = "%016x" -) - -func nl() string { - return fmt.Sprintln() -} - -func unmarshalTextHeader(r io.Reader) (k, n, m uint64, err error) { - format := "k" + nl() + "%d" + nl() - format += "n" + nl() + "%d" + nl() - format += "m" + nl() + "%d" + nl() - format += "keys" + nl() - - _, err = fmt.Fscanf(r, format, k, n, m) - return k, n, m, err -} - -func unmarshalTextKeys(r io.Reader, keys []uint64) (err error) { - for i := range keys { - _, err = fmt.Fscanf(r, keyFormat, keys[i]) - if err != nil { - return err - } - } - return nil -} - -func unmarshalTextBits(r io.Reader, bits []uint64) (err error) { - _, err = fmt.Fscanf(r, "bits") - if err != nil { - return err - } - - for i := range bits { - _, err = fmt.Fscanf(r, bitsFormat, bits[i]) - if err != nil { - return err - } - } - - return nil -} - -func unmarshalAndCheckTextHash(r io.Reader, f *Filter) (err error) { - _, err = fmt.Fscanf(r, "sha384") - if err != nil { - return err - } - - actualHash := [sha512.Size384]byte{} - - for i := range actualHash { - _, err = fmt.Fscanf(r, "%02x", actualHash[i]) - if err != nil { - return err - } - } - - _, expectedHash, err := f.marshal() - if err != nil { - return err - } - - if !hmac.Equal(expectedHash[:], actualHash[:]) { - return errHash() - } - - return nil -} - -// UnmarshalText conforms to TextUnmarshaler -func UnmarshalText(text []byte) (f *Filter, err error) { - r := bytes.NewBuffer(text) - k, n, m, err := unmarshalTextHeader(r) - if err != nil { - return nil, err - } - - keys, err := newKeysBlank(k) - if err != nil { - return nil, err - } - - err = unmarshalTextKeys(r, keys) 
- if err != nil { - return nil, err - } - - bits, err := newBits(m) - if err != nil { - return nil, err - } - - err = unmarshalTextBits(r, bits) - if err != nil { - return nil, err - } - - f, err = newWithKeysAndBits(m, keys, bits, n) - if err != nil { - return nil, err - } - - err = unmarshalAndCheckTextHash(r, f) - if err != nil { - return nil, err - } - - return f, nil -} - -// UnmarshalText method overwrites f with data decoded from text -func (f *Filter) UnmarshalText(text []byte) error { - f.lock.Lock() - defer f.lock.Unlock() - - f2, err := UnmarshalText(text) - if err != nil { - return err - } - - f.m = f2.m - f.n = f2.n - copy(f.bits, f2.bits) - copy(f.keys, f2.keys) - - return nil -} diff --git a/vendor/github.com/steakknife/hamming/.gitignore b/vendor/github.com/steakknife/hamming/.gitignore deleted file mode 100644 index 928a2c6..0000000 --- a/vendor/github.com/steakknife/hamming/.gitignore +++ /dev/null @@ -1,2 +0,0 @@ -*.o -/coverage.out diff --git a/vendor/github.com/steakknife/hamming/.travis.yml b/vendor/github.com/steakknife/hamming/.travis.yml deleted file mode 100644 index 7381212..0000000 --- a/vendor/github.com/steakknife/hamming/.travis.yml +++ /dev/null @@ -1,14 +0,0 @@ -language: go -dist: trusty -sudo: false -go: - - "1.8.x" - - "1.9.x" - - "1.10.x" - - master -before_script: - - "go get -u gopkg.in/alecthomas/gometalinter.v2" - - "gometalinter.v2 --install" -script: - - "go test -v -cover -benchmem -bench=. $(go list ./... | grep -v /vendor/ | sed \"s&_${PWD}&.&\")" - - "gometalinter.v2 --enable-all --exclude=dupl ./..." 
diff --git a/vendor/github.com/steakknife/hamming/MIT-LICENSE.txt b/vendor/github.com/steakknife/hamming/MIT-LICENSE.txt deleted file mode 100644 index 924f4c0..0000000 --- a/vendor/github.com/steakknife/hamming/MIT-LICENSE.txt +++ /dev/null @@ -1,8 +0,0 @@ -The MIT License (MIT) -Copyright © 2014, 2015, 2016 Barry Allard - -Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the “Software”), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
diff --git a/vendor/github.com/steakknife/hamming/README.md b/vendor/github.com/steakknife/hamming/README.md deleted file mode 100644 index 23f69bd..0000000 --- a/vendor/github.com/steakknife/hamming/README.md +++ /dev/null @@ -1,82 +0,0 @@ -[![GoDoc](https://godoc.org/github.com/steakknife/hamming?status.png)](https://godoc.org/github.com/steakknife/hamming) [![Build Status](https://travis-ci.org/steakknife/hamming.svg?branch=master)](https://travis-ci.org/steakknife/hamming) - - -# hamming distance calculations in Go - -Copyright © 2014, 2015, 2016, 2018 Barry Allard - -[MIT license](MIT-LICENSE.txt) - -## Performance - -``` -$ go test -bench=. -BenchmarkCountBitsInt8PopCnt-4 300000000 4.30 ns/op -BenchmarkCountBitsInt16PopCnt-4 300000000 3.83 ns/op -BenchmarkCountBitsInt32PopCnt-4 300000000 3.64 ns/op -BenchmarkCountBitsInt64PopCnt-4 500000000 3.60 ns/op -BenchmarkCountBitsIntPopCnt-4 300000000 5.72 ns/op -BenchmarkCountBitsUint8PopCnt-4 1000000000 2.98 ns/op -BenchmarkCountBitsUint16PopCnt-4 500000000 3.23 ns/op -BenchmarkCountBitsUint32PopCnt-4 500000000 3.00 ns/op -BenchmarkCountBitsUint64PopCnt-4 1000000000 2.94 ns/op -BenchmarkCountBitsUintPopCnt-4 300000000 5.04 ns/op -BenchmarkCountBitsBytePopCnt-4 300000000 3.99 ns/op -BenchmarkCountBitsRunePopCnt-4 300000000 3.83 ns/op -BenchmarkCountBitsInt8-4 2000000000 0.74 ns/op -BenchmarkCountBitsInt16-4 2000000000 1.54 ns/op -BenchmarkCountBitsInt32-4 1000000000 2.63 ns/op -BenchmarkCountBitsInt64-4 1000000000 2.56 ns/op -BenchmarkCountBitsInt-4 200000000 7.23 ns/op -BenchmarkCountBitsUint16-4 2000000000 1.51 ns/op -BenchmarkCountBitsUint32-4 500000000 4.00 ns/op -BenchmarkCountBitsUint64-4 1000000000 2.64 ns/op -BenchmarkCountBitsUint64Alt-4 200000000 7.60 ns/op -BenchmarkCountBitsUint-4 300000000 5.48 ns/op -BenchmarkCountBitsUintReference-4 100000000 19.2 ns/op -BenchmarkCountBitsByte-4 2000000000 0.75 ns/op -BenchmarkCountBitsByteAlt-4 1000000000 2.37 ns/op -BenchmarkCountBitsRune-4 500000000 2.85 ns/op -PASS 
-ok _/Users/bmf/Projects/hamming 58.305s -$ -``` - -## Usage - -```go -import 'github.com/steakknife/hamming' - -// ... - -// hamming distance between values -hamming.Byte(0xFF, 0x00) // 8 -hamming.Byte(0x00, 0x00) // 0 - -// just count bits in a byte -hamming.CountBitsByte(0xA5), // 4 -``` - -See help in the [docs](https://godoc.org/github.com/steakknife/hamming) - -## Get - - go get -u github.com/steakknife/hamming # master is always stable - -## Source - -- On the web: https://github.com/steakknife/hamming - -- Git: `git clone https://github.com/steakknife/hamming` - -## Contact - -- [Feedback](mailto:barry.allard@gmail.com) - -- [Issues](https://github.com/steakknife/hamming/issues) - -## License - -[MIT license](MIT-LICENSE.txt) - -Copyright © 2014, 2015, 2016 Barry Allard diff --git a/vendor/github.com/steakknife/hamming/doc.go b/vendor/github.com/steakknife/hamming/doc.go deleted file mode 100644 index 179e29d..0000000 --- a/vendor/github.com/steakknife/hamming/doc.go +++ /dev/null @@ -1,35 +0,0 @@ -// -// Package hamming distance calculations in Go -// -// https://github.com/steakknife/hamming -// -// Copyright © 2014, 2015, 2016, 2018 Barry Allard -// -// MIT license -// -// -// Usage -// -// For functions named CountBits.+s?. The plural forms are for slices. -// The CountBits.+ forms are Population Count only, where the bare-type -// forms are Hamming distance (number of bits different) between two values. -// -// Optimized assembly .+PopCnt forms are available on amd64, and operate just -// like the regular forms (Must check and guard on HasPopCnt() first before -// trying to call .+PopCnt functions). -// -// import 'github.com/steakknife/hamming' -// -// // ... -// -// // hamming distance between values -// hamming.Byte(0xFF, 0x00) // 8 -// hamming.Byte(0x00, 0x00) // 0 -// -// // just count bits in a byte -// hamming.CountBitsByte(0xA5), // 4 -// -// Got rune? use int32 -// Got uint8? 
use byte -// -package hamming diff --git a/vendor/github.com/steakknife/hamming/hamming.go b/vendor/github.com/steakknife/hamming/hamming.go deleted file mode 100644 index 269e91a..0000000 --- a/vendor/github.com/steakknife/hamming/hamming.go +++ /dev/null @@ -1,70 +0,0 @@ -// -// Package hamming distance calculations in Go -// -// https://github.com/steakknife/hamming -// -// Copyright © 2014, 2015, 2016, 2018 Barry Allard -// -// MIT license -// -package hamming - -// Int8 hamming distance of two int8's -func Int8(x, y int8) int { - return CountBitsInt8(x ^ y) -} - -// Int16 hamming distance of two int16's -func Int16(x, y int16) int { - return CountBitsInt16(x ^ y) -} - -// Int32 hamming distance of two int32's -func Int32(x, y int32) int { - return CountBitsInt32(x ^ y) -} - -// Int64 hamming distance of two int64's -func Int64(x, y int64) int { - return CountBitsInt64(x ^ y) -} - -// Int hamming distance of two ints -func Int(x, y int) int { - return CountBitsInt(x ^ y) -} - -// Uint8 hamming distance of two uint8's -func Uint8(x, y uint8) int { - return CountBitsUint8(x ^ y) -} - -// Uint16 hamming distance of two uint16's -func Uint16(x, y uint16) int { - return CountBitsUint16(x ^ y) -} - -// Uint32 hamming distance of two uint32's -func Uint32(x, y uint32) int { - return CountBitsUint32(x ^ y) -} - -// Uint64 hamming distance of two uint64's -func Uint64(x, y uint64) int { - return CountBitsUint64(x ^ y) -} - -// Uint hamming distance of two uint's -func Uint(x, y uint) int { - return CountBitsUint(x ^ y) -} - -// Byte hamming distance of two bytes -func Byte(x, y byte) int { - return CountBitsByte(x ^ y) -} - -// Rune hamming distance of two runes -func Rune(x, y rune) int { - return CountBitsRune(x ^ y) -} diff --git a/vendor/github.com/steakknife/hamming/popcnt_amd64.go b/vendor/github.com/steakknife/hamming/popcnt_amd64.go deleted file mode 100644 index a1a6d92..0000000 --- a/vendor/github.com/steakknife/hamming/popcnt_amd64.go +++ /dev/null @@ -1,65 
+0,0 @@ -// -// Package hamming distance calculations in Go -// -// https://github.com/steakknife/hamming -// -// Copyright © 2014, 2015, 2016, 2018 Barry Allard -// -// MIT license -// -package hamming - -import "strconv" - -// HasPopCnt returns true if *PopCnt functions are callable -func HasPopCnt() (ret bool) - -// CountBitsInt8PopCnt count 1's in x -func CountBitsInt8PopCnt(x int8) (ret int) - -// CountBitsInt16PopCnt count 1's in x -func CountBitsInt16PopCnt(x int16) (ret int) - -// CountBitsInt32PopCnt count 1's in x -func CountBitsInt32PopCnt(x int32) (ret int) - -// CountBitsInt64PopCnt count 1's in x -func CountBitsInt64PopCnt(x int64) (ret int) - -// CountBitsIntPopCnt count 1's in x -func CountBitsIntPopCnt(x int) int { - if strconv.IntSize == 64 { - return CountBitsInt64PopCnt(int64(x)) - } else if strconv.IntSize == 32 { - return CountBitsInt32PopCnt(int32(x)) - } - panic("strconv.IntSize must be 32 or 64") -} - -// CountBitsUint8PopCnt count 1's in x -func CountBitsUint8PopCnt(x uint8) (ret int) - -// CountBitsUint16PopCnt count 1's in x -func CountBitsUint16PopCnt(x uint16) (ret int) - -// CountBitsUint32PopCnt count 1's in x -func CountBitsUint32PopCnt(x uint32) (ret int) - -// CountBitsUint64PopCnt count 1's in x -func CountBitsUint64PopCnt(x uint64) (ret int) - -// CountBitsUintPopCnt count 1's in x -func CountBitsUintPopCnt(x uint) int { - if strconv.IntSize == 64 { - return CountBitsUint64PopCnt(uint64(x)) - } else if strconv.IntSize == 32 { - return CountBitsUint32PopCnt(uint32(x)) - } - panic("strconv.IntSize must be 32 or 64") -} - -// CountBitsBytePopCnt count 1's in x -func CountBitsBytePopCnt(x byte) (ret int) - -// CountBitsRunePopCnt count 1's in x -func CountBitsRunePopCnt(x rune) (ret int) diff --git a/vendor/github.com/steakknife/hamming/popcnt_amd64.s b/vendor/github.com/steakknife/hamming/popcnt_amd64.s deleted file mode 100644 index 51c5124..0000000 --- a/vendor/github.com/steakknife/hamming/popcnt_amd64.s +++ /dev/null @@ -1,64 
+0,0 @@ -// -// hamming distance calculations in Go -// -// https://github.com/steakknife/hamming -// -// Copyright © 2014, 2015, 2016 Barry Allard -// -// MIT license -// - -#include "textflag.h" - -TEXT ·CountBitsInt8PopCnt(SB),NOSPLIT,$0 - JMP ·CountBitsBytePopCnt(SB) - -TEXT ·CountBitsInt16PopCnt(SB),NOSPLIT,$0 - JMP ·CountBitsUint16PopCnt(SB) - -TEXT ·CountBitsInt32PopCnt(SB),NOSPLIT,$0 - JMP ·CountBitsUint32PopCnt(SB) - -TEXT ·CountBitsInt64PopCnt(SB),NOSPLIT,$0 - JMP ·CountBitsUint64PopCnt(SB) - -TEXT ·CountBitsBytePopCnt(SB),NOSPLIT,$0 - JMP ·CountBitsUint8PopCnt(SB) - -TEXT ·CountBitsRunePopCnt(SB),NOSPLIT,$0 - JMP ·CountBitsUint32PopCnt(SB) - -TEXT ·CountBitsUint8PopCnt(SB),NOSPLIT,$0 - XORQ AX, AX - MOVB x+0(FP), AX - POPCNTQ AX, AX - MOVQ AX, ret+8(FP) - RET - -TEXT ·CountBitsUint16PopCnt(SB),NOSPLIT,$0 - XORQ AX, AX - MOVW x+0(FP), AX - POPCNTQ AX, AX - MOVQ AX, ret+8(FP) - RET - -TEXT ·CountBitsUint32PopCnt(SB),NOSPLIT,$0 - XORQ AX, AX - MOVL x+0(FP), AX - POPCNTQ AX, AX - MOVQ AX, ret+8(FP) - RET - -TEXT ·CountBitsUint64PopCnt(SB),NOSPLIT,$0 - POPCNTQ x+0(FP), AX - MOVQ AX, ret+8(FP) - RET - -// func hasPopCnt() (ret bool) -TEXT ·HasPopCnt(SB),NOSPLIT,$0 - MOVL $1, AX - CPUID - SHRL $23, CX // bit 23: Advanced Bit Manipulation Bit (ABM) -> POPCNTQ - ANDL $1, CX - MOVB CX, ret+0(FP) - RET diff --git a/vendor/github.com/steakknife/hamming/popcount.go b/vendor/github.com/steakknife/hamming/popcount.go deleted file mode 100644 index 848103b..0000000 --- a/vendor/github.com/steakknife/hamming/popcount.go +++ /dev/null @@ -1,134 +0,0 @@ -// -// Package hamming distance calculations in Go -// -// https://github.com/steakknife/hamming -// -// Copyright © 2014, 2015, 2016, 2018 Barry Allard -// -// MIT license -// -package hamming - -import "strconv" - -// References: check out Hacker's Delight, about p. 
70 - -func table() [256]uint8 { - return [256]uint8{ - 0, 1, 1, 2, 1, 2, 2, 3, 1, 2, 2, 3, 2, 3, 3, 4, - 1, 2, 2, 3, 2, 3, 3, 4, 2, 3, 3, 4, 3, 4, 4, 5, - 1, 2, 2, 3, 2, 3, 3, 4, 2, 3, 3, 4, 3, 4, 4, 5, - 2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6, - 1, 2, 2, 3, 2, 3, 3, 4, 2, 3, 3, 4, 3, 4, 4, 5, - 2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6, - 2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6, - 3, 4, 4, 5, 4, 5, 5, 6, 4, 5, 5, 6, 5, 6, 6, 7, - 1, 2, 2, 3, 2, 3, 3, 4, 2, 3, 3, 4, 3, 4, 4, 5, - 2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6, - 2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6, - 3, 4, 4, 5, 4, 5, 5, 6, 4, 5, 5, 6, 5, 6, 6, 7, - 2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6, - 3, 4, 4, 5, 4, 5, 5, 6, 4, 5, 5, 6, 5, 6, 6, 7, - 3, 4, 4, 5, 4, 5, 5, 6, 4, 5, 5, 6, 5, 6, 6, 7, - 4, 5, 5, 6, 5, 6, 6, 7, 5, 6, 6, 7, 6, 7, 7, 8, - } -} - -// CountBitsByteAlt table-less, branch-free implementation -func CountBitsByteAlt(x byte) int { - x = (x & 0x55) + ((x >> 1) & 0x55) - x = (x & 0x33) + ((x >> 2) & 0x33) - return int((x & 0x0f) + ((x >> 4) & 0x0f)) -} - -// CountBitsInt8 count 1's in x -func CountBitsInt8(x int8) int { return CountBitsByte(byte(x)) } - -// CountBitsInt16 count 1's in x -func CountBitsInt16(x int16) int { return CountBitsUint16(uint16(x)) } - -// CountBitsInt32 count 1's in x -func CountBitsInt32(x int32) int { return CountBitsUint32(uint32(x)) } - -// CountBitsInt64 count 1's in x -func CountBitsInt64(x int64) int { return CountBitsUint64(uint64(x)) } - -// CountBitsInt count 1's in x -func CountBitsInt(x int) int { return CountBitsUint(uint(x)) } - -// CountBitsByte count 1's in x -func CountBitsByte(x byte) int { return CountBitsUint8(x) } - -// CountBitsRune count 1's in x -func CountBitsRune(x rune) int { return CountBitsInt32(x) } - -// CountBitsUint8 count 1's in x -func CountBitsUint8(x uint8) int { return int(table()[x]) } - -// CountBitsUint16 count 1's in x -func CountBitsUint16(x uint16) int { - return int(table()[x&0xFF] + 
table()[(x>>8)&0xFF]) -} - -const ( - m1d uint32 = 0x55555555 - m2d = 0x33333333 - m4d = 0x0f0f0f0f -) - -// CountBitsUint32 count 1's in x -func CountBitsUint32(x uint32) int { - x -= ((x >> 1) & m1d) - x = (x & m2d) + ((x >> 2) & m2d) - x = (x + (x >> 4)) & m4d - x += x >> 8 - x += x >> 16 - return int(x & 0x3f) -} - -const ( - m1q uint64 = 0x5555555555555555 - m2q = 0x3333333333333333 - m4q = 0x0f0f0f0f0f0f0f0f - hq = 0x0101010101010101 -) - -// CountBitsUint64 count 1's in x -func CountBitsUint64(x uint64) int { - // put count of each 2 bits into those 2 bits - x -= (x >> 1) & m1q - - // put count of each 4 bits into those 4 bits - x = (x & m2q) + ((x >> 2) & m2q) - - // put count of each 8 bits into those 8 bits - x = (x + (x >> 4)) & m4q - - // returns left 8 bits of x + (x<<8) + (x<<16) + (x<<24) + ... - return int((x * hq) >> 56) -} - -// CountBitsUint64Alt count 1's in x -func CountBitsUint64Alt(x uint64) int { - return CountBitsUint32(uint32(x>>32)) + CountBitsUint32(uint32(x)) -} - -// CountBitsUintReference count 1's in x -func CountBitsUintReference(x uint) int { - c := 0 - for x != 0 { - x &= x - 1 - c++ - } - return c -} - -// CountBitsUint count 1's in x -func CountBitsUint(x uint) int { - if strconv.IntSize == 64 { - return CountBitsUint64(uint64(x)) - } else if strconv.IntSize == 32 { - return CountBitsUint32(uint32(x)) - } - panic("strconv.IntSize must be 32 or 64 bits") -} diff --git a/vendor/github.com/steakknife/hamming/popcount_slices.go b/vendor/github.com/steakknife/hamming/popcount_slices.go deleted file mode 100644 index 957fe11..0000000 --- a/vendor/github.com/steakknife/hamming/popcount_slices.go +++ /dev/null @@ -1,123 +0,0 @@ -// -// Package hamming distance calculations in Go -// -// https://github.com/steakknife/hamming -// -// Copyright © 2014, 2015, 2016, 2018 Barry Allard -// -// MIT license -// -package hamming - -// CountBitsInt8s count 1's in b -func CountBitsInt8s(b []int8) int { - c := 0 - for _, x := range b { - c += 
CountBitsInt8(x) - } - return c -} - -// CountBitsInt16s count 1's in b -func CountBitsInt16s(b []int16) int { - c := 0 - for _, x := range b { - c += CountBitsInt16(x) - } - return c -} - -// CountBitsInt32s count 1's in b -func CountBitsInt32s(b []int32) int { - c := 0 - for _, x := range b { - c += CountBitsInt32(x) - } - return c -} - -// CountBitsInt64s count 1's in b -func CountBitsInt64s(b []int64) int { - c := 0 - for _, x := range b { - c += CountBitsInt64(x) - } - return c -} - -// CountBitsInts count 1's in b -func CountBitsInts(b []int) int { - c := 0 - for _, x := range b { - c += CountBitsInt(x) - } - return c -} - -// CountBitsUint8s count 1's in b -func CountBitsUint8s(b []uint8) int { - c := 0 - for _, x := range b { - c += CountBitsUint8(x) - } - return c -} - -// CountBitsUint16s count 1's in b -func CountBitsUint16s(b []uint16) int { - c := 0 - for _, x := range b { - c += CountBitsUint16(x) - } - return c -} - -// CountBitsUint32s count 1's in b -func CountBitsUint32s(b []uint32) int { - c := 0 - for _, x := range b { - c += CountBitsUint32(x) - } - return c -} - -// CountBitsUint64s count 1's in b -func CountBitsUint64s(b []uint64) int { - c := 0 - for _, x := range b { - c += CountBitsUint64(x) - } - return c -} - -// CountBitsUints count 1's in b -func CountBitsUints(b []uint) int { - c := 0 - for _, x := range b { - c += CountBitsUint(x) - } - return c -} - -// CountBitsBytes count 1's in b -func CountBitsBytes(b []byte) int { - c := 0 - for _, x := range b { - c += CountBitsByte(x) - } - return c -} - -// CountBitsRunes count 1's in b -func CountBitsRunes(b []rune) int { - c := 0 - for _, x := range b { - c += CountBitsRune(x) - } - return c -} - -// CountBitsString count 1's in s -func CountBitsString(s string) int { - return CountBitsBytes([]byte(s)) -} diff --git a/vendor/github.com/steakknife/hamming/popcount_slices_amd64.go b/vendor/github.com/steakknife/hamming/popcount_slices_amd64.go deleted file mode 100644 index b3e13fd..0000000 
--- a/vendor/github.com/steakknife/hamming/popcount_slices_amd64.go +++ /dev/null @@ -1,72 +0,0 @@ -// -// Package hamming distance calculations in Go -// -// https://github.com/steakknife/hamming -// -// Copyright © 2014, 2015, 2016, 2018 Barry Allard -// -// MIT license -// -package hamming - -import ( - "strconv" - "unsafe" -) - -// CountBitsInt8sPopCnt count 1's in x -func CountBitsInt8sPopCnt(x []int8) (ret int) - -// CountBitsInt16sPopCnt count 1's in x -func CountBitsInt16sPopCnt(x []int16) (ret int) - -// CountBitsInt32sPopCnt count 1's in x -func CountBitsInt32sPopCnt(x []int32) (ret int) - -// CountBitsInt64sPopCnt count 1's in x -func CountBitsInt64sPopCnt(x []int64) (ret int) - -// CountBitsIntsPopCnt count 1's in x -func CountBitsIntsPopCnt(x []int) int { - if strconv.IntSize == 64 { - y := (*[]int64)(unsafe.Pointer(&x)) // #nosec G103 - return CountBitsInt64sPopCnt(*y) - } else if strconv.IntSize == 32 { - y := (*[]int32)(unsafe.Pointer(&x)) // #nosec G103 - return CountBitsInt32sPopCnt(*y) - } - panic("strconv.IntSize must be 32 or 64 bits") -} - -// CountBitsUint8sPopCnt count 1's in x -func CountBitsUint8sPopCnt(x []uint8) (ret int) - -// CountBitsUint16sPopCnt count 1's in x -func CountBitsUint16sPopCnt(x []uint16) (ret int) - -// CountBitsUint32sPopCnt count 1's in x -func CountBitsUint32sPopCnt(x []uint32) (ret int) - -// CountBitsUint64sPopCnt count 1's in x -func CountBitsUint64sPopCnt(x []uint64) (ret int) - -// CountBitsUintsPopCnt count 1's in x -func CountBitsUintsPopCnt(x []uint) int { - if strconv.IntSize == 64 { - y := (*[]uint64)(unsafe.Pointer(&x)) // #nosec G103 - return CountBitsUint64sPopCnt(*y) - } else if strconv.IntSize == 32 { - y := (*[]uint32)(unsafe.Pointer(&x)) // #nosec G103 - return CountBitsUint32sPopCnt(*y) - } - panic("strconv.IntSize must be 32 or 64 bits") -} - -// CountBitsBytesPopCnt count 1's in x -func CountBitsBytesPopCnt(x []byte) (ret int) - -// CountBitsRunesPopCnt count 1's in x -func CountBitsRunesPopCnt(x 
[]rune) (ret int) - -// CountBitsStringPopCnt count 1's in s -func CountBitsStringPopCnt(s string) (ret int) diff --git a/vendor/github.com/steakknife/hamming/popcount_slices_amd64.s b/vendor/github.com/steakknife/hamming/popcount_slices_amd64.s deleted file mode 100644 index b6b8c78..0000000 --- a/vendor/github.com/steakknife/hamming/popcount_slices_amd64.s +++ /dev/null @@ -1,370 +0,0 @@ -// -// hamming distance calculations in Go -// -// https://github.com/steakknife/hamming -// -// Copyright © 2014, 2015, 2016 Barry Allard -// -// MIT license -// - -#include "textflag.h" - -// type SliceHeader struct { -// Data uintptr 0 -// Len int 8 -// Cap int 16 -// } - -// 0 x.Data -// 8 x.Len -// 16 x.Cap -// 24 ret - -// type StringHeader struct { -// Data uintptr 0 -// Len int 8 -// } - -// 0 x.Data -// 8 x.Len -// 16 ret - -// func CountBitsInt8sPopCnt(x []int8) (ret int) -TEXT ·CountBitsInt8sPopCnt(SB),NOSPLIT,$0 - JMP ·CountBitsUint8sPopCnt(SB) - -// func CountBitsInt16sPopCnt(x []int16) (ret int) -TEXT ·CountBitsInt16sPopCnt(SB),NOSPLIT,$0 - JMP ·CountBitsUint16sPopCnt(SB) - -// func CountBitsInt32sPopCnt(x []int32) (ret int) -TEXT ·CountBitsInt32sPopCnt(SB),NOSPLIT,$0 - JMP ·CountBitsUint32sPopCnt(SB) - -// func CountBitsInt64sPopCnt(x []int64) (ret int) -TEXT ·CountBitsInt64sPopCnt(SB),NOSPLIT,$0 - JMP ·CountBitsUint64sPopCnt(SB) - -// func CountBitsUint8sPopCnt(x []uint8) (ret int) -TEXT ·CountBitsUint8sPopCnt(SB),NOSPLIT,$0 - XORQ AX, AX // ret = 0 - MOVQ x+8(FP), CX // x.Len -> CX - -test_negative_slice_len: - MOVQ CX, BX // x.Len < 0 ---> x.Len[63] != 0 - SHRQ $63, BX - JNZ done - - MOVQ x+0(FP), DI // x.Data -> DI - - CMPQ CX, $32 // x.Len >= 32 - JL unrolled_loop_skip - -unrolled_loop_setup: - XORQ R9, R9 - XORQ BX, BX - XORQ DX, DX - -unrolled_loop: // 4 unrolled loops of POPCNTQ (4 quad words at a time) - SUBQ $32, CX - - POPCNTQ 0(DI), R10 - ADDQ R10, R9 - POPCNTQ 8(DI), R11 - ADDQ R11, AX - POPCNTQ 16(DI), R12 - ADDQ R12, BX - POPCNTQ 24(DI), R13 - ADDQ 
R13, DX - - ADDQ $32, DI - CMPQ CX, $32 // x.Len >= 32 - JGE unrolled_loop - -unrolled_loop_done: - ADDQ R9, AX - ADDQ BX, DX - ADDQ DX, AX - - XORQ BX, BX - -unrolled_loop_skip: - CMPQ CX, $0 - JZ done - - XORQ DX, DX - -remainder_loop: - MOVB 0(DI), DL - POPCNTQ DX, BX - ADDQ BX, AX - - INCQ DI - DECQ CX - JNZ remainder_loop - -done: - MOVQ AX, ret+24(FP) - RET - -// func CountBitsUint16sPopCnt(x []uint16) (ret int) -TEXT ·CountBitsUint16sPopCnt(SB),NOSPLIT,$0 - XORQ AX, AX // ret = 0 - MOVQ x+8(FP), CX // x.Len -> CX - -test_negative_slice_len: - MOVQ CX, BX // x.Len*2 < 0 ---> x.Len[63:62] != 0 - SHLQ $1, CX - SHRQ $62, BX - JNZ done - - MOVQ x+0(FP), DI // x.Data -> DI - - - CMPQ CX, $32 // x.Len*2 >= 32 - JL unrolled_loop_skip - -unrolled_loop_setup: - XORQ R9, R9 - XORQ BX, BX - XORQ DX, DX - -unrolled_loop: // 4 unrolled loops of POPCNTQ (4 quad words at a time) - SUBQ $32, CX - - POPCNTQ 0(DI), R10 - ADDQ R10, R9 - POPCNTQ 8(DI), R11 - ADDQ R11, AX - POPCNTQ 16(DI), R12 - ADDQ R12, BX - POPCNTQ 24(DI), R13 - ADDQ R13, DX - - ADDQ $32, DI - CMPQ CX, $32 // x.Len*2 >= 32 - JGE unrolled_loop - -unrolled_loop_done: - ADDQ R9, AX - ADDQ BX, DX - ADDQ DX, AX - - XORQ BX, BX - -unrolled_loop_skip: - CMPQ CX, $0 - JZ done - - XORQ DX, DX - -remainder_loop: - MOVW 0(DI), DX - POPCNTQ DX, BX - ADDQ BX, AX - - ADDQ $2, DI - SUBQ $2, CX - JNZ remainder_loop - -done: - MOVQ AX, ret+24(FP) - RET - -// func CountBitsUint32sPopCnt(x []uint32) (ret int) -TEXT ·CountBitsUint32sPopCnt(SB),NOSPLIT,$0 - XORQ AX, AX // ret = 0 - MOVQ x+8(FP), CX // x.Len -> CX - MOVQ CX, BX - MOVQ x+0(FP), DI // x.Data -> DI - -test_negative_slice_len: - SHLQ $2, CX // x.Len*4 < 0 ---> x.Len[63:61] != 0 - SHRQ $61, BX - JNZ done - - - - CMPQ CX, $32 // x.Len*4 >= 32 - JL unrolled_loop_skip - -unrolled_loop_setup: - XORQ R9, R9 - XORQ BX, BX - XORQ DX, DX - -unrolled_loop: // 4 unrolled loops of POPCNTQ (4 quad words at a time) - SUBQ $32, CX - - POPCNTQ 0(DI), R10 // r9 += popcntq(QW DI+0) - 
ADDQ R10, R9 - POPCNTQ 8(DI), R11 // ax += popcntq(QW DI+8) - ADDQ R11, AX - POPCNTQ 16(DI), R12 // bx += popcntq(QW DI+16) - ADDQ R12, BX - POPCNTQ 24(DI), R13 // dx += popcntq(QW DI+24) - ADDQ R13, DX - - ADDQ $32, DI - CMPQ CX, $32 // x.Len*4 >= 32 - JGE unrolled_loop - -unrolled_loop_done: - ADDQ R9, AX // ax = (ax + r9) + (bx + dx) - ADDQ BX, DX - ADDQ DX, AX - - XORQ BX, BX - -unrolled_loop_skip: - CMPQ CX, $0 - JZ done - - XORQ DX, DX -remainder_loop: - MOVB (DI), DX // ax += popcnt(DB 0(DI)) - POPCNTQ DX, BX - ADDQ BX, AX - - INCQ DI - DECQ CX - JNZ remainder_loop - -done: - MOVQ AX, ret+24(FP) - RET - -// func CountBitsUint64sPopCnt(x []uint64) (ret int) -TEXT ·CountBitsUint64sPopCnt(SB),NOSPLIT,$0 - XORQ AX, AX // ret = 0 - MOVQ x+8(FP), CX // x.Len -> CX - -test_negative_slice_len: - MOVQ CX, BX // x.Len*8 < 0 ---> x.Len[63:60] != 0 - SHLQ $3, CX - SHRQ $60, BX - JNZ done - - MOVQ x+0(FP), DI // x.Data -> DI - - - CMPQ CX, $32 // x.Len*8 >= 32 - JL unrolled_loop_skip - -unrolled_loop_setup: - XORQ R9, R9 - XORQ BX, BX - XORQ DX, DX - -unrolled_loop: // 4 unrolled loops of POPCNTQ (4 quad words at a time) - SUBQ $32, CX - - POPCNTQ 0(DI), R10 - ADDQ R10, R9 - POPCNTQ 8(DI), R11 - ADDQ R11, AX - POPCNTQ 16(DI), R12 - ADDQ R12, BX - POPCNTQ 24(DI), R13 - ADDQ R13, DX - - ADDQ $32, DI - CMPQ CX, $32 // x.Len*4 >= 32 - JGE unrolled_loop - -unrolled_loop_done: - ADDQ R9, AX - ADDQ BX, DX - ADDQ DX, AX - - XORQ BX, BX - -unrolled_loop_skip: - CMPQ CX, $0 - JZ done - - XORQ DX, DX - -remainder_loop: - MOVQ 0(DI), DX - POPCNTQ DX, BX - ADDQ BX, AX - - ADDQ $8, DI - SUBQ $8, CX - JNZ remainder_loop - -done: - MOVQ AX, ret+24(FP) - RET - -// func CountBitsBytesPopCnt(x []byte) (ret int) -TEXT ·CountBitsBytesPopCnt(SB),NOSPLIT,$0 - JMP ·CountBitsUint8sPopCnt(SB) - -// func CountBitsRunesPopCnt(x []rune) (ret int) -TEXT ·CountBitsRunesPopCnt(SB),NOSPLIT,$0 - JMP ·CountBitsUint32sPopCnt(SB) - -// func CountBitsStringPopCnt(s string) (ret int) -TEXT 
·CountBitsStringPopCnt(SB),NOSPLIT,$0 - XORQ AX, AX // ret = 0 - MOVQ x+8(FP), CX // x.Len -> CX - -test_negative_slice_len: - MOVQ CX, BX // x.Len < 0 ---> x.Len[63] != 0 - SHRQ $63, BX - JNZ done - - MOVQ x+0(FP), DI // x.Data -> DI - - CMPQ CX, $32 // x.Len >= 32 - JL unrolled_loop_skip - -unrolled_loop_setup: - XORQ R9, R9 - XORQ BX, BX - XORQ DX, DX - -unrolled_loop: // 4 unrolled loops of POPCNTQ (4 quad words at a time) - SUBQ $32, CX - - POPCNTQ 0(DI), R10 - ADDQ R10, R9 - POPCNTQ 8(DI), R11 - ADDQ R11, AX - POPCNTQ 16(DI), R12 - ADDQ R12, BX - POPCNTQ 24(DI), R13 - ADDQ R13, DX - - ADDQ $32, DI - CMPQ CX, $32 // x.Len >= 32 - JGE unrolled_loop - -unrolled_loop_done: - ADDQ R9, AX - ADDQ BX, DX - ADDQ DX, AX - - XORQ BX, BX - -unrolled_loop_skip: - CMPQ CX, $0 - JZ done - - XORQ DX, DX - -remainder_loop: - MOVB 0(DI), DL - POPCNTQ DX, BX - ADDQ BX, AX - - INCQ DI - DECQ CX - JNZ remainder_loop - -done: - MOVQ AX, ret+16(FP) - RET diff --git a/vendor/github.com/steakknife/hamming/slices_of_hamming.go b/vendor/github.com/steakknife/hamming/slices_of_hamming.go deleted file mode 100644 index 82ce948..0000000 --- a/vendor/github.com/steakknife/hamming/slices_of_hamming.go +++ /dev/null @@ -1,144 +0,0 @@ -// -// Package hamming distance calculations in Go -// -// https://github.com/steakknife/hamming -// -// Copyright © 2014, 2015, 2016, 2018 Barry Allard -// -// MIT license -// -package hamming - -// Int8s hamming distance of two int8 buffers, of which the size of b0 -// is used for both (panics if b1 < b0, does not compare b1 beyond length of b0) -func Int8s(b0, b1 []int8) int { - d := 0 - for i, x := range b0 { - d += Int8(x, b1[i]) - } - return d -} - -// Int16s hamming distance of two int16 buffers, of which the size of b0 -// is used for both (panics if b1 < b0, does not compare b1 beyond length of b0) -func Int16s(b0, b1 []int16) int { - d := 0 - for i, x := range b0 { - d += Int16(x, b1[i]) - } - return d -} - -// Int32s hamming distance of two int32 
buffers, of which the size of b0 -// is used for both (panics if b1 < b0, does not compare b1 beyond length of b0) -func Int32s(b0, b1 []int32) int { - d := 0 - for i, x := range b0 { - d += Int32(x, b1[i]) - } - return d -} - -// Int64s hamming distance of two int64 buffers, of which the size of b0 -// is used for both (panics if b1 < b0, does not compare b1 beyond length of b0) -func Int64s(b0, b1 []int64) int { - d := 0 - for i, x := range b0 { - d += Int64(x, b1[i]) - } - return d -} - -// Ints hamming distance of two int buffers, of which the size of b0 -// is used for both (panics if b1 < b0, does not compare b1 beyond length of b0) -func Ints(b0, b1 []int) int { - d := 0 - for i, x := range b0 { - d += Int(x, b1[i]) - } - return d -} - -// Uint8s hamming distance of two uint8 buffers, of which the size of b0 -// is used for both (panics if b1 < b0, does not compare b1 beyond length of b0) -func Uint8s(b0, b1 []uint8) int { - d := 0 - for i, x := range b0 { - d += Uint8(x, b1[i]) - } - return d -} - -// Uint16s hamming distance of two uint16 buffers, of which the size of b0 -// is used for both (panics if b1 < b0, does not compare b1 beyond length of b0) -func Uint16s(b0, b1 []uint16) int { - d := 0 - for i, x := range b0 { - d += Uint16(x, b1[i]) - } - return d -} - -// Uint32s hamming distance of two uint32 buffers, of which the size of b0 -// is used for both (panics if b1 < b0, does not compare b1 beyond length of b0) -func Uint32s(b0, b1 []uint32) int { - d := 0 - for i, x := range b0 { - d += Uint32(x, b1[i]) - } - return d -} - -// Uint64s hamming distance of two uint64 buffers, of which the size of b0 -// is used for both (panics if b1 < b0, does not compare b1 beyond length of b0) -func Uint64s(b0, b1 []uint64) int { - d := 0 - for i, x := range b0 { - d += Uint64(x, b1[i]) - } - return d -} - -// Uints hamming distance of two uint buffers, of which the size of b0 -// is used for both (panics if b1 < b0, does not compare b1 beyond length of b0) -func 
Uints(b0, b1 []uint) int { - d := 0 - for i, x := range b0 { - d += Uint(x, b1[i]) - } - return d -} - -// Bytes hamming distance of two byte buffers, of which the size of b0 -// is used for both (panics if b1 < b0, does not compare b1 beyond length of b0) -func Bytes(b0, b1 []byte) int { - d := 0 - for i, x := range b0 { - d += Byte(x, b1[i]) - } - return d -} - -// Runes hamming distance of two rune buffers, of which the size of b0 -// is used for both (panics if b1 < b0, does not compare b1 beyond length of b0) -func Runes(b0, b1 []rune) int { - d := 0 - for i, x := range b0 { - d += Rune(x, b1[i]) - } - return d -} - -// Strings hamming distance of two strings, of which the size of b0 -// is used for both (panics if b1 < b0, does not compare b1 beyond length of b0) -func Strings(b0, b1 string) int { - return Runes(runes(b0), runes(b1)) -} - -// runize string -func runes(s string) (r []rune) { - for _, ch := range s { - r = append(r, ch) - } - return -} diff --git a/vendor/golang.org/x/net/AUTHORS b/vendor/golang.org/x/net/AUTHORS deleted file mode 100644 index 15167cd..0000000 --- a/vendor/golang.org/x/net/AUTHORS +++ /dev/null @@ -1,3 +0,0 @@ -# This source code refers to The Go Authors for copyright purposes. -# The master list of authors is in the main Go distribution, -# visible at http://tip.golang.org/AUTHORS. diff --git a/vendor/golang.org/x/net/CONTRIBUTORS b/vendor/golang.org/x/net/CONTRIBUTORS deleted file mode 100644 index 1c4577e..0000000 --- a/vendor/golang.org/x/net/CONTRIBUTORS +++ /dev/null @@ -1,3 +0,0 @@ -# This source code was written by the Go contributors. -# The master list of contributors is in the main Go distribution, -# visible at http://tip.golang.org/CONTRIBUTORS. diff --git a/vendor/golang.org/x/net/LICENSE b/vendor/golang.org/x/net/LICENSE deleted file mode 100644 index 6a66aea..0000000 --- a/vendor/golang.org/x/net/LICENSE +++ /dev/null @@ -1,27 +0,0 @@ -Copyright (c) 2009 The Go Authors. All rights reserved. 
- -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - * Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the following disclaimer -in the documentation and/or other materials provided with the -distribution. - * Neither the name of Google Inc. nor the names of its -contributors may be used to endorse or promote products derived from -this software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/golang.org/x/net/PATENTS b/vendor/golang.org/x/net/PATENTS deleted file mode 100644 index 7330990..0000000 --- a/vendor/golang.org/x/net/PATENTS +++ /dev/null @@ -1,22 +0,0 @@ -Additional IP Rights Grant (Patents) - -"This implementation" means the copyrightable works distributed by -Google as part of the Go project. 
- -Google hereby grants to You a perpetual, worldwide, non-exclusive, -no-charge, royalty-free, irrevocable (except as stated in this section) -patent license to make, have made, use, offer to sell, sell, import, -transfer and otherwise run, modify and propagate the contents of this -implementation of Go, where such license applies only to those patent -claims, both currently owned or controlled by Google and acquired in -the future, licensable by Google that are necessarily infringed by this -implementation of Go. This grant does not include claims that would be -infringed only as a consequence of further modification of this -implementation. If you or your agent or exclusive licensee institute or -order or agree to the institution of patent litigation against any -entity (including a cross-claim or counterclaim in a lawsuit) alleging -that this implementation of Go or any code incorporated within this -implementation of Go constitutes direct or contributory patent -infringement, or inducement of patent infringement, then any patent -rights granted to you under this License for this implementation of Go -shall terminate as of the date such litigation is filed. diff --git a/vendor/golang.org/x/net/context/context.go b/vendor/golang.org/x/net/context/context.go deleted file mode 100644 index a3c021d..0000000 --- a/vendor/golang.org/x/net/context/context.go +++ /dev/null @@ -1,56 +0,0 @@ -// Copyright 2014 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package context defines the Context type, which carries deadlines, -// cancelation signals, and other request-scoped values across API boundaries -// and between processes. -// As of Go 1.7 this package is available in the standard library under the -// name context. https://golang.org/pkg/context. -// -// Incoming requests to a server should create a Context, and outgoing calls to -// servers should accept a Context. 
The chain of function calls between must -// propagate the Context, optionally replacing it with a modified copy created -// using WithDeadline, WithTimeout, WithCancel, or WithValue. -// -// Programs that use Contexts should follow these rules to keep interfaces -// consistent across packages and enable static analysis tools to check context -// propagation: -// -// Do not store Contexts inside a struct type; instead, pass a Context -// explicitly to each function that needs it. The Context should be the first -// parameter, typically named ctx: -// -// func DoSomething(ctx context.Context, arg Arg) error { -// // ... use ctx ... -// } -// -// Do not pass a nil Context, even if a function permits it. Pass context.TODO -// if you are unsure about which Context to use. -// -// Use context Values only for request-scoped data that transits processes and -// APIs, not for passing optional parameters to functions. -// -// The same Context may be passed to functions running in different goroutines; -// Contexts are safe for simultaneous use by multiple goroutines. -// -// See http://blog.golang.org/context for example code for a server that uses -// Contexts. -package context // import "golang.org/x/net/context" - -// Background returns a non-nil, empty Context. It is never canceled, has no -// values, and has no deadline. It is typically used by the main function, -// initialization, and tests, and as the top-level Context for incoming -// requests. -func Background() Context { - return background -} - -// TODO returns a non-nil, empty Context. Code should use context.TODO when -// it's unclear which Context to use or it is not yet available (because the -// surrounding function has not yet been extended to accept a Context -// parameter). TODO is recognized by static analysis tools that determine -// whether Contexts are propagated correctly in a program. 
-func TODO() Context { - return todo -} diff --git a/vendor/golang.org/x/net/context/go17.go b/vendor/golang.org/x/net/context/go17.go deleted file mode 100644 index d20f52b..0000000 --- a/vendor/golang.org/x/net/context/go17.go +++ /dev/null @@ -1,72 +0,0 @@ -// Copyright 2016 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build go1.7 - -package context - -import ( - "context" // standard library's context, as of Go 1.7 - "time" -) - -var ( - todo = context.TODO() - background = context.Background() -) - -// Canceled is the error returned by Context.Err when the context is canceled. -var Canceled = context.Canceled - -// DeadlineExceeded is the error returned by Context.Err when the context's -// deadline passes. -var DeadlineExceeded = context.DeadlineExceeded - -// WithCancel returns a copy of parent with a new Done channel. The returned -// context's Done channel is closed when the returned cancel function is called -// or when the parent context's Done channel is closed, whichever happens first. -// -// Canceling this context releases resources associated with it, so code should -// call cancel as soon as the operations running in this Context complete. -func WithCancel(parent Context) (ctx Context, cancel CancelFunc) { - ctx, f := context.WithCancel(parent) - return ctx, CancelFunc(f) -} - -// WithDeadline returns a copy of the parent context with the deadline adjusted -// to be no later than d. If the parent's deadline is already earlier than d, -// WithDeadline(parent, d) is semantically equivalent to parent. The returned -// context's Done channel is closed when the deadline expires, when the returned -// cancel function is called, or when the parent context's Done channel is -// closed, whichever happens first. 
-// -// Canceling this context releases resources associated with it, so code should -// call cancel as soon as the operations running in this Context complete. -func WithDeadline(parent Context, deadline time.Time) (Context, CancelFunc) { - ctx, f := context.WithDeadline(parent, deadline) - return ctx, CancelFunc(f) -} - -// WithTimeout returns WithDeadline(parent, time.Now().Add(timeout)). -// -// Canceling this context releases resources associated with it, so code should -// call cancel as soon as the operations running in this Context complete: -// -// func slowOperationWithTimeout(ctx context.Context) (Result, error) { -// ctx, cancel := context.WithTimeout(ctx, 100*time.Millisecond) -// defer cancel() // releases resources if slowOperation completes before timeout elapses -// return slowOperation(ctx) -// } -func WithTimeout(parent Context, timeout time.Duration) (Context, CancelFunc) { - return WithDeadline(parent, time.Now().Add(timeout)) -} - -// WithValue returns a copy of parent in which the value associated with key is -// val. -// -// Use context Values only for request-scoped data that transits processes and -// APIs, not for passing optional parameters to functions. -func WithValue(parent Context, key interface{}, val interface{}) Context { - return context.WithValue(parent, key, val) -} diff --git a/vendor/golang.org/x/net/context/go19.go b/vendor/golang.org/x/net/context/go19.go deleted file mode 100644 index d88bd1d..0000000 --- a/vendor/golang.org/x/net/context/go19.go +++ /dev/null @@ -1,20 +0,0 @@ -// Copyright 2017 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build go1.9 - -package context - -import "context" // standard library's context, as of Go 1.7 - -// A Context carries a deadline, a cancelation signal, and other values across -// API boundaries. -// -// Context's methods may be called by multiple goroutines simultaneously. 
-type Context = context.Context - -// A CancelFunc tells an operation to abandon its work. -// A CancelFunc does not wait for the work to stop. -// After the first call, subsequent calls to a CancelFunc do nothing. -type CancelFunc = context.CancelFunc diff --git a/vendor/golang.org/x/net/context/pre_go17.go b/vendor/golang.org/x/net/context/pre_go17.go deleted file mode 100644 index 0f35592..0000000 --- a/vendor/golang.org/x/net/context/pre_go17.go +++ /dev/null @@ -1,300 +0,0 @@ -// Copyright 2014 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build !go1.7 - -package context - -import ( - "errors" - "fmt" - "sync" - "time" -) - -// An emptyCtx is never canceled, has no values, and has no deadline. It is not -// struct{}, since vars of this type must have distinct addresses. -type emptyCtx int - -func (*emptyCtx) Deadline() (deadline time.Time, ok bool) { - return -} - -func (*emptyCtx) Done() <-chan struct{} { - return nil -} - -func (*emptyCtx) Err() error { - return nil -} - -func (*emptyCtx) Value(key interface{}) interface{} { - return nil -} - -func (e *emptyCtx) String() string { - switch e { - case background: - return "context.Background" - case todo: - return "context.TODO" - } - return "unknown empty Context" -} - -var ( - background = new(emptyCtx) - todo = new(emptyCtx) -) - -// Canceled is the error returned by Context.Err when the context is canceled. -var Canceled = errors.New("context canceled") - -// DeadlineExceeded is the error returned by Context.Err when the context's -// deadline passes. -var DeadlineExceeded = errors.New("context deadline exceeded") - -// WithCancel returns a copy of parent with a new Done channel. The returned -// context's Done channel is closed when the returned cancel function is called -// or when the parent context's Done channel is closed, whichever happens first. 
-// -// Canceling this context releases resources associated with it, so code should -// call cancel as soon as the operations running in this Context complete. -func WithCancel(parent Context) (ctx Context, cancel CancelFunc) { - c := newCancelCtx(parent) - propagateCancel(parent, c) - return c, func() { c.cancel(true, Canceled) } -} - -// newCancelCtx returns an initialized cancelCtx. -func newCancelCtx(parent Context) *cancelCtx { - return &cancelCtx{ - Context: parent, - done: make(chan struct{}), - } -} - -// propagateCancel arranges for child to be canceled when parent is. -func propagateCancel(parent Context, child canceler) { - if parent.Done() == nil { - return // parent is never canceled - } - if p, ok := parentCancelCtx(parent); ok { - p.mu.Lock() - if p.err != nil { - // parent has already been canceled - child.cancel(false, p.err) - } else { - if p.children == nil { - p.children = make(map[canceler]bool) - } - p.children[child] = true - } - p.mu.Unlock() - } else { - go func() { - select { - case <-parent.Done(): - child.cancel(false, parent.Err()) - case <-child.Done(): - } - }() - } -} - -// parentCancelCtx follows a chain of parent references until it finds a -// *cancelCtx. This function understands how each of the concrete types in this -// package represents its parent. -func parentCancelCtx(parent Context) (*cancelCtx, bool) { - for { - switch c := parent.(type) { - case *cancelCtx: - return c, true - case *timerCtx: - return c.cancelCtx, true - case *valueCtx: - parent = c.Context - default: - return nil, false - } - } -} - -// removeChild removes a context from its parent. -func removeChild(parent Context, child canceler) { - p, ok := parentCancelCtx(parent) - if !ok { - return - } - p.mu.Lock() - if p.children != nil { - delete(p.children, child) - } - p.mu.Unlock() -} - -// A canceler is a context type that can be canceled directly. The -// implementations are *cancelCtx and *timerCtx. 
-type canceler interface { - cancel(removeFromParent bool, err error) - Done() <-chan struct{} -} - -// A cancelCtx can be canceled. When canceled, it also cancels any children -// that implement canceler. -type cancelCtx struct { - Context - - done chan struct{} // closed by the first cancel call. - - mu sync.Mutex - children map[canceler]bool // set to nil by the first cancel call - err error // set to non-nil by the first cancel call -} - -func (c *cancelCtx) Done() <-chan struct{} { - return c.done -} - -func (c *cancelCtx) Err() error { - c.mu.Lock() - defer c.mu.Unlock() - return c.err -} - -func (c *cancelCtx) String() string { - return fmt.Sprintf("%v.WithCancel", c.Context) -} - -// cancel closes c.done, cancels each of c's children, and, if -// removeFromParent is true, removes c from its parent's children. -func (c *cancelCtx) cancel(removeFromParent bool, err error) { - if err == nil { - panic("context: internal error: missing cancel error") - } - c.mu.Lock() - if c.err != nil { - c.mu.Unlock() - return // already canceled - } - c.err = err - close(c.done) - for child := range c.children { - // NOTE: acquiring the child's lock while holding parent's lock. - child.cancel(false, err) - } - c.children = nil - c.mu.Unlock() - - if removeFromParent { - removeChild(c.Context, c) - } -} - -// WithDeadline returns a copy of the parent context with the deadline adjusted -// to be no later than d. If the parent's deadline is already earlier than d, -// WithDeadline(parent, d) is semantically equivalent to parent. The returned -// context's Done channel is closed when the deadline expires, when the returned -// cancel function is called, or when the parent context's Done channel is -// closed, whichever happens first. -// -// Canceling this context releases resources associated with it, so code should -// call cancel as soon as the operations running in this Context complete. 
-func WithDeadline(parent Context, deadline time.Time) (Context, CancelFunc) { - if cur, ok := parent.Deadline(); ok && cur.Before(deadline) { - // The current deadline is already sooner than the new one. - return WithCancel(parent) - } - c := &timerCtx{ - cancelCtx: newCancelCtx(parent), - deadline: deadline, - } - propagateCancel(parent, c) - d := deadline.Sub(time.Now()) - if d <= 0 { - c.cancel(true, DeadlineExceeded) // deadline has already passed - return c, func() { c.cancel(true, Canceled) } - } - c.mu.Lock() - defer c.mu.Unlock() - if c.err == nil { - c.timer = time.AfterFunc(d, func() { - c.cancel(true, DeadlineExceeded) - }) - } - return c, func() { c.cancel(true, Canceled) } -} - -// A timerCtx carries a timer and a deadline. It embeds a cancelCtx to -// implement Done and Err. It implements cancel by stopping its timer then -// delegating to cancelCtx.cancel. -type timerCtx struct { - *cancelCtx - timer *time.Timer // Under cancelCtx.mu. - - deadline time.Time -} - -func (c *timerCtx) Deadline() (deadline time.Time, ok bool) { - return c.deadline, true -} - -func (c *timerCtx) String() string { - return fmt.Sprintf("%v.WithDeadline(%s [%s])", c.cancelCtx.Context, c.deadline, c.deadline.Sub(time.Now())) -} - -func (c *timerCtx) cancel(removeFromParent bool, err error) { - c.cancelCtx.cancel(false, err) - if removeFromParent { - // Remove this timerCtx from its parent cancelCtx's children. - removeChild(c.cancelCtx.Context, c) - } - c.mu.Lock() - if c.timer != nil { - c.timer.Stop() - c.timer = nil - } - c.mu.Unlock() -} - -// WithTimeout returns WithDeadline(parent, time.Now().Add(timeout)). 
-// -// Canceling this context releases resources associated with it, so code should -// call cancel as soon as the operations running in this Context complete: -// -// func slowOperationWithTimeout(ctx context.Context) (Result, error) { -// ctx, cancel := context.WithTimeout(ctx, 100*time.Millisecond) -// defer cancel() // releases resources if slowOperation completes before timeout elapses -// return slowOperation(ctx) -// } -func WithTimeout(parent Context, timeout time.Duration) (Context, CancelFunc) { - return WithDeadline(parent, time.Now().Add(timeout)) -} - -// WithValue returns a copy of parent in which the value associated with key is -// val. -// -// Use context Values only for request-scoped data that transits processes and -// APIs, not for passing optional parameters to functions. -func WithValue(parent Context, key interface{}, val interface{}) Context { - return &valueCtx{parent, key, val} -} - -// A valueCtx carries a key-value pair. It implements Value for that key and -// delegates all other calls to the embedded Context. -type valueCtx struct { - Context - key, val interface{} -} - -func (c *valueCtx) String() string { - return fmt.Sprintf("%v.WithValue(%#v, %#v)", c.Context, c.key, c.val) -} - -func (c *valueCtx) Value(key interface{}) interface{} { - if c.key == key { - return c.val - } - return c.Context.Value(key) -} diff --git a/vendor/golang.org/x/net/context/pre_go19.go b/vendor/golang.org/x/net/context/pre_go19.go deleted file mode 100644 index b105f80..0000000 --- a/vendor/golang.org/x/net/context/pre_go19.go +++ /dev/null @@ -1,109 +0,0 @@ -// Copyright 2014 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build !go1.9 - -package context - -import "time" - -// A Context carries a deadline, a cancelation signal, and other values across -// API boundaries. -// -// Context's methods may be called by multiple goroutines simultaneously. 
-type Context interface { - // Deadline returns the time when work done on behalf of this context - // should be canceled. Deadline returns ok==false when no deadline is - // set. Successive calls to Deadline return the same results. - Deadline() (deadline time.Time, ok bool) - - // Done returns a channel that's closed when work done on behalf of this - // context should be canceled. Done may return nil if this context can - // never be canceled. Successive calls to Done return the same value. - // - // WithCancel arranges for Done to be closed when cancel is called; - // WithDeadline arranges for Done to be closed when the deadline - // expires; WithTimeout arranges for Done to be closed when the timeout - // elapses. - // - // Done is provided for use in select statements: - // - // // Stream generates values with DoSomething and sends them to out - // // until DoSomething returns an error or ctx.Done is closed. - // func Stream(ctx context.Context, out chan<- Value) error { - // for { - // v, err := DoSomething(ctx) - // if err != nil { - // return err - // } - // select { - // case <-ctx.Done(): - // return ctx.Err() - // case out <- v: - // } - // } - // } - // - // See http://blog.golang.org/pipelines for more examples of how to use - // a Done channel for cancelation. - Done() <-chan struct{} - - // Err returns a non-nil error value after Done is closed. Err returns - // Canceled if the context was canceled or DeadlineExceeded if the - // context's deadline passed. No other values for Err are defined. - // After Done is closed, successive calls to Err return the same value. - Err() error - - // Value returns the value associated with this context for key, or nil - // if no value is associated with key. Successive calls to Value with - // the same key returns the same result. - // - // Use context values only for request-scoped data that transits - // processes and API boundaries, not for passing optional parameters to - // functions. 
- // - // A key identifies a specific value in a Context. Functions that wish - // to store values in Context typically allocate a key in a global - // variable then use that key as the argument to context.WithValue and - // Context.Value. A key can be any type that supports equality; - // packages should define keys as an unexported type to avoid - // collisions. - // - // Packages that define a Context key should provide type-safe accessors - // for the values stores using that key: - // - // // Package user defines a User type that's stored in Contexts. - // package user - // - // import "golang.org/x/net/context" - // - // // User is the type of value stored in the Contexts. - // type User struct {...} - // - // // key is an unexported type for keys defined in this package. - // // This prevents collisions with keys defined in other packages. - // type key int - // - // // userKey is the key for user.User values in Contexts. It is - // // unexported; clients use user.NewContext and user.FromContext - // // instead of using this key directly. - // var userKey key = 0 - // - // // NewContext returns a new Context that carries value u. - // func NewContext(ctx context.Context, u *User) context.Context { - // return context.WithValue(ctx, userKey, u) - // } - // - // // FromContext returns the User value stored in ctx, if any. - // func FromContext(ctx context.Context) (*User, bool) { - // u, ok := ctx.Value(userKey).(*User) - // return u, ok - // } - Value(key interface{}) interface{} -} - -// A CancelFunc tells an operation to abandon its work. -// A CancelFunc does not wait for the work to stop. -// After the first call, subsequent calls to a CancelFunc do nothing. 
-type CancelFunc func() diff --git a/vendor/modules.txt b/vendor/modules.txt index a311437..c5f4778 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -1,7 +1,6 @@ # github.com/NYTimes/gziphandler v1.1.1 github.com/NYTimes/gziphandler # github.com/SkycoinProject/skycoin v0.27.0 -github.com/SkycoinProject/skycoin/src/api github.com/SkycoinProject/skycoin/src/cipher github.com/SkycoinProject/skycoin/src/cipher/base58 github.com/SkycoinProject/skycoin/src/cipher/bip32 @@ -10,7 +9,6 @@ github.com/SkycoinProject/skycoin/src/cipher/bip39/wordlists github.com/SkycoinProject/skycoin/src/cipher/bip44 github.com/SkycoinProject/skycoin/src/cipher/chacha20poly1305 github.com/SkycoinProject/skycoin/src/cipher/chacha20poly1305/internal/chacha20 -github.com/SkycoinProject/skycoin/src/cipher/encoder github.com/SkycoinProject/skycoin/src/cipher/encrypt github.com/SkycoinProject/skycoin/src/cipher/pbkdf2 github.com/SkycoinProject/skycoin/src/cipher/poly1305 @@ -18,43 +16,12 @@ github.com/SkycoinProject/skycoin/src/cipher/ripemd160 github.com/SkycoinProject/skycoin/src/cipher/scrypt github.com/SkycoinProject/skycoin/src/cipher/secp256k1-go github.com/SkycoinProject/skycoin/src/cipher/secp256k1-go/secp256k1-go2 -github.com/SkycoinProject/skycoin/src/coin -github.com/SkycoinProject/skycoin/src/daemon -github.com/SkycoinProject/skycoin/src/daemon/gnet -github.com/SkycoinProject/skycoin/src/daemon/pex -github.com/SkycoinProject/skycoin/src/daemon/strand -github.com/SkycoinProject/skycoin/src/kvstorage -github.com/SkycoinProject/skycoin/src/params -github.com/SkycoinProject/skycoin/src/readable -github.com/SkycoinProject/skycoin/src/testutil -github.com/SkycoinProject/skycoin/src/transaction +github.com/SkycoinProject/skycoin/src/util/apputil github.com/SkycoinProject/skycoin/src/util/droplet -github.com/SkycoinProject/skycoin/src/util/elapse -github.com/SkycoinProject/skycoin/src/util/fee github.com/SkycoinProject/skycoin/src/util/file 
github.com/SkycoinProject/skycoin/src/util/http -github.com/SkycoinProject/skycoin/src/util/iputil github.com/SkycoinProject/skycoin/src/util/logging github.com/SkycoinProject/skycoin/src/util/mathutil -github.com/SkycoinProject/skycoin/src/util/timeutil -github.com/SkycoinProject/skycoin/src/util/useragent -github.com/SkycoinProject/skycoin/src/visor -github.com/SkycoinProject/skycoin/src/visor/blockdb -github.com/SkycoinProject/skycoin/src/visor/dbutil -github.com/SkycoinProject/skycoin/src/visor/historydb -github.com/SkycoinProject/skycoin/src/wallet -# github.com/StackExchange/wmi v0.0.0-20180116203802-5d049714c4a6 -github.com/StackExchange/wmi -# github.com/VictoriaMetrics/fastcache v1.5.3 -github.com/VictoriaMetrics/fastcache -# github.com/aristanetworks/goarista v0.0.0-20170210015632-ea17b1a17847 -github.com/aristanetworks/goarista/monotime -# github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973 -github.com/beorn7/perks/quantile -# github.com/blang/semver v3.5.1+incompatible -github.com/blang/semver -# github.com/boltdb/bolt v1.3.1 -github.com/boltdb/bolt # github.com/btcsuite/btcd v0.20.1-beta github.com/btcsuite/btcd/btcec github.com/btcsuite/btcd/btcjson @@ -72,81 +39,29 @@ github.com/btcsuite/btcutil/bech32 github.com/btcsuite/go-socks/socks # github.com/btcsuite/websocket v0.0.0-20150119174127-31079b680792 github.com/btcsuite/websocket -# github.com/cenkalti/backoff v2.2.1+incompatible -github.com/cenkalti/backoff -# github.com/cespare/xxhash/v2 v2.1.1 -github.com/cespare/xxhash/v2 # github.com/davecgh/go-spew v1.1.1 github.com/davecgh/go-spew/spew -# github.com/elastic/gosigar v0.8.1-0.20180330100440-37f05ff46ffa -github.com/elastic/gosigar -github.com/elastic/gosigar/sys/windows # github.com/ethereum/go-ethereum v1.9.12 -github.com/ethereum/go-ethereum github.com/ethereum/go-ethereum/common github.com/ethereum/go-ethereum/common/hexutil github.com/ethereum/go-ethereum/common/math -github.com/ethereum/go-ethereum/common/mclock 
-github.com/ethereum/go-ethereum/common/prque -github.com/ethereum/go-ethereum/core/types github.com/ethereum/go-ethereum/crypto github.com/ethereum/go-ethereum/crypto/secp256k1 -github.com/ethereum/go-ethereum/ethdb -github.com/ethereum/go-ethereum/log -github.com/ethereum/go-ethereum/metrics -github.com/ethereum/go-ethereum/params github.com/ethereum/go-ethereum/rlp -github.com/ethereum/go-ethereum/trie -# github.com/go-ole/go-ole v1.2.1 -github.com/go-ole/go-ole -github.com/go-ole/go-ole/oleutil -# github.com/go-stack/stack v1.8.0 -github.com/go-stack/stack -# github.com/golang/protobuf v1.3.2-0.20190517061210-b285ee9cfc6c -github.com/golang/protobuf/proto -# github.com/golang/snappy v0.0.1 -github.com/golang/snappy # github.com/konsorten/go-windows-terminal-sequences v1.0.1 github.com/konsorten/go-windows-terminal-sequences # github.com/mattn/go-colorable v0.1.0 github.com/mattn/go-colorable # github.com/mattn/go-isatty v0.0.5-0.20180830101745-3fb116b82035 github.com/mattn/go-isatty -# github.com/matttproud/golang_protobuf_extensions v1.0.1 -github.com/matttproud/golang_protobuf_extensions/pbutil # github.com/mgutz/ansi v0.0.0-20170206155736-9520e82c474b github.com/mgutz/ansi -# github.com/pkg/errors v0.8.1 -github.com/pkg/errors # github.com/pmezard/go-difflib v1.0.0 github.com/pmezard/go-difflib/difflib -# github.com/prometheus/client_golang v0.9.1 -github.com/prometheus/client_golang/prometheus -github.com/prometheus/client_golang/prometheus/internal -github.com/prometheus/client_golang/prometheus/promhttp -# github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910 -github.com/prometheus/client_model/go -# github.com/prometheus/common v0.0.0-20181113130724-41aa239b4cce -github.com/prometheus/common/expfmt -github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg -github.com/prometheus/common/model -# github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d -github.com/prometheus/procfs -github.com/prometheus/procfs/internal/util 
-github.com/prometheus/procfs/nfs -github.com/prometheus/procfs/xfs -# github.com/rs/cors v0.0.0-20160617231935-a62a804a8a00 -github.com/rs/cors -# github.com/rs/xhandler v0.0.0-20160618193221-ed27b6fd6521 -github.com/rs/xhandler # github.com/shopspring/decimal v0.0.0-20200227202807-02e2044944cc github.com/shopspring/decimal # github.com/sirupsen/logrus v1.4.2 github.com/sirupsen/logrus -# github.com/steakknife/bloomfilter v0.0.0-20180922174646-6819c0d2a570 -github.com/steakknife/bloomfilter -# github.com/steakknife/hamming v0.0.0-20180906055917-c99c65617cd3 -github.com/steakknife/hamming # github.com/stretchr/testify v1.5.1 github.com/stretchr/testify/assert github.com/stretchr/testify/require @@ -154,8 +69,6 @@ github.com/stretchr/testify/require golang.org/x/crypto/ripemd160 golang.org/x/crypto/sha3 golang.org/x/crypto/ssh/terminal -# golang.org/x/net v0.0.0-20200301022130-244492dfa37a -golang.org/x/net/context # golang.org/x/sys v0.0.0-20200302150141-5c8b2ff67527 golang.org/x/sys/cpu golang.org/x/sys/unix From f2b690951ba874ffb43f979c1730a2577ee6fde9 Mon Sep 17 00:00:00 2001 From: therealssj Date: Sat, 28 Mar 2020 16:28:46 +0530 Subject: [PATCH 2/2] update readme --- README.md | 21 ++++++++++++++++++++- 1 file changed, 20 insertions(+), 1 deletion(-) diff --git a/README.md b/README.md index 814092d..5dda9b2 100644 --- a/README.md +++ b/README.md @@ -1 +1,20 @@ -# multicoin-wallet \ No newline at end of file +# multicoin-wallet + +- Proxy: forward requests to interal btcd / btcwallet api +- Multicoin: serve multicoin api + + +### Wallet: +The wallet struture is copied over from skycoin with some modifications. + +The available wallets are: +1) Skycoin Deterministic Wallet (Sequential Deterministic Wallet) +2) Bip44 Wallet +3) Collections Wallet +4) XPub Wallet ( Watch only wallets ) + + +> Note: The public keys for skycoin and bitcoin are compressed public keys while eth pubkeys are uncompressed. + + +