From 41032137ac438b9e2d4c28c11097ef9bccf8439e Mon Sep 17 00:00:00 2001 From: fanquake Date: Tue, 5 Mar 2024 16:53:40 +0000 Subject: [PATCH 001/656] Merge bitcoin/bitcoin#29541: test: remove file-wide interpreter.cpp ubsan suppression 217c0ce552a5d519b5cc702aba0c82514a1c449e test: remove file-wide interpreter.cpp ubsan suppression (fanquake) Pull request description: ACKs for top commit: Sjors: utACK 217c0ce552a5d519b5cc702aba0c82514a1c449e hebasto: ACK 217c0ce552a5d519b5cc702aba0c82514a1c449e. dergoegge: ACK 217c0ce552a5d519b5cc702aba0c82514a1c449e Tree-SHA512: ae0c2ff4531fdb7b0296709f66b71d4065fe3f32cbd39a44e45934a975b5cf6cf01c2f136f110753efee8e301636f7700278aed1d995b463fc025c07d586a8fa --- test/sanitizer_suppressions/ubsan | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/test/sanitizer_suppressions/ubsan b/test/sanitizer_suppressions/ubsan index 0058d775746c..79507945434f 100644 --- a/test/sanitizer_suppressions/ubsan +++ b/test/sanitizer_suppressions/ubsan @@ -51,7 +51,7 @@ unsigned-integer-overflow:hash.cpp unsigned-integer-overflow:policy/fees.cpp unsigned-integer-overflow:prevector.h unsigned-integer-overflow:pubkey.h -unsigned-integer-overflow:script/interpreter.cpp +unsigned-integer-overflow:EvalScript unsigned-integer-overflow:txmempool.cpp unsigned-integer-overflow:util/strencodings.cpp unsigned-integer-overflow:xoroshiro128plusplus.h From cfb21656c9a3026f07e234f5b5f1643b3c1fabcb Mon Sep 17 00:00:00 2001 From: fanquake Date: Fri, 20 Oct 2023 14:22:15 +0100 Subject: [PATCH 002/656] Merge bitcoin/bitcoin#28569: log: Don't log cache rebalancing in absense of a snapshot chainstate ec84f999f1408b7f1ff4498f78c33b34c30e934c log: Don't log cache rebalancing in absense of a snapshot chainstate (Fabian Jahr) Pull request description: I have noticed that this log now is always printed, even if there is no snapshot chainstate present or even was present. 
I think this is confusing to users that have never even thought about using assumeutxo since in that case the rebalancing is just ensuring the normal environment with one chainstate. So I suggest we don't log in absence of a snapshot chainstate. We could also think about rewording the message instead but I think this is simpler. ACKs for top commit: stickies-v: utACK ec84f999f1408b7f1ff4498f78c33b34c30e934c glozow: concept ACK ec84f999f1408b7f1ff4498f78c33b34c30e934c, don't have opinions other than removing confusing log theStack: utACK ec84f999f1408b7f1ff4498f78c33b34c30e934c Tree-SHA512: 30bbfc648e7c788106f78d52e47a3aa1e1874f65d13743643dc50bcf7f450d8330711ff9fdeac361722542da6051533153829c6d49033227ed315e111afc899f --- src/validation.cpp | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/validation.cpp b/src/validation.cpp index 9a87546266d2..47f22053b226 100644 --- a/src/validation.cpp +++ b/src/validation.cpp @@ -5931,8 +5931,8 @@ void ChainstateManager::MaybeRebalanceCaches() { AssertLockHeld(::cs_main); if (m_ibd_chainstate && !m_snapshot_chainstate) { - LogPrintf("[snapshot] allocating all cache to the IBD chainstate\n"); - // Allocate everything to the IBD chainstate. + // Allocate everything to the IBD chainstate. 
This will always happen + // when we are not using a snapshot m_ibd_chainstate->ResizeCoinsCaches(m_total_coinstip_cache, m_total_coinsdb_cache); } else if (m_snapshot_chainstate && !m_ibd_chainstate) { From d18beae7479dca386269c7bbe2f02cb2e515c50e Mon Sep 17 00:00:00 2001 From: PastaBot <156604295+DashCoreAutoGuix@users.noreply.github.com> Date: Tue, 30 Sep 2025 09:33:02 -0500 Subject: [PATCH 003/656] Merge bitcoin/bitcoin#29414: doc: Update translation process guide Co-authored-by: fanquake Co-authored-by: pasta --- doc/translation_process.md | 18 +----------------- 1 file changed, 1 insertion(+), 17 deletions(-) diff --git a/doc/translation_process.md b/doc/translation_process.md index 529dada5fe32..894bf6c2c61f 100644 --- a/doc/translation_process.md +++ b/doc/translation_process.md @@ -45,23 +45,7 @@ Visit the [Transifex Signup](https://app.transifex.com/signup/) page to create a You can find the Dash translation project at . ### Installing the Transifex client command-line tool -The client is used to fetch updated translations. If you are having problems, or need more details, see . - -**For Linux and Mac** - -`pip install transifex-client` - -Setup your Transifex client config as follows. Please *ignore the token field*. - -```ini -nano ~/.transifexrc - -[https://www.transifex.com] -hostname = https://www.transifex.com -password = PASSWORD -token = -username = USERNAME -``` +The client is used to fetch updated translations. Please check installation instructions and any other details at . 
**For Windows** From ad4c544000471a49c968c0d0bf6eb8173e912595 Mon Sep 17 00:00:00 2001 From: fanquake Date: Tue, 18 Apr 2023 09:07:33 +0100 Subject: [PATCH 004/656] Merge bitcoin/bitcoin#27477: test: add regression tests for #27468 (invalid URI segfaults) 6a77d290da589bd5620585def5bfc019e242e189 test: add regression tests for #27468 (invalid URI segfaults) (Sebastian Falbesoner) Pull request description: Prior to PR #27468 (commit 11422cc5720c8d73a87600de8fe8abb156db80dc) all call-sites of `GetQueryParameter(...)` in the REST module could trigger a crash. Add missing test cases for all possible code-paths as a regression test, as a foundation for possible follow-up fixes (which aim to resolve this issue in a more general and robust way). ACKs for top commit: stickies-v: ACK 6a77d290da589bd5620585def5bfc019e242e189 vasild: ACK 6a77d290da589bd5620585def5bfc019e242e189 Tree-SHA512: b5dd22d7d448f92236575ea950287259795a957a3f8e364682510c7c1ede5f9d67e7daccc5146c8d0817bcb71742d49273801574bd2bb96e44a9ae5a006ac2a7 --- test/functional/interface_rest.py | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/test/functional/interface_rest.py b/test/functional/interface_rest.py index 5f63718c4bba..a85c8f16a345 100755 --- a/test/functional/interface_rest.py +++ b/test/functional/interface_rest.py @@ -281,6 +281,11 @@ def run_test(self): assert_equal(len(json_obj), 1) # ensure that there is one header in the json response assert_equal(json_obj[0]['hash'], bb_hash) # request/response hash should be the same + # Check invalid uri (% symbol at the end of the request) + for invalid_uri in [f"/headers/{bb_hash}%", f"/blockfilterheaders/basic/{bb_hash}%", "/mempool/contents.json?%"]: + resp = self.test_rest_request(invalid_uri, ret_type=RetType.OBJ, status=400) + assert_equal(resp.read().decode('utf-8').rstrip(), "URI parsing failed, it likely contained RFC 3986 invalid characters") + # Compare with normal RPC block response rpc_block_json = self.nodes[0].getblock(bb_hash) for key in 
['hash', 'confirmations', 'height', 'version', 'merkleroot', 'time', 'nonce', 'bits', 'difficulty', 'chainwork', 'previousblockhash']: From 9e9ee3554b341fab7e19a859524a443b8bc5aac8 Mon Sep 17 00:00:00 2001 From: Kittywhiskers Van Gogh <63189531+kwvg@users.noreply.github.com> Date: Tue, 30 Sep 2025 18:56:01 +0000 Subject: [PATCH 005/656] stats: move implementation to source file, define interface Also add some basic docs for RawSender and privatize functions --- src/bench/checkblock.cpp | 6 ++- src/init.cpp | 2 +- src/stats/client.cpp | 99 +++++++++++++++++++--------------- src/stats/client.h | 49 ++++++----------- src/stats/rawsender.h | 12 +++++ src/test/util/setup_common.cpp | 2 +- 6 files changed, 90 insertions(+), 80 deletions(-) diff --git a/src/bench/checkblock.cpp b/src/bench/checkblock.cpp index b4ff0ee3373f..5e987bcefc66 100644 --- a/src/bench/checkblock.cpp +++ b/src/bench/checkblock.cpp @@ -11,6 +11,8 @@ #include #include +#include + // These are the two major time-sinks which happen after we have fully received // a block off the wire, but before we can relay the block on to peers using // compact block relay. @@ -38,8 +40,8 @@ static void DeserializeAndCheckBlockTest(benchmark::Bench& bench) ArgsManager bench_args; const auto chainParams = CreateChainParams(bench_args, CBaseChainParams::MAIN); // CheckBlock calls g_stats_client internally, we aren't using a testing setup - // so we need to do this manually. - ::g_stats_client = InitStatsClient(bench_args); + // so we need to do this manually. We can use the stub interface for this. 
+ ::g_stats_client = std::make_unique(); bench.unit("block").run([&] { CBlock block; // Note that CBlock caches its checked state, so we need to recreate it here diff --git a/src/init.cpp b/src/init.cpp index d9d2cbde48ec..2fc3606de8d7 100644 --- a/src/init.cpp +++ b/src/init.cpp @@ -1587,7 +1587,7 @@ bool AppInitMain(NodeContext& node, interfaces::BlockAndHeaderTipInfo* tip_info) // We need to initialize g_stats_client early as currently, g_stats_client is called // regardless of whether transmitting stats are desirable or not and if // g_stats_client isn't present when that attempt is made, the client will crash. - ::g_stats_client = InitStatsClient(args); + ::g_stats_client = StatsdClient::make(args); { diff --git a/src/stats/client.cpp b/src/stats/client.cpp index 7eb3c962c7dd..b473e17100dc 100644 --- a/src/stats/client.cpp +++ b/src/stats/client.cpp @@ -7,9 +7,12 @@ #include #include +#include #include +#include #include +#include #include #include @@ -27,11 +30,53 @@ static constexpr char STATSD_METRIC_COUNT[]{"c"}; static constexpr char STATSD_METRIC_GAUGE[]{"g"}; /** Characters used to denote Statsd message type as timing */ static constexpr char STATSD_METRIC_TIMING[]{"ms"}; + +class StatsdClientImpl final : public StatsdClient +{ +public: + explicit StatsdClientImpl(const std::string& host, uint16_t port, uint64_t batch_size, uint64_t interval_ms, + const std::string& prefix, const std::string& suffix); + ~StatsdClientImpl() = default; + +public: + bool dec(const std::string& key, float sample_rate) override { return count(key, -1, sample_rate); } + bool inc(const std::string& key, float sample_rate) override { return count(key, 1, sample_rate); } + bool count(const std::string& key, int64_t delta, float sample_rate) override { return _send(key, delta, STATSD_METRIC_COUNT, sample_rate); } + bool gauge(const std::string& key, int64_t value, float sample_rate) override { return _send(key, value, STATSD_METRIC_GAUGE, sample_rate); } + bool gaugeDouble(const 
std::string& key, double value, float sample_rate) override { return _send(key, value, STATSD_METRIC_GAUGE, sample_rate); } + bool timing(const std::string& key, uint64_t ms, float sample_rate) override { return _send(key, ms, STATSD_METRIC_TIMING, sample_rate); } + + bool send(const std::string& key, double value, const std::string& type, float sample_rate) override { return _send(key, value, type, sample_rate); } + bool send(const std::string& key, int32_t value, const std::string& type, float sample_rate) override { return _send(key, value, type, sample_rate); } + bool send(const std::string& key, int64_t value, const std::string& type, float sample_rate) override { return _send(key, value, type, sample_rate); } + bool send(const std::string& key, uint32_t value, const std::string& type, float sample_rate) override { return _send(key, value, type, sample_rate); } + bool send(const std::string& key, uint64_t value, const std::string& type, float sample_rate) override { return _send(key, value, type, sample_rate); } + + bool active() const override { return m_sender != nullptr; } + +private: + template + inline bool _send(const std::string& key, T1 value, const std::string& type, float sample_rate); + +private: + /* Mutex to protect PRNG */ + mutable Mutex cs; + /* PRNG used to dice-roll messages that are 0 < f < 1 */ + mutable FastRandomContext insecure_rand GUARDED_BY(cs); + + /* Broadcasts messages crafted by StatsdClient */ + std::unique_ptr m_sender{nullptr}; + + /* Phrase prepended to keys */ + const std::string m_prefix{}; + /* Phrase appended to keys */ + const std::string m_suffix{}; +}; } // anonymous namespace std::unique_ptr g_stats_client; -std::unique_ptr InitStatsClient(const ArgsManager& args) +std::unique_ptr StatsdClient::make(const ArgsManager& args) { auto sanitize_string = [](std::string string) { // Remove key delimiters from the front and back as they're added back in @@ -43,16 +88,18 @@ std::unique_ptr InitStatsClient(const ArgsManager& 
args) return string; }; - return std::make_unique(args.GetArg("-statshost", DEFAULT_STATSD_HOST), - args.GetIntArg("-statsport", DEFAULT_STATSD_PORT), - args.GetIntArg("-statsbatchsize", DEFAULT_STATSD_BATCH_SIZE), - args.GetIntArg("-statsduration", DEFAULT_STATSD_DURATION), - sanitize_string(args.GetArg("-statsprefix", DEFAULT_STATSD_PREFIX)), - sanitize_string(args.GetArg("-statssuffix", DEFAULT_STATSD_SUFFIX))); + return std::make_unique( + args.GetArg("-statshost", DEFAULT_STATSD_HOST), + args.GetIntArg("-statsport", DEFAULT_STATSD_PORT), + args.GetIntArg("-statsbatchsize", DEFAULT_STATSD_BATCH_SIZE), + args.GetIntArg("-statsduration", DEFAULT_STATSD_DURATION), + sanitize_string(args.GetArg("-statsprefix", DEFAULT_STATSD_PREFIX)), + sanitize_string(args.GetArg("-statssuffix", DEFAULT_STATSD_SUFFIX)) + ); } -StatsdClient::StatsdClient(const std::string& host, uint16_t port, uint64_t batch_size, uint64_t interval_ms, - const std::string& prefix, const std::string& suffix) : +StatsdClientImpl::StatsdClientImpl(const std::string& host, uint16_t port, uint64_t batch_size, uint64_t interval_ms, + const std::string& prefix, const std::string& suffix) : m_prefix{[prefix]() { return !prefix.empty() ? prefix + STATSD_NS_DELIMITER : prefix; }()}, m_suffix{[suffix]() { return !suffix.empty() ? 
STATSD_NS_DELIMITER + suffix : suffix; }()} { @@ -74,34 +121,8 @@ StatsdClient::StatsdClient(const std::string& host, uint16_t port, uint64_t batc LogPrintf("StatsdClient initialized to transmit stats to %s:%d\n", host, port); } -StatsdClient::~StatsdClient() {} - -bool StatsdClient::dec(const std::string& key, float sample_rate) { return count(key, -1, sample_rate); } - -bool StatsdClient::inc(const std::string& key, float sample_rate) { return count(key, 1, sample_rate); } - -bool StatsdClient::count(const std::string& key, int64_t delta, float sample_rate) -{ - return send(key, delta, STATSD_METRIC_COUNT, sample_rate); -} - -bool StatsdClient::gauge(const std::string& key, int64_t value, float sample_rate) -{ - return send(key, value, STATSD_METRIC_GAUGE, sample_rate); -} - -bool StatsdClient::gaugeDouble(const std::string& key, double value, float sample_rate) -{ - return send(key, value, STATSD_METRIC_GAUGE, sample_rate); -} - -bool StatsdClient::timing(const std::string& key, uint64_t ms, float sample_rate) -{ - return send(key, ms, STATSD_METRIC_TIMING, sample_rate); -} - template -bool StatsdClient::send(const std::string& key, T1 value, const std::string& type, float sample_rate) +inline bool StatsdClientImpl::_send(const std::string& key, T1 value, const std::string& type, float sample_rate) { static_assert(std::is_arithmetic::value, "Must specialize to an arithmetic type"); @@ -132,9 +153,3 @@ bool StatsdClient::send(const std::string& key, T1 value, const std::string& typ return true; } - -template bool StatsdClient::send(const std::string& key, double value, const std::string& type, float sample_rate); -template bool StatsdClient::send(const std::string& key, int32_t value, const std::string& type, float sample_rate); -template bool StatsdClient::send(const std::string& key, int64_t value, const std::string& type, float sample_rate); -template bool StatsdClient::send(const std::string& key, uint32_t value, const std::string& type, float sample_rate); 
-template bool StatsdClient::send(const std::string& key, uint64_t value, const std::string& type, float sample_rate); diff --git a/src/stats/client.h b/src/stats/client.h index e5e8fe7c9a98..7cd819c4dfcd 100644 --- a/src/stats/client.h +++ b/src/stats/client.h @@ -7,14 +7,11 @@ #ifndef BITCOIN_STATS_CLIENT_H #define BITCOIN_STATS_CLIENT_H -#include -#include - +#include #include #include class ArgsManager; -class RawSender; /** Default port used to connect to a Statsd server */ static constexpr uint16_t DEFAULT_STATSD_PORT{8125}; @@ -39,44 +36,28 @@ static constexpr int MAX_STATSD_PERIOD{60 * 60}; class StatsdClient { public: - explicit StatsdClient(const std::string& host, uint16_t port, uint64_t batch_size, uint64_t interval_ms, - const std::string& prefix, const std::string& suffix); - ~StatsdClient(); + static std::unique_ptr make(const ArgsManager& args); + virtual ~StatsdClient() = default; -public: /* Statsd-defined APIs */ - bool dec(const std::string& key, float sample_rate = 1.f); - bool inc(const std::string& key, float sample_rate = 1.f); - bool count(const std::string& key, int64_t delta, float sample_rate = 1.f); - bool gauge(const std::string& key, int64_t value, float sample_rate = 1.f); - bool gaugeDouble(const std::string& key, double value, float sample_rate = 1.f); - bool timing(const std::string& key, uint64_t ms, float sample_rate = 1.f); + virtual bool dec(const std::string& key, float sample_rate = 1.f) { return false; } + virtual bool inc(const std::string& key, float sample_rate = 1.f) { return false; } + virtual bool count(const std::string& key, int64_t delta, float sample_rate = 1.f) { return false; } + virtual bool gauge(const std::string& key, int64_t value, float sample_rate = 1.f) { return false; } + virtual bool gaugeDouble(const std::string& key, double value, float sample_rate = 1.f) { return false; } + virtual bool timing(const std::string& key, uint64_t ms, float sample_rate = 1.f) { return false; } /* Statsd-compatible APIs 
*/ - template - bool send(const std::string& key, T1 value, const std::string& type, float sample_rate = 1.f); + virtual bool send(const std::string& key, double value, const std::string& type, float sample_rate = 1.f) { return false; } + virtual bool send(const std::string& key, int32_t value, const std::string& type, float sample_rate = 1.f) { return false; } + virtual bool send(const std::string& key, int64_t value, const std::string& type, float sample_rate = 1.f) { return false; } + virtual bool send(const std::string& key, uint32_t value, const std::string& type, float sample_rate = 1.f) { return false; } + virtual bool send(const std::string& key, uint64_t value, const std::string& type, float sample_rate = 1.f) { return false; } /* Check if a StatsdClient instance is ready to send messages */ - bool active() const { return m_sender != nullptr; } - -private: - /* Mutex to protect PRNG */ - mutable Mutex cs; - /* PRNG used to dice-roll messages that are 0 < f < 1 */ - mutable FastRandomContext insecure_rand GUARDED_BY(cs); - - /* Broadcasts messages crafted by StatsdClient */ - std::unique_ptr m_sender{nullptr}; - - /* Phrase prepended to keys */ - const std::string m_prefix{""}; - /* Phrase appended to keys */ - const std::string m_suffix{""}; + virtual bool active() const { return false; } }; -/** Parses arguments and constructs a StatsdClient instance */ -std::unique_ptr InitStatsClient(const ArgsManager& args); - /** Global smart pointer containing StatsdClient instance */ extern std::unique_ptr g_stats_client; diff --git a/src/stats/rawsender.h b/src/stats/rawsender.h index 65941f80dc5a..c6cdc0882018 100644 --- a/src/stats/rawsender.h +++ b/src/stats/rawsender.h @@ -63,14 +63,26 @@ class RawSender RawSender& operator=(const RawSender&) = delete; RawSender(RawSender&&) = delete; + //! Request a message to be sent based on configuration (queueing, batching) std::optional Send(const RawMessage& msg) EXCLUSIVE_LOCKS_REQUIRED(!cs); + +private: + //! 
Send a message directly using ::send{,to}() std::optional SendDirectly(const RawMessage& msg); + //! Get target server address as string std::string ToStringHostPort() const; + //! Add message to queue void QueueAdd(const RawMessage& msg) EXCLUSIVE_LOCKS_REQUIRED(!cs); + + //! Send all messages in queue of RawSender entity and flush it void QueueFlush() EXCLUSIVE_LOCKS_REQUIRED(!cs); + + //! Send all messages in given queue and flush it void QueueFlush(std::deque& queue); + + //! Worker thread function if queueing is requested void QueueThreadMain() EXCLUSIVE_LOCKS_REQUIRED(!cs); private: diff --git a/src/test/util/setup_common.cpp b/src/test/util/setup_common.cpp index 423abe4860d3..9f99263e82a8 100644 --- a/src/test/util/setup_common.cpp +++ b/src/test/util/setup_common.cpp @@ -188,7 +188,7 @@ BasicTestingSetup::BasicTestingSetup(const std::string& chainName, const std::ve SetupNetworking(); InitSignatureCache(); InitScriptExecutionCache(); - ::g_stats_client = InitStatsClient(*m_node.args); + ::g_stats_client = StatsdClient::make(*m_node.args); m_node.chain = interfaces::MakeChain(m_node); m_node.netgroupman = std::make_unique(/*asmap=*/std::vector()); From 1508f4a82810d4dcb267e74f4e7abba7f1a99265 Mon Sep 17 00:00:00 2001 From: Kittywhiskers Van Gogh <63189531+kwvg@users.noreply.github.com> Date: Mon, 1 Sep 2025 03:30:04 +0000 Subject: [PATCH 006/656] stats: use string_view for fixed values --- src/stats/client.cpp | 28 ++++++++++++++-------------- src/stats/client.h | 23 ++++++++++++----------- 2 files changed, 26 insertions(+), 25 deletions(-) diff --git a/src/stats/client.cpp b/src/stats/client.cpp index b473e17100dc..c5664606ea1d 100644 --- a/src/stats/client.cpp +++ b/src/stats/client.cpp @@ -39,24 +39,24 @@ class StatsdClientImpl final : public StatsdClient ~StatsdClientImpl() = default; public: - bool dec(const std::string& key, float sample_rate) override { return count(key, -1, sample_rate); } - bool inc(const std::string& key, float sample_rate) 
override { return count(key, 1, sample_rate); } - bool count(const std::string& key, int64_t delta, float sample_rate) override { return _send(key, delta, STATSD_METRIC_COUNT, sample_rate); } - bool gauge(const std::string& key, int64_t value, float sample_rate) override { return _send(key, value, STATSD_METRIC_GAUGE, sample_rate); } - bool gaugeDouble(const std::string& key, double value, float sample_rate) override { return _send(key, value, STATSD_METRIC_GAUGE, sample_rate); } - bool timing(const std::string& key, uint64_t ms, float sample_rate) override { return _send(key, ms, STATSD_METRIC_TIMING, sample_rate); } - - bool send(const std::string& key, double value, const std::string& type, float sample_rate) override { return _send(key, value, type, sample_rate); } - bool send(const std::string& key, int32_t value, const std::string& type, float sample_rate) override { return _send(key, value, type, sample_rate); } - bool send(const std::string& key, int64_t value, const std::string& type, float sample_rate) override { return _send(key, value, type, sample_rate); } - bool send(const std::string& key, uint32_t value, const std::string& type, float sample_rate) override { return _send(key, value, type, sample_rate); } - bool send(const std::string& key, uint64_t value, const std::string& type, float sample_rate) override { return _send(key, value, type, sample_rate); } + bool dec(std::string_view key, float sample_rate) override { return count(key, -1, sample_rate); } + bool inc(std::string_view key, float sample_rate) override { return count(key, 1, sample_rate); } + bool count(std::string_view key, int64_t delta, float sample_rate) override { return _send(key, delta, STATSD_METRIC_COUNT, sample_rate); } + bool gauge(std::string_view key, int64_t value, float sample_rate) override { return _send(key, value, STATSD_METRIC_GAUGE, sample_rate); } + bool gaugeDouble(std::string_view key, double value, float sample_rate) override { return _send(key, value, 
STATSD_METRIC_GAUGE, sample_rate); } + bool timing(std::string_view key, uint64_t ms, float sample_rate) override { return _send(key, ms, STATSD_METRIC_TIMING, sample_rate); } + + bool send(std::string_view key, double value, std::string_view type, float sample_rate) override { return _send(key, value, type, sample_rate); } + bool send(std::string_view key, int32_t value, std::string_view type, float sample_rate) override { return _send(key, value, type, sample_rate); } + bool send(std::string_view key, int64_t value, std::string_view type, float sample_rate) override { return _send(key, value, type, sample_rate); } + bool send(std::string_view key, uint32_t value, std::string_view type, float sample_rate) override { return _send(key, value, type, sample_rate); } + bool send(std::string_view key, uint64_t value, std::string_view type, float sample_rate) override { return _send(key, value, type, sample_rate); } bool active() const override { return m_sender != nullptr; } private: template - inline bool _send(const std::string& key, T1 value, const std::string& type, float sample_rate); + inline bool _send(std::string_view key, T1 value, std::string_view type, float sample_rate); private: /* Mutex to protect PRNG */ @@ -122,7 +122,7 @@ StatsdClientImpl::StatsdClientImpl(const std::string& host, uint16_t port, uint6 } template -inline bool StatsdClientImpl::_send(const std::string& key, T1 value, const std::string& type, float sample_rate) +inline bool StatsdClientImpl::_send(std::string_view key, T1 value, std::string_view type, float sample_rate) { static_assert(std::is_arithmetic::value, "Must specialize to an arithmetic type"); diff --git a/src/stats/client.h b/src/stats/client.h index 7cd819c4dfcd..b7b4440b6baf 100644 --- a/src/stats/client.h +++ b/src/stats/client.h @@ -10,6 +10,7 @@ #include #include #include +#include class ArgsManager; @@ -40,19 +41,19 @@ class StatsdClient virtual ~StatsdClient() = default; /* Statsd-defined APIs */ - virtual bool dec(const 
std::string& key, float sample_rate = 1.f) { return false; } - virtual bool inc(const std::string& key, float sample_rate = 1.f) { return false; } - virtual bool count(const std::string& key, int64_t delta, float sample_rate = 1.f) { return false; } - virtual bool gauge(const std::string& key, int64_t value, float sample_rate = 1.f) { return false; } - virtual bool gaugeDouble(const std::string& key, double value, float sample_rate = 1.f) { return false; } - virtual bool timing(const std::string& key, uint64_t ms, float sample_rate = 1.f) { return false; } + virtual bool dec(std::string_view key, float sample_rate = 1.f) { return false; } + virtual bool inc(std::string_view key, float sample_rate = 1.f) { return false; } + virtual bool count(std::string_view key, int64_t delta, float sample_rate = 1.f) { return false; } + virtual bool gauge(std::string_view key, int64_t value, float sample_rate = 1.f) { return false; } + virtual bool gaugeDouble(std::string_view key, double value, float sample_rate = 1.f) { return false; } + virtual bool timing(std::string_view key, uint64_t ms, float sample_rate = 1.f) { return false; } /* Statsd-compatible APIs */ - virtual bool send(const std::string& key, double value, const std::string& type, float sample_rate = 1.f) { return false; } - virtual bool send(const std::string& key, int32_t value, const std::string& type, float sample_rate = 1.f) { return false; } - virtual bool send(const std::string& key, int64_t value, const std::string& type, float sample_rate = 1.f) { return false; } - virtual bool send(const std::string& key, uint32_t value, const std::string& type, float sample_rate = 1.f) { return false; } - virtual bool send(const std::string& key, uint64_t value, const std::string& type, float sample_rate = 1.f) { return false; } + virtual bool send(std::string_view key, double value, std::string_view type, float sample_rate = 1.f) { return false; } + virtual bool send(std::string_view key, int32_t value, std::string_view 
type, float sample_rate = 1.f) { return false; } + virtual bool send(std::string_view key, int64_t value, std::string_view type, float sample_rate = 1.f) { return false; } + virtual bool send(std::string_view key, uint32_t value, std::string_view type, float sample_rate = 1.f) { return false; } + virtual bool send(std::string_view key, uint64_t value, std::string_view type, float sample_rate = 1.f) { return false; } /* Check if a StatsdClient instance is ready to send messages */ virtual bool active() const { return false; } From 8d7e74fb8f1e86b568e91ce5f7a3780e7b2b61dc Mon Sep 17 00:00:00 2001 From: Kittywhiskers Van Gogh <63189531+kwvg@users.noreply.github.com> Date: Tue, 30 Sep 2025 18:38:25 +0000 Subject: [PATCH 007/656] stats: bubble errors up, halt init if stats client reports errors Also we should initialize the Statsd client *after* setting g_socket_events_mode or risk a crash when we try to establish a connection later. --- src/init.cpp | 16 ++++++++---- src/stats/client.cpp | 46 ++++++++++++++++++---------------- src/stats/client.h | 4 ++- src/stats/rawsender.cpp | 12 ++++----- src/stats/rawsender.h | 5 ++-- src/test/util/setup_common.cpp | 8 +++++- 6 files changed, 54 insertions(+), 37 deletions(-) diff --git a/src/init.cpp b/src/init.cpp index 2fc3606de8d7..7411cd5b4168 100644 --- a/src/init.cpp +++ b/src/init.cpp @@ -1584,11 +1584,6 @@ bool AppInitMain(NodeContext& node, interfaces::BlockAndHeaderTipInfo* tip_info) fDiscover = args.GetBoolArg("-discover", true); const bool ignores_incoming_txs{args.GetBoolArg("-blocksonly", DEFAULT_BLOCKSONLY)}; - // We need to initialize g_stats_client early as currently, g_stats_client is called - // regardless of whether transmitting stats are desirable or not and if - // g_stats_client isn't present when that attempt is made, the client will crash. 
- ::g_stats_client = StatsdClient::make(args); - { // Read asmap file if configured @@ -1631,6 +1626,17 @@ bool AppInitMain(NodeContext& node, interfaces::BlockAndHeaderTipInfo* tip_info) return InitError(strprintf(_("Invalid -socketevents ('%s') specified. Only these modes are supported: %s"), sem_str, GetSupportedSocketEventsStr())); } + // We need to initialize g_stats_client early as currently, g_stats_client is called + // regardless of whether transmitting stats are desirable or not and if + // g_stats_client isn't present when that attempt is made, the client will crash. + { + auto stats_client = StatsdClient::make(args); + if (!stats_client) { + return InitError(_("Cannot init Statsd client") + Untranslated(" (") + util::ErrorString(stats_client) + Untranslated(")")); + } + ::g_stats_client = std::move(*stats_client); + } + assert(!node.banman); node.banman = std::make_unique(gArgs.GetDataDirNet() / "banlist", &uiInterface, args.GetIntArg("-bantime", DEFAULT_MISBEHAVING_BANTIME)); assert(!node.connman); diff --git a/src/stats/client.cpp b/src/stats/client.cpp index c5664606ea1d..d46f24064a47 100644 --- a/src/stats/client.cpp +++ b/src/stats/client.cpp @@ -10,7 +10,9 @@ #include #include #include +#include #include +#include #include #include @@ -35,7 +37,7 @@ class StatsdClientImpl final : public StatsdClient { public: explicit StatsdClientImpl(const std::string& host, uint16_t port, uint64_t batch_size, uint64_t interval_ms, - const std::string& prefix, const std::string& suffix); + const std::string& prefix, const std::string& suffix, std::optional& error); ~StatsdClientImpl() = default; public: @@ -76,8 +78,14 @@ class StatsdClientImpl final : public StatsdClient std::unique_ptr g_stats_client; -std::unique_ptr StatsdClient::make(const ArgsManager& args) +util::Result> StatsdClient::make(const ArgsManager& args) { + const auto host = args.GetArg("-statshost", DEFAULT_STATSD_HOST); + if (host.empty()) { + LogPrintf("Transmitting stats are disabled, will 
not init Statsd client\n"); + return std::make_unique(); + } + auto sanitize_string = [](std::string string) { // Remove key delimiters from the front and back as they're added back in // the constructor @@ -88,32 +96,30 @@ std::unique_ptr StatsdClient::make(const ArgsManager& args) return string; }; - return std::make_unique( - args.GetArg("-statshost", DEFAULT_STATSD_HOST), - args.GetIntArg("-statsport", DEFAULT_STATSD_PORT), + std::optional error_opt; + auto statsd_ptr = std::make_unique( + host, args.GetIntArg("-statsport", DEFAULT_STATSD_PORT), args.GetIntArg("-statsbatchsize", DEFAULT_STATSD_BATCH_SIZE), args.GetIntArg("-statsduration", DEFAULT_STATSD_DURATION), sanitize_string(args.GetArg("-statsprefix", DEFAULT_STATSD_PREFIX)), - sanitize_string(args.GetArg("-statssuffix", DEFAULT_STATSD_SUFFIX)) - ); + sanitize_string(args.GetArg("-statssuffix", DEFAULT_STATSD_SUFFIX)), error_opt); + if (error_opt.has_value()) { + statsd_ptr.reset(); + return util::Error{error_opt.value()}; + } + return {std::move(statsd_ptr)}; } StatsdClientImpl::StatsdClientImpl(const std::string& host, uint16_t port, uint64_t batch_size, uint64_t interval_ms, - const std::string& prefix, const std::string& suffix) : + const std::string& prefix, const std::string& suffix, + std::optional& error) : m_prefix{[prefix]() { return !prefix.empty() ? prefix + STATSD_NS_DELIMITER : prefix; }()}, m_suffix{[suffix]() { return !suffix.empty() ? 
STATSD_NS_DELIMITER + suffix : suffix; }()} { - if (host.empty()) { - LogPrintf("Transmitting stats are disabled, will not init StatsdClient\n"); - return; - } - - std::optional error_opt; m_sender = std::make_unique(host, port, std::make_pair(batch_size, static_cast(STATSD_MSG_DELIMITER)), - interval_ms, error_opt); - if (error_opt.has_value()) { - LogPrintf("ERROR: %s, cannot initialize StatsdClient.\n", error_opt.value()); + interval_ms, error); + if (error.has_value()) { m_sender.reset(); return; } @@ -126,10 +132,6 @@ inline bool StatsdClientImpl::_send(std::string_view key, T1 value, std::string_ { static_assert(std::is_arithmetic::value, "Must specialize to an arithmetic type"); - if (!m_sender) { - return false; - } - // Determine if we should send the message at all but claim that we did even if we don't sample_rate = std::clamp(sample_rate, 0.f, 1.f); bool always_send = std::fabs(sample_rate - 1.f) < EPSILON; @@ -146,7 +148,7 @@ inline bool StatsdClientImpl::_send(std::string_view key, T1 value, std::string_ } // Send it and report an error if we encounter one - if (auto error_opt = m_sender->Send(msg); error_opt.has_value()) { + if (auto error_opt = Assert(m_sender)->Send(msg); error_opt.has_value()) { LogPrintf("ERROR: %s.\n", error_opt.value()); return false; } diff --git a/src/stats/client.h b/src/stats/client.h index b7b4440b6baf..b9484f8f5cb9 100644 --- a/src/stats/client.h +++ b/src/stats/client.h @@ -7,6 +7,8 @@ #ifndef BITCOIN_STATS_CLIENT_H #define BITCOIN_STATS_CLIENT_H +#include + #include #include #include @@ -37,7 +39,7 @@ static constexpr int MAX_STATSD_PERIOD{60 * 60}; class StatsdClient { public: - static std::unique_ptr make(const ArgsManager& args); + static util::Result> make(const ArgsManager& args); virtual ~StatsdClient() = default; /* Statsd-defined APIs */ diff --git a/src/stats/rawsender.cpp b/src/stats/rawsender.cpp index 8b56875f2fab..3f72e2207d10 100644 --- a/src/stats/rawsender.cpp +++ b/src/stats/rawsender.cpp @@ -12,34 
+12,34 @@ #include RawSender::RawSender(const std::string& host, uint16_t port, std::pair batching_opts, - uint64_t interval_ms, std::optional& error) : + uint64_t interval_ms, std::optional& error) : m_host{host}, m_port{port}, m_batching_opts{batching_opts}, m_interval_ms{interval_ms} { if (host.empty()) { - error = "No host specified"; + error = _("No host specified"); return; } if (auto netaddr = LookupHost(m_host, /*fAllowLookup=*/true); netaddr.has_value()) { if (!netaddr->IsIPv4()) { - error = strprintf("Host %s on unsupported network", m_host); + error = strprintf(_("Host %s on unsupported network"), m_host); return; } if (!CService(*netaddr, port).GetSockAddr(reinterpret_cast(&m_server.first), &m_server.second)) { - error = strprintf("Cannot get socket address for %s", m_host); + error = strprintf(_("Cannot get socket address for %s"), m_host); return; } } else { - error = strprintf("Unable to lookup host %s", m_host); + error = strprintf(_("Unable to lookup host %s"), m_host); return; } SOCKET hSocket = ::socket(AF_INET, SOCK_DGRAM, IPPROTO_UDP); if (hSocket == INVALID_SOCKET) { - error = strprintf("Cannot create socket (socket() returned error %s)", NetworkErrorString(WSAGetLastError())); + error = strprintf(_("Cannot create socket (socket() returned error %s)"), NetworkErrorString(WSAGetLastError())); return; } m_sock = std::make_unique(hSocket); diff --git a/src/stats/rawsender.h b/src/stats/rawsender.h index c6cdc0882018..911e5d9a05a3 100644 --- a/src/stats/rawsender.h +++ b/src/stats/rawsender.h @@ -9,6 +9,7 @@ #include #include #include +#include #include #include @@ -55,8 +56,8 @@ struct RawMessage : public std::vector class RawSender { public: - RawSender(const std::string& host, uint16_t port, std::pair batching_opts, - uint64_t interval_ms, std::optional& error); + RawSender(const std::string& host, uint16_t port, std::pair batching_opts, uint64_t interval_ms, + std::optional& error); ~RawSender(); RawSender(const RawSender&) = delete; diff 
--git a/src/test/util/setup_common.cpp b/src/test/util/setup_common.cpp index 9f99263e82a8..7942e3c2a57f 100644 --- a/src/test/util/setup_common.cpp +++ b/src/test/util/setup_common.cpp @@ -188,7 +188,6 @@ BasicTestingSetup::BasicTestingSetup(const std::string& chainName, const std::ve SetupNetworking(); InitSignatureCache(); InitScriptExecutionCache(); - ::g_stats_client = StatsdClient::make(*m_node.args); m_node.chain = interfaces::MakeChain(m_node); m_node.netgroupman = std::make_unique(/*asmap=*/std::vector()); @@ -203,6 +202,13 @@ BasicTestingSetup::BasicTestingSetup(const std::string& chainName, const std::ve strprintf("Invalid -socketevents ('%s') specified. Only these modes are supported: %s", sem_str, GetSupportedSocketEventsStr())); } + { + auto stats_client = StatsdClient::make(*m_node.args); + if (!stats_client) { + throw std::runtime_error{strprintf("Cannot init Statsd client (%s)", util::ErrorString(stats_client).original)}; + } + ::g_stats_client = std::move(*stats_client); + } m_node.connman = std::make_unique(0x1337, 0x1337, *m_node.addrman, *m_node.netgroupman); // Deterministic randomness for tests. 
From fced9f66c7d37adfc85e75057cff1ae67aa0752c Mon Sep 17 00:00:00 2001 From: Kittywhiskers Van Gogh <63189531+kwvg@users.noreply.github.com> Date: Tue, 30 Sep 2025 18:56:24 +0000 Subject: [PATCH 008/656] stats: add stricter validation for arguments --- src/stats/client.cpp | 21 ++++++++++++++++++--- 1 file changed, 18 insertions(+), 3 deletions(-) diff --git a/src/stats/client.cpp b/src/stats/client.cpp index d46f24064a47..2af9b5111f76 100644 --- a/src/stats/client.cpp +++ b/src/stats/client.cpp @@ -16,6 +16,7 @@ #include #include +#include #include namespace { @@ -86,6 +87,22 @@ util::Result> StatsdClient::make(const ArgsManager return std::make_unique(); } + const int64_t batch_size = args.GetIntArg("-statsbatchsize", DEFAULT_STATSD_BATCH_SIZE); + if (batch_size < 0) { + return util::Error{_("-statsbatchsize cannot be configured with a negative value.")}; + } + + const int64_t interval_ms = args.GetIntArg("-statsduration", DEFAULT_STATSD_DURATION); + if (interval_ms < 0) { + return util::Error{_("-statsduration cannot be configured with a negative value.")}; + } + + const int64_t port = args.GetIntArg("-statsport", DEFAULT_STATSD_PORT); + if (port < 1 || port > std::numeric_limits::max()) { + return util::Error{strprintf(_("Port must be between %d and %d, supplied %d"), 1, + std::numeric_limits::max(), port)}; + } + auto sanitize_string = [](std::string string) { // Remove key delimiters from the front and back as they're added back in // the constructor @@ -98,9 +115,7 @@ util::Result> StatsdClient::make(const ArgsManager std::optional error_opt; auto statsd_ptr = std::make_unique( - host, args.GetIntArg("-statsport", DEFAULT_STATSD_PORT), - args.GetIntArg("-statsbatchsize", DEFAULT_STATSD_BATCH_SIZE), - args.GetIntArg("-statsduration", DEFAULT_STATSD_DURATION), + host, port, batch_size, interval_ms, sanitize_string(args.GetArg("-statsprefix", DEFAULT_STATSD_PREFIX)), sanitize_string(args.GetArg("-statssuffix", DEFAULT_STATSD_SUFFIX)), error_opt); if 
(error_opt.has_value()) { From 131d536f27997a3218c60b007301516f76a800e4 Mon Sep 17 00:00:00 2001 From: Kittywhiskers Van Gogh <63189531+kwvg@users.noreply.github.com> Date: Fri, 5 Sep 2025 10:28:18 +0000 Subject: [PATCH 009/656] stats: use `bilingual_str` for RawSender error messages --- src/stats/client.cpp | 2 +- src/stats/rawsender.cpp | 8 ++++---- src/stats/rawsender.h | 4 ++-- 3 files changed, 7 insertions(+), 7 deletions(-) diff --git a/src/stats/client.cpp b/src/stats/client.cpp index 2af9b5111f76..5e2f61ae5c28 100644 --- a/src/stats/client.cpp +++ b/src/stats/client.cpp @@ -164,7 +164,7 @@ inline bool StatsdClientImpl::_send(std::string_view key, T1 value, std::string_ // Send it and report an error if we encounter one if (auto error_opt = Assert(m_sender)->Send(msg); error_opt.has_value()) { - LogPrintf("ERROR: %s.\n", error_opt.value()); + LogPrintf("ERROR: %s.\n", error_opt->original); return false; } diff --git a/src/stats/rawsender.cpp b/src/stats/rawsender.cpp index 3f72e2207d10..f849682d70c0 100644 --- a/src/stats/rawsender.cpp +++ b/src/stats/rawsender.cpp @@ -68,7 +68,7 @@ RawSender::~RawSender() m_host, m_port, m_successes, m_failures); } -std::optional RawSender::Send(const RawMessage& msg) +std::optional RawSender::Send(const RawMessage& msg) { // If there is a thread, append to queue if (m_thread.joinable()) { @@ -79,11 +79,11 @@ std::optional RawSender::Send(const RawMessage& msg) return SendDirectly(msg); } -std::optional RawSender::SendDirectly(const RawMessage& msg) +std::optional RawSender::SendDirectly(const RawMessage& msg) { if (!m_sock) { m_failures++; - return "Socket not initialized, cannot send message"; + return _("Socket not initialized, cannot send message"); } if (::sendto(m_sock->Get(), reinterpret_cast(msg.data()), @@ -94,7 +94,7 @@ std::optional RawSender::SendDirectly(const RawMessage& msg) #endif // WIN32 /*flags=*/0, reinterpret_cast(&m_server.first), m_server.second) == SOCKET_ERROR) { m_failures++; - return 
strprintf("Unable to send message to %s (sendto() returned error %s)", this->ToStringHostPort(), + return strprintf(_("Unable to send message to %s (::sendto() returned error %s)"), this->ToStringHostPort(), NetworkErrorString(WSAGetLastError())); } diff --git a/src/stats/rawsender.h b/src/stats/rawsender.h index 911e5d9a05a3..fc78ff650772 100644 --- a/src/stats/rawsender.h +++ b/src/stats/rawsender.h @@ -65,11 +65,11 @@ class RawSender RawSender(RawSender&&) = delete; //! Request a message to be sent based on configuration (queueing, batching) - std::optional Send(const RawMessage& msg) EXCLUSIVE_LOCKS_REQUIRED(!cs); + std::optional Send(const RawMessage& msg) EXCLUSIVE_LOCKS_REQUIRED(!cs); private: //! Send a message directly using ::send{,to}() - std::optional SendDirectly(const RawMessage& msg); + std::optional SendDirectly(const RawMessage& msg); //! Get target server address as string std::string ToStringHostPort() const; From 3640071485312397cd36326ce8c4a5b9734b08fe Mon Sep 17 00:00:00 2001 From: Kittywhiskers Van Gogh <63189531+kwvg@users.noreply.github.com> Date: Wed, 16 Oct 2024 20:10:37 +0000 Subject: [PATCH 010/656] stats: extend connection support to IPv6 --- src/stats/rawsender.cpp | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/stats/rawsender.cpp b/src/stats/rawsender.cpp index f849682d70c0..74647eed131e 100644 --- a/src/stats/rawsender.cpp +++ b/src/stats/rawsender.cpp @@ -24,7 +24,7 @@ RawSender::RawSender(const std::string& host, uint16_t port, std::pairIsIPv4()) { + if (!netaddr->IsIPv4() && !netaddr->IsIPv6()) { error = strprintf(_("Host %s on unsupported network"), m_host); return; } @@ -37,7 +37,7 @@ RawSender::RawSender(const std::string& host, uint16_t port, std::pair(&m_server.first)->sa_family, SOCK_DGRAM, IPPROTO_UDP); if (hSocket == INVALID_SOCKET) { error = strprintf(_("Cannot create socket (socket() returned error %s)"), NetworkErrorString(WSAGetLastError())); return; From 
592aa0408327ae4050f768750225d5922223e3af Mon Sep 17 00:00:00 2001 From: Kittywhiskers Van Gogh <63189531+kwvg@users.noreply.github.com> Date: Tue, 9 Sep 2025 15:58:51 +0000 Subject: [PATCH 011/656] stats: add support for URLs in `statshost`, deprecate `statsport` --- src/init.cpp | 2 +- src/stats/client.cpp | 60 ++++++++++++++++++++++++++++++++++++++++---- src/stats/client.h | 2 -- 3 files changed, 56 insertions(+), 8 deletions(-) diff --git a/src/init.cpp b/src/init.cpp index 7411cd5b4168..25b2ca72090c 100644 --- a/src/init.cpp +++ b/src/init.cpp @@ -783,7 +783,7 @@ void SetupServerArgs(ArgsManager& argsman) argsman.AddArg("-statsbatchsize=", strprintf("Specify the size of each batch of stats messages (default: %d)", DEFAULT_STATSD_BATCH_SIZE), ArgsManager::ALLOW_ANY, OptionsCategory::STATSD); argsman.AddArg("-statsduration=", strprintf("Specify the number of milliseconds between stats messages (default: %d)", DEFAULT_STATSD_DURATION), ArgsManager::ALLOW_ANY, OptionsCategory::STATSD); argsman.AddArg("-statshost=", strprintf("Specify statsd host (default: %s)", DEFAULT_STATSD_HOST), ArgsManager::ALLOW_ANY, OptionsCategory::STATSD); - argsman.AddArg("-statsport=", strprintf("Specify statsd port (default: %u)", DEFAULT_STATSD_PORT), ArgsManager::ALLOW_ANY, OptionsCategory::STATSD); + hidden_args.emplace_back("-statsport"); argsman.AddArg("-statsperiod=", strprintf("Specify the number of seconds between periodic measurements (default: %d)", DEFAULT_STATSD_PERIOD), ArgsManager::ALLOW_ANY, OptionsCategory::STATSD); argsman.AddArg("-statsprefix=", strprintf("Specify an optional string prepended to every stats key (default: %s)", DEFAULT_STATSD_PREFIX), ArgsManager::ALLOW_ANY, OptionsCategory::STATSD); argsman.AddArg("-statssuffix=", strprintf("Specify an optional string appended to every stats key (default: %s)", DEFAULT_STATSD_SUFFIX), ArgsManager::ALLOW_ANY, OptionsCategory::STATSD); diff --git a/src/stats/client.cpp b/src/stats/client.cpp index 
5e2f61ae5c28..d0e3a938e1e1 100644 --- a/src/stats/client.cpp +++ b/src/stats/client.cpp @@ -11,6 +11,7 @@ #include #include #include +#include #include #include @@ -22,6 +23,11 @@ namespace { /** Threshold below which a value is considered effectively zero */ static constexpr float EPSILON{0.0001f}; +/** Delimiter segmenting scheme from the rest of the URL */ +static constexpr std::string_view URL_SCHEME_DELIMITER{"://"}; + +/** Default port used to connect to a Statsd server */ +static constexpr uint16_t DEFAULT_STATSD_PORT{8125}; /** Delimiter segmenting two fully formed Statsd messages */ static constexpr char STATSD_MSG_DELIMITER{'\n'}; @@ -81,7 +87,7 @@ std::unique_ptr g_stats_client; util::Result> StatsdClient::make(const ArgsManager& args) { - const auto host = args.GetArg("-statshost", DEFAULT_STATSD_HOST); + auto host = args.GetArg("-statshost", DEFAULT_STATSD_HOST); if (host.empty()) { LogPrintf("Transmitting stats are disabled, will not init Statsd client\n"); return std::make_unique(); @@ -97,10 +103,54 @@ util::Result> StatsdClient::make(const ArgsManager return util::Error{_("-statsduration cannot be configured with a negative value.")}; } - const int64_t port = args.GetIntArg("-statsport", DEFAULT_STATSD_PORT); - if (port < 1 || port > std::numeric_limits::max()) { - return util::Error{strprintf(_("Port must be between %d and %d, supplied %d"), 1, - std::numeric_limits::max(), port)}; + auto port_arg = args.GetIntArg("-statsport", DEFAULT_STATSD_PORT); + if (args.IsArgSet("-statsport")) { + // Port range validation if -statsport is specified. + if (port_arg < 1 || port_arg > std::numeric_limits::max()) { + return util::Error{strprintf(_("Port must be between %d and %d, supplied %d"), 1, + std::numeric_limits::max(), port_arg)}; + } + } + uint16_t port = static_cast(port_arg); + + // Could be a URL, try to parse it. 
+ const size_t scheme_idx{host.find(URL_SCHEME_DELIMITER)}; + if (scheme_idx != std::string::npos) { + // Parse the scheme and trim it out of the URL if we succeed + if (scheme_idx == 0) { + return util::Error{_("No text before the scheme delimiter, malformed URL")}; + } + std::string scheme{ToLower(host.substr(/*pos=*/0, scheme_idx))}; + if (scheme != "udp") { + return util::Error{_("Unsupported URL scheme, must begin with udp://")}; + } + host = host.substr(scheme_idx + URL_SCHEME_DELIMITER.length()); + + // Strip trailing slashes and parse the port + const size_t colon_idx{host.rfind(':')}; + if (colon_idx != std::string::npos) { + // Remove all forward slashes found after the port delimiter (colon) + host = std::string( + host.begin(), host.end() - [&colon_idx, &host]() { + const size_t slash_idx{host.find('/', /*pos=*/colon_idx + 1)}; + return slash_idx != std::string::npos ? host.length() - slash_idx : 0; + }()); + uint16_t port_url{0}; + SplitHostPort(host, port_url, host); + if (port_url != 0) { + if (args.IsArgSet("-statsport")) { + LogPrintf("%s: Supplied URL with port, ignoring -statsport\n", __func__); + } + port = port_url; + } + } else { + // There was no port specified, remove everything after the first forward slash + host = host.substr(/*pos=*/0, host.find("/")); + } + + if (host.empty()) { + return util::Error{_("No host specified, malformed URL")}; + } } auto sanitize_string = [](std::string string) { diff --git a/src/stats/client.h b/src/stats/client.h index b9484f8f5cb9..dad20111d9c7 100644 --- a/src/stats/client.h +++ b/src/stats/client.h @@ -16,8 +16,6 @@ class ArgsManager; -/** Default port used to connect to a Statsd server */ -static constexpr uint16_t DEFAULT_STATSD_PORT{8125}; /** Default host assumed to be running a Statsd server */ static const std::string DEFAULT_STATSD_HOST{""}; /** Default prefix prepended to Statsd message keys */ From b50c7f211e16778169fedc35cfe8b97906fd061a Mon Sep 17 00:00:00 2001 From: Kittywhiskers Van Gogh 
<63189531+kwvg@users.noreply.github.com> Date: Tue, 30 Sep 2025 10:37:03 +0000 Subject: [PATCH 012/656] stats: add functional test for statsd reporting --- test/functional/feature_stats.py | 141 +++++++++++++++++++++++++++++++ test/functional/test_runner.py | 1 + 2 files changed, 142 insertions(+) create mode 100755 test/functional/feature_stats.py diff --git a/test/functional/feature_stats.py b/test/functional/feature_stats.py new file mode 100755 index 000000000000..64b4674b102c --- /dev/null +++ b/test/functional/feature_stats.py @@ -0,0 +1,141 @@ +#!/usr/bin/env python3 +# Copyright (c) 2025 The Dash Core developers +# Distributed under the MIT software license, see the accompanying +# file COPYING or http://www.opensource.org/licenses/mit-license.php. + +"""Test stats reporting""" + +import queue +import socket +import time +import threading + +from test_framework.netutil import test_ipv6_local +from test_framework.test_framework import BitcoinTestFramework +from queue import Queue + +ONION_ADDR = "pg6mmjiyjmcrsslvykfwnntlaru7p5svn6y2ymmju6nubxndf4pscryd.onion" + +class StatsServer: + def __init__(self, host: str, port: int): + self.running = False + self.thread = None + self.queue: Queue[str] = Queue() + + addr_info = socket.getaddrinfo(host, port, socket.AF_UNSPEC, socket.SOCK_DGRAM) + self.af = addr_info[0][0] + self.addr = (host, port) + + self.s = socket.socket(self.af, socket.SOCK_DGRAM) + self.s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1) + self.s.bind(self.addr) + self.s.settimeout(0.1) + + def run(self): + while self.running: + try: + data, _ = self.s.recvfrom(4096) + messages = data.decode('utf-8').strip().split('\n') + for msg in messages: + if msg: + self.queue.put(msg) + except socket.timeout: + continue + except Exception as e: + if self.running: + raise AssertionError("Unexpected exception raised: " + type(e).__name__) + + def start(self): + assert not self.running + self.running = True + self.thread = threading.Thread(target=self.run) 
+ self.thread.daemon = True + self.thread.start() + + def stop(self): + self.running = False + if self.thread: + self.thread.join(timeout=2) + self.s.close() + + def assert_msg_received(self, expected_msg: str, timeout: int = 30): + deadline = time.time() + timeout + while time.time() < deadline: + try: + msg = self.queue.get(timeout=5) + if expected_msg in msg: + return + except queue.Empty: + continue + raise AssertionError(f"Did not receive message containing '{expected_msg}' within {timeout} seconds") + + +class StatsTest(BitcoinTestFramework): + def set_test_params(self): + self.num_nodes = 1 + + def run_test(self): + self.log.info("Test invalid command line options") + self.test_invalid_command_line_options() + + self.log.info("Test command line behavior") + self.test_command_behavior() + + self.log.info("Check that server can receive stats client messages") + self.have_ipv6 = test_ipv6_local() + self.test_conn('127.0.0.1') + if self.have_ipv6: + self.test_conn('::1') + else: + self.log.warning("Testing without local IPv6 support") + + def test_invalid_command_line_options(self): + self.stop_node(0) + self.nodes[0].assert_start_raises_init_error( + expected_msg='Error: Cannot init Statsd client (Port must be between 1 and 65535, supplied 65536)', + extra_args=['-statshost=127.0.0.1', '-statsport=65536'], + ) + self.nodes[0].assert_start_raises_init_error( + expected_msg='Error: Cannot init Statsd client (No text before the scheme delimiter, malformed URL)', + extra_args=['-statshost=://127.0.0.1'], + ) + self.nodes[0].assert_start_raises_init_error( + expected_msg='Error: Cannot init Statsd client (Unsupported URL scheme, must begin with udp://)', + extra_args=['-statshost=http://127.0.0.1'], + ) + self.nodes[0].assert_start_raises_init_error( + expected_msg='Error: Cannot init Statsd client (No host specified, malformed URL)', + extra_args=['-statshost=udp://'], + ) + self.nodes[0].assert_start_raises_init_error( + expected_msg=f'Error: Cannot init Statsd 
client (Host {ONION_ADDR} on unsupported network)', + extra_args=[f'-statshost=udp://{ONION_ADDR}'], + ) + + def test_command_behavior(self): + with self.nodes[0].assert_debug_log(expected_msgs=['Transmitting stats are disabled, will not init Statsd client']): + self.restart_node(0, extra_args=[]) + # The port specified in the URL supercedes -statsport + with self.nodes[0].assert_debug_log(expected_msgs=[ + 'Supplied URL with port, ignoring -statsport', + 'StatsdClient initialized to transmit stats to 127.0.0.1:8126', + 'Started threaded RawSender sending messages to 127.0.0.1:8126' + ]): + self.restart_node(0, extra_args=['-debug=net', '-statshost=udp://127.0.0.1:8126', '-statsport=8125']) + # Not specifying the port in the URL or -statsport will select the default port. Also, validate -statsduration behavior. + with self.nodes[0].assert_debug_log(expected_msgs=[ + 'Send interval is zero, not starting RawSender queueing thread', + 'StatsdClient initialized to transmit stats to 127.0.0.1:8125', + 'Started RawSender sending messages to 127.0.0.1:8125' + ]): + self.restart_node(0, extra_args=['-debug=net', '-statshost=udp://127.0.0.1', '-statsduration=0']) + + def test_conn(self, host: str): + server = StatsServer(host, 8125) + server.start() + self.restart_node(0, extra_args=[f'-statshost=udp://{host}', '-statsbatchsize=0', '-statsduration=0']) + server.assert_msg_received("CheckBlock_us") + server.stop() + +if __name__ == '__main__': + StatsTest().main() diff --git a/test/functional/test_runner.py b/test/functional/test_runner.py index 2c1a8597f535..02f1fde0f6b6 100755 --- a/test/functional/test_runner.py +++ b/test/functional/test_runner.py @@ -308,6 +308,7 @@ 'mining_basic.py', 'rpc_named_arguments.py', 'feature_startupnotify.py', + 'feature_stats.py', 'wallet_simulaterawtx.py --legacy-wallet', 'wallet_simulaterawtx.py --descriptors', 'wallet_listsinceblock.py --legacy-wallet', From e607836a2058fc6a5944578c3e73fbd7fd7e161a Mon Sep 17 00:00:00 2001 From: 
Kittywhiskers Van Gogh <63189531+kwvg@users.noreply.github.com> Date: Tue, 30 Sep 2025 07:54:10 +0000 Subject: [PATCH 013/656] docs: add release notes --- doc/release-notes-6837.md | 17 +++++++++++++++++ 1 file changed, 17 insertions(+) create mode 100644 doc/release-notes-6837.md diff --git a/doc/release-notes-6837.md b/doc/release-notes-6837.md new file mode 100644 index 000000000000..757baeeb86d0 --- /dev/null +++ b/doc/release-notes-6837.md @@ -0,0 +1,17 @@ +Statistics +---------- + +- IPv6 hosts are now supported by the StatsD client. + +- `-statshost` now accepts URLs to allow specifying the protocol, host and port in one argument. + +- Specifying invalid values will no longer result in silent disablement of the StatsD client and will now cause errors + at startup. + +### Deprecations + +- `-statsport` has been deprecated and ports are now specified using the new URL syntax supported by `-statshost`. + `-statsport` will be removed in a future release. + + - If both `-statsport` and `-statshost` with a URL specifying a port is supplied, the `-statsport` value will be + ignored. From 733ccd3646d885b2b627c0c205699177d2630073 Mon Sep 17 00:00:00 2001 From: MarcoFalke Date: Tue, 23 Feb 2021 16:41:43 +0100 Subject: [PATCH 014/656] contrib: make gen_key_io_test_vectors deterministic Also, remove instructions which are redundant with the README --- contrib/testgen/README.md | 2 +- contrib/testgen/gen_key_io_test_vectors.py | 18 +++++++++--------- 2 files changed, 10 insertions(+), 10 deletions(-) diff --git a/contrib/testgen/README.md b/contrib/testgen/README.md index 1efe07d483a0..fd3f9e660a2c 100644 --- a/contrib/testgen/README.md +++ b/contrib/testgen/README.md @@ -2,7 +2,7 @@ Utilities to generate test vectors for the data-driven Dash tests. 
-Usage: +To use inside a scripted-diff (or just execute directly): ./gen_key_io_test_vectors.py valid 50 > ../../src/test/data/key_io_valid.json ./gen_key_io_test_vectors.py invalid 50 > ../../src/test/data/key_io_invalid.json diff --git a/contrib/testgen/gen_key_io_test_vectors.py b/contrib/testgen/gen_key_io_test_vectors.py index ba045ec1c505..40c89e85f712 100755 --- a/contrib/testgen/gen_key_io_test_vectors.py +++ b/contrib/testgen/gen_key_io_test_vectors.py @@ -4,10 +4,6 @@ # file COPYING or http://www.opensource.org/licenses/mit-license.php. ''' Generate valid and invalid base58 address and private key test vectors. - -Usage: - ./gen_key_io_test_vectors.py valid 50 > ../../src/test/data/key_io_valid.json - ./gen_key_io_test_vectors.py invalid 50 > ../../src/test/data/key_io_invalid.json ''' from itertools import islice @@ -76,7 +72,7 @@ def is_valid(v): def gen_valid_base58_vector(template): '''Generate valid base58 vector''' prefix = bytearray(template[0]) - payload = bytearray(os.urandom(template[1])) + payload = rand_bytes(size=template[1]) suffix = bytearray(template[2]) dst_prefix = bytearray(template[4]) dst_suffix = bytearray(template[5]) @@ -108,17 +104,17 @@ def gen_invalid_base58_vector(template): corrupt_suffix = randbool(0.2) if corrupt_prefix: - prefix = os.urandom(1) + prefix = rand_bytes(size=1) else: prefix = bytearray(template[0]) if randomize_payload_size: - payload = os.urandom(max(int(random.expovariate(0.5)), 50)) + payload = rand_bytes(size=max(int(random.expovariate(0.5)), 50)) else: - payload = os.urandom(template[1]) + payload = rand_bytes(size=template[1]) if corrupt_suffix: - suffix = os.urandom(len(template[2])) + suffix = rand_bytes(size=len(template[2])) else: suffix = bytearray(template[2]) @@ -137,6 +133,9 @@ def randbool(p = 0.5): '''Return True with P(p)''' return random.random() < p +def rand_bytes(*, size): + return bytearray(random.getrandbits(8) for _ in range(size)) + def gen_invalid_vectors(): '''Generate invalid test 
vectors''' # start with some manual edge-cases @@ -153,6 +152,7 @@ def gen_invalid_vectors(): if __name__ == '__main__': import json iters = {'valid':gen_valid_vectors, 'invalid':gen_invalid_vectors} + random.seed(42) try: uiter = iters[sys.argv[1]] except IndexError: From 957380c38b2c33adeab0eac36518445be7400e76 Mon Sep 17 00:00:00 2001 From: MarcoFalke Date: Tue, 23 Feb 2021 16:40:42 +0100 Subject: [PATCH 015/656] scripted-diff: Regenerate key_io data deterministically -BEGIN VERIFY SCRIPT- ./contrib/testgen/gen_key_io_test_vectors.py valid 50 > ./src/test/data/key_io_valid.json ./contrib/testgen/gen_key_io_test_vectors.py invalid 50 > ./src/test/data/key_io_invalid.json -END VERIFY SCRIPT- --- src/test/data/key_io_invalid.json | 96 +++---- src/test/data/key_io_valid.json | 459 ++++++++++++++---------------- 2 files changed, 260 insertions(+), 295 deletions(-) diff --git a/src/test/data/key_io_invalid.json b/src/test/data/key_io_invalid.json index 500f11cf8ba2..75ca69ea23cb 100644 --- a/src/test/data/key_io_invalid.json +++ b/src/test/data/key_io_invalid.json @@ -6,147 +6,147 @@ "x" ], [ - "8212abd8rrzcJmBG9n7VmSpaMfPRXhyHkQtNXFAMPCUqQTF9Kmsk5VF3b81x1qt9tRsQ2REKQ6j" + "UJpnDUZhux15KckwqW1u8wratRydW8RgSfV7of9kh6S1EedTgkqawKx77AciK8x6iHVLd7jwizd" ], [ - "5CPYCiRk7LjiYQc45GQTgqNjYw5tWeCZd1NWvevf6SQfJvDm9vK" + "72JVw2AafMBFLZ3d6T6ymErSg5s7xAFAKgMgMLLiNXCPkLm4JDjvwhc1kjmApkcVNbqoMp2Hg2v" ], [ - "2UB8YkLAsoJWEsw6daxKvo5W4NVjLSB8Bsko6UCWMrLnoFKgETXdKa2XcvoWdjeeLbKqRcaWwGvq" + "s3YeU8Q2CU2NBzkKBgya6VA39EtN9zZLi5fscPmY7iqPL2VmFXHxn8QaeYoQXF5AyzC17C1VLo3" ], [ - "9sW9vnbLHfdcqhU2n5bLAFUksSgCiMBVmdn6Pae3iTYo49aSqZeS" + "7if6g2nYRkgh46JNa8iaoUYPzrbxMQhd2FNp2kVb6QLtofd76Yf" ], [ - "2FeLT8g7q25cvVmUjQnXvTt7rpWDtRjq2oi9F9zj3rLWxNKSoKcPLob6TEB2RAeumauRzj5Em5uL" + "3bxKf3oSh6kRKf83AhAiED6Sp85wHo3SKXnV1igNhXXW2Y23tz2e" ], [ - "7Vbg3REusN6tagLwHkz4P71VZtp8oTTtETmaTFJfypdX4J7vBmp6CMzWpNV3RUdv7TST2nigUz2oU" + 
"6uuF4qfGt6sZ4TGRKs8MYmrvEarZ7xMd592mLDH1AxtWoQ8dNB2KSTAjnsFLwPzBGJQzWP6sMw9" ], [ - "7obK6DhucEuvCinKLAUNYMPXNEFCHcErXa61wi2sMi66owinM4E8YD4rkgHVvxpv3ZGxFNXUyCA" + "2j8t4uU1uhfsSLe1Z2ozQpce9vdHUjxnu1N" ], [ - "3FPe28dxJDhEmkAbGVgsT5XFpBwyM48FK4veyiQVgAKvQLTea2U" + "869cULb66zRdpGZQ5duEfKbFrxTpFDpHYUuPjTVZMAbS2q4DgCqyozDcS9MYTyn1oHCGmSyabRQ" ], [ - "2ULU3PB7u3rxfUXKVb5mZ4ijXbj85LN99s7zess5XPZEX1DJvanHoY6dLvYnkLtQDWQ2qCq1GWSA" + "Pm6RGQxdC2MLeBHAiwZuevgCqpf1BXHV3mgrFyqkdHdGjSNCWj" ], [ - "7VBmPmJMUFwYhgktivvqBZcPzqdKPy4S4RRTACdwTiV6wNpkpXEmp3tvDhzkqdyFhXdJ88Q5T9mka" + "cU3bonutoouxvZ1eFjeWZ9fx3m1iejyyNT5LZj7APGXdf7xrM8za" ], [ - "2Zx1G5CMrEC4Y8EBVxvepmM1aiXv7CenqUR1dZBxC4PFUv9jAAm8TUxD4SKC2ArwKawA3Y4rhC2v" + "24MoumSS96TbYEz7zxLQUWM6XtptPk7EVKkm5dPeNmGdU13KCePZbDPCX83Jv2RDgF7dX1ECYkxCx" ], [ - "yaWZmeqWr5Mea62ECoMPzVkXxHGDF6LtCfc" + "aXxpJ39neQAdsvQyWpfmBnchstSHhzuttM" ], [ - "7VPs36WGcbdFsCBsMXExgBoMeme8T7eJ4g3UMqQrhrA2Lyys5vi3dgH43nbz1AV4YFz3ZRKLdFsnu" + "Cz22bHdzGqE9zmK5jGeTTnCLuoy8X5qRBw" ], [ - "2DS5LTr3AgrTGoBD3nqXWDPtCMd98mkop4wxaVfRWwPFYvmpXP5hG5bKzVVzvsKzP89THiiRd3Pr" + "meYhqRj9dCsx9ZjobviAvWzzxKD4YtMzs2" ], [ - "DeRXybcZve86XXanvmLG9XwFadEh8GjH2Y" + "XDMcG9HHawbsfN2AcNjPamQtPrCNZbX3HCWcy24fw6qbb45GU4QF" ], [ - "7pejnWw1qHjCfAmzXoStxFZYMJ6hu1gixoJD8irtyrki5A9PHDWV798YMaH3teBadyatmdV2YvT" + "dqqMGb5EYH3zsKxxrgDLAgQySTbC1amEoThMeEypQ9D8252JiRVr" ], [ - "4Yi2yBokcAj6acLZhiNwmmA1DSzuKnz8vfJ1FHE2yJLSTab6FUi" + "2UAEPV9iad8nM3vmhKM3Ca2WehF9A1EacV2K1U6mh5JGmZ4L8r7fePq2CDdLnhUUZp39r3GPz1NB" ], [ - "2U65F6JTxDP2YJ5XV6FiYxpWMCjwCRbEN9ygEXZ3KkbLUMjSbvKbPyCMLf6ke9kMPg9CLQcJ7aN9" + "7VkmEHVsiwybAT47JRmPPj95iKww9aTZp3xBkQm6dBU8nv88YdpwmYqaLr17TML7uf1JgrPp1duC3" ], [ - "7UgNNEAfdinxDG38ByGmVwbRJp7b4q8BEznm2GXbSJFVSTyS3jiByNLU8ckDprYkqvi5bcf61SUkw" + "SmBouAyTuApTNPEMcndUrzmk6zPtccPiGs" ], [ - "6wzMif8TRzrnyQYAbRZShWcZo3dqKYkBVpi2JR15DFMCLzxAkVTUpsuwdTUWnkx9j1kGDyukh1Go" + "tMYxSXg3m78N1fzNNYkT73ncYFCyag4yQB" ], [ - 
"6YKZfgYJSh9ivzFX1oNqTiiWwhCGrZbnDCigU6N5oyiBeVUKio5ww3Zu4N6K6NHsqaR8djSh9dLfX" + "2Fod12JmMounu8JvcPRU7vcQoTmkWGa382qVr9cUrL5vPokBgU2o8buPaNfyukRqESXLJ58f7BjV" ], [ - "92sWK7QesAPWS12srisHmc9q8Fz8Z5ERJcAaVwkjVKENJmL45bBM" + "6Yrq4S5f6WPJwZefp2FUB7YK3mebXuWBax5joYHbaWw8NyJXnG5esmKL2yA41yDJDJmG45TZDPWc2" ], [ - "2Ua8heeYCTTsr5qgi7vorTpDAWVRGRAJKZ7" + "AiPTzbcd3AWc2cxTLFvJ2Sac5NHA48zVtJ5gFJCun6rZHgipQHjp" ], [ - "2A6BvvmZ86GG7TRwXyCaCvhiM5FsVFvs42x" + "yiLhQ2MjXvvJ77hDB9Yh94rNu6XF2tsQ9XY" ], [ - "R5uiBq2rzVGhxDsq3MeGU3QKeZz8wLywaB" + "h252Gub658y8BeTRXJoSgxbiYPHjgAxL4H" ], [ - "279AcBMDWxh5hnfEm5kptwnfn7rEWschhdxkEzLYzEj2K3wSHfEaqV2moec6wP5w3jVy5cFhoVXk" + "6v7e2HwCqmNQdcdXcedxAaB5EhFcmHfd8NmVgz485uYNZeMUGzKqWXgRissqxAWFK2Grv2hj1Sx" ], [ - "7VMRsPVwoj7vZATKQGj2yVEX55f2qNKDxNiw7YNJv1zjfFFzyWR1WS67tefp7fV1MpXrUhNh86e8Y" + "4fJuZhkcDMRe6frBw8arVJZDXvW41DQBPJncf2GFGFU16qryC164" ], [ - "2V95nNKY2YtDuxyQ6K4unrkKQTmhRpVcUiXdYKxfJSrQG22tHmNojaXc5gUy1ZY6cAByCBMZZsas" + "41edLVs3QGta83S4Sk7Qer45eBZAEqpTzJ2YsKJyX9tMj1KLxak" ], [ - "2D79xmaAZxTf2fMRsTGgkNfMrUErj5CqXNY" + "cNonA1fNAJ4CZpeYi4fCvW42ZXbVGuZiFSksy8kbYm6LgzxGicsY" ], [ - "7nwQ595A7nXdySFQ2pHx71xJsVLhJAeoj68MSSDHhPMxhBQDynYa5KDXEfhW8jZdZeAkraFMy7b" + "fbHnS1rzoSTiteGiAGEz4tym48S4kKfVcGpr2FEMPuyeHFZ6TSz4qJgqxHSVmQVTpEznUX6Xmz9" ], [ - "7rLMRn8VSmsGV1QUfZf5xro5gejEST4fuXpokKSvqPPX8wmMabe" + "2o6ucMbiG78JSuasuD1dzkAMNZNoV7pgn1" ], [ - "J3JtSQiZ46yXNEXpuzjmA7R366BBKMD7id5PZ7cX3V5K5DcP3M9L" + "cxG8jxnDiR5QDFSEuaiYvjgS3sShzi2nfC" ], [ - "21WFTGZEpLyGHqVmb7a5skPDN5oD7ocR7r6t2osiqDQf6Xa84mkh3odE1cbZ2ocTFMZbHSrLXyJr" + "cW8exV1kXUQxrWrkW1LTXYPK8Vj8UyXxtfHLDACz5VFe6Wwy8a7E" ], [ - "7oShSMyZqoK69tWvUEwLBGWHzZFvuVV5SESa4rBe3TmNzfJXBJBP528XMMMiQxoX7fSAzhF2bYL" + "2UQ3i886Z21xg8pzahrG3zq8DdQnoKzSGMczG8qoL48yaxcnD7ob4M3Sxpwq4JykSedRp1MMzgmB" ], [ - "KqarYQ2A4jimBefL5i1SRpbz8xn3y3P2UX1tafCgf48LXTDrKCcf" + "UDPa5QS2J1xquuB4TD3TPTdULaxdDqy8hcdkXwgCmNqgSZYxPYCvvbzfVkz3nRamnvBTzfJizmC" ], [ - 
"gSzq4StFrs2i8gTZ84pw9x68aPQ3XFe5b3oK669aSd68yfFmxH" + "2KrxFisUvAk88XK4y8SAQxNeHQdjjLeLaYK" ], [ - "7UdhV95GSa6CY2XjVYfrqrwwyCScmeksu6TCqcpRSeeMbEmDydSMB4E2CCHFZ4gLoXXB7z1iWsGws" + "2EpLmnuvdXBWJR5JdiCrEp4N5AcwVQ4Ckfc" ], [ - "2msvzdVt86YVXqkWQBJG136u11REi8cXpn1" + "4qn18vELFKKBU44YJTvYgYKkrDUvfc5SJeq1BDFttQLcZCpMzTzf" ], [ - "ryMiy7TSV4LfnJWeXdvT9sZgiLj6ebNg2ac6eLdSAE1N66u1SHwASam7njVBMLYuzr8ntySNoUT" + "XDKnECKqQUp17zwQTErBzGtXYq9uPG9ybqfJjLVJZZh37VHZc3rC" ], [ - "6YnxQpwLv2PWVRw8cf5JVm1HmLP3UrQyyUMufDLzuYRg1vbut9he27XAzmLp7m4WxTz7vsriEwQcX" + "92c7ZpyM2KQBK8N2k1otwpGj98Ex7S8jEAnzB5hfXhBWJLFBkwc" ], [ - "92g6icyL3H2XCqLiukqquX15fwBR5kFE744En5cd3X61cpANrjqY" + "7VtmMAqgjZbpSzWQLy6j1LseRhmmgtBF45c3PrMqme5TP2ak7twfPGwivrVEVALLNnXRUdu3QB13k" ], [ - "2XCxe9EpeGVMkfZqFgtAKmU2GNBpwnmukDs" + "s3KqChhTvrQCUzbSEqbAMEVTdh84NdC1xHA8cvaXYEjohnV7xt4yNre8BMumXVeLXKTp49Sonh3" ], [ - "6YdrW3VDyE96kBFvKghrZ6fkwVngGgJybFnrFHXfexSorcQPxzbHLqNWTSMZHkrdmeQ2ZsLCnFhe3" + "ibJ6v1xJscnjqdAdqqJco864ff4W31Jh91" ], [ - "PsfkAxuhDQKQCwvuwfiVo1vdcYc4fLLr7oZ98XrMyp7SAsLAyQorF" + "sAyhi4v9VWr6JxTofdk8FhUUw88BmEbTsMvspzxQ6BN1mExDpWPZi9cF9RGgdFzsMDkFPfnykiCk" ], [ - "2k2HeazZ1apwsF3VQHHCbooreJ2anjumTTU" + "2bqgYSiSL7bCzXWk98UBUrRiCALgRyUWRWk" ], [ - "724Fw3TFoSoXsZK77FC8NfrgN2scVC8pq7SntoNgCY3oosmEZHJLcrZVKiFagFm5TLcbfuhKm8ewM" + "2UDoz44DrDa2uqVdVQEFKEhuM9AvpFSE3N8iNhc5V5bQ4ezAMdZfNoEGZKVSs7uNs8JLQX96xzDm" ], [ - "7VrbcSqCJndcp187BT68T5eLXF3TQaH7vN57nAhNhbCyUjtvuX6rtDJYNLmXhPTSrE5dkWqifSsRG" + "eXDPBkXCpUQA4ZRfduqyDDtcHnoGSaC1d2BN3B6dWREUaqusqnG4" ], [ - "8SHxzARcUKM6NdLyM4JDSMoSkA99SgZ3zP85JAEaiipxnUXuMBtV7dtx6cN1wP5df4bQ8G6MsMv" + "29LjxDzmddtZy3odqFeu4DTFhF7ALC9hY2Vu8yPD5DzCrBMB4rbDq5b7Vh26thgfXzMXRsPtZhit" ] ] diff --git a/src/test/data/key_io_valid.json b/src/test/data/key_io_valid.json index 852e2006f3cf..d83bb2adaeee 100644 --- a/src/test/data/key_io_valid.json +++ b/src/test/data/key_io_valid.json @@ -1,461 +1,426 @@ [ [ - "XtLoNJ2NpPbxYsFYFHewTy6QJQCcvziDAx", - 
"76a914c1a7e32962dee960126497b3df1fd4072c93f50388ac", + "XqZHYpoksmbEPtWstAEki3o8pJ8ZsPfXpK", + "76a914a31c06bd463e3923bc1aadbde48b16976c08071788ac", { - "isPrivkey": false, - "chain": "main" + "chain": "main", + "isPrivkey": false } ], [ - "7iRPy8FEHBzbChrktsG85YbDZ3SuiCxsNq", - "a914afa615ddffcfc364f952956c251bce5042e821bf87", + "7XShCrc5u9rZZv7j18WqbUMZMxp8k1Hq4z", + "a914373b819a068f32b7a6b38b6b38729647cfde01c287", { - "isPrivkey": false, - "chain": "main" + "chain": "main", + "isPrivkey": false } ], [ - "yjPCQXXeWJLFQWM6kF6dKRczZbNkSWVtrj", - "76a914fd00802f8b61ccf0c1bb1c7969c832673ed546a988ac", + "yf7WoPrbJCGhLLtpeBe7tteKEKwpvZ1w97", + "76a914ce28b26c57472737f5c3561a1761185bd8589a4388ac", { - "isPrivkey": false, - "chain": "test" + "chain": "test", + "isPrivkey": false } ], [ - "yjPCQXXeWJLFQWM6kF6dKRczZbNkSWVtrj", - "76a914fd00802f8b61ccf0c1bb1c7969c832673ed546a988ac", + "8yCvxUt2TYKEQb5Ak6n9DYiCx22xfa3Lio", + "a914ce0bba75891ff9ec60148d4bd4a09ee2dc5c933187", { - "isPrivkey": false, - "chain": "regtest" + "chain": "test", + "isPrivkey": false } ], [ - "8yDrsN8yRfbG678S5eVntDtAHdnuQKmCDx", - "a914ce38bb4d79dd633ba5fbc0ea1e3b85a637acdf5987", + "ycjYu5VF3amLVDTSSA4E5sJjGjRpRof3np", + "76a914b4110ba93ac54afc14da3bdd19614774a2d55d2988ac", { - "isPrivkey": false, - "chain": "test" + "chain": "regtest", + "isPrivkey": false } ], [ - "7rJYzFcK4nNu4oEFUZL9heG4u4XNvoXdkiCaddtQZ8oT4wMxPta", - "6333e256bb02f09b3bb233c17c5e86562cc39571ac2eba07a92d99d1f6faeeac", + "8o2MHyL9pMd9NFiy3xPJKFq99YTyyLv7KT", + "a9145e5a35ab44b3efaea5129ba22b88ba3e2976614587", { - "isCompressed": false, - "isPrivkey": true, - "chain": "main" - } - ], - [ - "XGCaeDGqsxPK93JHP3kJ41MEAobFu42PXkASRLXBkCx3bKBkrGMn", - "9296962c50327a60f78a11b6f017ea3b22182892fba9c976a765c5b014f25802", - { - "isCompressed": true, - "isPrivkey": true, - "chain": "main" - } - ], - [ - "9213qJab2HNEpMpYNBa7wHGFKKbkDn24jpANDs2huN3yi4J11ko", - "36cb93b9ab1bdabf7fb9f2c04f1b9cc879933530ae7842398eef5a63a56800c2", - { - 
"isCompressed": false, - "isPrivkey": true, - "chain": "test" + "chain": "regtest", + "isPrivkey": false } ], [ - "9213qJab2HNEpMpYNBa7wHGFKKbkDn24jpANDs2huN3yi4J11ko", - "36cb93b9ab1bdabf7fb9f2c04f1b9cc879933530ae7842398eef5a63a56800c2", + "7sUh9RiHaovsNoNToDz3gfSzbETZBodKCY8ZkLtxbDdcEueuNdd", + "fdeca3b08e38af53d7c4c60e3ad208ce5066441036e9f191e0b75036a77f65e2", { + "chain": "main", "isCompressed": false, - "isPrivkey": true, - "chain": "regtest" + "isPrivkey": true } ], [ - "cNddBPFJ6PbCX2BJVR6up2p8PjtFyUy7zpbkAgRGhMmrPbCe8Fvf", - "1f5750a1f5703e8b1a273964c7b7fce1871cc7a43f303ec7d33ac724fdb59eca", + "XK9kG3y8JeDgSNrXdomWiCiBMs7D2eNJSrux1rx7GuGLWpMxEH3w", + "eaa4752443233fbe8f8943bf956de595665c38ffff23827e17c10cdc1c27a028", { + "chain": "main", "isCompressed": true, - "isPrivkey": true, - "chain": "test" + "isPrivkey": true } ], [ - "cNddBPFJ6PbCX2BJVR6up2p8PjtFyUy7zpbkAgRGhMmrPbCe8Fvf", - "1f5750a1f5703e8b1a273964c7b7fce1871cc7a43f303ec7d33ac724fdb59eca", + "938BPMAhPitw3MZW9V5UBFVtKwJRkzJGkQuS4EsGiczaHH7Xed6", + "caae6c9810626198ff778740f88ddcf102aeb81daee289c044c4a4571c4b6f28", { - "isCompressed": true, - "isPrivkey": true, - "chain": "regtest" + "chain": "test", + "isCompressed": false, + "isPrivkey": true } ], [ - "XigZoHdsPg8qHTxUwyZFttzp8ocSUwLKv9", - "76a91457b39f318f3cd120e08a5f710850c03c22c213fc88ac", + "cRUCRTHRBX9rA9CXDvVEmuPMyRfWNvg8gpMiFiN77wNTJetkFari", + "7400f4b8e0b843f880c32d81e91bdea04cd7a3819b32275fc3298af4c7ec87eb", { - "isPrivkey": false, - "chain": "main" + "chain": "test", + "isCompressed": true, + "isPrivkey": true } ], [ - "7qX4wLps8eQEc8vVNxodyowpJtM8NQjt4s", - "a914fd81935b46055357fd1d2af1ca12ef055f9b0cc087", + "91bBTjMFB54xidVH6xxBVydzqMM6tGjao5FdAFv2JFDWFidQghY", + "0099527d041ced5ce0fcd4ce4e3d0e3de091f21415bb7cd011fac288c42020a8", { - "isPrivkey": false, - "chain": "main" + "chain": "regtest", + "isCompressed": false, + "isPrivkey": true } ], [ - "yT9LnoTWwW8BZfMsaBzb2MeYPW9rJXrjcn", - 
"76a9144adfa9d287098715d3d8be30157a83c14f3f01e988ac", + "cRfkYU8Qb7KSKUi6qxdVAV2rvb5uXge1wrqdkvESmAinnFKHh6ML", + "79f28c2a4387df9b6cf636ed8ac1bab033b64f66feaba65f70e684731e3f3910", { - "isPrivkey": false, - "chain": "test" + "chain": "regtest", + "isCompressed": true, + "isPrivkey": true } ], [ - "8kPTvJRz23wKti61gMrsi2vJgs7q27A1gm", - "a91441701951c4acaf87123358d268e84c066b05478287", + "XiXge21yt5xErdLNt1V29yJX2g29Xag9VM", + "76a9145605968d3a96380112b5a10f3a11e708dc54128388ac", { - "isPrivkey": false, - "chain": "test" + "chain": "main", + "isPrivkey": false } ], [ - "7rdDu7XRm5WLKmPaX62vqVLaQmPtYT18tbk2i7fY6eRnYBrsrJZ", - "8d98e24f45c472b2a04390fa80acc09ce2ce7d1ce041489fe5d9e8d89bdf6b2d", + "7XuP9xVGyvkCAfW84QJkGfbiR7dX9TYaPH", + "a9143c47ab7c368a21b9efe19293793ec879ce68301887", { - "isCompressed": false, - "isPrivkey": true, - "chain": "main" + "chain": "main", + "isPrivkey": false } ], [ - "XHNgkfosQamJ6M4bptMJPAMpqdeRj5mh3DWRy7fmhYcBRhHez15H", - "b59fcc3195c2d5e3ccb19956b0d513154cff959ec3acf9bb6004670d728fee56", + "yNZpqbukFJzBUJSca4Q1Ny2YD8qwkBp874", + "76a91418a86e5a6c6977ddba0daca7fba5190f67ba56cc88ac", { - "isCompressed": true, - "isPrivkey": true, - "chain": "main" + "chain": "test", + "isPrivkey": false } ], [ - "93WCLruvEegkBVYc74utyt8xvt4TxunX1kHryCQtc3RCJ724Uuq", - "fcac2955e7b3e3c28a1e069b177089574defb34741e9079a977e760fb36cdec6", + "8zVH1QmXBYLvJTainJqF2tBVbhfj3G3oAj", + "a914dc1b3f31308972236c2e47763fdfec1371cedcdb87", { - "isCompressed": false, - "isPrivkey": true, - "chain": "test" + "chain": "test", + "isPrivkey": false } ], [ - "cQqbUpKkaBKG1TAyXFh6xGKEfQMZChtu6Tv1NsZMHHwTXja6QnWv", - "612c8e6ef60e6d036e4cde5882e55978c84b08825a246098f1e8bf1e068f43a4", + "yZ6DTXL9jL6JnasgbBwqudiLbjHnNpMfw2", + "76a9148c190ca6ff8ad603f817edc0d93c2a687c7b36dd88ac", { - "isCompressed": true, - "isPrivkey": true, - "chain": "test" + "chain": "regtest", + "isPrivkey": false } ], [ - "XkMbn4X26GbXrb5U8t1dP2miqvwJCqk6XD", - 
"76a9146a0d9f3c2bcbf03ceba283fe3feeaf8eb92f7acc88ac", + "8ooZRudHQeoPpTHJN9qf8uWLJqhQ5MhtE7", + "a91466e70f2a6100fc6343edc8c874496cb2f5bbfec887", { - "isPrivkey": false, - "chain": "main" + "chain": "regtest", + "isPrivkey": false } ], [ - "7bNrQPXbcGnbe3K2dUnQD9bXawugSgX8cX", - "a9146262063c2a4a0a638062aaff356d79c35600a82187", + "7rdh84TGTA8dhKgRmWHHVS8n7EYC6XU727w7RFCNBcHyu4Q3qKU", + "8ea9b77c27304b37f70e94bc8a0fbf500e0c957a80ebda87280ef58214d92f11", { - "isPrivkey": false, - "chain": "main" - } - ], - [ - "yXU6wzQ9mU8VmSppe4BLTJ6wuCgeAddPkG", - "76a9147a4c83f2194c3d4b8d2557659807e31fe549e1f388ac", - { - "isPrivkey": false, - "chain": "test" + "chain": "main", + "isCompressed": false, + "isPrivkey": true } ], [ - "8kvSTstYGkxogEXxJ3ALCD4bwMSnVV2uR8", - "a914474bc756e41243d2864aa4aae4e962edb3b4b51487", + "XGPEaEBfRr58YRbgaryi6knXMNevQ3kNm9GrefU5X1TbgSDgY52A", + "9811acdc3c671ef1e3913f94980a9e146ba895908550ef4234abb7503d436521", { - "isPrivkey": false, - "chain": "test" + "chain": "main", + "isCompressed": true, + "isPrivkey": true } ], [ - "7s4u8NofVVzDz4uz3FELHRSLccn9G6AaTpnkEUeRCHmmaFeysZE", - "c7e604df1cca9e700fd023b42495902d1c8eca2642df42543451b3124d9c7279", + "92tWd3HBeCrToLZ8pkWRUqsoKNmYZgNN4NfWCzg1kzC5jGjN8Ma", + "aba54c7550edc0ef1202759fff90ff19128936814321ee59e111e13e5e482870", { + "chain": "test", "isCompressed": false, - "isPrivkey": true, - "chain": "main" + "isPrivkey": true } ], [ - "XEPuZVWCUjRFeoW9hKg4DB9QsZhqojT9uqFt3Mu2UVc61GkRwWdH", - "5cbd7421d9fbe6b3bc5b684da2054091da8a64b2a3ad25ab7960d452fd05b3ff", + "cUjojhwLQ7yhysRQJmvFGXBB4C3ubsob4GAB2S8RLN7WMnybQKLz", + "d58bb44d9cfbfccea78702aad18d4ceea91af0e022431de31bbe8d2745489a35", { + "chain": "test", "isCompressed": true, - "isPrivkey": true, - "chain": "main" + "isPrivkey": true } ], [ - "92bAGbFfnYyk78iHJ7PdhV7TsJe2scVFpQ3BjWkrcfBEFBQGefV", - "843f9361a729052d69193c9f47b09383416881f68188bd1139fee97bc6ec7d92", + "92yfMV3G9QXZQE4qnpaqHEbUFJtZptQF2JXqFR1zSMPKtTd6ahi", + 
"b75734afa2da43817d40e7e8d80d17a26cd4460b0055c521a3fa4329bd718db4", { + "chain": "regtest", "isCompressed": false, - "isPrivkey": true, - "chain": "test" + "isPrivkey": true } ], [ - "cRFHttGojDviBmDokZys6C2RHYrtwiyyWtcQmT7gWSeRbFQY4e49", - "6d5d556d5843e6c550b83f4a33be4dd3bdd5d143c07052a0cdf1f1b351d9cd2f", + "cRFfmkhkhaqN8bxNqnr4GPTV25WRk2tXLtsqbd3B7CPkbmA4QDgt", + "6d8f021c13f1e2b0e7268b09d55e958d256e200a4e5de6eecbf8dc0ae65b35ae", { + "chain": "regtest", "isCompressed": true, - "isPrivkey": true, - "chain": "test" + "isPrivkey": true } ], [ - "XdsK6sSz3L4zLJCdyPRukR6AR2LythNSqi", - "76a91422e34cda0ba9e2affb620720ae8a7797567da63988ac", + "XgVUBJHMpE4QhFXkhhQbgJK84ASDya7GrU", + "76a9143faa1a5ac78fe2df68f99ebf27ecee3cdd29f9cc88ac", { - "isPrivkey": false, - "chain": "main" + "chain": "main", + "isPrivkey": false } ], [ - "7bofzFHNeodW6iJGv6bzkejVHDE12DPXm1", - "a9146713ba75a430dc678a402012a8fdc40fcf3c2ba587", + "7mJ7bHEhHRdLZeETB6aQ1v5ABRitMzRoL8", + "a914cf2de169062dbcec55c8ee69cdabddbccf3f442887", { - "isPrivkey": false, - "chain": "main" + "chain": "main", + "isPrivkey": false } ], [ - "ybtBPfTFkCUWFBzGmGnDQgyfWB5N74LBwC", - "76a914aabab5cd40c1cc70d254e8616bf75edc2eaf4bc188ac", + "yehwEL9J7DfHgNQwBVpgLXrdaxGDZTYirT", + "76a914c9b31b61df09db783833d1eb75594ed2cbdf3a3988ac", { - "isPrivkey": false, - "chain": "test" + "chain": "test", + "isPrivkey": false } ], [ - "8oDjDp3wqAdWLEuT4LXPo7N8ZHWoJLdMU8", - "a9146081170c074531062628484693ed4e97b396367087", + "8f2fFU41QNkD3WSoSz8pvgeDpeREhBQxmG", + "a91406a831665447dd11f7c54759a48266adfbd7895487", { - "isPrivkey": false, - "chain": "test" + "chain": "test", + "isPrivkey": false } ], [ - "7r5ffXenTyRA9rXf3Mgji4fh33AFYuMm8eSaKpRMnimgQV1Z4KV", - "45f22c0ff5e0f588f710377c9c940ca11bfa920a2b5cc5ff4b6f115e8d2bc2d1", + "yiCbYijPuRo177xtLMeNqGMaVu5wZjpfTC", + "76a914f0071de0f8422d94f6fb43091b986f58bac9506f88ac", { - "isCompressed": false, - "isPrivkey": true, - "chain": "main" + "chain": "regtest", + "isPrivkey": false } ], [ - 
"XGYzT6mPs7L9z9S6Ryn8og8excfLtH7J5easZjDWH2kWfRBmoAgR", - "9d16918481c74120a5b60b684ac94a08ee341f1788575548b8c45098a88a9c29", + "8teDju3BzB973H5P2Zt5iJ3K7b5rcjXsrP", + "a9149bfb821d62e69330410bb56f0085ecce89afb8f087", { - "isCompressed": true, - "isPrivkey": true, - "chain": "main" - } - ], - [ - "92xFEve1Z9N8Z641KQQS7ByCSb8kGjsDzw6fAmjHN1LZGKQXyMq", - "b4204389cef18bbe2b353623cbf93e8678fbc92a475b664ae98ed594e6cf0856", - { - "isCompressed": false, - "isPrivkey": true, - "chain": "test" + "chain": "regtest", + "isPrivkey": false } ], [ - "92xFEve1Z9N8Z641KQQS7ByCSb8kGjsDzw6fAmjHN1LZGKQXyMq", - "b4204389cef18bbe2b353623cbf93e8678fbc92a475b664ae98ed594e6cf0856", + "7rzRZkXmWGVMxQXyG1RsRHfE1xMS2SgFvxWrU5WqrgAvG9gswQo", + "bdbcab325d6e11f2aaeb549f50a9d91fb8e64c814faa685367b24b8d20316baa", { + "chain": "main", "isCompressed": false, - "isPrivkey": true, - "chain": "regtest" + "isPrivkey": true } ], [ - "cSs1wg2Lm9AvQp8diEdfTp4kaaYpLm6GzFD3sxtCLkU2SJiD4qqk", - "9d948bbecb677f6fcba65d78964252e5892a491351ce1b6d0a69c5678de330fe", + "XKLuKLJsbgxuod724Lj16mjUNnNnuvE1vwrgsaKC3GFw2oEET7eZ", + "f061adbfe72c9d914d678cd5004d49356ec9949ba752777171ac368279cbe6f5", { + "chain": "main", "isCompressed": true, - "isPrivkey": true, - "chain": "test" + "isPrivkey": true } ], [ - "XjmoGTCeDNDCGY6A4ed6d3VBdJmdsj17EJ", - "76a91463a911fcd13311d90188b372b6f8ab7a853ef05288ac", + "938eJS88jdcby5LGUmkAbiGEtf9aQxHK61CU722DCTFbcmkQMo1", + "cbbc2ba8154883a9a29e5517d1f3c03cac4f39ce3225060b3efb799cd9c41274", { - "isPrivkey": false, - "chain": "main" + "chain": "test", + "isCompressed": false, + "isPrivkey": true } ], [ - "7gtFVBDLHVg4AVBJpneR9fNr9SyEZKaeic", - "a9149ec9fc088ce0a02386c787227e699e50256edbae87", + "cRAUNG3GXFQhK7uHdzdowk8a49oCE3b7jjqTajbxo8hmM3Brn4Ud", + "6ae2a19331b7b2627e663e25a7b001e4c0dcc5e21bc76c382dcdf5b284760c8e", { - "isPrivkey": false, - "chain": "main" + "chain": "test", + "isCompressed": true, + "isPrivkey": true } ], [ - "yd3pNG9y4V2R3wkjfzH5kvx3ULab6y6wf6", - 
"76a914b785720a94fd55e6f1c738fc465918bdd428046488ac", + "9254r4ogv44ojZ22dV1CjERwNrSnfk7v4CLZQxKNSAc1v9QvLg8", + "3fead91f7422cd76aa87fc8f9851f3c1e4719cd0b8e4816dd4e88c72e528bedc", { - "isPrivkey": false, - "chain": "test" + "chain": "regtest", + "isCompressed": false, + "isPrivkey": true } ], [ - "8f3s8CPyf3fWEzsRo63SRzeJYVkVtqUHC4", - "a91406e2857a0b00f890ff8d8a5cd43efebbdf48214c87", + "cRenV6gPhnsKsxCpB3ea6S921FKkZjRXsMQ4RtmENqaH9h3rZ1sP", + "797342c03fd7a346c4c7857ca03d467013b6493c455551e48a1423263b62b127", { - "isPrivkey": false, - "chain": "test" + "chain": "regtest", + "isCompressed": true, + "isPrivkey": true } ], [ - "7rz61M9gL9gtoZWJBbF2m34ajNfAaPmJrRyCBxuSn6D2VFETrfE", - "bcf8a68ca0454f7a356a9c3e30d8f25ae30d9218a58861bea11b4ccfbdce83fa", + "Xs7iEDx8nMwJHdiQnwvCnLBTP2sjmDGTJA", + "76a914b436106a68548a776a0f34d56b63e7c595f2b20588ac", { - "isCompressed": false, - "isPrivkey": true, - "chain": "main" + "chain": "main", + "isPrivkey": false } ], [ - "XJ6a2G4a1ieDujR12rZy5T3tcRmNj5AfBmEw7jJrrT7XxBbjcWc5", - "cb2bbfe7aba4c95555da06409432cae7fd3a7ff59471c0e33744c27d49729340", + "7nTHC71wyAMc1rGdokAJ5wCm8KWLdaQKnR", + "a914dbe1c393617a01f15a4cc063dae4f4d56b89bfbc87", { - "isCompressed": true, - "isPrivkey": true, - "chain": "main" + "chain": "main", + "isPrivkey": false } ], [ - "91czfGPDq43RjAAcMFJh8kRqiEA5apW3R1S5xYY44EgUyNUFAPn", - "04b7a1a49323625e107fa24634de2d41ddb693320fb3aabfd471b6ccc2120af2", + "yZ4dt5Nte6HU3dYuSrsu5Dfmuxec2hciDn", + "76a9148bcc9ae5387c38456f7c076356abadcc67b92ad788ac", { - "isCompressed": false, - "isPrivkey": true, - "chain": "test" + "chain": "test", + "isPrivkey": false } ], [ - "cULWxgtMoehad4qqafyxrd5nEgtriXbFwQNc9MYFFWj16yagKxfB", - "c9909a0cf712be81d4f88526658f1fb46333439fd2ec2e58ceb9f469ee69b303", + "8qMXnvtgD7KQuW9bSWBLND7tPjSWaPEaNd", + "a91477eb20fb9f8806e8649790a90615a46d22dd762e87", { - "isCompressed": true, - "isPrivkey": true, - "chain": "test" + "chain": "test", + "isPrivkey": false } ], [ - 
"XuuUgm1e17hRV9pUKDzf1Xf6pBA35XaNsJ", - "76a914d2ce252de1e1dd9d1205a9672b274f488d943c9288ac", + "yMSGUgRUN8cvqLawxWkrWpmcvp2uJFcsyG", + "76a9140c42615336745356c2e16147c0f3d46b40d5147888ac", { - "isPrivkey": false, - "chain": "main" + "chain": "regtest", + "isPrivkey": false } ], [ - "7gG3an3hmKQasE36RWzhfhnpy5y6M5zZaP", - "a91497f112c9c50ac814a953202f485eedfd2cf856b587", + "8erZsDLXAvEFj3B4ojXDykVBsQGC5NnCnU", + "a91404bf8a0dfff35939a611c7f5a60ac107f33f33d687", { - "isPrivkey": false, - "chain": "main" + "chain": "regtest", + "isPrivkey": false } ], [ - "ya2zvp6VqHwHuNzRYfVKScpFwAEaiVfwrb", - "76a914967577dd6bb09cd009d81be910b37ead7ad31e1a88ac", + "7qbLb2DVyHjuAUNRhCx825iHyprzsKDNxXfsfLyLKM8QtGQ6p2E", + "059f273d2079ab1d90f23777b341c45e2a9b9bf6bfb71dc7d129f64f1b9406ed", { - "isPrivkey": false, - "chain": "test" + "chain": "main", + "isCompressed": false, + "isPrivkey": true } ], [ - "8qYkoymbuRbcZPfSVw98jQG9Te3333o25Q", - "a9147a0a9245e83625c635c9b1db175338d2e22e197687", + "XDxKV8WhErqzqxiTREFvXZk6R7ABfTUEsz7rZuGBGWEAWHhjpjEX", + "4f93ade8f56065f1b7321397b0d4a03e1ab2c54dd9af99ce1ecbfb90c80a5888", { - "isPrivkey": false, - "chain": "test" + "chain": "main", + "isCompressed": true, + "isPrivkey": true } ], [ - "7rLo1znMUU7gZQy66JvrHkvozumpxWVDqcVvENUnKDyoGG7G7Fv", - "684b0d908dfa54a9a9ea86f0b95b8a570ffe6c705b21c98f8753f0e152234cc7", + "92RDKbNmQjwjVNkNx1nDSRoef8DF6r8yF9joBTVmoPh25JCjSdc", + "6da95e1181a55703d96bd27d1b6ef55ca2e4d475b5276f2dbb85f7a6459dceeb", { + "chain": "test", "isCompressed": false, - "isPrivkey": true, - "chain": "main" + "isPrivkey": true } ], [ - "XKWfDoYj2WAHFohAkN1usLZ7qp7pTDJrSZZq6mygDwcYfXufZUcW", - "f566a26ae4e40f6dc430c8b554fa9f9a9dcb2241c5d6e91bd6974b8efbd13604", + "cSCX3hnVyzkF5pugTwkVBP2pffX7aMSiJK3RotHtHDFYKfskaBwq", + "89c67b776fd3bb974452da3ed4ef1647e1733ec076919cab6156077ed9532e7c", { + "chain": "test", "isCompressed": true, - "isPrivkey": true, - "chain": "main" + "isPrivkey": true } ], [ - 
"924PDowxE6eFweoqHp1wJbPhmmtw4wQWLNXf9PSgGUpRS2Qb45S", - "3e5db91a1ddfc8ddd7b70c1f42dd17956a134da974a69ac49b86ec6dff40c6c1", + "91zradAfFyagfLH8xeJczprPqWRudgoyecx9PuEa5JgzWr3CxoL", + "365acc425747e198b3e1468e0284f230153db8687d8ec23db079a5b67d72ca04", { + "chain": "regtest", "isCompressed": false, - "isPrivkey": true, - "chain": "test" + "isPrivkey": true } ], [ - "cNACCSjtqhYrLCdoFjfPUULdhPjhsA5GfgoqZHomWCUCpM9FMeJj", - "113acbdd52394bce786759896bc0c856bb8250f73dbc534dbc4abb7e9ff67307", + "cNMyu2o6XzZ9ekzi5YTUZ3EXtTUHSqrcKYg6DeyPibiKb8v4kGsa", + "174b3867b13e4ea9945e798d87586cffbe8c545ab374454e403b1eb831501ebe", { + "chain": "regtest", "isCompressed": true, - "isPrivkey": true, - "chain": "test" + "isPrivkey": true } ], [ - "XetExvaUvtrzqUaMgXBzxa4ZZsebdzGxha", - "76a9142e0863783ae131b41bcac197738de589881a59c988ac", + "XoGGRaieFVcqKme6mNNTWaXuLFvs6DkwbT", + "76a91489f3c3b02f3137bd7b46b996fac2869848fb19d588ac", { - "isPrivkey": false, - "chain": "main" + "chain": "main", + "isPrivkey": false } ], [ - "7mkzXHgVaVqFF8bvKVpRrL6LeS6tLPBHw8", - "a914d44335f13c8d26c9ba5106b2d4f179ed5a65028687", + "7WuHz4wne6YhnjEucc9mdDtWrd5u6NGhz5", + "a914314b3a5c2d4d03b58820460bf90d8d4ab2f120a387", { - "isPrivkey": false, - "chain": "main" + "chain": "main", + "isPrivkey": false } ] ] From 3739540a2ba89c62bf15952c9baf3ad887a30ca6 Mon Sep 17 00:00:00 2001 From: pasta Date: Tue, 30 Sep 2025 16:03:09 -0500 Subject: [PATCH 016/656] coinjoin/server: validate DSQUEUE denomination; remove redundant timestamp screens; rely on IsTimeOutOfBounds(); tests: add denom and timestamp coverage --- src/coinjoin/server.cpp | 7 ++++++ src/test/coinjoin_queue_tests.cpp | 42 +++++++++++++++++++++++++++++++ 2 files changed, 49 insertions(+) diff --git a/src/coinjoin/server.cpp b/src/coinjoin/server.cpp index 23c8f5e09a86..70c5b4772109 100644 --- a/src/coinjoin/server.cpp +++ b/src/coinjoin/server.cpp @@ -117,6 +117,13 @@ MessageProcessingResult CCoinJoinServer::ProcessDSQUEUE(NodeId from, CDataStream 
MessageProcessingResult ret{}; ret.m_to_erase = CInv{MSG_DSQ, dsq.GetHash()}; + // Validate denomination first + if (!CoinJoin::IsValidDenomination(dsq.nDenom)) { + LogPrint(BCLog::COINJOIN, "DSQUEUE -- invalid denomination %d from peer %d\n", dsq.nDenom, from); + ret.m_error = MisbehavingError{10}; + return ret; + } + if (dsq.masternodeOutpoint.IsNull() && dsq.m_protxHash.IsNull()) { ret.m_error = MisbehavingError{100}; return ret; diff --git a/src/test/coinjoin_queue_tests.cpp b/src/test/coinjoin_queue_tests.cpp index 1f7cfa44d3b9..55323a5a37ad 100644 --- a/src/test/coinjoin_queue_tests.cpp +++ b/src/test/coinjoin_queue_tests.cpp @@ -53,4 +53,46 @@ BOOST_AUTO_TEST_CASE(queue_hashes_and_equality) BOOST_CHECK(a.GetSignatureHash() == b.GetSignatureHash()); } +BOOST_AUTO_TEST_CASE(queue_denomination_validation) +{ + // Test that valid denominations pass + int validDenom = CoinJoin::AmountToDenomination(CoinJoin::GetSmallestDenomination()); + BOOST_CHECK(CoinJoin::IsValidDenomination(validDenom)); + + // Test that invalid denominations fail + BOOST_CHECK(!CoinJoin::IsValidDenomination(0)); // Zero + BOOST_CHECK(!CoinJoin::IsValidDenomination(-1)); // Negative + BOOST_CHECK(!CoinJoin::IsValidDenomination(999)); // Invalid value +} + +BOOST_AUTO_TEST_CASE(queue_timestamp_validation) +{ + CCoinJoinQueue q; + q.nDenom = CoinJoin::AmountToDenomination(CoinJoin::GetSmallestDenomination()); + q.masternodeOutpoint = COutPoint(uint256S("cc"), 3); + q.m_protxHash = uint256::ONE; + + int64_t current_time = GetAdjustedTime(); + + // Test valid timestamp (current time) + q.nTime = current_time; + BOOST_CHECK(!q.IsTimeOutOfBounds(current_time)); + + // Test timestamp slightly in future (within COINJOIN_QUEUE_TIMEOUT = 30) + q.nTime = current_time + 15; // 15 seconds in future + BOOST_CHECK(!q.IsTimeOutOfBounds(current_time)); + + // Test timestamp slightly in past (within COINJOIN_QUEUE_TIMEOUT = 30) + q.nTime = current_time - 15; // 15 seconds ago + 
BOOST_CHECK(!q.IsTimeOutOfBounds(current_time)); + + // Test timestamp too far in future (outside COINJOIN_QUEUE_TIMEOUT = 30) + q.nTime = current_time + 60; // 60 seconds in future + BOOST_CHECK(q.IsTimeOutOfBounds(current_time)); + + // Test timestamp too far in past (outside COINJOIN_QUEUE_TIMEOUT = 30) + q.nTime = current_time - 60; // 60 seconds ago + BOOST_CHECK(q.IsTimeOutOfBounds(current_time)); +} + BOOST_AUTO_TEST_SUITE_END() From cc2f4756116b2133246d71a8c19acfe6a669a331 Mon Sep 17 00:00:00 2001 From: pasta Date: Tue, 30 Sep 2025 17:50:12 -0500 Subject: [PATCH 017/656] fix: correct thread pool sizing logic in LLMQ and BLS workers The previous logic using std::max(std::min(1, workerCount), 4) always resulted in 4 threads regardless of hardware_concurrency(). This was because: - std::min(1, workerCount) always returns 1 (when workerCount >= 1) - std::max(1, 4) always returns 4 Changed to std::clamp(workerCount, 1, 4) which properly clamps the worker count between 1 and 4 threads based on CPU count. Fixes undersizing on low-end hardware and potential oversizing issues. 
--- src/bls/bls_worker.cpp | 2 +- src/llmq/quorums.cpp | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/src/bls/bls_worker.cpp b/src/bls/bls_worker.cpp index 8041d14fc511..1a65899be450 100644 --- a/src/bls/bls_worker.cpp +++ b/src/bls/bls_worker.cpp @@ -59,7 +59,7 @@ CBLSWorker::~CBLSWorker() void CBLSWorker::Start() { int workerCount = std::thread::hardware_concurrency() / 2; - workerCount = std::max(std::min(1, workerCount), 4); + workerCount = std::clamp(workerCount, 1, 4); workerPool.resize(workerCount); RenameThreadPool(workerPool, "bls-work"); } diff --git a/src/llmq/quorums.cpp b/src/llmq/quorums.cpp index 7368914b287b..196967cab358 100644 --- a/src/llmq/quorums.cpp +++ b/src/llmq/quorums.cpp @@ -234,7 +234,7 @@ CQuorumManager::~CQuorumManager() { Stop(); } void CQuorumManager::Start() { int workerCount = std::thread::hardware_concurrency() / 2; - workerCount = std::max(std::min(1, workerCount), 4); + workerCount = std::clamp(workerCount, 1, 4); workerPool.resize(workerCount); RenameThreadPool(workerPool, "q-mngr"); } From 41e4324a103f889ce724aaafe9b28612cd48cc0c Mon Sep 17 00:00:00 2001 From: pasta Date: Tue, 30 Sep 2025 18:24:02 -0500 Subject: [PATCH 018/656] masternode: replace RecursiveMutex with Mutex in meta store/info and add thread-safety annotations (LOCKS_EXCLUDED); avoid re-entrant locking --- src/masternode/meta.cpp | 14 +++++++------- src/masternode/meta.h | 24 ++++++++++++------------ 2 files changed, 19 insertions(+), 19 deletions(-) diff --git a/src/masternode/meta.cpp b/src/masternode/meta.cpp index adb2aaa30259..783615c1b222 100644 --- a/src/masternode/meta.cpp +++ b/src/masternode/meta.cpp @@ -66,7 +66,7 @@ void CMasternodeMetaInfo::RemoveGovernanceObject(const uint256& nGovernanceObjec mapGovernanceObjectsVotedOn.erase(nGovernanceObjectHash); } -CMasternodeMetaInfoPtr CMasternodeMetaMan::GetMetaInfo(const uint256& proTxHash, bool fCreate) +CMasternodeMetaInfoPtr CMasternodeMetaMan::GetMetaInfo(const uint256& proTxHash, 
bool fCreate) LOCKS_EXCLUDED(cs) { LOCK(cs); auto it = metaInfos.find(proTxHash); @@ -115,7 +115,7 @@ bool CMasternodeMetaMan::AddGovernanceVote(const uint256& proTxHash, const uint2 return true; } -void CMasternodeMetaMan::RemoveGovernanceObject(const uint256& nGovernanceObjectHash) +void CMasternodeMetaMan::RemoveGovernanceObject(const uint256& nGovernanceObjectHash) LOCKS_EXCLUDED(cs) { LOCK(cs); for(const auto& p : metaInfos) { @@ -123,20 +123,20 @@ void CMasternodeMetaMan::RemoveGovernanceObject(const uint256& nGovernanceObject } } -std::vector CMasternodeMetaMan::GetAndClearDirtyGovernanceObjectHashes() +std::vector CMasternodeMetaMan::GetAndClearDirtyGovernanceObjectHashes() LOCKS_EXCLUDED(cs) { std::vector vecTmp; WITH_LOCK(cs, vecTmp.swap(vecDirtyGovernanceObjectHashes)); return vecTmp; } -bool CMasternodeMetaMan::AlreadyHavePlatformBan(const uint256& inv_hash) const +bool CMasternodeMetaMan::AlreadyHavePlatformBan(const uint256& inv_hash) const LOCKS_EXCLUDED(cs) { LOCK(cs); return m_seen_platform_bans.exists(inv_hash); } -std::optional CMasternodeMetaMan::GetPlatformBan(const uint256& inv_hash) const +std::optional CMasternodeMetaMan::GetPlatformBan(const uint256& inv_hash) const LOCKS_EXCLUDED(cs) { LOCK(cs); PlatformBanMessage ret; @@ -147,13 +147,13 @@ std::optional CMasternodeMetaMan::GetPlatformBan(const uint2 return ret; } -void CMasternodeMetaMan::RememberPlatformBan(const uint256& inv_hash, PlatformBanMessage&& msg) +void CMasternodeMetaMan::RememberPlatformBan(const uint256& inv_hash, PlatformBanMessage&& msg) LOCKS_EXCLUDED(cs) { LOCK(cs); m_seen_platform_bans.insert(inv_hash, std::move(msg)); } -std::string MasternodeMetaStore::ToString() const +std::string MasternodeMetaStore::ToString() const LOCKS_EXCLUDED(cs) { LOCK(cs); return strprintf("Masternodes: meta infos object count: %d, nDsqCount: %d", metaInfos.size(), nDsqCount); diff --git a/src/masternode/meta.h b/src/masternode/meta.h index e06a10cceed9..39e4902cf446 100644 --- 
a/src/masternode/meta.h +++ b/src/masternode/meta.h @@ -35,7 +35,7 @@ class CMasternodeMetaInfo friend class CMasternodeMetaMan; private: - mutable RecursiveMutex cs; + mutable Mutex cs; uint256 proTxHash GUARDED_BY(cs); @@ -127,14 +127,14 @@ class MasternodeMetaStore protected: static const std::string SERIALIZATION_VERSION_STRING; - mutable RecursiveMutex cs; + mutable Mutex cs; std::map metaInfos GUARDED_BY(cs); // keep track of dsq count to prevent masternodes from gaming coinjoin queue std::atomic nDsqCount{0}; public: template - void Serialize(Stream &s) const + void Serialize(Stream &s) const LOCKS_EXCLUDED(cs) { LOCK(cs); std::vector tmpMetaInfo; @@ -145,7 +145,7 @@ class MasternodeMetaStore } template - void Unserialize(Stream &s) + void Unserialize(Stream &s) LOCKS_EXCLUDED(cs) { Clear(); @@ -163,14 +163,14 @@ class MasternodeMetaStore } } - void Clear() + void Clear() LOCKS_EXCLUDED(cs) { LOCK(cs); metaInfos.clear(); } - std::string ToString() const; + std::string ToString() const LOCKS_EXCLUDED(cs); }; /** @@ -233,7 +233,7 @@ class CMasternodeMetaMan : public MasternodeMetaStore bool IsValid() const { return is_valid; } - CMasternodeMetaInfoPtr GetMetaInfo(const uint256& proTxHash, bool fCreate = true); + CMasternodeMetaInfoPtr GetMetaInfo(const uint256& proTxHash, bool fCreate = true) LOCKS_EXCLUDED(cs); int64_t GetDsqCount() const { return nDsqCount; } int64_t GetDsqThreshold(const uint256& proTxHash, int nMnCount); @@ -242,13 +242,13 @@ class CMasternodeMetaMan : public MasternodeMetaStore void DisallowMixing(const uint256& proTxHash); bool AddGovernanceVote(const uint256& proTxHash, const uint256& nGovernanceObjectHash); - void RemoveGovernanceObject(const uint256& nGovernanceObjectHash); + void RemoveGovernanceObject(const uint256& nGovernanceObjectHash) LOCKS_EXCLUDED(cs); - std::vector GetAndClearDirtyGovernanceObjectHashes(); + std::vector GetAndClearDirtyGovernanceObjectHashes() LOCKS_EXCLUDED(cs); - bool AlreadyHavePlatformBan(const uint256& 
inv_hash) const; - std::optional GetPlatformBan(const uint256& inv_hash) const; - void RememberPlatformBan(const uint256& inv_hash, PlatformBanMessage&& msg); + bool AlreadyHavePlatformBan(const uint256& inv_hash) const LOCKS_EXCLUDED(cs); + std::optional GetPlatformBan(const uint256& inv_hash) const LOCKS_EXCLUDED(cs); + void RememberPlatformBan(const uint256& inv_hash, PlatformBanMessage&& msg) LOCKS_EXCLUDED(cs); }; #endif // BITCOIN_MASTERNODE_META_H From 01f74a2b0e08a03db8f382e65c1b6e8e0940ae4d Mon Sep 17 00:00:00 2001 From: UdjinM6 Date: Wed, 1 Oct 2025 19:32:33 +0300 Subject: [PATCH 019/656] fix: close "review transaction" span tag --- src/qt/sendcoinsdialog.cpp | 1 + 1 file changed, 1 insertion(+) diff --git a/src/qt/sendcoinsdialog.cpp b/src/qt/sendcoinsdialog.cpp index 8e5253b61ff2..bc5011db329e 100644 --- a/src/qt/sendcoinsdialog.cpp +++ b/src/qt/sendcoinsdialog.cpp @@ -383,6 +383,7 @@ bool SendCoinsDialog::send(const QList& recipients, QString& /*: Text to prompt a user to review the details of the transaction they are attempting to send. */ question_string.append(tr("Please, review your transaction.")); } + question_string.append(""); question_string.append("

"); question_string.append(formatted_short.join("
")); question_string.append("
"); From 41fca83575d748e9880216268170dca413996d9c Mon Sep 17 00:00:00 2001 From: MarcoFalke Date: Thu, 24 Mar 2022 19:56:24 +0100 Subject: [PATCH 020/656] Merge bitcoin/bitcoin#24653: test: use `MiniWallet` in `test/functional/interface_zmq` bc90b8d86916d43867762a391633664676550bd8 [move only] remove `is_wallet_compiled` checks (josibake) 0bfbf7fb2488753d795ffc1c63a8977e4fe4a3bc test: use MiniWallet in `interfaces_zmq` (josibake) Pull request description: While working on #24584 , `interface_zmq` started failing due to coin selection not running deterministically. The test doesn't actually need the wallet, so this PR migrates it to use MiniWallet _Note for reviewers:_ the second commit moves large chunks of code out of an if block, so it may be helpful to review with something that ignores whitespace, e.g `git diff -w master` ACKs for top commit: vincenzopalazzo: ACK https://github.com/bitcoin/bitcoin/pull/24653/commits/bc90b8d86916d43867762a391633664676550bd8 Tree-SHA512: c618e23d00635d72dafdef28e68cbc88b9cc2030d4898fc5b7eac926fd621684c1958c075ed167192716b18308da5a0c1f1393396e31b99d0d3bde78b78fefc5 --- test/functional/interface_zmq.py | 249 +++++++++---------- test/functional/test_framework/blocktools.py | 5 +- 2 files changed, 121 insertions(+), 133 deletions(-) diff --git a/test/functional/interface_zmq.py b/test/functional/interface_zmq.py index f467982d0233..002efe3070a8 100755 --- a/test/functional/interface_zmq.py +++ b/test/functional/interface_zmq.py @@ -13,7 +13,6 @@ from test_framework.messages import ( dashhash, hash256, - tx_from_hex, ) from test_framework.util import ( assert_equal, @@ -21,6 +20,10 @@ p2p_port, ) from test_framework.netutil import test_ipv6_local, test_unix_socket + +from test_framework.wallet import ( + MiniWallet, +) from time import sleep # Test may be skipped and not have zmq installed @@ -100,8 +103,6 @@ class ZMQTest (BitcoinTestFramework): def set_test_params(self): self.num_nodes = 2 self.disable_mocktime = True - if 
self.is_wallet_compiled(): - self.requires_wallet = True # This test isn't testing txn relay/timing, so set whitelist on the # peers for instant txn relay. This speeds up the test run time 2-3x. self.extra_args = [["-whitelist=noban@127.0.0.1"]] * self.num_nodes @@ -114,6 +115,7 @@ def skip_test_if_missing_module(self): self.skip_if_no_bdb() def run_test(self): + self.wallet = MiniWallet(self.nodes[0]) self.ctx = zmq.Context() try: self.test_basic() @@ -223,28 +225,28 @@ def test_basic(self, unix = False): assert_equal([txid.hex()], self.nodes[1].getblock(hash)["tx"]) - if self.is_wallet_compiled(): - self.log.info("Wait for tx from second node") - payment_txid = self.nodes[1].sendtoaddress(self.nodes[0].getnewaddress(), 1.0) - self.sync_all() + self.wallet.rescan_utxos() + self.log.info("Wait for tx from second node") + payment_tx = self.wallet.send_self_transfer(from_node=self.nodes[1]) + payment_txid = payment_tx['txid'] + self.sync_all() + # Should receive the broadcasted txid. + txid = hashtx.receive() + assert_equal(payment_txid, txid.hex()) - # Should receive the broadcasted txid. - txid = hashtx.receive() - assert_equal(payment_txid, txid.hex()) + # TODO: Add "R" sequence testing, potentially using txes replaced with + # islocked txes - # TODO: Add "R" sequence testing, potentially using txes replaced with - # islocked txes + # Should receive the broadcasted raw transaction. + hex = rawtx.receive() + assert_equal(payment_txid, hash256_reversed(hex).hex()) - # Should receive the broadcasted raw transaction. 
- hex = rawtx.receive() - assert_equal(payment_txid, hash256_reversed(hex).hex()) - - # Mining the block with this tx should result in second notification - # after coinbase tx notification - self.generatetoaddress(self.nodes[0], 1, ADDRESS_BCRT1_UNSPENDABLE) - hashtx.receive() - txid = hashtx.receive() - assert_equal(payment_txid, txid.hex()) + # Mining the block with this tx should result in second notification + # after coinbase tx notification + self.generatetoaddress(self.nodes[0], 1, ADDRESS_BCRT1_UNSPENDABLE) + hashtx.receive() + txid = hashtx.receive() + assert_equal(payment_txid, txid.hex()) self.log.info("Test the getzmqnotifications RPC") @@ -261,9 +263,6 @@ def test_basic(self, unix = False): def test_reorg(self): - if not self.is_wallet_compiled(): - self.log.info("Skipping reorg test because wallet is disabled") - return address = f"tcp://127.0.0.1:{self.zmq_port_base}" @@ -274,7 +273,7 @@ def test_reorg(self): self.disconnect_nodes(0, 1) # Generate 1 block in nodes[0] with 1 mempool tx and receive all notifications - payment_txid = self.nodes[0].sendtoaddress(self.nodes[0].getnewaddress(), 1.0) + payment_txid = self.wallet.send_self_transfer(from_node=self.nodes[0])['txid'] disconnect_block = self.generatetoaddress(self.nodes[0], 1, ADDRESS_BCRT1_UNSPENDABLE, sync_fun=self.no_op)[0] disconnect_cb = self.nodes[0].getblock(disconnect_block)["tx"][0] assert_equal(self.nodes[0].getbestblockhash(), hashblock.receive().hex()) @@ -343,113 +342,105 @@ def test_sequence(self): assert_equal((self.nodes[1].getblockhash(block_count-1), "C", None), seq.receive_sequence()) assert_equal((self.nodes[1].getblockhash(block_count), "C", None), seq.receive_sequence()) - # Rest of test requires wallet functionality - if self.is_wallet_compiled(): - self.log.info("Wait for tx from second node") - payment_txid = self.nodes[1].sendtoaddress(address=self.nodes[0].getnewaddress(), amount=5.0) - self.sync_all() - self.log.info("Testing sequence notifications with mempool 
sequence values") - - # Should receive the broadcasted txid. - assert_equal((payment_txid, "A", seq_num), seq.receive_sequence()) - seq_num += 1 - - # Doesn't get published when mined, make a block and tx to "flush" the possibility - # though the mempool sequence number does go up by the number of transactions - # removed from the mempool by the block mining it. - mempool_size = len(self.nodes[0].getrawmempool()) - c_block = self.generatetoaddress(self.nodes[0], 1, ADDRESS_BCRT1_UNSPENDABLE)[0] - # Make sure the number of mined transactions matches the number of txs out of mempool - mempool_size_delta = mempool_size - len(self.nodes[0].getrawmempool()) - assert_equal(len(self.nodes[0].getblock(c_block)["tx"])-1, mempool_size_delta) - seq_num += mempool_size_delta - payment_txid_2 = self.nodes[1].sendtoaddress(self.nodes[0].getnewaddress(), 1.0) - self.sync_all() - assert_equal((c_block, "C", None), seq.receive_sequence()) - assert_equal((payment_txid_2, "A", seq_num), seq.receive_sequence()) - seq_num += 1 - - # Spot check getrawmempool results that they only show up when asked for - assert type(self.nodes[0].getrawmempool()) is list - assert type(self.nodes[0].getrawmempool(mempool_sequence=False)) is list - assert "mempool_sequence" not in self.nodes[0].getrawmempool(verbose=True) - assert_raises_rpc_error(-8, "Verbose results cannot contain mempool sequence values.", self.nodes[0].getrawmempool, True, True) - assert_equal(self.nodes[0].getrawmempool(mempool_sequence=True)["mempool_sequence"], seq_num) - - self.log.info("Testing reorg notifications") - # Manually invalidate the last block to test mempool re-entry - # N.B. This part could be made more lenient in exact ordering - # since it greatly depends on inner-workings of blocks/mempool - # during "deep" re-orgs. Probably should "re-construct" - # blockchain/mempool state from notifications instead. 
- block_count = self.nodes[0].getblockcount() - best_hash = self.nodes[0].getbestblockhash() - self.nodes[0].invalidateblock(best_hash) - sleep(2) # Bit of room to make sure transaction things happened - - # Make sure getrawmempool mempool_sequence results aren't "queued" but immediately reflective - # of the time they were gathered. - assert self.nodes[0].getrawmempool(mempool_sequence=True)["mempool_sequence"] > seq_num - - assert_equal((best_hash, "D", None), seq.receive_sequence()) - assert_equal((payment_txid, "A", seq_num), seq.receive_sequence()) - seq_num += 1 - - # Other things may happen but aren't wallet-deterministic so we don't test for them currently - self.nodes[0].reconsiderblock(best_hash) - self.generatetoaddress(self.nodes[1], 1, ADDRESS_BCRT1_UNSPENDABLE) - - self.log.info("Evict mempool transaction by block conflict") - orig_txid = self.nodes[0].sendtoaddress(address=self.nodes[0].getnewaddress(), amount=1.0) - - # More to be simply mined - more_tx = [] - for _ in range(5): - more_tx.append(self.nodes[0].sendtoaddress(self.nodes[0].getnewaddress(), 0.1)) - - raw_tx = self.nodes[0].getrawtransaction(orig_txid) - # Mine the tx - block = create_block(int(self.nodes[0].getbestblockhash(), 16), create_coinbase(self.nodes[0].getblockcount()+1)) - tx = tx_from_hex(raw_tx) - block.vtx.append(tx) - for txid in more_tx: - tx = tx_from_hex(self.nodes[0].getrawtransaction(txid)) - block.vtx.append(tx) - block.hashMerkleRoot = block.calc_merkle_root() - block.solve() - assert_equal(self.nodes[0].submitblock(block.serialize().hex()), None) - tip = self.nodes[0].getbestblockhash() - assert_equal(int(tip, 16), block.sha256) - orig_txid_2 = self.nodes[0].sendtoaddress(address=self.nodes[0].getnewaddress(), amount=1.0) - - # Flush old notifications until evicted tx original entry + self.log.info("Wait for tx from second node") + payment_tx = self.wallet.send_self_transfer(from_node=self.nodes[1]) + payment_txid = payment_tx['txid'] + self.sync_all() + 
self.log.info("Testing sequence notifications with mempool sequence values") + + # Should receive the broadcasted txid. + assert_equal((payment_txid, "A", seq_num), seq.receive_sequence()) + seq_num += 1 + + # Doesn't get published when mined, make a block and tx to "flush" the possibility + # though the mempool sequence number does go up by the number of transactions + # removed from the mempool by the block mining it. + mempool_size = len(self.nodes[0].getrawmempool()) + c_block = self.generatetoaddress(self.nodes[0], 1, ADDRESS_BCRT1_UNSPENDABLE)[0] + # Make sure the number of mined transactions matches the number of txs out of mempool + mempool_size_delta = mempool_size - len(self.nodes[0].getrawmempool()) + assert_equal(len(self.nodes[0].getblock(c_block)["tx"])-1, mempool_size_delta) + seq_num += mempool_size_delta + payment_txid_2 = self.wallet.send_self_transfer(from_node=self.nodes[1])['txid'] + self.sync_all() + assert_equal((c_block, "C", None), seq.receive_sequence()) + assert_equal((payment_txid_2, "A", seq_num), seq.receive_sequence()) + seq_num += 1 + + # Spot check getrawmempool results that they only show up when asked for + assert type(self.nodes[0].getrawmempool()) is list + assert type(self.nodes[0].getrawmempool(mempool_sequence=False)) is list + assert "mempool_sequence" not in self.nodes[0].getrawmempool(verbose=True) + assert_raises_rpc_error(-8, "Verbose results cannot contain mempool sequence values.", self.nodes[0].getrawmempool, True, True) + assert_equal(self.nodes[0].getrawmempool(mempool_sequence=True)["mempool_sequence"], seq_num) + + self.log.info("Testing reorg notifications") + # Manually invalidate the last block to test mempool re-entry + # N.B. This part could be made more lenient in exact ordering + # since it greatly depends on inner-workings of blocks/mempool + # during "deep" re-orgs. Probably should "re-construct" + # blockchain/mempool state from notifications instead. 
+ block_count = self.nodes[0].getblockcount() + best_hash = self.nodes[0].getbestblockhash() + self.nodes[0].invalidateblock(best_hash) + sleep(2) # Bit of room to make sure transaction things happened + + # Make sure getrawmempool mempool_sequence results aren't "queued" but immediately reflective + # of the time they were gathered. + assert self.nodes[0].getrawmempool(mempool_sequence=True)["mempool_sequence"] > seq_num + + assert_equal((best_hash, "D", None), seq.receive_sequence()) + assert_equal((payment_txid, "A", seq_num), seq.receive_sequence()) + seq_num += 1 + + # Other things may happen but aren't wallet-deterministic so we don't test for them currently + self.nodes[0].reconsiderblock(best_hash) + self.generatetoaddress(self.nodes[1], 1, ADDRESS_BCRT1_UNSPENDABLE) + + self.log.info("Evict mempool transaction by block conflict") + orig_tx = self.wallet.send_self_transfer(from_node=self.nodes[0]) + orig_txid = orig_tx['txid'] + + # More to be simply mined + more_tx = [] + for _ in range(5): + more_tx.append(self.wallet.send_self_transfer(from_node=self.nodes[0])) + + # Mine the tx + txs_to_add = [orig_tx['hex']] + [tx['hex'] for tx in more_tx] + block = create_block(int(self.nodes[0].getbestblockhash(), 16), create_coinbase(self.nodes[0].getblockcount()+1), txlist=txs_to_add) + block.hashMerkleRoot = block.calc_merkle_root() + block.solve() + assert_equal(self.nodes[0].submitblock(block.serialize().hex()), None) + tip = self.nodes[0].getbestblockhash() + assert_equal(int(tip, 16), block.sha256) + orig_txid_2 = self.wallet.send_self_transfer(from_node=self.nodes[0])['txid'] + + # Flush old notifications until evicted tx original entry + (hash_str, label, mempool_seq) = seq.receive_sequence() + while hash_str != orig_txid: (hash_str, label, mempool_seq) = seq.receive_sequence() - while hash_str != orig_txid: - (hash_str, label, mempool_seq) = seq.receive_sequence() - mempool_seq += 1 - - # Added original tx - assert_equal(label, "A") - # More transactions to 
be simply mined - for i in range(len(more_tx)): - assert_equal((more_tx[i], "A", mempool_seq), seq.receive_sequence()) - mempool_seq += 1 + mempool_seq += 1 + # Added original tx + assert_equal(label, "A") + # More transactions to be simply mined + for i in range(len(more_tx)): + assert_equal((more_tx[i]['txid'], "A", mempool_seq), seq.receive_sequence()) mempool_seq += 1 - assert_equal((tip, "C", None), seq.receive_sequence()) - mempool_seq += len(more_tx) - # Last tx - assert_equal((orig_txid_2, "A", mempool_seq), seq.receive_sequence()) - mempool_seq += 1 - self.generatetoaddress(self.nodes[0], 1, ADDRESS_BCRT1_UNSPENDABLE) + + mempool_seq += 1 + assert_equal((tip, "C", None), seq.receive_sequence()) + mempool_seq += len(more_tx) + # Last tx + assert_equal((orig_txid_2, "A", mempool_seq), seq.receive_sequence()) + mempool_seq += 1 + self.generatetoaddress(self.nodes[0], 1, ADDRESS_BCRT1_UNSPENDABLE) def test_mempool_sync(self): """ Use sequence notification plus getrawmempool sequence results to "sync mempool" """ - if not self.is_wallet_compiled(): - self.log.info("Skipping mempool sync test") - return self.log.info("Testing 'mempool sync' usage of sequence notifier") [seq] = self.setup_zmq_test([("sequence", f"tcp://127.0.0.1:{self.zmq_port_base}")]) @@ -460,10 +451,10 @@ def test_mempool_sync(self): # Some transactions have been happening but we aren't consuming zmq notifications yet # or we lost a ZMQ message somehow and want to start over - txids = [] + txs = [] num_txs = 5 for _ in range(num_txs): - txids.append(self.nodes[1].sendtoaddress(address=self.nodes[0].getnewaddress(), amount=1.0)) + txs.append(self.wallet.send_self_transfer(from_node=self.nodes[1])) self.sync_all() # 1) Consume backlog until we get a mempool sequence number @@ -488,10 +479,10 @@ def test_mempool_sync(self): # Things continue to happen in the "interim" while waiting for snapshot results for _ in range(num_txs): - 
txids.append(self.nodes[0].sendtoaddress(address=self.nodes[0].getnewaddress(), amount=0.1)) + txs.append(self.wallet.send_self_transfer(from_node=self.nodes[0])['txid']) self.sync_all() self.generatetoaddress(self.nodes[0], 1, ADDRESS_BCRT1_UNSPENDABLE) - final_txid = self.nodes[0].sendtoaddress(address=self.nodes[0].getnewaddress(), amount=0.1) + final_txid = self.wallet.send_self_transfer(from_node=self.nodes[0])['txid'] # 3) Consume ZMQ backlog until we get to "now" for the mempool snapshot while True: diff --git a/test/functional/test_framework/blocktools.py b/test/functional/test_framework/blocktools.py index 31f4df13f1cd..d451ab17359a 100644 --- a/test/functional/test_framework/blocktools.py +++ b/test/functional/test_framework/blocktools.py @@ -5,7 +5,6 @@ """Utilities for manipulating blocks and transactions.""" from decimal import Decimal -import io import struct import time import unittest @@ -66,9 +65,7 @@ def create_block(hashprev=None, coinbase=None, ntime=None, *, version=None, tmpl if txlist: for tx in txlist: if not hasattr(tx, 'calc_sha256'): - txo = CTransaction() - txo.deserialize(io.BytesIO(tx)) - tx = txo + tx = tx_from_hex(tx) block.vtx.append(tx) block.hashMerkleRoot = block.calc_merkle_root() block.calc_sha256() From 4e18b94772fd6c225207cc4f2d6656f15f9a4dde Mon Sep 17 00:00:00 2001 From: MarcoFalke Date: Fri, 25 Mar 2022 11:31:23 +0100 Subject: [PATCH 021/656] Merge bitcoin/bitcoin#24667: ci: Compile fuzz binary in periodic task fa6e47d85be42b3065ad530257e5d4c23afcade3 ci: Compile fuzz binary in periodic task (MarcoFalke) Pull request description: There is one task that is re-run periodically by CI about once a week for all pull request to detect silent merge conflict before merge. It would be nice if this task also checked for silent merge conflict in the fuzz binary. 
ACKs for top commit: fanquake: ACK fa6e47d85be42b3065ad530257e5d4c23afcade3 green CI vincenzopalazzo: ACK https://github.com/bitcoin/bitcoin/pull/24667/commits/fa6e47d85be42b3065ad530257e5d4c23afcade3 Tree-SHA512: fd5b02dc72af7a4ba973919d62c3aad11569f86a93622e2e56330ea9d379f1f015dfd5cb7efbf00718b306103028bc10c7e935579c2337fb506bd7fe3e145e1c --- ci/test/00_setup_env_native_qt5.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ci/test/00_setup_env_native_qt5.sh b/ci/test/00_setup_env_native_qt5.sh index 78aeef1f0aec..a455569645a7 100755 --- a/ci/test/00_setup_env_native_qt5.sh +++ b/ci/test/00_setup_env_native_qt5.sh @@ -15,4 +15,4 @@ export RUN_UNIT_TESTS_SEQUENTIAL="true" export RUN_UNIT_TESTS="false" export GOAL="install" export PREVIOUS_RELEASES_TO_DOWNLOAD="v0.12.1.5 v0.15.0.0 v0.16.1.1 v0.17.0.3 v18.2.2 v19.3.0 v20.1.1 v21.1.1" -export BITCOIN_CONFIG="--enable-zmq --with-libs=no --enable-reduce-exports --disable-fuzz-binary LDFLAGS=-static-libstdc++ --with-boost-process" +export BITCOIN_CONFIG="--enable-zmq --with-libs=no --enable-reduce-exports LDFLAGS=-static-libstdc++ --with-boost-process" From 8fe133360a50e5b9793dba900112fc559270eb3d Mon Sep 17 00:00:00 2001 From: MarcoFalke Date: Fri, 25 Mar 2022 16:52:55 +0100 Subject: [PATCH 022/656] Merge bitcoin/bitcoin#24670: test: move-only: Move all generate* tests to a single file 0000ff0d6b2442706a588fd906ebf1adf8ff8226 test: move-only: Move all generate* tests to a single file (MarcoFalke) Pull request description: Seems a bit overkill to spread tests for the `generate*` methods over several files. Combining them into a single file has also a nice side-effect of requiring less node (re)starts, which are expensive in valgrind. 
ACKs for top commit: glozow: utACK 0000ff0d6b2442706a588fd906ebf1adf8ff8226 Tree-SHA512: 8269eb05649a871011bbfbd1838d0f7d1dac4a35b3b198fc43fe85131fda8a53803b75da78cbf422eabf086006dee4421e622fbe706f6781a3848b989024001b --- test/functional/rpc_generate.py | 83 ++++++++++++++++++++++++- test/functional/rpc_generateblock.py | 91 ---------------------------- test/functional/test_runner.py | 1 - test/functional/wallet_disable.py | 6 +- 4 files changed, 83 insertions(+), 98 deletions(-) delete mode 100755 test/functional/rpc_generateblock.py diff --git a/test/functional/rpc_generate.py b/test/functional/rpc_generate.py index 47d7814da37a..77a0388f3876 100755 --- a/test/functional/rpc_generate.py +++ b/test/functional/rpc_generate.py @@ -2,9 +2,10 @@ # Copyright (c) 2020-2021 The Bitcoin Core developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. -"""Test generate RPC.""" +"""Test generate* RPCs.""" from test_framework.test_framework import BitcoinTestFramework +from test_framework.wallet import MiniWallet from test_framework.util import ( assert_equal, assert_raises_rpc_error, @@ -16,6 +17,86 @@ def set_test_params(self): self.num_nodes = 1 def run_test(self): + self.test_generatetoaddress() + self.test_generate() + self.test_generateblock() + + def test_generatetoaddress(self): + self.generatetoaddress(self.nodes[0], 1, 'ycwedq2f3sz2Yf9JqZsBCQPxp18WU3Hp4J') + assert_raises_rpc_error(-5, "Invalid address", self.generatetoaddress, self.nodes[0], 1, '3J98t1WpEZ73CNmQviecrnyiWrnqRhWNLy') + + def test_generateblock(self): + node = self.nodes[0] + miniwallet = MiniWallet(node) + miniwallet.rescan_utxos() + + self.log.info('Generate an empty block to address') + address = miniwallet.get_address() + hash = self.generateblock(node, output=address, transactions=[])['hash'] + block = node.getblock(blockhash=hash, verbose=2) + assert_equal(len(block['tx']), 1) + 
assert_equal(block['tx'][0]['vout'][0]['scriptPubKey']['address'], address) + + self.log.info('Generate an empty block to a descriptor') + hash = self.generateblock(node, 'addr(' + address + ')', [])['hash'] + block = node.getblock(blockhash=hash, verbosity=2) + assert_equal(len(block['tx']), 1) + assert_equal(block['tx'][0]['vout'][0]['scriptPubKey']['address'], address) + + self.log.info('Generate an empty block to a combo descriptor with compressed pubkey') + combo_key = '0279be667ef9dcbbac55a06295ce870b07029bfcdb2dce28d959f2815b16f81798' + combo_address = 'yWziQMcwmKjRdzi7eWjwiQX8EjWcd6dSg6' + hash = self.generateblock(node, 'combo(' + combo_key + ')', [])['hash'] + block = node.getblock(hash, 2) + assert_equal(len(block['tx']), 1) + assert_equal(block['tx'][0]['vout'][0]['scriptPubKey']['address'], combo_address) + + # Generate some extra mempool transactions to verify they don't get mined + for _ in range(10): + miniwallet.send_self_transfer(from_node=node) + + self.log.info('Generate block with txid') + txid = miniwallet.send_self_transfer(from_node=node)['txid'] + hash = self.generateblock(node, address, [txid])['hash'] + block = node.getblock(hash, 1) + assert_equal(len(block['tx']), 2) + assert_equal(block['tx'][1], txid) + + self.log.info('Generate block with raw tx') + rawtx = miniwallet.create_self_transfer()['hex'] + hash = self.generateblock(node, address, [rawtx])['hash'] + + block = node.getblock(hash, 1) + assert_equal(len(block['tx']), 2) + txid = block['tx'][1] + assert_equal(node.getrawtransaction(txid=txid, verbose=False, blockhash=hash), rawtx) + + self.log.info('Fail to generate block with out of order txs') + txid1 = miniwallet.send_self_transfer(from_node=node)['txid'] + utxo1 = miniwallet.get_utxo(txid=txid1) + rawtx2 = miniwallet.create_self_transfer(utxo_to_spend=utxo1)['hex'] + assert_raises_rpc_error(-25, 'TestBlockValidity failed: bad-txns-inputs-missingorspent', self.generateblock, node, address, [rawtx2, txid1]) + + 
self.log.info('Fail to generate block with txid not in mempool') + missing_txid = '0000000000000000000000000000000000000000000000000000000000000000' + assert_raises_rpc_error(-5, 'Transaction ' + missing_txid + ' not in mempool.', self.generateblock, node, address, [missing_txid]) + + self.log.info('Fail to generate block with invalid raw tx') + invalid_raw_tx = '0000' + assert_raises_rpc_error(-22, 'Transaction decode failed for ' + invalid_raw_tx, self.generateblock, node, address, [invalid_raw_tx]) + + self.log.info('Fail to generate block with invalid address/descriptor') + assert_raises_rpc_error(-5, 'Invalid address or descriptor', self.generateblock, node, '1234', []) + + self.log.info('Fail to generate block with a ranged descriptor') + ranged_descriptor = 'pkh(tpubD6NzVbkrYhZ4XgiXtGrdW5XDAPFCL9h7we1vwNCpn8tGbBcgfVYjXyhWo4E1xkh56hjod1RhGjxbaTLV3X4FyWuejifB9jusQ46QzG87VKp/0/*)' + assert_raises_rpc_error(-8, 'Ranged descriptor not accepted. Maybe pass through deriveaddresses first?', self.generateblock, node, ranged_descriptor, []) + + self.log.info('Fail to generate block with a descriptor missing a private key') + child_descriptor = 'pkh(tpubD6NzVbkrYhZ4XgiXtGrdW5XDAPFCL9h7we1vwNCpn8tGbBcgfVYjXyhWo4E1xkh56hjod1RhGjxbaTLV3X4FyWuejifB9jusQ46QzG87VKp/0\'/0)' + assert_raises_rpc_error(-5, 'Cannot derive script without private keys', self.generateblock, node, child_descriptor, []) + + def test_generate(self): message = ( "generate\n\n" "has been replaced by the -generate " diff --git a/test/functional/rpc_generateblock.py b/test/functional/rpc_generateblock.py deleted file mode 100755 index 9fce143990e1..000000000000 --- a/test/functional/rpc_generateblock.py +++ /dev/null @@ -1,91 +0,0 @@ -#!/usr/bin/env python3 -# Copyright (c) 2020 The Bitcoin Core developers -# Distributed under the MIT software license, see the accompanying -# file COPYING or http://www.opensource.org/licenses/mit-license.php. -'''Test generateblock rpc. 
-''' - -from test_framework.test_framework import BitcoinTestFramework -from test_framework.wallet import MiniWallet -from test_framework.util import ( - assert_equal, - assert_raises_rpc_error, -) - -class GenerateBlockTest(BitcoinTestFramework): - def set_test_params(self): - self.num_nodes = 1 - - def run_test(self): - node = self.nodes[0] - miniwallet = MiniWallet(node) - miniwallet.rescan_utxos() - - self.log.info('Generate an empty block to address') - address = miniwallet.get_address() - hash = self.generateblock(node, address, [])['hash'] - block = node.getblock(hash, 2) - assert_equal(len(block['tx']), 1) - assert_equal(block['tx'][0]['vout'][0]['scriptPubKey']['address'], address) - - self.log.info('Generate an empty block to a descriptor') - hash = self.generateblock(node, 'addr(' + address + ')', [])['hash'] - block = node.getblock(hash, 2) - assert_equal(len(block['tx']), 1) - assert_equal(block['tx'][0]['vout'][0]['scriptPubKey']['address'], address) - - self.log.info('Generate an empty block to a combo descriptor with compressed pubkey') - combo_key = '0279be667ef9dcbbac55a06295ce870b07029bfcdb2dce28d959f2815b16f81798' - combo_address = 'yWziQMcwmKjRdzi7eWjwiQX8EjWcd6dSg6' - hash = self.generateblock(node, 'combo(' + combo_key + ')', [])['hash'] - block = node.getblock(hash, 2) - assert_equal(len(block['tx']), 1) - assert_equal(block['tx'][0]['vout'][0]['scriptPubKey']['address'], combo_address) - - # Generate some extra mempool transactions to verify they don't get mined - for _ in range(10): - miniwallet.send_self_transfer(from_node=node) - - self.log.info('Generate block with txid') - txid = miniwallet.send_self_transfer(from_node=node)['txid'] - hash = self.generateblock(node, address, [txid])['hash'] - block = node.getblock(hash, 1) - assert_equal(len(block['tx']), 2) - assert_equal(block['tx'][1], txid) - - self.log.info('Generate block with raw tx') - rawtx = miniwallet.create_self_transfer()['hex'] - hash = self.generateblock(node, address, 
[rawtx])['hash'] - - block = node.getblock(hash, 1) - assert_equal(len(block['tx']), 2) - txid = block['tx'][1] - assert_equal(node.getrawtransaction(txid=txid, verbose=False, blockhash=hash), rawtx) - - self.log.info('Fail to generate block with out of order txs') - txid1 = miniwallet.send_self_transfer(from_node=node)['txid'] - utxo1 = miniwallet.get_utxo(txid=txid1) - rawtx2 = miniwallet.create_self_transfer(utxo_to_spend=utxo1)['hex'] - assert_raises_rpc_error(-25, 'TestBlockValidity failed: bad-txns-inputs-missingorspent', self.generateblock, node, address, [rawtx2, txid1]) - - self.log.info('Fail to generate block with txid not in mempool') - missing_txid = '0000000000000000000000000000000000000000000000000000000000000000' - assert_raises_rpc_error(-5, 'Transaction ' + missing_txid + ' not in mempool.', self.generateblock, node, address, [missing_txid]) - - self.log.info('Fail to generate block with invalid raw tx') - invalid_raw_tx = '0000' - assert_raises_rpc_error(-22, 'Transaction decode failed for ' + invalid_raw_tx, self.generateblock, node, address, [invalid_raw_tx]) - - self.log.info('Fail to generate block with invalid address/descriptor') - assert_raises_rpc_error(-5, 'Invalid address or descriptor', self.generateblock, node, '1234', []) - - self.log.info('Fail to generate block with a ranged descriptor') - ranged_descriptor = 'pkh(tpubD6NzVbkrYhZ4XgiXtGrdW5XDAPFCL9h7we1vwNCpn8tGbBcgfVYjXyhWo4E1xkh56hjod1RhGjxbaTLV3X4FyWuejifB9jusQ46QzG87VKp/0/*)' - assert_raises_rpc_error(-8, 'Ranged descriptor not accepted. 
Maybe pass through deriveaddresses first?', self.generateblock, node, ranged_descriptor, []) - - self.log.info('Fail to generate block with a descriptor missing a private key') - child_descriptor = 'pkh(tpubD6NzVbkrYhZ4XgiXtGrdW5XDAPFCL9h7we1vwNCpn8tGbBcgfVYjXyhWo4E1xkh56hjod1RhGjxbaTLV3X4FyWuejifB9jusQ46QzG87VKp/0\'/0)' - assert_raises_rpc_error(-5, 'Cannot derive script without private keys', self.generateblock, node, child_descriptor, []) - -if __name__ == '__main__': - GenerateBlockTest().main() diff --git a/test/functional/test_runner.py b/test/functional/test_runner.py index 2c1a8597f535..f3ba40f9b8cb 100755 --- a/test/functional/test_runner.py +++ b/test/functional/test_runner.py @@ -287,7 +287,6 @@ 'p2p_net_deadlock.py --v2transport', 'wallet_signmessagewithaddress.py', 'rpc_signmessagewithprivkey.py', - 'rpc_generateblock.py', 'rpc_generate.py', 'wallet_balance.py --legacy-wallet', 'wallet_balance.py --descriptors', diff --git a/test/functional/wallet_disable.py b/test/functional/wallet_disable.py index de8cd0af0b24..05b1b53d7105 100755 --- a/test/functional/wallet_disable.py +++ b/test/functional/wallet_disable.py @@ -26,10 +26,6 @@ def run_test (self): x = self.nodes[0].validateaddress('ycwedq2f3sz2Yf9JqZsBCQPxp18WU3Hp4J') assert x['isvalid'] == True - # Checking mining to an address without a wallet. Generating to a valid address should succeed - # but generating to an invalid address will fail. 
- self.generatetoaddress(self.nodes[0], 1, 'ycwedq2f3sz2Yf9JqZsBCQPxp18WU3Hp4J') - assert_raises_rpc_error(-5, "Invalid address", self.generatetoaddress, self.nodes[0], 1, '7TSBtVu959hGEGPKyHjJz9k55RpWrPffXz') if __name__ == '__main__': - DisableWalletTest ().main () + DisableWalletTest().main() From 65db76fefd6b116781aff556996c63b32c81778b Mon Sep 17 00:00:00 2001 From: MarcoFalke Date: Mon, 28 Mar 2022 08:53:29 +0200 Subject: [PATCH 023/656] Merge bitcoin/bitcoin#24656: refactor: Move mempool RPCs to rpc/mempool fac5a51c47fba21678fb35805e40d00fe7c891a0 Move mempool RPCs to rpc/mempool (MarcoFalke) fa0f666dd7b55a5a0250c5e35d2c3dfd565463b7 style: Add static keyword where possible in rpc/mempool (MarcoFalke) Pull request description: This moves the remaining mempool RPCs to `rpc/mempool`. Previously all mempool RPCs from the `blockchain` category have been moved. This patch moves the ones from the `rawtransactions` category. In the future, as a follow-up to this refactoring patch, it could be considered whether a new `mempool` category should be introduced. Beside a clearer code organization, this pull request should also reduce the compile time and space of the `rawtransactions.cpp` file. ACKs for top commit: promag: Code review ACK fac5a51c47fba21678fb35805e40d00fe7c891a0. 
Tree-SHA512: 5578b894b68d0595869a9b03ed8dceebe3366f73dec5f090ccc36ff4002b1bc4d58af77546c2d71537c1be03694d9a28c4b1bfbb3569560997879293c5c0301e --- src/rpc/mempool.cpp | 230 +++++++++++++++++++++++++++++++++++-- src/rpc/rawtransaction.cpp | 210 --------------------------------- 2 files changed, 222 insertions(+), 218 deletions(-) diff --git a/src/rpc/mempool.cpp b/src/rpc/mempool.cpp index ab527f39f824..165ea861df88 100644 --- a/src/rpc/mempool.cpp +++ b/src/rpc/mempool.cpp @@ -14,14 +14,225 @@ #include #include #include +#include #include #include #include +using node::DEFAULT_MAX_RAW_TX_FEE_RATE; using node::NodeContext; -static std::vector MempoolEntryDescription() { return { +RPCHelpMan sendrawtransaction() +{ + return RPCHelpMan{"sendrawtransaction", + "\nSubmit a raw transaction (serialized, hex-encoded) to local node and network.\n" + "\nThe transaction will be sent unconditionally to all peers, so using sendrawtransaction\n" + "for manual rebroadcast may degrade privacy by leaking the transaction's origin, as\n" + "nodes will normally not rebroadcast non-wallet transactions already in their mempool.\n" + "\nA specific exception, RPC_TRANSACTION_ALREADY_IN_CHAIN, may throw if the transaction cannot be added to the mempool.\n" + "\nRelated RPCs: createrawtransaction, signrawtransactionwithkey\n", + { + {"hexstring", RPCArg::Type::STR_HEX, RPCArg::Optional::NO, "The hex string of the raw transaction"}, + {"maxfeerate", RPCArg::Type::AMOUNT, RPCArg::Default{FormatMoney(DEFAULT_MAX_RAW_TX_FEE_RATE.GetFeePerK())}, + "Reject transactions whose fee rate is higher than the specified value, expressed in " + CURRENCY_UNIT + + "/kB.\nSet to 0 to accept any fee rate.\n"}, + {"instantsend", RPCArg::Type::BOOL, RPCArg::Optional::OMITTED, "Deprecated and ignored"}, + {"bypasslimits", RPCArg::Type::BOOL, RPCArg::Default{false}, "Bypass transaction policy limits"}, + }, + RPCResult{ + RPCResult::Type::STR_HEX, "", "The transaction hash in hex" + }, + RPCExamples{ + "\nCreate a 
transaction\n" + + HelpExampleCli("createrawtransaction", "\"[{\\\"txid\\\" : \\\"mytxid\\\",\\\"vout\\\":0}]\" \"{\\\"myaddress\\\":0.01}\"") + + "Sign the transaction, and get back the hex\n" + + HelpExampleCli("signrawtransactionwithwallet", "\"myhex\"") + + "\nSend the transaction (signed hex)\n" + + HelpExampleCli("sendrawtransaction", "\"signedhex\"") + + "\nAs a JSON-RPC call\n" + + HelpExampleRpc("sendrawtransaction", "\"signedhex\"") + }, + [&](const RPCHelpMan& self, const JSONRPCRequest& request) -> UniValue + { + RPCTypeCheck(request.params, { + UniValue::VSTR, + UniValueType(), // VNUM or VSTR, checked inside AmountFromValue() + UniValue::VBOOL + }); + + CMutableTransaction mtx; + if (!DecodeHexTx(mtx, request.params[0].get_str())) { + throw JSONRPCError(RPC_DESERIALIZATION_ERROR, "TX decode failed. Make sure the tx has at least one input."); + } + CTransactionRef tx(MakeTransactionRef(std::move(mtx))); + + const CFeeRate max_raw_tx_fee_rate = request.params[1].isNull() ? + DEFAULT_MAX_RAW_TX_FEE_RATE : + CFeeRate(AmountFromValue(request.params[1])); + + int64_t virtual_size = GetVirtualTransactionSize(*tx); + CAmount max_raw_tx_fee = max_raw_tx_fee_rate.GetFee(virtual_size); + + bool bypass_limits = false; + if (!request.params[3].isNull()) bypass_limits = request.params[3].get_bool(); + bilingual_str err_string; + AssertLockNotHeld(cs_main); + NodeContext& node = EnsureAnyNodeContext(request.context); + const TransactionError err = BroadcastTransaction(node, tx, err_string, max_raw_tx_fee, /*relay=*/true, /*wait_callback=*/true, bypass_limits); + if (TransactionError::OK != err) { + throw JSONRPCTransactionError(err, err_string.original); + } + + return tx->GetHash().GetHex(); + }, + }; +} + +static RPCHelpMan testmempoolaccept() +{ + return RPCHelpMan{"testmempoolaccept", + "\nReturns result of mempool acceptance tests indicating if raw transaction (serialized, hex-encoded) would be accepted by mempool.\n" + "\nIf multiple transactions are passed 
in, parents must come before children and package policies apply: the transactions cannot conflict with any mempool transactions or each other.\n" + "\nIf one transaction fails, other transactions may not be fully validated (the 'allowed' key will be blank).\n" + "\nThe maximum number of transactions allowed is " + ToString(MAX_PACKAGE_COUNT) + ".\n" + "\nThis checks if transactions violate the consensus or policy rules.\n" + "\nSee sendrawtransaction call.\n", + { + {"rawtxs", RPCArg::Type::ARR, RPCArg::Optional::NO, "An array of hex strings of raw transactions.", + { + {"rawtx", RPCArg::Type::STR_HEX, RPCArg::Optional::OMITTED, ""}, + }, + }, + {"maxfeerate", RPCArg::Type::AMOUNT, RPCArg::Default{FormatMoney(DEFAULT_MAX_RAW_TX_FEE_RATE.GetFeePerK())}, + "Reject transactions whose fee rate is higher than the specified value, expressed in " + CURRENCY_UNIT + "/kB\n"}, + }, + RPCResult{ + RPCResult::Type::ARR, "", "The result of the mempool acceptance test for each raw transaction in the input array.\n" + "Returns results for each transaction in the same order they were passed in.\n" + "Transactions that cannot be fully validated due to failures in other transactions will not contain an 'allowed' result.\n", + { + {RPCResult::Type::OBJ, "", "", + { + {RPCResult::Type::STR_HEX, "txid", "The transaction hash in hex"}, + {RPCResult::Type::STR, "package-error", /*optional=*/true, "Package validation error, if any (only possible if rawtxs had more than 1 transaction)."}, + {RPCResult::Type::BOOL, "allowed", /*optional=*/true, "Whether this tx would be accepted to the mempool and pass client-specified maxfeerate. 
" + "If not present, the tx was not fully validated due to a failure in another tx in the list."}, + {RPCResult::Type::NUM, "vsize", /*optional=*/true, "Transaction size."}, + {RPCResult::Type::OBJ, "fees", /*optional=*/true, "Transaction fees (only present if 'allowed' is true)", + { + {RPCResult::Type::STR_AMOUNT, "base", "transaction fee in " + CURRENCY_UNIT}, + }}, + {RPCResult::Type::STR, "reject-reason", /*optional=*/true, "Rejection string (only present when 'allowed' is false)"}, + }}, + } + }, + RPCExamples{ + "\nCreate a transaction\n" + + HelpExampleCli("createrawtransaction", "\"[{\\\"txid\\\" : \\\"mytxid\\\",\\\"vout\\\":0}]\" \"{\\\"myaddress\\\":0.01}\"") + + "Sign the transaction, and get back the hex\n" + + HelpExampleCli("signrawtransactionwithwallet", "\"myhex\"") + + "\nTest acceptance of the transaction (signed hex)\n" + + HelpExampleCli("testmempoolaccept", R"('["signedhex"]')") + + "\nAs a JSON-RPC call\n" + + HelpExampleRpc("testmempoolaccept", "[\"signedhex\"]") + }, + [&](const RPCHelpMan& self, const JSONRPCRequest& request) -> UniValue + { + RPCTypeCheck(request.params, { + UniValue::VARR, + UniValueType(), // VNUM or VSTR, checked inside AmountFromValue() + }); + const UniValue raw_transactions = request.params[0].get_array(); + if (raw_transactions.size() < 1 || raw_transactions.size() > MAX_PACKAGE_COUNT) { + throw JSONRPCError(RPC_INVALID_PARAMETER, + "Array must contain between 1 and " + ToString(MAX_PACKAGE_COUNT) + " transactions."); + } + + const CFeeRate max_raw_tx_fee_rate = request.params[1].isNull() ? 
+ DEFAULT_MAX_RAW_TX_FEE_RATE : + CFeeRate(AmountFromValue(request.params[1])); + + std::vector txns; + txns.reserve(raw_transactions.size()); + for (const auto& rawtx : raw_transactions.getValues()) { + CMutableTransaction mtx; + if (!DecodeHexTx(mtx, rawtx.get_str())) { + throw JSONRPCError(RPC_DESERIALIZATION_ERROR, + "TX decode failed: " + rawtx.get_str() + " Make sure the tx has at least one input."); + } + txns.emplace_back(MakeTransactionRef(std::move(mtx))); + } + + NodeContext& node = EnsureAnyNodeContext(request.context); + CTxMemPool& mempool = EnsureMemPool(node); + ChainstateManager& chainman = EnsureChainman(node); + CChainState& chainstate = chainman.ActiveChainstate(); + const PackageMempoolAcceptResult package_result = [&] { + LOCK(::cs_main); + if (txns.size() > 1) return ProcessNewPackage(chainstate, mempool, txns, /*test_accept=*/true); + return PackageMempoolAcceptResult(txns[0]->GetHash(), + chainman.ProcessTransaction(txns[0], /*test_accept=*/true)); + }(); + + UniValue rpc_result(UniValue::VARR); + // We will check transaction fees while we iterate through txns in order. If any transaction fee + // exceeds maxfeerate, we will leave the rest of the validation results blank, because it + // doesn't make sense to return a validation result for a transaction if its ancestor(s) would + // not be submitted. + bool exit_early{false}; + for (const auto& tx : txns) { + UniValue result_inner(UniValue::VOBJ); + result_inner.pushKV("txid", tx->GetHash().GetHex()); + if (package_result.m_state.GetResult() == PackageValidationResult::PCKG_POLICY) { + result_inner.pushKV("package-error", package_result.m_state.GetRejectReason()); + } + auto it = package_result.m_tx_results.find(tx->GetHash()); + if (exit_early || it == package_result.m_tx_results.end()) { + // Validation unfinished. Just return the txid. 
+ rpc_result.push_back(result_inner); + continue; + } + const auto& tx_result = it->second; + // Package testmempoolaccept doesn't allow transactions to already be in the mempool. + CHECK_NONFATAL(tx_result.m_result_type != MempoolAcceptResult::ResultType::MEMPOOL_ENTRY); + if (tx_result.m_result_type == MempoolAcceptResult::ResultType::VALID) { + const CAmount fee = tx_result.m_base_fees.value(); + // Check that fee does not exceed maximum fee + const int64_t virtual_size = tx_result.m_vsize.value(); + const CAmount max_raw_tx_fee = max_raw_tx_fee_rate.GetFee(virtual_size); + if (max_raw_tx_fee && fee > max_raw_tx_fee) { + result_inner.pushKV("allowed", false); + result_inner.pushKV("reject-reason", "max-fee-exceeded"); + exit_early = true; + } else { + // Only return the fee and vsize if the transaction would pass ATMP. + // These can be used to calculate the feerate. + result_inner.pushKV("allowed", true); + result_inner.pushKV("vsize", virtual_size); + UniValue fees(UniValue::VOBJ); + fees.pushKV("base", ValueFromAmount(fee)); + result_inner.pushKV("fees", fees); + } + } else { + result_inner.pushKV("allowed", false); + const TxValidationState state = tx_result.m_state; + if (state.GetResult() == TxValidationResult::TX_MISSING_INPUTS) { + result_inner.pushKV("reject-reason", "missing-inputs"); + } else { + result_inner.pushKV("reject-reason", state.GetRejectReason()); + } + } + rpc_result.push_back(result_inner); + } + return rpc_result; + }, + }; +} + +static std::vector MempoolEntryDescription() +{ + return { RPCResult{RPCResult::Type::NUM, "vsize", "virtual transaction size. 
This can be different from actual serialized size for high-sigop transactions."}, RPCResult{RPCResult::Type::STR_AMOUNT, "fee", /*optional=*/true, "transaction fee, denominated in " + CURRENCY_UNIT + " (DEPRECATED, returned only if config option -deprecatedrpc=fees is passed)"}, @@ -53,7 +264,8 @@ static std::vector MempoolEntryDescription() { return { {RPCResult{RPCResult::Type::STR_HEX, "transactionid", "child transaction id"}}}, RPCResult{RPCResult::Type::BOOL, "instantsend", "True if this transaction was locked via InstantSend"}, RPCResult{RPCResult::Type::BOOL, "unbroadcast", "Whether this transaction is currently unbroadcast (initial broadcast not yet acknowledged by any peers)"} -};} + }; +} static void entryToJSON(const CTxMemPool& pool, UniValue& info, const CTxMemPoolEntry& e, const llmq::CInstantSendManager* isman) EXCLUSIVE_LOCKS_REQUIRED(pool.cs) { @@ -155,7 +367,7 @@ UniValue MempoolToJSON(const CTxMemPool& pool, const llmq::CInstantSendManager* } } -RPCHelpMan getrawmempool() +static RPCHelpMan getrawmempool() { return RPCHelpMan{"getrawmempool", "\nReturns all transaction ids in memory pool as a json array of string transaction ids.\n" @@ -209,7 +421,7 @@ RPCHelpMan getrawmempool() }; } -RPCHelpMan getmempoolancestors() +static RPCHelpMan getmempoolancestors() { return RPCHelpMan{"getmempoolancestors", "\nIf txid is in the mempool, returns all in-mempool ancestors.\n", @@ -276,7 +488,7 @@ RPCHelpMan getmempoolancestors() }; } -RPCHelpMan getmempooldescendants() +static RPCHelpMan getmempooldescendants() { return RPCHelpMan{"getmempooldescendants", "\nIf txid is in the mempool, returns all in-mempool descendants.\n", @@ -344,7 +556,7 @@ RPCHelpMan getmempooldescendants() }; } -RPCHelpMan getmempoolentry() +static RPCHelpMan getmempoolentry() { return RPCHelpMan{"getmempoolentry", "\nReturns mempool data for given transaction\n", @@ -400,7 +612,7 @@ UniValue MempoolInfoToJSON(const CTxMemPool& pool, const llmq::CInstantSendManag return ret; } 
-RPCHelpMan getmempoolinfo() +static RPCHelpMan getmempoolinfo() { return RPCHelpMan{"getmempoolinfo", "\nReturns details on the active state of the TX memory pool.\n", @@ -433,7 +645,7 @@ RPCHelpMan getmempoolinfo() }; } -RPCHelpMan savemempool() +static RPCHelpMan savemempool() { return RPCHelpMan{"savemempool", "\nDumps the mempool to disk. It will fail until the previous dump is fully loaded.\n", @@ -473,6 +685,8 @@ void RegisterMempoolRPCCommands(CRPCTable& t) static const CRPCCommand commands[]{ // category actor (function) // -------- ---------------- + {"rawtransactions", &sendrawtransaction}, + {"rawtransactions", &testmempoolaccept}, {"blockchain", &getmempoolancestors}, {"blockchain", &getmempooldescendants}, {"blockchain", &getmempoolentry}, diff --git a/src/rpc/rawtransaction.cpp b/src/rpc/rawtransaction.cpp index 151e48a162e9..48869e1f27df 100644 --- a/src/rpc/rawtransaction.cpp +++ b/src/rpc/rawtransaction.cpp @@ -40,7 +40,6 @@ #include #include #include -#include #include #include #include @@ -62,7 +61,6 @@ #include using node::AnalyzePSBT; -using node::DEFAULT_MAX_RAW_TX_FEE_RATE; using node::GetTransaction; using node::NodeContext; using node::PSBTAnalysis; @@ -1006,212 +1004,6 @@ static RPCHelpMan signrawtransactionwithkey() }; } -RPCHelpMan sendrawtransaction() -{ - return RPCHelpMan{"sendrawtransaction", "\nSubmit a raw transaction (serialized, hex-encoded) to local node and network.\n" - "\nThe transaction will be sent unconditionally to all peers, so using sendrawtransaction\n" - "for manual rebroadcast may degrade privacy by leaking the transaction's origin, as\n" - "nodes will normally not rebroadcast non-wallet transactions already in their mempool.\n" - "\nA specific exception, RPC_TRANSACTION_ALREADY_IN_CHAIN, may throw if the transaction cannot be added to the mempool.\n" - "\nRelated RPCs: createrawtransaction, signrawtransactionwithkey\n", - { - {"hexstring", RPCArg::Type::STR_HEX, RPCArg::Optional::NO, "The hex string of the raw 
transaction"}, - {"maxfeerate", RPCArg::Type::AMOUNT, RPCArg::Default{FormatMoney(DEFAULT_MAX_RAW_TX_FEE_RATE.GetFeePerK())}, - "Reject transactions whose fee rate is higher than the specified value, expressed in " + CURRENCY_UNIT + - "/kB.\nSet to 0 to accept any fee rate.\n"}, - {"instantsend", RPCArg::Type::BOOL, RPCArg::Optional::OMITTED, "Deprecated and ignored"}, - {"bypasslimits", RPCArg::Type::BOOL, RPCArg::Default{false}, "Bypass transaction policy limits"}, - }, - RPCResult{ - RPCResult::Type::STR_HEX, "", "The transaction hash in hex" - }, - RPCExamples{ - "\nCreate a transaction\n" - + HelpExampleCli("createrawtransaction", "\"[{\\\"txid\\\" : \\\"mytxid\\\",\\\"vout\\\":0}]\" \"{\\\"myaddress\\\":0.01}\"") + - "Sign the transaction, and get back the hex\n" - + HelpExampleCli("signrawtransactionwithwallet", "\"myhex\"") + - "\nSend the transaction (signed hex)\n" - + HelpExampleCli("sendrawtransaction", "\"signedhex\"") + - "\nAs a JSON-RPC call\n" - + HelpExampleRpc("sendrawtransaction", "\"signedhex\"") - }, - [&](const RPCHelpMan& self, const JSONRPCRequest& request) -> UniValue -{ - RPCTypeCheck(request.params, { - UniValue::VSTR, - UniValueType(), // VNUM or VSTR, checked inside AmountFromValue() - UniValue::VBOOL - }); - - CMutableTransaction mtx; - if (!DecodeHexTx(mtx, request.params[0].get_str())) { - throw JSONRPCError(RPC_DESERIALIZATION_ERROR, "TX decode failed. Make sure the tx has at least one input."); - } - CTransactionRef tx(MakeTransactionRef(std::move(mtx))); - - const CFeeRate max_raw_tx_fee_rate = request.params[1].isNull() ? 
- DEFAULT_MAX_RAW_TX_FEE_RATE : - CFeeRate(AmountFromValue(request.params[1])); - - int64_t virtual_size = GetVirtualTransactionSize(*tx); - CAmount max_raw_tx_fee = max_raw_tx_fee_rate.GetFee(virtual_size); - - bool bypass_limits = false; - if (!request.params[3].isNull()) bypass_limits = request.params[3].get_bool(); - bilingual_str err_string; - AssertLockNotHeld(cs_main); - NodeContext& node = EnsureAnyNodeContext(request.context); - const TransactionError err = BroadcastTransaction(node, tx, err_string, max_raw_tx_fee, /*relay=*/true, /*wait_callback=*/true, bypass_limits); - if (TransactionError::OK != err) { - throw JSONRPCTransactionError(err, err_string.original); - } - - return tx->GetHash().GetHex(); -}, - }; -} - -static RPCHelpMan testmempoolaccept() -{ - return RPCHelpMan{"testmempoolaccept", - "\nReturns result of mempool acceptance tests indicating if raw transaction (serialized, hex-encoded) would be accepted by mempool.\n" - "\nIf multiple transactions are passed in, parents must come before children and package policies apply: the transactions cannot conflict with any mempool transactions or each other.\n" - "\nIf one transaction fails, other transactions may not be fully validated (the 'allowed' key will be blank).\n" - "\nThe maximum number of transactions allowed is " + ToString(MAX_PACKAGE_COUNT) + ".\n" - "\nThis checks if transactions violate the consensus or policy rules.\n" - "\nSee sendrawtransaction call.\n", - { - {"rawtxs", RPCArg::Type::ARR, RPCArg::Optional::NO, "An array of hex strings of raw transactions.", - { - {"rawtx", RPCArg::Type::STR_HEX, RPCArg::Optional::OMITTED, ""}, - }, - }, - {"maxfeerate", RPCArg::Type::AMOUNT, RPCArg::Default{FormatMoney(DEFAULT_MAX_RAW_TX_FEE_RATE.GetFeePerK())}, - "Reject transactions whose fee rate is higher than the specified value, expressed in " + CURRENCY_UNIT + "/kB\n"}, - }, - RPCResult{ - RPCResult::Type::ARR, "", "The result of the mempool acceptance test for each raw transaction in the 
input array.\n" - "Returns results for each transaction in the same order they were passed in.\n" - "Transactions that cannot be fully validated due to failures in other transactions will not contain an 'allowed' result.\n", - { - {RPCResult::Type::OBJ, "", "", - { - {RPCResult::Type::STR_HEX, "txid", "The transaction hash in hex"}, - {RPCResult::Type::STR, "package-error", /*optional=*/true, "Package validation error, if any (only possible if rawtxs had more than 1 transaction)."}, - {RPCResult::Type::BOOL, "allowed", /*optional=*/true, "Whether this tx would be accepted to the mempool and pass client-specified maxfeerate. " - "If not present, the tx was not fully validated due to a failure in another tx in the list."}, - {RPCResult::Type::NUM, "vsize", /*optional=*/true, "Transaction size."}, - {RPCResult::Type::OBJ, "fees", /*optional=*/true, "Transaction fees (only present if 'allowed' is true)", - { - {RPCResult::Type::STR_AMOUNT, "base", "transaction fee in " + CURRENCY_UNIT}, - }}, - {RPCResult::Type::STR, "reject-reason", /*optional=*/true, "Rejection string (only present when 'allowed' is false)"}, - }}, - } - }, - RPCExamples{ - "\nCreate a transaction\n" - + HelpExampleCli("createrawtransaction", "\"[{\\\"txid\\\" : \\\"mytxid\\\",\\\"vout\\\":0}]\" \"{\\\"myaddress\\\":0.01}\"") + - "Sign the transaction, and get back the hex\n" - + HelpExampleCli("signrawtransactionwithwallet", "\"myhex\"") + - "\nTest acceptance of the transaction (signed hex)\n" - + HelpExampleCli("testmempoolaccept", R"('["signedhex"]')") + - "\nAs a JSON-RPC call\n" - + HelpExampleRpc("testmempoolaccept", "[\"signedhex\"]") - }, - [&](const RPCHelpMan& self, const JSONRPCRequest& request) -> UniValue -{ - RPCTypeCheck(request.params, { - UniValue::VARR, - UniValueType(), // VNUM or VSTR, checked inside AmountFromValue() - }); - const UniValue raw_transactions = request.params[0].get_array(); - if (raw_transactions.size() < 1 || raw_transactions.size() > MAX_PACKAGE_COUNT) { - throw 
JSONRPCError(RPC_INVALID_PARAMETER, - "Array must contain between 1 and " + ToString(MAX_PACKAGE_COUNT) + " transactions."); - } - - const CFeeRate max_raw_tx_fee_rate = request.params[1].isNull() ? - DEFAULT_MAX_RAW_TX_FEE_RATE : - CFeeRate(AmountFromValue(request.params[1])); - - std::vector txns; - txns.reserve(raw_transactions.size()); - for (const auto& rawtx : raw_transactions.getValues()) { - CMutableTransaction mtx; - if (!DecodeHexTx(mtx, rawtx.get_str())) { - throw JSONRPCError(RPC_DESERIALIZATION_ERROR, - "TX decode failed: " + rawtx.get_str() + " Make sure the tx has at least one input."); - } - txns.emplace_back(MakeTransactionRef(std::move(mtx))); - } - - NodeContext& node = EnsureAnyNodeContext(request.context); - CTxMemPool& mempool = EnsureMemPool(node); - ChainstateManager& chainman = EnsureChainman(node); - CChainState& chainstate = chainman.ActiveChainstate(); - const PackageMempoolAcceptResult package_result = [&] { - LOCK(::cs_main); - if (txns.size() > 1) return ProcessNewPackage(chainstate, mempool, txns, /*test_accept=*/true); - return PackageMempoolAcceptResult(txns[0]->GetHash(), - chainman.ProcessTransaction(txns[0], /*test_accept=*/true)); - }(); - - UniValue rpc_result(UniValue::VARR); - // We will check transaction fees while we iterate through txns in order. If any transaction fee - // exceeds maxfeerate, we will leave the rest of the validation results blank, because it - // doesn't make sense to return a validation result for a transaction if its ancestor(s) would - // not be submitted. 
- bool exit_early{false}; - for (const auto& tx : txns) { - UniValue result_inner(UniValue::VOBJ); - result_inner.pushKV("txid", tx->GetHash().GetHex()); - if (package_result.m_state.GetResult() == PackageValidationResult::PCKG_POLICY) { - result_inner.pushKV("package-error", package_result.m_state.GetRejectReason()); - } - auto it = package_result.m_tx_results.find(tx->GetHash()); - if (exit_early || it == package_result.m_tx_results.end()) { - // Validation unfinished. Just return the txid. - rpc_result.push_back(result_inner); - continue; - } - const auto& tx_result = it->second; - // Package testmempoolaccept doesn't allow transactions to already be in the mempool. - CHECK_NONFATAL(tx_result.m_result_type != MempoolAcceptResult::ResultType::MEMPOOL_ENTRY); - if (tx_result.m_result_type == MempoolAcceptResult::ResultType::VALID) { - const CAmount fee = tx_result.m_base_fees.value(); - // Check that fee does not exceed maximum fee - const int64_t virtual_size = tx_result.m_vsize.value(); - const CAmount max_raw_tx_fee = max_raw_tx_fee_rate.GetFee(virtual_size); - if (max_raw_tx_fee && fee > max_raw_tx_fee) { - result_inner.pushKV("allowed", false); - result_inner.pushKV("reject-reason", "max-fee-exceeded"); - exit_early = true; - } else { - // Only return the fee and vsize if the transaction would pass ATMP. - // These can be used to calculate the feerate. 
- result_inner.pushKV("allowed", true); - result_inner.pushKV("vsize", virtual_size); - UniValue fees(UniValue::VOBJ); - fees.pushKV("base", ValueFromAmount(fee)); - result_inner.pushKV("fees", fees); - } - } else { - result_inner.pushKV("allowed", false); - const TxValidationState state = tx_result.m_state; - if (state.GetResult() == TxValidationResult::TX_MISSING_INPUTS) { - result_inner.pushKV("reject-reason", "missing-inputs"); - } else { - result_inner.pushKV("reject-reason", state.GetRejectReason()); - } - } - rpc_result.push_back(result_inner); - } - return rpc_result; -}, - }; -} - static RPCHelpMan decodepsbt() { return RPCHelpMan{ @@ -2118,10 +1910,8 @@ static const CRPCCommand commands[] = { "rawtransactions", &createrawtransaction, }, { "rawtransactions", &decoderawtransaction, }, { "rawtransactions", &decodescript, }, - { "rawtransactions", &sendrawtransaction, }, { "rawtransactions", &combinerawtransaction, }, { "rawtransactions", &signrawtransactionwithkey, }, - { "rawtransactions", &testmempoolaccept, }, { "rawtransactions", &decodepsbt, }, { "rawtransactions", &combinepsbt, }, { "rawtransactions", &finalizepsbt, }, From f8effcfa892675b8c95a015b7da95278c25dfdd8 Mon Sep 17 00:00:00 2001 From: fanquake Date: Mon, 28 Mar 2022 11:50:36 +0100 Subject: [PATCH 024/656] Merge bitcoin/bitcoin#24691: Fix getpeerinfo doc faac877ffd38c3d8515c9e7b585546434f91b741 doc: Fix getpeerinfo doc (MarcoFalke) Pull request description: * Replace `node` with `peer` * Remove unused `\n` * Mark optional fields optional. 
See commit 9344697e57bd23d955df493d0581193ca1dc7cca, found by #23083 ACKs for top commit: fanquake: ACK faac877ffd38c3d8515c9e7b585546434f91b741 Tree-SHA512: ae4d52a0dcf8e132d9084e632d65fa835b1e7d0ed5c3d45a360570414d1e20bc7fb6500ff9be94b784af1dec5badcd1304153b1a4a59a6c484a87d8afd88b8bd --- src/rpc/net.cpp | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/rpc/net.cpp b/src/rpc/net.cpp index 866c5c858c76..e77eeba6cb8d 100644 --- a/src/rpc/net.cpp +++ b/src/rpc/net.cpp @@ -99,7 +99,7 @@ static RPCHelpMan getpeerinfo() { return RPCHelpMan{ "getpeerinfo", - "\nReturns data about each connected network node as a json array of objects.\n", + "Returns data about each connected network peer as a json array of objects.", {}, RPCResult{ RPCResult::Type::ARR, "", "", @@ -124,7 +124,7 @@ static RPCHelpMan getpeerinfo() {RPCResult::Type::STR_HEX, "verified_pubkey_hash", "Only present when the peer is a masternode and successfully " "authenticated via MNAUTH. In this case, this field contains the " "hash of the masternode's operator public key"}, - {RPCResult::Type::BOOL, "relaytxes", "Whether peer has asked us to relay transactions to it"}, + {RPCResult::Type::BOOL, "relaytxes", /*optional=*/true, "Whether peer has asked us to relay transactions to it"}, {RPCResult::Type::NUM_TIME, "lastsend", "The " + UNIX_EPOCH_TIME + " of the last send"}, {RPCResult::Type::NUM_TIME, "lastrecv", "The " + UNIX_EPOCH_TIME + " of the last receive"}, {RPCResult::Type::NUM_TIME, "last_transaction", "The " + UNIX_EPOCH_TIME + " of the last valid transaction received from this peer"}, From 732abd061941d81cec010d2a604c2e4b2a4748d6 Mon Sep 17 00:00:00 2001 From: laanwj <126646+laanwj@users.noreply.github.com> Date: Wed, 30 Mar 2022 13:41:45 +0200 Subject: [PATCH 025/656] Merge bitcoin/bitcoin#24374: contrib: refactor: simplify linearize scripts 254a63e097def1bf7e157c72ea9e4fff1eeb7a28 contrib: refactor: replace `hex_switchEndian` in linearize scripts (Sebastian Falbesoner) 
3f863cfff1ec902454d64f82b12eb09bdb1eee04 contrib: refactor: simplify block header string routine in linearize-data.py (Sebastian Falbesoner) Pull request description: This PR simplifies the linearization scripts `linearize-data.py` and `linearize-hashes.py` by replacing overly complicated cruft (block header hash string calculation, hex string reverse) with means of the Python3 standard library. ACKs for top commit: laanwj: Code review ACK 254a63e097def1bf7e157c72ea9e4fff1eeb7a28 Tree-SHA512: 4a0e20e63bd11f23a190480b22dbc2f2a3070e2a4f3a01b8797f99bb5fc830185e91e6712c8ae97562f9a24a98aa4f19d52f02a3f5fcbe4c578ee88a41382d1d --- contrib/linearize/linearize-data.py | 38 +++------------------------ contrib/linearize/linearize-hashes.py | 7 +---- 2 files changed, 4 insertions(+), 41 deletions(-) diff --git a/contrib/linearize/linearize-data.py b/contrib/linearize/linearize-data.py index 04721f4ccd52..6f2b5eb2ba5b 100755 --- a/contrib/linearize/linearize-data.py +++ b/contrib/linearize/linearize-data.py @@ -20,41 +20,9 @@ settings = {} -def hex_switchEndian(s): - """ Switches the endianness of a hex string (in pairs of hex chars) """ - pairList = [s[i:i+2].encode() for i in range(0, len(s), 2)] - return b''.join(pairList[::-1]).decode() - -def uint32(x): - return x & 0xffffffff - -def bytereverse(x): - return uint32(( ((x) << 24) | (((x) << 8) & 0x00ff0000) | - (((x) >> 8) & 0x0000ff00) | ((x) >> 24) )) - -def bufreverse(in_buf): - out_words = [] - for i in range(0, len(in_buf), 4): - word = struct.unpack('@I', in_buf[i:i+4])[0] - out_words.append(struct.pack('@I', bytereverse(word))) - return b''.join(out_words) - -def wordreverse(in_buf): - out_words = [] - for i in range(0, len(in_buf), 4): - out_words.append(in_buf[i:i+4]) - out_words.reverse() - return b''.join(out_words) - -def calc_hdr_hash(blk_hdr): - return dash_hash.getPoWHash(blk_hdr) - def calc_hash_str(blk_hdr): - hash = calc_hdr_hash(blk_hdr) - hash = bufreverse(hash) - hash = wordreverse(hash) - hash_str = 
hash.hex() - return hash_str + blk_hdr_hash = dash_hash.getPoWHash(blk_hdr) + return blk_hdr_hash[::-1].hex() def get_blk_dt(blk_hdr): members = struct.unpack(" Date: Sat, 2 Apr 2022 09:43:14 +0100 Subject: [PATCH 026/656] Merge bitcoin/bitcoin#24594: doc: update release-process.md ac45a43d894a50ada785c658c261446d6b881224 doc: update release-process.md (gruve-p) Pull request description: ACKs for top commit: achow101: ACK ac45a43d894a50ada785c658c261446d6b881224 Tree-SHA512: eb49310e5645fcab47c6a268221b688d0ae4ffb1cc6d66fc097da1feb61aa894fb00065c20cd2413b775b598f8c955d48240608107776e4f1016703d52a81212 --- doc/release-process.md | 50 +++++++++++++++++++++--------------------- 1 file changed, 25 insertions(+), 25 deletions(-) diff --git a/doc/release-process.md b/doc/release-process.md index aca1c12e52da..e1f9cb5f4ed7 100644 --- a/doc/release-process.md +++ b/doc/release-process.md @@ -83,7 +83,7 @@ against other `guix-attest` signatures. git -C ./guix.sigs pull ``` -### Create the macOS SDK tarball: (first time, or when SDK version changes) +### Create the macOS SDK tarball (first time, or when SDK version changes) _Note: this step can be skipped if [our CI](https://github.com/dashpay/dash/blob/master/ci/test/00_setup_env.sh#L64) still uses bitcoin's SDK package (see SDK_URL)_ @@ -91,7 +91,7 @@ Create the macOS SDK tarball, see the [macOS build instructions](build-osx.md#deterministic-macos-app-notes) for details. 
-### Build and attest to build outputs: +### Build and attest to build outputs Follow the relevant Guix README.md sections: - [Building](/contrib/guix/README.md#building) @@ -99,16 +99,12 @@ Follow the relevant Guix README.md sections: _Note: we ship releases for only some supported HOSTs so consider providing limited `HOSTS` variable or run `./contrib/containers/guix/scripts/guix-start` instead of `./contrib/guix/guix-build` when building binaries for quicker builds that exclude the supported but not shipped HOSTs_ -### Verify other builders' signatures to your own. (Optional) +### Verify other builders' signatures to your own (optional) -Add other builders keys to your gpg keyring, and/or refresh keys: See `../dash/contrib/builder-keys/README.md`. - -Follow the relevant Guix README.md sections: +- [Add other builders keys to your gpg keyring, and/or refresh keys](/contrib/builder-keys/README.md) - [Verifying build output attestations](/contrib/guix/README.md#verifying-build-output-attestations) -### Next steps: - -Commit your signature to `guix.sigs`: +### Commit your non codesigned signature to guix.sigs ```sh pushd guix.sigs @@ -118,24 +114,22 @@ git push # Assuming you can push to the guix.sigs tree popd ``` -Codesigner only: Create Windows/macOS detached signatures: -- Only one person handles codesigning. Everyone else should skip to the next step. -- Only once the Windows/macOS builds each have 3 matching signatures may they be signed with their respective release keys. 
+## Codesigning -Codesigner only: Sign the macOS binary: +### macOS codesigner only: Create detached macOS signatures (assuming [signapple](https://github.com/achow101/signapple/) is installed and up to date with master branch) * Transfer `dashcore-osx-unsigned.tar.gz` to macOS for signing * Extract and sign: ```sh tar xf dashcore-osx-unsigned.tar.gz - ./detached-sig-create.sh -s "Key ID" -o runtime + ./detached-sig-create.sh /path/to/codesign.p12 -o runtime ``` * Enter the keychain password and authorize the signature -* Move `signature-osx.tar.gz` back to the guix-build host +* `signature-osx.tar.gz` will be created -Codesigner only: Sign the windows binaries: +### Windows codesigner only: Create detached Windows signatures * Extract and sign: @@ -147,10 +141,11 @@ Codesigner only: Sign the windows binaries: * Enter the passphrase for the key when prompted * `signature-win.tar.gz` will be created -Code-signer only: It is advised to test that the code signature attaches properly prior to tagging by performing the `guix-codesign` step. -However if this is done, once the release has been tagged in the bitcoin-detached-sigs repo, the `guix-codesign` step must be performed again in order for the guix attestation to be valid when compared against the attestations of non-codesigner builds. +### Windows and macOS codesigners only: test code signatures +It is advised to test that the code signature attaches properly prior to tagging by performing the `guix-codesign` step. +However if this is done, once the release has been tagged in the dash-detached-sigs repo, the `guix-codesign` step must be performed again in order for the guix attestation to be valid when compared against the attestations of non-codesigner builds. 
-Codesigner only: Commit the detached codesign payloads: +### Windows and macOS codesigners only: Commit the detached codesign payloads ```sh pushd ~/dashcore-detached-sigs @@ -165,15 +160,20 @@ git push popd ``` -Non-codesigners: wait for Windows/macOS detached signatures: +### Non-codesigners: wait for Windows and macOS detached signatures -- Once the Windows/macOS builds each have 3 matching signatures, they will be signed with their respective release keys. +- Once the Windows and macOS builds each have 3 matching signatures, they will be signed with their respective release keys. - Detached signatures will then be committed to the [dash-detached-sigs](https://github.com/dashpay/dash-detached-sigs) repository, which can be combined with the unsigned apps to create signed binaries. -Create (and optionally verify) the codesigned outputs: -- [Codesigning](/contrib/guix/README.md#codesigning) +### Create the codesigned build outputs +- [Codesigning build outputs](/contrib/guix/README.md#codesigning-build-outputs) + +### Verify other builders' signatures to your own (optional) + +- [Add other builders keys to your gpg keyring, and/or refresh keys](/contrib/builder-keys/README.md) +- [Verifying build output attestations](/contrib/guix/README.md#verifying-build-output-attestations) -Commit your signature for the signed macOS/Windows binaries: +### Commit your codesigned signature to guix.sigs (for the signed macOS/Windows binaries) ```sh pushd ./guix.sigs @@ -183,7 +183,7 @@ git push # Assuming you can push to the guix.sigs tree popd ``` -### After 3 or more people have guix-built and their results match: +## After 3 or more people have guix-built and their results match * [ ] Combine the `all.SHA256SUMS.asc` file from all signers into `SHA256SUMS.asc`: ```sh From 0ff6e93124c9f84af4496baaa835a454c9bb5826 Mon Sep 17 00:00:00 2001 From: MarcoFalke Date: Tue, 5 Apr 2022 09:10:58 +0200 Subject: [PATCH 027/656] Merge bitcoin/bitcoin#24583: doc: Add template for empty 
release notes fa4943e8df2e1048a5030f1d93776ca2b9da7b8a doc: Add template for empty release notes (MarcoFalke) Pull request description: * Move release process notes from the release notes to the release process documentation * Clarify that wallet RPC or Settings related release notes snippets should not be duplicated. I think it should be sufficient to only mention them in the wallet section and leave them out from the general RPC section. * Create an empty template to ensure the release notes can be cleared with a single `cp` command. Also, this ensures that the "no duplication" note isn't deleted again. (We used to have it in at least the 22.0 and 21.0 release notes: https://github.com/bitcoin/bitcoin/blob/master/doc/release-notes/release-notes-0.21.0.md#updated-settings , but it was lost in the 23.0 notes) ACKs for top commit: laanwj: ACK fa4943e8df2e1048a5030f1d93776ca2b9da7b8a Tree-SHA512: 808e100ee1e371f7746a479ddfb237c6895935cffefc0e49033505492a03288013d5c20386af30f2a7dca8ad0c0628bdb6673dcb5cc4fcf4d0183b0ec65ce941 --- doc/release-notes-empty-template.md | 100 ++++++++++++++++++++++++++++ doc/release-process.md | 2 +- 2 files changed, 101 insertions(+), 1 deletion(-) create mode 100644 doc/release-notes-empty-template.md diff --git a/doc/release-notes-empty-template.md b/doc/release-notes-empty-template.md new file mode 100644 index 000000000000..9a2d1a30c38b --- /dev/null +++ b/doc/release-notes-empty-template.md @@ -0,0 +1,100 @@ +*The release notes draft is a temporary file that can be added to by anyone. See +[/doc/developer-notes.md#release-notes](/doc/developer-notes.md#release-notes) +for the process.* + +# Dash Core version *version* +=============================== + +This is a new minor version release, bringing various bugfixes and performance improvements. +This release is **optional** for all nodes, although recommended. 
+ +Please report bugs using the issue tracker at GitHub: + + + + +# Upgrading and downgrading + +## How to Upgrade + +If you are running an older version, shut it down. Wait until it has completely +shut down (which might take a few minutes in some cases), then run the +installer (on Windows) or just copy over `/Applications/Dash-Qt` (on Mac) or +`dashd`/`dash-qt` (on Linux). + +## Downgrade warning + +### Downgrade to a version < *version* + +Downgrading to a version older than *version* may not be supported, and will +likely require a reindex. + +# Release Notes + +Notable changes +=============== + +P2P and network changes +----------------------- + +Updated RPCs +------------ + + +Changes to wallet related RPCs can be found in the Wallet section below. + +New RPCs +-------- + +Build System +------------ + +Updated settings +---------------- + + +Changes to GUI or wallet related settings can be found in the GUI or Wallet section below. + +New settings +------------ + +Tools and Utilities +------------------- + +Wallet +------ + +GUI changes +----------- + +Low-level changes +================= + +RPC +--- + +Tests +----- + +See detailed [set of changes][set-of-changes]. + +# Credits + +Thanks to everyone who directly contributed to this release: + +- +- +- + +As well as everyone that submitted issues, reviewed pull requests and helped +debug the release candidates. + +# Older releases + +These releases are considered obsolete. Old release notes can be found here: + +- +- +- + +[set-of-changes]: https://github.com/dashpay/dash/compare/*version*...dashpay:*version* diff --git a/doc/release-process.md b/doc/release-process.md index e1f9cb5f4ed7..f4fd46e8443d 100644 --- a/doc/release-process.md +++ b/doc/release-process.md @@ -9,7 +9,7 @@ Before every minor and major release: * [ ] Review ["Needs backport" labels](https://github.com/dashpay/dash/labels?q=backport). 
* [ ] Update DIPs with any changes introduced by this release (see [this pull request](https://github.com/dashpay/dips/pull/142) for an example) * [ ] Update version in `configure.ac` (don't forget to set `CLIENT_VERSION_IS_RELEASE` to `true`) -* [ ] Write release notes (see below) +* [ ] Write release notes (see below). To clear the release notes: `cp doc/release-notes-empty-template.md doc/release-notes.md` * [ ] Update `src/chainparams.cpp` `nMinimumChainWork` with information from the `getblockchaininfo` rpc. * [ ] Update `src/chainparams.cpp` `defaultAssumeValid` with information from the `getblockhash` rpc. - The selected value must not be orphaned so it may be useful to set the value two blocks back from the tip. From 6855957a782e0393f7025bc29b4f7d9a5f875811 Mon Sep 17 00:00:00 2001 From: laanwj <126646+laanwj@users.noreply.github.com> Date: Tue, 5 Apr 2022 15:40:51 +0200 Subject: [PATCH 028/656] Merge bitcoin/bitcoin#24710: Add concrete steps in doc/cjdns.md to easily find a friend 6a02355ae9e830ea40b5408acc564e9eb1da1378 Add and improve informational links in doc/cjdns.md (Jon Atack) 19538dd41ecd9f593ad56f1f5f3a2cb45ad2b4ef Add concrete steps in doc/cjdns.md to easily find a friend (Jon Atack) Pull request description: and improve the informational links. CJDNS functions with a friend-of-a-friend topology and a key hurdle to getting started is to find a public peer and set up an outbound connection to it. This update makes doing it much easier for people getting started. Credit to Vasil Dimov for an [IRC suggestion in October 2021](https://www.erisian.com.au/bitcoin-core-dev/log-2021-10-04.html#l-469) and to stickies-v for IRC discussions this week and the [testing guide](https://github.com/bitcoin-core/bitcoin-devwiki/wiki/23.0-Release-Candidate-Testing-Guide) that led me to redo these steps, provide feedback at https://github.com/bitcoin/bitcoin/issues/24706 and refine the added documentation here. 
ACKs for top commit: dunxen: ACK 6a02355 stickies-v: re-ACK [6a02355](https://github.com/bitcoin/bitcoin/commit/6a02355ae9e830ea40b5408acc564e9eb1da1378) even though I wasn't opposed to the "friend" terminology since it's the language CJDNS seems to use to denominate the peers you connect to directly in general. Not worth bikeshedding over though. lsilva01: Strong ACK 6a02355 Tree-SHA512: b2fa2a200a6a55a709486f7ed2d3830cabffbbffa61a0d211fcb666a918b5754d4e99a58c32909fe58540598066e6ff67bf2fa2fcd56b1b5dcff3c2162f6d962 --- doc/cjdns.md | 35 ++++++++++++++++++++++++++++------- 1 file changed, 28 insertions(+), 7 deletions(-) diff --git a/doc/cjdns.md b/doc/cjdns.md index df2228194ae9..5d44871fafac 100644 --- a/doc/cjdns.md +++ b/doc/cjdns.md @@ -10,7 +10,8 @@ CJDNS is like a distributed, shared VPN with multiple entry points where every participant can reach any other participant. All participants use addresses from the `fc00::/8` network (reserved IPv6 range). Installation and configuration is done outside of Dash Core, similarly to a VPN (either in the host/OS or on -the network router). +the network router). See https://github.com/cjdelisle/cjdns#readme and +https://github.com/hyperboria/docs#hyperboriadocs for more information. Compared to IPv4/IPv6, CJDNS provides end-to-end encryption and protects nodes from traffic analysis and filtering. @@ -23,17 +24,37 @@ somewhat centralized. I2P connections have a source address and I2P is slow. CJDNS is fast but does not hide the sender and the recipient from intermediate routers. -## Installing CJDNS and connecting to the network +## Installing CJDNS and finding a peer to connect to the network To install and set up CJDNS, follow the instructions at -https://github.com/cjdelisle/cjdns#cjdns. +https://github.com/cjdelisle/cjdns#how-to-install-cjdns. -Don't skip steps +You need to initiate an outbound connection to a peer on the CJDNS network +before it will work with your Dash Core node. This is described in steps ["2. 
Find a friend"](https://github.com/cjdelisle/cjdns#2-find-a-friend) and ["3. Connect your node to your friend's -node"](https://github.com/cjdelisle/cjdns#3-connect-your-node-to-your-friends-node). -You need to be connected to the CJDNS network before it will work with your -Dash Core node. +node"](https://github.com/cjdelisle/cjdns#3-connect-your-node-to-your-friends-node) +in the CJDNS documentation. + +One quick way to accomplish these two steps is to query for available public +peers on [Hyperboria](https://github.com/hyperboria) by running the following: + +``` +git clone https://github.com/hyperboria/peers hyperboria-peers +cd hyperboria-peers +./testAvailable.py +``` + +For each peer, the `./testAvailable.py` script prints the filename of the peer's +credentials followed by the ping result. + +Choose one or several peers, copy their credentials from their respective files, +paste them into the relevant IPv4 or IPv6 "connectTo" JSON object in the +`cjdroute.conf` file you created in step ["1. Generate a new configuration +file"](https://github.com/cjdelisle/cjdns#1-generate-a-new-configuration-file), +and save the file. 
+ +## Launching CJDNS Typically, CJDNS might be launched from its directory with `sudo ./cjdroute < cjdroute.conf` and it sheds permissions after setting up the From c62bea41659b56d365a8e8ea7040af141aba0aa0 Mon Sep 17 00:00:00 2001 From: Konstantin Akimov Date: Tue, 26 Aug 2025 02:46:20 +0700 Subject: [PATCH 029/656] feat: create extra spk by default for mobile derivation path of CJ --- src/wallet/rpc/backup.cpp | 10 ++++++++++ src/wallet/rpc/wallet.cpp | 2 +- src/wallet/scriptpubkeyman.cpp | 6 +++--- src/wallet/scriptpubkeyman.h | 9 ++++++++- src/wallet/wallet.cpp | 7 ++++--- test/functional/wallet_listdescriptors.py | 4 +++- test/functional/wallet_mnemonicbits.py | 7 ++++--- 7 files changed, 33 insertions(+), 12 deletions(-) diff --git a/src/wallet/rpc/backup.cpp b/src/wallet/rpc/backup.cpp index 260b872c4c61..7c817361d6f3 100644 --- a/src/wallet/rpc/backup.cpp +++ b/src/wallet/rpc/backup.cpp @@ -4,6 +4,7 @@ // file COPYING or http://www.opensource.org/licenses/mit-license.php. #include +#include #include #include #include @@ -1976,6 +1977,7 @@ RPCHelpMan listdescriptors() {RPCResult::Type::NUM, "timestamp", "The creation time of the descriptor"}, {RPCResult::Type::BOOL, "active", "Whether this descriptor is currently used to generate new addresses"}, {RPCResult::Type::BOOL, "internal", /*optional=*/true, "True if this descriptor is used to generate change addresses. False if this descriptor is used to generate receiving addresses; defined only for active descriptors"}, + {RPCResult::Type::BOOL, "coinjoin", /*optional=*/true, "True if this descriptor is used to generate CoinJoin addresses. 
False if this descriptor is used to generate receiving addresses; defined only for active descriptors"}, {RPCResult::Type::ARR_FIXED, "range", /*optional=*/true, "Defined only for ranged descriptors", { {RPCResult::Type::NUM, "", "Range start inclusive"}, {RPCResult::Type::NUM, "", "Range end inclusive"}, @@ -2036,6 +2038,14 @@ RPCHelpMan listdescriptors() if (active && type != std::nullopt) { spk.pushKV("internal", wallet->GetScriptPubKeyMan(true) == desc_spk_man); } + if (type != std::nullopt) { + std::string match = strprintf("/9'/%s'/0'", Params().ExtCoinType()); + bool is_cj = descriptor.size() > 5 && descriptor.find(match) != std::string::npos; + if (is_cj) { + spk.pushKV("internal", false); + spk.pushKV("coinjoin", is_cj); + } + } if (wallet_descriptor.descriptor->IsRange()) { UniValue range(UniValue::VARR); range.push_back(wallet_descriptor.range_start); diff --git a/src/wallet/rpc/wallet.cpp b/src/wallet/rpc/wallet.cpp index 61cdabfa70d3..d7fdae8cd609 100644 --- a/src/wallet/rpc/wallet.cpp +++ b/src/wallet/rpc/wallet.cpp @@ -168,7 +168,7 @@ static RPCHelpMan getwalletinfo() {RPCResult::Type::NUM_TIME, "timefirstkey", "the " + UNIX_EPOCH_TIME + " of the oldest known key in the wallet"}, {RPCResult::Type::NUM_TIME, "keypoololdest", /*optional=*/true, "the " + UNIX_EPOCH_TIME + " of the oldest pre-generated key in the key pool. 
Legacy wallets only"}, {RPCResult::Type::NUM, "keypoolsize", "how many new keys are pre-generated (only counts external keys)"}, - {RPCResult::Type::NUM, "keypoolsize_hd_internal", /*optional=*/true, "how many new keys are pre-generated for internal use (used for change outputs, only appears if the wallet is using this feature, otherwise external keys are used)"}, + {RPCResult::Type::NUM, "keypoolsize_hd_internal", /*optional=*/ true, "how many new keys are pre-generated for internal use (used for change outputs and mobile coinjoin, only appears if the wallet is using this feature, otherwise external keys are used)"}, {RPCResult::Type::NUM, "keys_left", "how many new keys are left since last automatic backup"}, {RPCResult::Type::NUM_TIME, "unlocked_until", /*optional=*/true, "the " + UNIX_EPOCH_TIME + " until which the wallet is unlocked for transfers, or 0 if the wallet is locked (only present for passphrase-encrypted wallets)"}, {RPCResult::Type::STR_AMOUNT, "paytxfee", "the transaction fee configuration, set in " + CURRENCY_UNIT + "/kB"}, diff --git a/src/wallet/scriptpubkeyman.cpp b/src/wallet/scriptpubkeyman.cpp index 3f685a30df7b..a7adf5708375 100644 --- a/src/wallet/scriptpubkeyman.cpp +++ b/src/wallet/scriptpubkeyman.cpp @@ -2074,7 +2074,7 @@ bool DescriptorScriptPubKeyMan::AddDescriptorKeyWithDB(WalletBatch& batch, const } } -bool DescriptorScriptPubKeyMan::SetupDescriptorGeneration(const CExtKey& master_key, const SecureString& secure_mnemonic, const SecureString& secure_mnemonic_passphrase, bool internal) +bool DescriptorScriptPubKeyMan::SetupDescriptorGeneration(const CExtKey& master_key, const SecureString& secure_mnemonic, const SecureString& secure_mnemonic_passphrase, InternalKey internal) { LOCK(cs_desc_man); assert(m_storage.IsWalletFlagSet(WALLET_FLAG_DESCRIPTORS)); @@ -2099,10 +2099,10 @@ bool DescriptorScriptPubKeyMan::SetupDescriptorGeneration(const CExtKey& master_ std::string xpub = EncodeExtPubKey(master_key.Neuter()); // Build descriptor 
string - std::string desc_prefix = strprintf("pkh(%s/44'/%d'", xpub, Params().ExtCoinType()); + std::string desc_prefix = strprintf("pkh(%s/%d'/%d'", xpub, internal == InternalKey::CoinJoin ? 9 : 44, Params().ExtCoinType()); std::string desc_suffix = "/*)"; - std::string internal_path = internal ? "/1" : "/0"; + std::string internal_path = (internal == InternalKey::Internal) ? "/1" : "/0"; std::string desc_str = desc_prefix + "/0'" + internal_path + desc_suffix; // Make the descriptor diff --git a/src/wallet/scriptpubkeyman.h b/src/wallet/scriptpubkeyman.h index 12ff6590c8f9..273ddcf6fc8b 100644 --- a/src/wallet/scriptpubkeyman.h +++ b/src/wallet/scriptpubkeyman.h @@ -147,6 +147,13 @@ class CKeyPool } }; +enum class InternalKey +{ + External, + Internal, + CoinJoin, +}; + /* * A class implementing ScriptPubKeyMan manages some (or all) scriptPubKeys used in a wallet. * It contains the scripts and keys related to the scriptPubKeys it manages. @@ -575,7 +582,7 @@ class DescriptorScriptPubKeyMan : public ScriptPubKeyMan bool IsHDEnabled() const override; //! 
Setup descriptors based on the given CExtkey - bool SetupDescriptorGeneration(const CExtKey& master_key, const SecureString& secure_mnemonic, const SecureString& secure_mnemonic_passphrase, bool internal); + bool SetupDescriptorGeneration(const CExtKey& master_key, const SecureString& secure_mnemonic, const SecureString& secure_mnemonic_passphrase, InternalKey internal); bool HavePrivateKeys() const override; diff --git a/src/wallet/wallet.cpp b/src/wallet/wallet.cpp index f5b38c98d579..469086122ada 100644 --- a/src/wallet/wallet.cpp +++ b/src/wallet/wallet.cpp @@ -3787,7 +3787,7 @@ void CWallet::SetupDescriptorScriptPubKeyMans(const SecureString& mnemonic_arg, CExtKey master_key; master_key.SetSeed(MakeByteSpan(seed_key)); - for (bool internal : {false, true}) { + for (auto internal : {InternalKey::External, InternalKey::Internal, InternalKey::CoinJoin}) { { // OUTPUT_TYPE is only one: LEGACY auto spk_manager = std::unique_ptr(new DescriptorScriptPubKeyMan(*this)); if (IsCrypted()) { @@ -3801,7 +3801,9 @@ void CWallet::SetupDescriptorScriptPubKeyMans(const SecureString& mnemonic_arg, spk_manager->SetupDescriptorGeneration(master_key, mnemonic, mnemonic_passphrase, internal); uint256 id = spk_manager->GetID(); m_spk_managers[id] = std::move(spk_manager); - AddActiveScriptPubKeyMan(id, internal); + if (internal != InternalKey::CoinJoin) { + AddActiveScriptPubKeyMan(id, internal == InternalKey::Internal); + } } } } @@ -3826,7 +3828,6 @@ void CWallet::LoadActiveScriptPubKeyMan(uint256 id, bool internal) auto& spk_mans_other = internal ? 
m_external_spk_managers : m_internal_spk_managers; auto spk_man = m_spk_managers.at(id).get(); spk_mans = spk_man; - if (spk_mans_other == spk_man) { spk_mans_other = nullptr; } diff --git a/test/functional/wallet_listdescriptors.py b/test/functional/wallet_listdescriptors.py index 28de6b2e313b..3c80745142ee 100755 --- a/test/functional/wallet_listdescriptors.py +++ b/test/functional/wallet_listdescriptors.py @@ -46,9 +46,11 @@ def run_test(self): node.createwallet(wallet_name='w3', descriptors=True) result = node.get_wallet_rpc('w3').listdescriptors() assert_equal("w3", result['wallet_name']) - assert_equal(2, len(result['descriptors'])) + assert_equal(3, len(result['descriptors'])) assert_equal(2, len([d for d in result['descriptors'] if d['active']])) + self.log.info(f"result: {result['descriptors']}") assert_equal(1, len([d for d in result['descriptors'] if d['internal']])) + assert_equal(1, len([d for d in result['descriptors'] if 'coinjoin' in d and d['coinjoin']])) for item in result['descriptors']: assert item['desc'] != '' assert item['next_index'] == 0 diff --git a/test/functional/wallet_mnemonicbits.py b/test/functional/wallet_mnemonicbits.py index 6a9574f68327..744fee87b8c6 100755 --- a/test/functional/wallet_mnemonicbits.py +++ b/test/functional/wallet_mnemonicbits.py @@ -47,13 +47,14 @@ def run_test(self): assert_equal(len(desc['mnemonic'].split()), 12) mnemonic_count += 1 assert desc['mnemonic'] == mnemonic_pre - assert desc['active'] + assert_equal(desc['active'], ("coinjoin" not in desc or not desc['coinjoin'])) + # there should 3 descriptors in total # One of them is inactive imported private key for coinbase. 
It has no mnemonic # Two other should be active and have mnemonic - assert_equal(mnemonic_count, 2) + assert_equal(mnemonic_count, 3) assert_equal(cb_count, 1) - assert_equal(len(descriptors), 3) + assert_equal(len(descriptors), 4) else: assert_equal(len(self.nodes[0].dumphdinfo()["mnemonic"].split()), 12) # 12 words by default # legacy HD wallets could have only one chain From dd3a40c21da3cb48ac900bf1c9b806ca4631fe61 Mon Sep 17 00:00:00 2001 From: Konstantin Akimov Date: Fri, 5 Sep 2025 23:44:31 +0700 Subject: [PATCH 030/656] doc: adds release notes for CJ descriptor --- doc/release-notes-6835.md | 21 +++++++++++++++++++++ 1 file changed, 21 insertions(+) create mode 100644 doc/release-notes-6835.md diff --git a/doc/release-notes-6835.md b/doc/release-notes-6835.md new file mode 100644 index 000000000000..294485a7570a --- /dev/null +++ b/doc/release-notes-6835.md @@ -0,0 +1,21 @@ +Mobile CoinJoin Compatibility +------------ + +- Fixed an issue where CoinJoin funds mixed in Dash Android wallet were + invisible when importing the mnemonic into Dash Core. Descriptor Wallets now + include an additional default descriptor for mobile CoinJoin funds, ensuring + seamless wallet migration and complete fund visibility across different + Dash wallet implementations. + +- This is a breaking change that increases the default number of descriptors + from 2 to 3 on mainnet (internal, external, mobile CoinJoin) for newly created + descriptor wallets only - existing wallets are unaffected. + + +Updated RPCs +------------ + +- The `listdescriptors` RPC now includes an optional coinjoin field to identify + CoinJoin descriptors. 
+ +(#6835) From fb47795e8568f40fe5ba19d4bf2af44f16b067e5 Mon Sep 17 00:00:00 2001 From: Konstantin Akimov Date: Mon, 22 Sep 2025 15:07:28 +0700 Subject: [PATCH 031/656] refactor: use named consts for derivation purpose 9 & 44 --- src/wallet/hdchain.cpp | 2 +- src/wallet/hdchain.h | 6 ++++++ src/wallet/scriptpubkeyman.cpp | 2 +- 3 files changed, 8 insertions(+), 2 deletions(-) diff --git a/src/wallet/hdchain.cpp b/src/wallet/hdchain.cpp index cc7fc0f76924..5e1098876cbb 100644 --- a/src/wallet/hdchain.cpp +++ b/src/wallet/hdchain.cpp @@ -203,6 +203,6 @@ size_t CHDChain::CountAccounts() std::string CHDPubKey::GetKeyPath() const { - return strprintf("m/44'/%d'/%d'/%d/%d", Params().ExtCoinType(), nAccountIndex, nChangeIndex, extPubKey.nChild); + return strprintf("m/%d'/%d'/%d'/%d/%d", BIP32_PURPOSE_STANDARD, Params().ExtCoinType(), nAccountIndex, nChangeIndex, extPubKey.nChild); } } // namespace wallet diff --git a/src/wallet/hdchain.h b/src/wallet/hdchain.h index 28385e3d2fb9..3a1d21b36200 100644 --- a/src/wallet/hdchain.h +++ b/src/wallet/hdchain.h @@ -140,6 +140,12 @@ class CHDPubKey std::string GetKeyPath() const; }; + +/** Purpose code used for DIP9 (feature derivation paths) */ +constexpr uint8_t BIP32_PURPOSE_FEATURE{9}; +/** Purpose code allotted to BIP 44 (standard derivation paths) */ +constexpr uint8_t BIP32_PURPOSE_STANDARD{44}; + } // namespace wallet #endif // BITCOIN_WALLET_HDCHAIN_H diff --git a/src/wallet/scriptpubkeyman.cpp b/src/wallet/scriptpubkeyman.cpp index a7adf5708375..28ecbbff3f68 100644 --- a/src/wallet/scriptpubkeyman.cpp +++ b/src/wallet/scriptpubkeyman.cpp @@ -2099,7 +2099,7 @@ bool DescriptorScriptPubKeyMan::SetupDescriptorGeneration(const CExtKey& master_ std::string xpub = EncodeExtPubKey(master_key.Neuter()); // Build descriptor string - std::string desc_prefix = strprintf("pkh(%s/%d'/%d'", xpub, internal == InternalKey::CoinJoin ? 
9 : 44, Params().ExtCoinType()); + std::string desc_prefix = strprintf("pkh(%s/%d'/%d'", xpub, internal == InternalKey::CoinJoin ? BIP32_PURPOSE_FEATURE : BIP32_PURPOSE_STANDARD, Params().ExtCoinType()); std::string desc_suffix = "/*)"; std::string internal_path = (internal == InternalKey::Internal) ? "/1" : "/0"; From 286b0074066808bb3ba7223bba0f390abbcb05a2 Mon Sep 17 00:00:00 2001 From: Konstantin Akimov Date: Tue, 23 Sep 2025 21:31:52 +0700 Subject: [PATCH 032/656] refactor: drop useless check that string is longer than 5 chars It's already checked by find() --- src/wallet/rpc/backup.cpp | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/wallet/rpc/backup.cpp b/src/wallet/rpc/backup.cpp index 7c817361d6f3..e21da3ca8422 100644 --- a/src/wallet/rpc/backup.cpp +++ b/src/wallet/rpc/backup.cpp @@ -2039,8 +2039,8 @@ RPCHelpMan listdescriptors() spk.pushKV("internal", wallet->GetScriptPubKeyMan(true) == desc_spk_man); } if (type != std::nullopt) { - std::string match = strprintf("/9'/%s'/0'", Params().ExtCoinType()); - bool is_cj = descriptor.size() > 5 && descriptor.find(match) != std::string::npos; + std::string match = strprintf("/%d'/%s'/0'", BIP32_PURPOSE_FEATURE, Params().ExtCoinType()); + bool is_cj = descriptor.find(match) != std::string::npos; if (is_cj) { spk.pushKV("internal", false); spk.pushKV("coinjoin", is_cj); From 859761cfcbb111de3091693d5cc48fface268ae8 Mon Sep 17 00:00:00 2001 From: Konstantin Akimov Date: Thu, 25 Sep 2025 14:09:28 +0700 Subject: [PATCH 033/656] refactor: rename InternalKey to PathDerivationType --- src/wallet/scriptpubkeyman.cpp | 6 +++--- src/wallet/scriptpubkeyman.h | 10 +++++----- src/wallet/wallet.cpp | 8 ++++---- 3 files changed, 12 insertions(+), 12 deletions(-) diff --git a/src/wallet/scriptpubkeyman.cpp b/src/wallet/scriptpubkeyman.cpp index 28ecbbff3f68..8922ac00ae3b 100644 --- a/src/wallet/scriptpubkeyman.cpp +++ b/src/wallet/scriptpubkeyman.cpp @@ -2074,7 +2074,7 @@ bool 
DescriptorScriptPubKeyMan::AddDescriptorKeyWithDB(WalletBatch& batch, const } } -bool DescriptorScriptPubKeyMan::SetupDescriptorGeneration(const CExtKey& master_key, const SecureString& secure_mnemonic, const SecureString& secure_mnemonic_passphrase, InternalKey internal) +bool DescriptorScriptPubKeyMan::SetupDescriptorGeneration(const CExtKey& master_key, const SecureString& secure_mnemonic, const SecureString& secure_mnemonic_passphrase, PathDerivationType type) { LOCK(cs_desc_man); assert(m_storage.IsWalletFlagSet(WALLET_FLAG_DESCRIPTORS)); @@ -2099,10 +2099,10 @@ bool DescriptorScriptPubKeyMan::SetupDescriptorGeneration(const CExtKey& master_ std::string xpub = EncodeExtPubKey(master_key.Neuter()); // Build descriptor string - std::string desc_prefix = strprintf("pkh(%s/%d'/%d'", xpub, internal == InternalKey::CoinJoin ? BIP32_PURPOSE_FEATURE : BIP32_PURPOSE_STANDARD, Params().ExtCoinType()); + std::string desc_prefix = strprintf("pkh(%s/%d'/%d'", xpub, type == PathDerivationType::DIP0009_CoinJoin ? BIP32_PURPOSE_FEATURE : BIP32_PURPOSE_STANDARD, Params().ExtCoinType()); std::string desc_suffix = "/*)"; - std::string internal_path = (internal == InternalKey::Internal) ? "/1" : "/0"; + std::string internal_path = (type == PathDerivationType::BIP44_Internal) ? "/1" : "/0"; std::string desc_str = desc_prefix + "/0'" + internal_path + desc_suffix; // Make the descriptor diff --git a/src/wallet/scriptpubkeyman.h b/src/wallet/scriptpubkeyman.h index 273ddcf6fc8b..7f4ee75c413b 100644 --- a/src/wallet/scriptpubkeyman.h +++ b/src/wallet/scriptpubkeyman.h @@ -147,11 +147,11 @@ class CKeyPool } }; -enum class InternalKey +enum class PathDerivationType { - External, - Internal, - CoinJoin, + BIP44_External, + BIP44_Internal, + DIP0009_CoinJoin, }; /* @@ -582,7 +582,7 @@ class DescriptorScriptPubKeyMan : public ScriptPubKeyMan bool IsHDEnabled() const override; //! 
Setup descriptors based on the given CExtkey - bool SetupDescriptorGeneration(const CExtKey& master_key, const SecureString& secure_mnemonic, const SecureString& secure_mnemonic_passphrase, InternalKey internal); + bool SetupDescriptorGeneration(const CExtKey& master_key, const SecureString& secure_mnemonic, const SecureString& secure_mnemonic_passphrase, PathDerivationType type); bool HavePrivateKeys() const override; diff --git a/src/wallet/wallet.cpp b/src/wallet/wallet.cpp index 469086122ada..e9c09e8186e0 100644 --- a/src/wallet/wallet.cpp +++ b/src/wallet/wallet.cpp @@ -3787,7 +3787,7 @@ void CWallet::SetupDescriptorScriptPubKeyMans(const SecureString& mnemonic_arg, CExtKey master_key; master_key.SetSeed(MakeByteSpan(seed_key)); - for (auto internal : {InternalKey::External, InternalKey::Internal, InternalKey::CoinJoin}) { + for (auto type : {PathDerivationType::BIP44_External, PathDerivationType::BIP44_Internal, PathDerivationType::DIP0009_CoinJoin}) { { // OUTPUT_TYPE is only one: LEGACY auto spk_manager = std::unique_ptr(new DescriptorScriptPubKeyMan(*this)); if (IsCrypted()) { @@ -3798,11 +3798,11 @@ void CWallet::SetupDescriptorScriptPubKeyMans(const SecureString& mnemonic_arg, throw std::runtime_error(std::string(__func__) + ": Could not encrypt new descriptors"); } } - spk_manager->SetupDescriptorGeneration(master_key, mnemonic, mnemonic_passphrase, internal); + spk_manager->SetupDescriptorGeneration(master_key, mnemonic, mnemonic_passphrase, type); uint256 id = spk_manager->GetID(); m_spk_managers[id] = std::move(spk_manager); - if (internal != InternalKey::CoinJoin) { - AddActiveScriptPubKeyMan(id, internal == InternalKey::Internal); + if (type != PathDerivationType::DIP0009_CoinJoin) { + AddActiveScriptPubKeyMan(id, type == PathDerivationType::BIP44_Internal); } } } From 32fd4611fe0a6ed16888c755834b2024ccd79474 Mon Sep 17 00:00:00 2001 From: Konstantin Akimov Date: Thu, 25 Sep 2025 14:13:19 +0700 Subject: [PATCH 034/656] fmt: clang format --- 
src/wallet/hdchain.cpp | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/src/wallet/hdchain.cpp b/src/wallet/hdchain.cpp index 5e1098876cbb..3f50b1415e45 100644 --- a/src/wallet/hdchain.cpp +++ b/src/wallet/hdchain.cpp @@ -203,6 +203,7 @@ size_t CHDChain::CountAccounts() std::string CHDPubKey::GetKeyPath() const { - return strprintf("m/%d'/%d'/%d'/%d/%d", BIP32_PURPOSE_STANDARD, Params().ExtCoinType(), nAccountIndex, nChangeIndex, extPubKey.nChild); + return strprintf("m/%d'/%d'/%d'/%d/%d", BIP32_PURPOSE_STANDARD, Params().ExtCoinType(), nAccountIndex, nChangeIndex, + extPubKey.nChild); } } // namespace wallet From c821702423c4b1f7bac54f1fba7fee66236ef8f8 Mon Sep 17 00:00:00 2001 From: Konstantin Akimov Date: Tue, 30 Sep 2025 03:47:13 +0700 Subject: [PATCH 035/656] fix: derivation path for cj - add missing /4' (cj purpose) --- src/wallet/rpc/backup.cpp | 2 +- src/wallet/scriptpubkeyman.cpp | 4 +++- 2 files changed, 4 insertions(+), 2 deletions(-) diff --git a/src/wallet/rpc/backup.cpp b/src/wallet/rpc/backup.cpp index e21da3ca8422..506053f0e57b 100644 --- a/src/wallet/rpc/backup.cpp +++ b/src/wallet/rpc/backup.cpp @@ -2039,7 +2039,7 @@ RPCHelpMan listdescriptors() spk.pushKV("internal", wallet->GetScriptPubKeyMan(true) == desc_spk_man); } if (type != std::nullopt) { - std::string match = strprintf("/%d'/%s'/0'", BIP32_PURPOSE_FEATURE, Params().ExtCoinType()); + std::string match = strprintf("/%d'/%s'/4'/0'", BIP32_PURPOSE_FEATURE, Params().ExtCoinType()); bool is_cj = descriptor.find(match) != std::string::npos; if (is_cj) { spk.pushKV("internal", false); diff --git a/src/wallet/scriptpubkeyman.cpp b/src/wallet/scriptpubkeyman.cpp index 8922ac00ae3b..28c354dd6997 100644 --- a/src/wallet/scriptpubkeyman.cpp +++ b/src/wallet/scriptpubkeyman.cpp @@ -2100,8 +2100,10 @@ bool DescriptorScriptPubKeyMan::SetupDescriptorGeneration(const CExtKey& master_ // Build descriptor string std::string desc_prefix = strprintf("pkh(%s/%d'/%d'", xpub, type == 
PathDerivationType::DIP0009_CoinJoin ? BIP32_PURPOSE_FEATURE : BIP32_PURPOSE_STANDARD, Params().ExtCoinType()); + if (type == PathDerivationType::DIP0009_CoinJoin) { + desc_prefix += "/4'"; + } std::string desc_suffix = "/*)"; - std::string internal_path = (type == PathDerivationType::BIP44_Internal) ? "/1" : "/0"; std::string desc_str = desc_prefix + "/0'" + internal_path + desc_suffix; From 8a49279541030356ebd7b1304977752b90b07eb2 Mon Sep 17 00:00:00 2001 From: Konstantin Akimov Date: Tue, 30 Sep 2025 03:51:37 +0700 Subject: [PATCH 036/656] fix: rpc improvements for CJ derivation path Co-authored-by: UdjinM6 --- src/wallet/rpc/backup.cpp | 7 ++----- 1 file changed, 2 insertions(+), 5 deletions(-) diff --git a/src/wallet/rpc/backup.cpp b/src/wallet/rpc/backup.cpp index 506053f0e57b..7c4d192bfa0d 100644 --- a/src/wallet/rpc/backup.cpp +++ b/src/wallet/rpc/backup.cpp @@ -1977,7 +1977,7 @@ RPCHelpMan listdescriptors() {RPCResult::Type::NUM, "timestamp", "The creation time of the descriptor"}, {RPCResult::Type::BOOL, "active", "Whether this descriptor is currently used to generate new addresses"}, {RPCResult::Type::BOOL, "internal", /*optional=*/true, "True if this descriptor is used to generate change addresses. False if this descriptor is used to generate receiving addresses; defined only for active descriptors"}, - {RPCResult::Type::BOOL, "coinjoin", /*optional=*/true, "True if this descriptor is used to generate CoinJoin addresses. False if this descriptor is used to generate receiving addresses; defined only for active descriptors"}, + {RPCResult::Type::BOOL, "coinjoin", /*optional=*/true, "True if this descriptor is used to generate CoinJoin addresses. 
False if this descriptor is used to generate receiving addresses."}, {RPCResult::Type::ARR_FIXED, "range", /*optional=*/true, "Defined only for ranged descriptors", { {RPCResult::Type::NUM, "", "Range start inclusive"}, {RPCResult::Type::NUM, "", "Range end inclusive"}, @@ -2041,10 +2041,7 @@ RPCHelpMan listdescriptors() if (type != std::nullopt) { std::string match = strprintf("/%d'/%s'/4'/0'", BIP32_PURPOSE_FEATURE, Params().ExtCoinType()); bool is_cj = descriptor.find(match) != std::string::npos; - if (is_cj) { - spk.pushKV("internal", false); - spk.pushKV("coinjoin", is_cj); - } + spk.pushKV("coinjoin", is_cj); } if (wallet_descriptor.descriptor->IsRange()) { UniValue range(UniValue::VARR); From b957689d6edca231bdf398e4bb124687814da3d4 Mon Sep 17 00:00:00 2001 From: Konstantin Akimov Date: Tue, 30 Sep 2025 13:47:00 +0700 Subject: [PATCH 037/656] fix: functional tests after RPC changes --- src/wallet/rpc/backup.cpp | 6 ++++-- test/functional/wallet_listdescriptors.py | 2 +- 2 files changed, 5 insertions(+), 3 deletions(-) diff --git a/src/wallet/rpc/backup.cpp b/src/wallet/rpc/backup.cpp index 7c4d192bfa0d..1e3cf53c5293 100644 --- a/src/wallet/rpc/backup.cpp +++ b/src/wallet/rpc/backup.cpp @@ -1977,7 +1977,7 @@ RPCHelpMan listdescriptors() {RPCResult::Type::NUM, "timestamp", "The creation time of the descriptor"}, {RPCResult::Type::BOOL, "active", "Whether this descriptor is currently used to generate new addresses"}, {RPCResult::Type::BOOL, "internal", /*optional=*/true, "True if this descriptor is used to generate change addresses. False if this descriptor is used to generate receiving addresses; defined only for active descriptors"}, - {RPCResult::Type::BOOL, "coinjoin", /*optional=*/true, "True if this descriptor is used to generate CoinJoin addresses. 
False if this descriptor is used to generate receiving addresses."}, + {RPCResult::Type::BOOL, "coinjoin", /*optional=*/true, "True if this descriptor is used to generate CoinJoin addresses; defined only if it is True."}, {RPCResult::Type::ARR_FIXED, "range", /*optional=*/true, "Defined only for ranged descriptors", { {RPCResult::Type::NUM, "", "Range start inclusive"}, {RPCResult::Type::NUM, "", "Range end inclusive"}, @@ -2041,7 +2041,9 @@ RPCHelpMan listdescriptors() if (type != std::nullopt) { std::string match = strprintf("/%d'/%s'/4'/0'", BIP32_PURPOSE_FEATURE, Params().ExtCoinType()); bool is_cj = descriptor.find(match) != std::string::npos; - spk.pushKV("coinjoin", is_cj); + if (is_cj) { + spk.pushKV("coinjoin", is_cj); + } } if (wallet_descriptor.descriptor->IsRange()) { UniValue range(UniValue::VARR); diff --git a/test/functional/wallet_listdescriptors.py b/test/functional/wallet_listdescriptors.py index 3c80745142ee..bd0c18d18e64 100755 --- a/test/functional/wallet_listdescriptors.py +++ b/test/functional/wallet_listdescriptors.py @@ -49,7 +49,7 @@ def run_test(self): assert_equal(3, len(result['descriptors'])) assert_equal(2, len([d for d in result['descriptors'] if d['active']])) self.log.info(f"result: {result['descriptors']}") - assert_equal(1, len([d for d in result['descriptors'] if d['internal']])) + assert_equal(1, len([d for d in result['descriptors'] if 'internal' in d and d['internal']])) assert_equal(1, len([d for d in result['descriptors'] if 'coinjoin' in d and d['coinjoin']])) for item in result['descriptors']: assert item['desc'] != '' From 5205b614afb0eb61255ee39381bcd39153265a65 Mon Sep 17 00:00:00 2001 From: UdjinM6 Date: Wed, 1 Oct 2025 19:59:22 +0300 Subject: [PATCH 038/656] fix: wrap the whole text in a global span to control its styling properly --- src/qt/sendcoinsdialog.cpp | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/src/qt/sendcoinsdialog.cpp b/src/qt/sendcoinsdialog.cpp index bc5011db329e..15b22a54d230 100644 --- 
a/src/qt/sendcoinsdialog.cpp +++ b/src/qt/sendcoinsdialog.cpp @@ -362,6 +362,9 @@ bool SendCoinsDialog::send(const QList& recipients, QString& formatted_short.erase(formatted_short.begin() + MAX_SEND_POPUP_ENTRIES, formatted_short.end()); } + // Wrap the whole text in a global span to control its styling properly + question_string.append(""); + /*: Message displayed when attempting to create a transaction. Cautionary text to prompt the user to verify that the displayed transaction details represent the transaction the user intends to create. */ question_string.append(tr("Do you want to create this transaction?")); @@ -463,6 +466,9 @@ bool SendCoinsDialog::send(const QList& recipients, QString& question_string.append(QString("
(=%1)") .arg(alternativeUnits.join(" " + tr("or") + " "))); + // Close the global span we opened at the very beginning + question_string.append("
"); + if (formatted.size() > 1) { informative_text = tr("To review recipient list click \"Show Details…\""); detailed_text = formatted.join("\n\n"); From 732ef96f02762b139c15ca95c5c34d67d8eb326a Mon Sep 17 00:00:00 2001 From: UdjinM6 Date: Wed, 1 Oct 2025 20:01:18 +0300 Subject: [PATCH 039/656] fix: typo: B in PSBT is Blockchain in our codebase --- src/qt/sendcoinsdialog.cpp | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/qt/sendcoinsdialog.cpp b/src/qt/sendcoinsdialog.cpp index 15b22a54d230..ee9deeacaea8 100644 --- a/src/qt/sendcoinsdialog.cpp +++ b/src/qt/sendcoinsdialog.cpp @@ -376,12 +376,12 @@ bool SendCoinsDialog::send(const QList& recipients, QString& /*: Text to inform a user attempting to create a transaction of their current options. At this stage, a user can only create a PSBT. This string is displayed when private keys are disabled and an external signer is not available. */ - question_string.append(tr("Please, review your transaction proposal. This will produce a Partially Signed Bitcoin Transaction (PSBT) which you can save or copy and then sign with e.g. an offline %1 wallet, or a PSBT-compatible hardware wallet.").arg(PACKAGE_NAME)); + question_string.append(tr("Please, review your transaction proposal. This will produce a Partially Signed Blockchain Transaction (PSBT) which you can save or copy and then sign with e.g. an offline %1 wallet, or a PSBT-compatible hardware wallet.").arg(PACKAGE_NAME)); } else if (model->getOptionsModel()->getEnablePSBTControls()) { /*: Text to inform a user attempting to create a transaction of their current options. At this stage, a user can send their transaction or create a PSBT. This string is displayed when both private keys and PSBT controls are enabled. */ - question_string.append(tr("Please, review your transaction. 
You can create and send this transaction or create a Partially Signed Bitcoin Transaction (PSBT), which you can save or copy and then sign with, e.g., an offline %1 wallet, or a PSBT-compatible hardware wallet.").arg(PACKAGE_NAME)); + question_string.append(tr("Please, review your transaction. You can create and send this transaction or create a Partially Signed Blockchain Transaction (PSBT), which you can save or copy and then sign with, e.g., an offline %1 wallet, or a PSBT-compatible hardware wallet.").arg(PACKAGE_NAME)); } else { /*: Text to prompt a user to review the details of the transaction they are attempting to send. */ question_string.append(tr("Please, review your transaction.")); From 2f27744f966a84db10d31e09f5bec71f070031c6 Mon Sep 17 00:00:00 2001 From: UdjinM6 Date: Wed, 1 Oct 2025 23:52:15 +0300 Subject: [PATCH 040/656] fix: typo: add missing whitespace --- src/qt/sendcoinsdialog.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/qt/sendcoinsdialog.cpp b/src/qt/sendcoinsdialog.cpp index ee9deeacaea8..26ffc2309a3d 100644 --- a/src/qt/sendcoinsdialog.cpp +++ b/src/qt/sendcoinsdialog.cpp @@ -444,7 +444,7 @@ bool SendCoinsDialog::send(const QList& recipients, QString& question_string.append("
"); question_string.append(""); question_string.append(tr("Warning: Using %1 with %2 or more inputs can harm your privacy and is not recommended").arg(strCoinJoinName).arg(10)); - question_string.append(""); + question_string.append(" "); question_string.append(tr("Click to learn more")); question_string.append(""); question_string.append(" "); From ca4a36409c3a6416559ef75fdbeab14a6bd7d428 Mon Sep 17 00:00:00 2001 From: PastaBot <156604295+DashCoreAutoGuix@users.noreply.github.com> Date: Tue, 30 Sep 2025 09:33:27 -0500 Subject: [PATCH 041/656] Merge bitcoin-core/gui#752: Modify command line help to show support for BIP21 URIs Co-authored-by: Hernan Marino fixup 752 Co-authored-by: UdjinM6 --- src/qt/utilitydialog.cpp | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/src/qt/utilitydialog.cpp b/src/qt/utilitydialog.cpp index a57217e09e70..a6ae662d39c7 100644 --- a/src/qt/utilitydialog.cpp +++ b/src/qt/utilitydialog.cpp @@ -62,7 +62,8 @@ HelpMessageDialog::HelpMessageDialog(QWidget *parent, HelpMode helpMode) : ui->helpMessage->setVisible(false); } else if (helpMode == cmdline) { setWindowTitle(tr("Command-line options")); - QString header = "Usage: dash-qt [command-line options] \n"; + QString header = "Usage: dash-qt [command-line options] [URI]\n\n" + "Optional URI is a Dash address in BIP21 URI format.\n"; QTextCursor cursor(ui->helpMessage->document()); cursor.insertText(version); cursor.insertBlock(); From a7cca6c3776fb5e3ccf2f52fcbeebc207662d932 Mon Sep 17 00:00:00 2001 From: PastaBot <156604295+DashCoreAutoGuix@users.noreply.github.com> Date: Tue, 30 Sep 2025 09:33:49 -0500 Subject: [PATCH 042/656] Merge bitcoin-core/gui#553: Change address / amount error background Co-authored-by: backport Co-authored-by: Claude --- src/qt/guiutil.cpp | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/qt/guiutil.cpp b/src/qt/guiutil.cpp index ae9f29e2cd4c..728d861fb5ba 100644 --- a/src/qt/guiutil.cpp +++ b/src/qt/guiutil.cpp @@ -175,7 
+175,7 @@ static const std::map themedDarkColors = { }; static const std::map themedStyles = { - { ThemedStyle::TS_INVALID, "background:#a84832;" }, + { ThemedStyle::TS_INVALID, "border: 3px solid #a84832;" }, { ThemedStyle::TS_ERROR, "color:#a84832;" }, { ThemedStyle::TS_WARNING, "color:#999900;" }, { ThemedStyle::TS_SUCCESS, "color:#5e8c41;" }, @@ -185,7 +185,7 @@ static const std::map themedStyles = { }; static const std::map themedDarkStyles = { - { ThemedStyle::TS_INVALID, "background:#a84832;" }, + { ThemedStyle::TS_INVALID, "border: 3px solid #a84832;" }, { ThemedStyle::TS_ERROR, "color:#a84832;" }, { ThemedStyle::TS_WARNING, "color:#999900;" }, { ThemedStyle::TS_SUCCESS, "color:#5e8c41;" }, From 7d1e7d54f8d1ec2f2d62396127bd9710ef81784e Mon Sep 17 00:00:00 2001 From: PastaBot <156604295+DashCoreAutoGuix@users.noreply.github.com> Date: Tue, 30 Sep 2025 09:35:20 -0500 Subject: [PATCH 043/656] Merge bitcoin/bitcoin#29304: fuzz: Exit and log stderr for parse_test_list errors Co-authored-by: fanquake Co-authored-by: pasta --- test/fuzz/test_runner.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/test/fuzz/test_runner.py b/test/fuzz/test_runner.py index 929eef9cf75d..add9145da517 100755 --- a/test/fuzz/test_runner.py +++ b/test/fuzz/test_runner.py @@ -334,8 +334,8 @@ def parse_test_list(*, fuzz_bin, source_dir): **get_fuzz_env(target="", source_dir=source_dir) }, stdout=subprocess.PIPE, - stderr=subprocess.DEVNULL, universal_newlines=True, + check=True, ).stdout.splitlines() return test_list_all From d96d2abec58e0aabc2b8aad83a22e73b58b82dbd Mon Sep 17 00:00:00 2001 From: PastaBot <156604295+DashCoreAutoGuix@users.noreply.github.com> Date: Tue, 30 Sep 2025 09:36:13 -0500 Subject: [PATCH 044/656] Merge bitcoin/bitcoin#29186: ci, iwyu: Drop backported mappings Co-authored-by: fanquake --- contrib/devtools/iwyu/bitcoin.core.imp | 3 --- 1 file changed, 3 deletions(-) diff --git a/contrib/devtools/iwyu/bitcoin.core.imp 
b/contrib/devtools/iwyu/bitcoin.core.imp index ce7786f58c24..1f31229e0a48 100644 --- a/contrib/devtools/iwyu/bitcoin.core.imp +++ b/contrib/devtools/iwyu/bitcoin.core.imp @@ -1,6 +1,3 @@ # Fixups / upstreamed changes [ - { include: [ "", private, "", public ] }, - { include: [ "", private, "", public ] }, - { include: [ "", private, "", public ] }, ] From 58407bc1813856b277a3323a6c920c50fb7e6a40 Mon Sep 17 00:00:00 2001 From: Kittywhiskers Van Gogh <63189531+kwvg@users.noreply.github.com> Date: Thu, 2 Oct 2025 17:12:37 +0000 Subject: [PATCH 045/656] evo: add `DomainPort` for validating and storing domain and port pair --- src/evo/netinfo.cpp | 59 ++++++++++++++++++++++++++++ src/evo/netinfo.h | 71 +++++++++++++++++++++++++++++++++ src/test/evo_netinfo_tests.cpp | 72 +++++++++++++++++++++++++++++++--- 3 files changed, 196 insertions(+), 6 deletions(-) diff --git a/src/evo/netinfo.cpp b/src/evo/netinfo.cpp index 2c4dc7ef712c..d9458edcc019 100644 --- a/src/evo/netinfo.cpp +++ b/src/evo/netinfo.cpp @@ -18,8 +18,16 @@ namespace { static std::unique_ptr g_main_params{nullptr}; static std::once_flag g_main_params_flag; +/** Maximum length of a label in a domain per RFC 1035 */ +static constexpr uint8_t DOMAIN_LABEL_MAX_LEN{63}; +/** Maximum possible length of a ASCII FQDN */ +static constexpr uint8_t DOMAIN_MAX_LEN{253}; +/** Minimum length of a FQDN */ +static constexpr uint8_t DOMAIN_MIN_LEN{3}; + static constexpr std::string_view SAFE_CHARS_IPV4{"1234567890."}; static constexpr std::string_view SAFE_CHARS_IPV4_6{"abcdefABCDEF1234567890.:[]"}; +static constexpr std::string_view SAFE_CHARS_RFC1035{"abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789.-"}; bool MatchCharsFilter(std::string_view input, std::string_view filter) { @@ -42,6 +50,57 @@ UniValue ArrFromService(const CService& addr) return obj; } +DomainPort::Status DomainPort::ValidateDomain(const std::string& addr) +{ + if (addr.length() > DOMAIN_MAX_LEN || addr.length() < DOMAIN_MIN_LEN) { + return 
DomainPort::Status::BadLen; + } + if (!MatchCharsFilter(addr, SAFE_CHARS_RFC1035)) { + return DomainPort::Status::BadChar; + } + if (addr.front() == '.' || addr.back() == '.') { + return DomainPort::Status::BadCharPos; + } + std::vector labels{SplitString(addr, '.')}; + if (labels.size() < 2) { + return DomainPort::Status::BadDotless; + } + for (const auto& label : labels) { + if (label.empty() || label.length() > DOMAIN_LABEL_MAX_LEN) { + return DomainPort::Status::BadLabelLen; + } + if (label.front() == '-' || label.back() == '-') { + return DomainPort::Status::BadLabelCharPos; + } + } + return DomainPort::Status::Success; +} + +DomainPort::Status DomainPort::Set(const std::string& addr, const uint16_t port) +{ + if (port == 0) { + return DomainPort::Status::BadPort; + } + const auto ret{ValidateDomain(addr)}; + if (ret == DomainPort::Status::Success) { + // Convert to lowercase to avoid duplication by changing case (domains are case-insensitive) + m_addr = ToLower(addr); + m_port = port; + } + return ret; +} + +DomainPort::Status DomainPort::Validate() const +{ + if (m_addr.empty() || m_addr != ToLower(m_addr)) { + return DomainPort::Status::Malformed; + } + if (m_port == 0) { + return DomainPort::Status::BadPort; + } + return ValidateDomain(m_addr); +} + bool NetInfoEntry::operator==(const NetInfoEntry& rhs) const { if (m_type != rhs.m_type) return false; diff --git a/src/evo/netinfo.h b/src/evo/netinfo.h index 4f1cdb9f1717..22367dda556d 100644 --- a/src/evo/netinfo.h +++ b/src/evo/netinfo.h @@ -106,6 +106,77 @@ UniValue ArrFromService(const CService& addr); /** Equivalent to Params() if node is running on mainnet */ const CChainParams& MainParams(); +class DomainPort +{ +public: + enum class Status : uint8_t { + BadChar, + BadCharPos, + BadDotless, + BadLabelCharPos, + BadLabelLen, + BadLen, + BadPort, + Malformed, + + Success + }; + + static constexpr std::string_view StatusToString(const DomainPort::Status code) + { + switch (code) { + case 
DomainPort::Status::BadChar: + return "invalid character"; + case DomainPort::Status::BadCharPos: + return "bad domain character position"; + case DomainPort::Status::BadDotless: + return "prohibited dotless"; + case DomainPort::Status::BadLabelCharPos: + return "bad label character position"; + case DomainPort::Status::BadLabelLen: + return "bad label length"; + case DomainPort::Status::BadLen: + return "bad domain length"; + case DomainPort::Status::BadPort: + return "bad port"; + case DomainPort::Status::Malformed: + return "malformed"; + case DomainPort::Status::Success: + return "success"; + } // no default case, so the compiler can warn about missing cases + assert(false); + } + +private: + std::string m_addr{}; + uint16_t m_port{0}; + +private: + static DomainPort::Status ValidateDomain(const std::string& input); + +public: + DomainPort() = default; + ~DomainPort() = default; + + bool operator<(const DomainPort& rhs) const { return std::tie(m_addr, m_port) < std::tie(rhs.m_addr, rhs.m_port); } + bool operator==(const DomainPort& rhs) const { return std::tie(m_addr, m_port) == std::tie(rhs.m_addr, rhs.m_port); } + bool operator!=(const DomainPort& rhs) const { return !(*this == rhs); } + + SERIALIZE_METHODS(DomainPort, obj) + { + READWRITE(obj.m_addr); + READWRITE(Using>(obj.m_port)); + } + + bool IsEmpty() const { return m_addr.empty() && m_port == 0; } + bool IsValid() const { return Validate() == DomainPort::Status::Success; } + DomainPort::Status Set(const std::string& addr, const uint16_t port); + DomainPort::Status Validate() const; + uint16_t GetPort() const { return m_port; } + std::string ToStringAddr() const { return m_addr; } + std::string ToStringAddrPort() const { return strprintf("%s:%d", m_addr, m_port); } +}; + class NetInfoEntry { public: diff --git a/src/test/evo_netinfo_tests.cpp b/src/test/evo_netinfo_tests.cpp index 7c20deebf4e9..509042f22dd1 100644 --- a/src/test/evo_netinfo_tests.cpp +++ b/src/test/evo_netinfo_tests.cpp @@ -10,6 +10,7 
@@ #include #include #include +#include #include @@ -21,7 +22,7 @@ struct TestEntry { NetInfoStatus expected_ret_ext; }; -static const std::vector vals_main{ +static const std::vector addr_vals_main{ // Address and port specified {{NetInfoPurpose::CORE_P2P, "1.1.1.1:9999"}, NetInfoStatus::Success, NetInfoStatus::Success}, // - Port should default to default P2P core with MnNetInfo @@ -100,7 +101,7 @@ void TestExtNetInfo(const std::vector& vals) BOOST_AUTO_TEST_CASE(mnnetinfo_rules_main) { - TestMnNetInfo(vals_main); + TestMnNetInfo(addr_vals_main); { // MnNetInfo only stores one value, overwriting prohibited @@ -122,9 +123,9 @@ BOOST_AUTO_TEST_CASE(mnnetinfo_rules_main) } } -BOOST_AUTO_TEST_CASE(extnetinfo_rules_main) { TestExtNetInfo(vals_main); } +BOOST_AUTO_TEST_CASE(extnetinfo_rules_main) { TestExtNetInfo(addr_vals_main); } -static const std::vector vals_reg{ +static const std::vector addr_vals_reg{ // - MnNetInfo doesn't mind using port 0 // - ExtNetInfo requires non-zero ports {{NetInfoPurpose::CORE_P2P, "1.1.1.1:0"}, NetInfoStatus::Success, NetInfoStatus::BadPort}, @@ -136,11 +137,11 @@ static const std::vector vals_reg{ {{NetInfoPurpose::CORE_P2P, "1.1.1.1:22"}, NetInfoStatus::Success, NetInfoStatus::BadPort}, }; -BOOST_FIXTURE_TEST_CASE(mnnetinfo_rules_reg, RegTestingSetup) { TestMnNetInfo(vals_reg); } +BOOST_FIXTURE_TEST_CASE(mnnetinfo_rules_reg, RegTestingSetup) { TestMnNetInfo(addr_vals_reg); } BOOST_FIXTURE_TEST_CASE(extnetinfo_rules_reg, RegTestingSetup) { - TestExtNetInfo(vals_reg); + TestExtNetInfo(addr_vals_reg); { // ExtNetInfo can store up to 4 entries per purpose code, check limit enforcement @@ -364,4 +365,63 @@ BOOST_AUTO_TEST_CASE(interface_equality) BOOST_CHECK(!util::shared_ptr_equal(ptr_lhs, ptr_rhs) && util::shared_ptr_not_equal(ptr_lhs, ptr_rhs)); } +BOOST_AUTO_TEST_CASE(domainport_rules) +{ + static const std::vector> domain_vals{ + // Domain name labels can be as small as one character long and remain valid + {"r.server-1.ab.cd", 
DomainPort::Status::Success}, + // Domain names labels can trail with numbers or consist entirely of numbers due to RFC 1123 + {"9998.9example7.ab", DomainPort::Status::Success}, + // dotless domains prohibited + {"abcd", DomainPort::Status::BadDotless}, + // no empty label (trailing delimiter) + {"abc.", DomainPort::Status::BadCharPos}, + // no empty label (leading delimiter) + {".abc", DomainPort::Status::BadCharPos}, + // no empty label (extra delimiters) + {"a..dot..b", DomainPort::Status::BadLabelLen}, + // ' is not a valid character in domains + {"somebody's macbook pro.local", DomainPort::Status::BadChar}, + // spaces are not a valid character in domains + {"somebodys macbook pro.local", DomainPort::Status::BadChar}, + // trailing hyphens are not allowed + {"-a-.bc.de", DomainPort::Status::BadLabelCharPos}, + // 2 (characters in domain) < 3 (minimum length) + {"ac", DomainPort::Status::BadLen}, + // 278 (characters in domain) > 253 (maximum limit) + {"Loremipsumdolorsitametconsecteturadipiscingelitseddoeiusmodtempor" + "incididuntutlaboreetdoloremagnaaliquaUtenimadminimveniamquisnostrud" + "exercitationullamcolaborisnisiutaliquipexeacommodoconsequatDuisaute" + "iruredolorinreprehenderitinvoluptatevelitessecillumdoloreeufugiatnullapariat.ur", DomainPort::Status::BadLen}, + // 64 (characters in label) > 63 (maximum limit) + {"loremipsumdolorsitametconsecteturadipiscingelitseddoeiusmodtempo.ri.nc", DomainPort::Status::BadLabelLen}, + }; + + for (const auto& [addr, retval] : domain_vals) { + DomainPort domain; + BOOST_CHECK_EQUAL(domain.Set(addr, 9999), retval); + if (retval != DomainPort::Status::Success) { + BOOST_CHECK_EQUAL(domain.Validate(), DomainPort::Status::Malformed); // Empty values report as Malformed + } else { + BOOST_CHECK_EQUAL(domain.Validate(), DomainPort::Status::Success); + } + } + + { + // DomainPort requires non-zero ports + DomainPort domain; + BOOST_CHECK_EQUAL(domain.Set("example.com", 0), DomainPort::Status::BadPort); + 
BOOST_CHECK_EQUAL(domain.Validate(), DomainPort::Status::Malformed); + } + + { + // DomainPort stores the domain in lower-case + DomainPort lhs, rhs; + BOOST_CHECK_EQUAL(lhs.Set("example.com", 9999), DomainPort::Status::Success); + BOOST_CHECK_EQUAL(rhs.Set(ToUpper("example.com"), 9999), DomainPort::Status::Success); + BOOST_CHECK_EQUAL(lhs.ToStringAddr(), rhs.ToStringAddr()); + BOOST_CHECK(lhs == rhs); + } +} + BOOST_AUTO_TEST_SUITE_END() From 79cc52ca4d848d6c456c1b3dbc9b51c4a2fb4cb9 Mon Sep 17 00:00:00 2001 From: Kittywhiskers Van Gogh <63189531+kwvg@users.noreply.github.com> Date: Sat, 9 Aug 2025 11:54:00 +0000 Subject: [PATCH 046/656] evo: allow storing `DomainPort` in `NetInfoEntry` --- src/evo/deterministicmns.cpp | 29 +++++++++++++++++++++++++++++ src/evo/netinfo.cpp | 29 +++++++++++++++++++++++++---- src/evo/netinfo.h | 25 ++++++++++++++++++++++--- src/evo/specialtxman.cpp | 9 +++++++++ 4 files changed, 85 insertions(+), 7 deletions(-) diff --git a/src/evo/deterministicmns.cpp b/src/evo/deterministicmns.cpp index ec00139807f6..1faff56d4e48 100644 --- a/src/evo/deterministicmns.cpp +++ b/src/evo/deterministicmns.cpp @@ -440,6 +440,12 @@ void CDeterministicMNList::AddMN(const CDeterministicMNCPtr& dmn, bool fBumpTota throw std::runtime_error(strprintf("%s: Can't add a masternode %s with a duplicate address=%s", __func__, dmn->proTxHash.ToString(), service_opt->ToStringAddrPort())); } + } else if (const auto domain_opt{entry.GetDomainPort()}) { + if (!AddUniqueProperty(*dmn, *domain_opt)) { + mnUniquePropertyMap = mnUniquePropertyMapSaved; + throw std::runtime_error(strprintf("%s: Can't add a masternode %s with a duplicate address=%s", + __func__, dmn->proTxHash.ToString(), domain_opt->ToStringAddrPort())); + } } else { mnUniquePropertyMap = mnUniquePropertyMapSaved; throw std::runtime_error( @@ -494,6 +500,10 @@ void CDeterministicMNList::UpdateMN(const CDeterministicMN& oldDmn, const std::s if (!DeleteUniqueProperty(dmn, *service_opt)) { return "internal 
error"; // This shouldn't be possible } + } else if (const auto domain_opt{old_entry.GetDomainPort()}) { + if (!DeleteUniqueProperty(dmn, *domain_opt)) { + return "internal error"; // This shouldn't be possible + } } else { return "invalid address"; } @@ -503,6 +513,10 @@ void CDeterministicMNList::UpdateMN(const CDeterministicMN& oldDmn, const std::s if (!AddUniqueProperty(dmn, *service_opt)) { return strprintf("duplicate (%s)", service_opt->ToStringAddrPort()); } + } else if (const auto domain_opt{new_entry.GetDomainPort()}) { + if (!AddUniqueProperty(dmn, *domain_opt)) { + return strprintf("duplicate (%s)", domain_opt->ToStringAddrPort()); + } } else { return "invalid address"; } @@ -583,6 +597,12 @@ void CDeterministicMNList::RemoveMN(const uint256& proTxHash) throw std::runtime_error(strprintf("%s: Can't delete a masternode %s with an address=%s", __func__, proTxHash.ToString(), service_opt->ToStringAddrPort())); } + } else if (const auto domain_opt{entry.GetDomainPort()}) { + if (!DeleteUniqueProperty(*dmn, *domain_opt)) { + mnUniquePropertyMap = mnUniquePropertyMapSaved; + throw std::runtime_error(strprintf("%s: Can't delete a masternode %s with an address=%s", __func__, + proTxHash.ToString(), domain_opt->ToStringAddrPort())); + } } else { mnUniquePropertyMap = mnUniquePropertyMapSaved; throw std::runtime_error(strprintf("%s: Can't delete a masternode %s with invalid address", __func__, @@ -1142,6 +1162,11 @@ bool CheckProRegTx(CDeterministicMNManager& dmnman, const CTransaction& tx, gsl: mnList.GetUniquePropertyMN(*service_opt)->collateralOutpoint != collateralOutpoint) { return state.Invalid(TxValidationResult::TX_BAD_SPECIAL, "bad-protx-dup-netinfo-entry"); } + } else if (const auto domain_opt{entry.GetDomainPort()}) { + if (mnList.HasUniqueProperty(*domain_opt) && + mnList.GetUniquePropertyMN(*domain_opt)->collateralOutpoint != collateralOutpoint) { + return state.Invalid(TxValidationResult::TX_BAD_SPECIAL, "bad-protx-dup-netinfo-entry"); + } } else { 
return state.Invalid(TxValidationResult::TX_BAD_SPECIAL, "bad-protx-netinfo-entry"); } @@ -1224,6 +1249,10 @@ bool CheckProUpServTx(CDeterministicMNManager& dmnman, const CTransaction& tx, g mnList.GetUniquePropertyMN(*service_opt)->proTxHash != opt_ptx->proTxHash) { return state.Invalid(TxValidationResult::TX_BAD_SPECIAL, "bad-protx-dup-netinfo-entry"); } + } else if (const auto domain_opt{entry.GetDomainPort()}) { + if (mnList.HasUniqueProperty(*domain_opt) && mnList.GetUniquePropertyMN(*domain_opt)->proTxHash != opt_ptx->proTxHash) { + return state.Invalid(TxValidationResult::TX_BAD_SPECIAL, "bad-protx-dup-netinfo-entry"); + } } else { return state.Invalid(TxValidationResult::TX_BAD_SPECIAL, "bad-protx-netinfo-entry"); } diff --git a/src/evo/netinfo.cpp b/src/evo/netinfo.cpp index d9458edcc019..b8b09c74ff4c 100644 --- a/src/evo/netinfo.cpp +++ b/src/evo/netinfo.cpp @@ -124,6 +124,10 @@ bool NetInfoEntry::operator<(const NetInfoEntry& rhs) const if constexpr (std::is_same_v) { // Both the same type, compare as usual return lhs < rhs; + } else if constexpr ((std::is_same_v || std::is_same_v) && + (std::is_same_v || std::is_same_v)) { + // Differing types but both implement ToStringAddrPort(), lexicographical compare strings + return lhs.ToStringAddrPort() < rhs.ToStringAddrPort(); } // If lhs is monostate, it less than rhs; otherwise rhs is greater return std::is_same_v; @@ -140,12 +144,21 @@ std::optional NetInfoEntry::GetAddrPort() const return std::nullopt; } +std::optional NetInfoEntry::GetDomainPort() const +{ + if (const auto* data_ptr{std::get_if(&m_data)}; m_type == NetInfoType::Domain && data_ptr) { + ASSERT_IF_DEBUG(data_ptr->IsValid()); + return *data_ptr; + } + return std::nullopt; +} + uint16_t NetInfoEntry::GetPort() const { return std::visit( [](auto&& input) -> uint16_t { using T1 = std::decay_t; - if constexpr (std::is_same_v) { + if constexpr (std::is_same_v || std::is_same_v) { return input.GetPort(); } return 0; @@ -162,7 +175,8 @@ bool 
NetInfoEntry::IsTriviallyValid() const return std::visit( [this](auto&& input) -> bool { using T1 = std::decay_t; - static_assert(std::is_same_v || std::is_same_v, "Unexpected type"); + static_assert(std::is_same_v || std::is_same_v || std::is_same_v, + "Unexpected type"); if constexpr (std::is_same_v) { // Empty underlying data isn't a valid entry return false; @@ -171,6 +185,11 @@ bool NetInfoEntry::IsTriviallyValid() const if (m_type != NetInfoType::Service) return false; // Underlying data must meet surface-level validity checks for its type if (!input.IsValid()) return false; + } else if constexpr (std::is_same_v) { + // Type code should be truthful as it decides what underlying type is used when (de)serializing + if (m_type != NetInfoType::Domain) return false; + // Underlying data should at least meet surface-level validity checks + if (!input.IsValid()) return false; } return true; }, @@ -184,6 +203,8 @@ std::string NetInfoEntry::ToString() const using T1 = std::decay_t; if constexpr (std::is_same_v) { return strprintf("CService(addr=%s, port=%u)", input.ToStringAddr(), input.GetPort()); + } else if constexpr (std::is_same_v) { + return strprintf("DomainPort(addr=%s, port=%u)", input.ToStringAddr(), input.GetPort()); } return "[invalid entry]"; }, @@ -195,7 +216,7 @@ std::string NetInfoEntry::ToStringAddr() const return std::visit( [](auto&& input) -> std::string { using T1 = std::decay_t; - if constexpr (std::is_same_v) { + if constexpr (std::is_same_v || std::is_same_v) { return input.ToStringAddr(); } return "[invalid entry]"; @@ -208,7 +229,7 @@ std::string NetInfoEntry::ToStringAddrPort() const return std::visit( [](auto&& input) -> std::string { using T1 = std::decay_t; - if constexpr (std::is_same_v) { + if constexpr (std::is_same_v || std::is_same_v) { return input.ToStringAddrPort(); } return "[invalid entry]"; diff --git a/src/evo/netinfo.h b/src/evo/netinfo.h index 22367dda556d..e377df6b001d 100644 --- a/src/evo/netinfo.h +++ b/src/evo/netinfo.h 
@@ -156,6 +156,9 @@ class DomainPort public: DomainPort() = default; + template + DomainPort(deserialize_type, Stream& s) { s >> *this; } + ~DomainPort() = default; bool operator<(const DomainPort& rhs) const { return std::tie(m_addr, m_port) < std::tie(rhs.m_addr, rhs.m_port); } @@ -182,15 +185,22 @@ class NetInfoEntry public: enum NetInfoType : uint8_t { Service = 0x01, + Domain = 0x02, Invalid = 0xff }; private: uint8_t m_type{NetInfoType::Invalid}; - std::variant m_data{std::monostate{}}; + std::variant m_data{std::monostate{}}; public: NetInfoEntry() = default; + NetInfoEntry(const DomainPort& domain) + { + if (!domain.IsValid()) return; + m_type = NetInfoType::Domain; + m_data = domain; + } NetInfoEntry(const CService& service) { if (!service.IsValid()) return; @@ -213,6 +223,9 @@ class NetInfoEntry if (const auto* data_ptr{std::get_if(&m_data)}; m_type == NetInfoType::Service && data_ptr && data_ptr->IsValid()) { s << m_type << *data_ptr; + } else if (const auto* data_ptr{std::get_if(&m_data)}; + m_type == NetInfoType::Domain && data_ptr && data_ptr->IsValid()) { + s << m_type << *data_ptr; } else { s << NetInfoType::Invalid; } @@ -224,12 +237,17 @@ class NetInfoEntry OverrideStream s(&s_, /*nType=*/0, s_.GetVersion() | ADDRV2_FORMAT); s >> m_type; if (m_type == NetInfoType::Service) { - m_data = CService{}; try { - CService& service{std::get(m_data)}; + auto& service{m_data.emplace()}; s >> service; if (!service.IsValid()) { Clear(); } // Invalid CService, mark as invalid } catch (const std::ios_base::failure&) { Clear(); } // Deser failed, mark as invalid + } else if (m_type == NetInfoType::Domain) { + try { + auto& domain{m_data.emplace()}; + s >> domain; + if (!domain.IsValid()) { Clear(); } // Invalid DomainPort, mark as invalid + } catch (const std::ios_base::failure&) { Clear(); } // Deser failed, mark as invalid } else { Clear(); } // Invalid type code, mark as invalid } @@ -240,6 +258,7 @@ class NetInfoEntry } std::optional GetAddrPort() const; + 
std::optional GetDomainPort() const; uint16_t GetPort() const; bool IsEmpty() const { return *this == NetInfoEntry{}; } bool IsTriviallyValid() const; diff --git a/src/evo/specialtxman.cpp b/src/evo/specialtxman.cpp index 9fcffe753299..e7fe59efff89 100644 --- a/src/evo/specialtxman.cpp +++ b/src/evo/specialtxman.cpp @@ -262,6 +262,10 @@ bool CSpecialTxProcessor::BuildNewListFromBlock(const CBlock& block, gsl::not_nu if (newList.HasUniqueProperty(*service_opt)) { return state.Invalid(BlockValidationResult::BLOCK_CONSENSUS, "bad-protx-dup-netinfo-entry"); } + } else if (const auto domain_opt{entry.GetDomainPort()}) { + if (newList.HasUniqueProperty(*domain_opt)) { + return state.Invalid(BlockValidationResult::BLOCK_CONSENSUS, "bad-protx-dup-netinfo-entry"); + } } else { return state.Invalid(BlockValidationResult::BLOCK_CONSENSUS, "bad-protx-netinfo-entry"); } @@ -298,6 +302,11 @@ bool CSpecialTxProcessor::BuildNewListFromBlock(const CBlock& block, gsl::not_nu newList.GetUniquePropertyMN(*service_opt)->proTxHash != opt_proTx->proTxHash) { return state.Invalid(BlockValidationResult::BLOCK_CONSENSUS, "bad-protx-dup-netinfo-entry"); } + } else if (const auto domain_opt{entry.GetDomainPort()}) { + if (newList.HasUniqueProperty(*domain_opt) && + newList.GetUniquePropertyMN(*domain_opt)->proTxHash != opt_proTx->proTxHash) { + return state.Invalid(BlockValidationResult::BLOCK_CONSENSUS, "bad-protx-dup-netinfo-entry"); + } } else { return state.Invalid(BlockValidationResult::BLOCK_CONSENSUS, "bad-protx-netinfo-entry"); } From f04b0d2dea123981179d02991ff039a2c1ec7ed2 Mon Sep 17 00:00:00 2001 From: Kittywhiskers Van Gogh <63189531+kwvg@users.noreply.github.com> Date: Thu, 2 Oct 2025 18:44:47 +0000 Subject: [PATCH 047/656] evo: allow `ExtNetInfo` to accept `DomainPort` if `PLATFORM_HTTPS` --- src/evo/netinfo.cpp | 83 +++++++++++++++++++++++++++++++++- src/evo/netinfo.h | 1 + src/test/evo_netinfo_tests.cpp | 27 ++++++++++- 3 files changed, 107 insertions(+), 4 deletions(-) diff 
--git a/src/evo/netinfo.cpp b/src/evo/netinfo.cpp index b8b09c74ff4c..17279d0448b9 100644 --- a/src/evo/netinfo.cpp +++ b/src/evo/netinfo.cpp @@ -25,14 +25,46 @@ static constexpr uint8_t DOMAIN_MAX_LEN{253}; /** Minimum length of a FQDN */ static constexpr uint8_t DOMAIN_MIN_LEN{3}; +static constexpr std::string_view SAFE_CHARS_ALPHA{"abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ"}; static constexpr std::string_view SAFE_CHARS_IPV4{"1234567890."}; static constexpr std::string_view SAFE_CHARS_IPV4_6{"abcdefABCDEF1234567890.:[]"}; static constexpr std::string_view SAFE_CHARS_RFC1035{"abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789.-"}; +static constexpr std::array TLDS_BAD{ + // ICANN resolution 2018.02.04.12 + ".mail", + // Infrastructure TLD + ".arpa", + // RFC 6761 + ".example", ".invalid", ".localhost", ".test", + // RFC 6762 + ".local", + // RFC 6762, Appendix G + ".corp", ".home", ".internal", ".intranet", ".lan", ".private", +}; bool MatchCharsFilter(std::string_view input, std::string_view filter) { return std::all_of(input.begin(), input.end(), [&filter](char c) { return filter.find(c) != std::string_view::npos; }); } + +bool MatchSuffix(const std::string& str, Span list) +{ + if (str.empty()) return false; + for (const auto& suffix : list) { + if (suffix.size() > str.size()) continue; + if (std::string_view{str}.ends_with(suffix)) return true; + } + return false; +} + +bool IsAllowedPlatformHTTPPort(uint16_t port) +{ + switch (port) { + case 443: + return true; + } + return false; +} } // anonymous namespace bool IsNodeOnMainnet() { return Params().NetworkIDString() == CBaseChainParams::MAIN; } @@ -387,6 +419,10 @@ NetInfoStatus ExtNetInfo::ProcessCandidate(const NetInfoPurpose purpose, const N if (IsAddrPortDuplicate(candidate)) { return NetInfoStatus::Duplicate; } + if (candidate.GetDomainPort().has_value() && purpose != NetInfoPurpose::PLATFORM_HTTPS) { + // Domains only allowed for Platform HTTPS API + return 
NetInfoStatus::BadInput; + } if (auto it{m_data.find(purpose)}; it != m_data.end()) { // Existing entries list found, check limit auto& [_, entries] = *it; @@ -426,6 +462,26 @@ NetInfoStatus ExtNetInfo::ValidateService(const CService& service) return NetInfoStatus::Success; } +NetInfoStatus ExtNetInfo::ValidateDomainPort(const DomainPort& domain) +{ + if (!domain.IsValid()) { + return NetInfoStatus::BadInput; + } + const uint16_t domain_port{domain.GetPort()}; + if (domain_port == 0 || (IsBadPort(domain_port) && !IsAllowedPlatformHTTPPort(domain_port))) { + return NetInfoStatus::BadPort; + } + const std::string& addr{domain.ToStringAddr()}; + if (MatchSuffix(addr, TLDS_BAD)) { + return NetInfoStatus::BadInput; + } + if (const auto labels{SplitString(addr, '.')}; !MatchCharsFilter(labels.at(labels.size() - 1), SAFE_CHARS_ALPHA)) { + return NetInfoStatus::BadInput; + } + + return NetInfoStatus::Success; +} + NetInfoStatus ExtNetInfo::AddEntry(const NetInfoPurpose purpose, const std::string& input) { if (!IsValidPurpose(purpose)) { @@ -437,11 +493,25 @@ NetInfoStatus ExtNetInfo::AddEntry(const NetInfoPurpose purpose, const std::stri std::string addr; uint16_t port{0}; SplitHostPort(input, port, addr); - // Contains invalid characters, unlikely to pass Lookup(), fast-fail + if (!MatchCharsFilter(addr, SAFE_CHARS_IPV4_6)) { - return NetInfoStatus::BadInput; + if (!MatchCharsFilter(addr, SAFE_CHARS_RFC1035)) { + // Neither IP:port safe nor domain-safe, we can safely assume it's bad input + return NetInfoStatus::BadInput; + } + + // Not IP:port safe but domain safe, treat as domain. 
+ if (DomainPort domain; domain.Set(addr, port) == DomainPort::Status::Success) { + const auto ret{ValidateDomainPort(domain)}; + if (ret == NetInfoStatus::Success) { + return ProcessCandidate(purpose, NetInfoEntry{domain}); + } + return ret; /* ValidateDomainPort() failed */ + } + return NetInfoStatus::BadInput; /* DomainPort::Set() failed */ } + // IP:port safe, try to parse it as IP:port if (auto service_opt{Lookup(addr, /*portDefault=*/port, /*fAllowLookup=*/false)}) { const auto ret{ValidateService(*service_opt)}; if (ret == NetInfoStatus::Success) { @@ -516,6 +586,15 @@ NetInfoStatus ExtNetInfo::Validate() const // Stores CService underneath but doesn't pass validation rules return ret; } + } else if (const auto domain_opt{entry.GetDomainPort()}) { + if (purpose != NetInfoPurpose::PLATFORM_HTTPS) { + // Domains only allowed for Platform HTTPS API + return NetInfoStatus::BadInput; + } + if (auto ret{ValidateDomainPort(*domain_opt)}; ret != NetInfoStatus::Success) { + // Stores DomainPort underneath but doesn't pass validation rules + return ret; + } } else { // Doesn't store valid type underneath return NetInfoStatus::Malformed; diff --git a/src/evo/netinfo.h b/src/evo/netinfo.h index e377df6b001d..3ea51992b79e 100644 --- a/src/evo/netinfo.h +++ b/src/evo/netinfo.h @@ -384,6 +384,7 @@ class ExtNetInfo final : public NetInfoInterface /** Validate CService candidate address against ruleset */ static NetInfoStatus ValidateService(const CService& service); + static NetInfoStatus ValidateDomainPort(const DomainPort& domain); private: uint8_t m_version{CURRENT_VERSION}; diff --git a/src/test/evo_netinfo_tests.cpp b/src/test/evo_netinfo_tests.cpp index 509042f22dd1..3d79556e048a 100644 --- a/src/test/evo_netinfo_tests.cpp +++ b/src/test/evo_netinfo_tests.cpp @@ -40,8 +40,12 @@ static const std::vector addr_vals_main{ // - Non-IPv4 addresses are prohibited in MnNetInfo // - Any valid BIP155 address is allowed in ExtNetInfo {{NetInfoPurpose::CORE_P2P, 
"[2606:4700:4700::1111]:9999"}, NetInfoStatus::BadInput, NetInfoStatus::Success}, - // Domains are not allowed + // Domains are not allowed for Core P2P or Platform P2P {{NetInfoPurpose::CORE_P2P, "example.com:9999"}, NetInfoStatus::BadInput, NetInfoStatus::BadInput}, + {{NetInfoPurpose::PLATFORM_P2P, "example.com:9999"}, NetInfoStatus::MaxLimit, NetInfoStatus::BadInput}, + // - MnNetInfo doesn't allow storing anything except a Core P2P address + // - ExtNetInfo can store Platform HTTPS addresses *as domains* + {{NetInfoPurpose::PLATFORM_HTTPS, "example.com:9999"}, NetInfoStatus::MaxLimit, NetInfoStatus::Success}, // Incorrect IPv4 address {{NetInfoPurpose::CORE_P2P, "1.1.1.256:9999"}, NetInfoStatus::BadInput, NetInfoStatus::BadInput}, // Missing address @@ -182,6 +186,21 @@ BOOST_FIXTURE_TEST_CASE(extnetinfo_rules_reg, RegTestingSetup) BOOST_CHECK(!netInfo.HasEntries(NetInfoPurpose::PLATFORM_HTTPS)); ValidateGetEntries(netInfo.GetEntries(), /*expected_size=*/2); } + + { + // ExtNetInfo has additional rules for domains + const std::vector domain_vals{ + // Port 80 (HTTP) is below the privileged ports threshold (1023), not allowed + {{NetInfoPurpose::PLATFORM_HTTPS, "example.com:80"}, NetInfoStatus::MaxLimit, NetInfoStatus::BadPort}, + // Port 443 (HTTPS) is below the privileged ports threshold (1023) but still allowed + {{NetInfoPurpose::PLATFORM_HTTPS, "example.com:443"}, NetInfoStatus::MaxLimit, NetInfoStatus::Success}, + // TLDs must be alphabetic to avoid ambiguation with IP addresses (per ICANN guidelines) + {{NetInfoPurpose::PLATFORM_HTTPS, "example.123:443"}, NetInfoStatus::MaxLimit, NetInfoStatus::BadInput}, + // .local is a prohibited TLD + {{NetInfoPurpose::PLATFORM_HTTPS, "somebodys-macbook-pro.local:9998"}, NetInfoStatus::MaxLimit, NetInfoStatus::BadInput}, + }; + TestExtNetInfo(domain_vals); + } } BOOST_AUTO_TEST_CASE(netinfo_ser) @@ -399,11 +418,15 @@ BOOST_AUTO_TEST_CASE(domainport_rules) for (const auto& [addr, retval] : domain_vals) { DomainPort 
domain; - BOOST_CHECK_EQUAL(domain.Set(addr, 9999), retval); + ExtNetInfo netInfo; + BOOST_CHECK_EQUAL(domain.Set(addr, 443), retval); if (retval != DomainPort::Status::Success) { BOOST_CHECK_EQUAL(domain.Validate(), DomainPort::Status::Malformed); // Empty values report as Malformed + BOOST_CHECK_EQUAL(netInfo.AddEntry(NetInfoPurpose::PLATFORM_HTTPS, domain.ToStringAddrPort()), + NetInfoStatus::BadInput); } else { BOOST_CHECK_EQUAL(domain.Validate(), DomainPort::Status::Success); + BOOST_CHECK_EQUAL(netInfo.AddEntry(NetInfoPurpose::PLATFORM_HTTPS, domain.ToStringAddrPort()), NetInfoStatus::Success); } } From 69a92427744b18fb1b57120741e78bc740a13124 Mon Sep 17 00:00:00 2001 From: Kittywhiskers Van Gogh <63189531+kwvg@users.noreply.github.com> Date: Tue, 23 Sep 2025 15:00:41 +0000 Subject: [PATCH 048/656] evo: allow and recognize CJDNS addresses --- src/evo/netinfo.cpp | 7 ++++--- src/test/evo_netinfo_tests.cpp | 27 +++++++++++++++++++++++++++ 2 files changed, 31 insertions(+), 3 deletions(-) diff --git a/src/evo/netinfo.cpp b/src/evo/netinfo.cpp index 17279d0448b9..0e44affa85df 100644 --- a/src/evo/netinfo.cpp +++ b/src/evo/netinfo.cpp @@ -449,7 +449,7 @@ NetInfoStatus ExtNetInfo::ValidateService(const CService& service) if (!service.IsValid()) { return NetInfoStatus::BadAddress; } - if (!service.IsIPv4() && !service.IsIPv6()) { + if (!service.IsCJDNS() && !service.IsIPv4() && !service.IsIPv6()) { return NetInfoStatus::BadType; } if (Params().RequireRoutableExternalIP() && !service.IsRoutable()) { @@ -513,9 +513,10 @@ NetInfoStatus ExtNetInfo::AddEntry(const NetInfoPurpose purpose, const std::stri // IP:port safe, try to parse it as IP:port if (auto service_opt{Lookup(addr, /*portDefault=*/port, /*fAllowLookup=*/false)}) { - const auto ret{ValidateService(*service_opt)}; + const auto service{MaybeFlipIPv6toCJDNS(*service_opt)}; + const auto ret{ValidateService(service)}; if (ret == NetInfoStatus::Success) { - return ProcessCandidate(purpose, 
NetInfoEntry{*service_opt}); + return ProcessCandidate(purpose, NetInfoEntry{service}); } return ret; /* ValidateService() failed */ } diff --git a/src/test/evo_netinfo_tests.cpp b/src/test/evo_netinfo_tests.cpp index 3d79556e048a..f334b83aa240 100644 --- a/src/test/evo_netinfo_tests.cpp +++ b/src/test/evo_netinfo_tests.cpp @@ -141,6 +141,14 @@ static const std::vector addr_vals_reg{ {{NetInfoPurpose::CORE_P2P, "1.1.1.1:22"}, NetInfoStatus::Success, NetInfoStatus::BadPort}, }; +enum class ExpectedType : uint8_t { + CJDNS, +}; + +static const std::vector> privacy_addr_vals{ + {ExpectedType::CJDNS, "[fc00:3344:5566:7788:9900:aabb:ccdd:eeff]:9998", NetInfoStatus::Success}, +}; + BOOST_FIXTURE_TEST_CASE(mnnetinfo_rules_reg, RegTestingSetup) { TestMnNetInfo(addr_vals_reg); } BOOST_FIXTURE_TEST_CASE(extnetinfo_rules_reg, RegTestingSetup) @@ -201,6 +209,25 @@ BOOST_FIXTURE_TEST_CASE(extnetinfo_rules_reg, RegTestingSetup) }; TestExtNetInfo(domain_vals); } + + // Privacy network entry checks + for (const auto& [type, input, expected_ret] : privacy_addr_vals) { + const bool expected_success{expected_ret == NetInfoStatus::Success}; + + ExtNetInfo netInfo{}; + BOOST_CHECK_EQUAL(netInfo.AddEntry(NetInfoPurpose::CORE_P2P, input), expected_ret); + ValidateGetEntries(netInfo.GetEntries(), /*expected_size=*/expected_success ? 
1 : 0); + if (!expected_success) continue; + + // Type registration check + const CService service{netInfo.GetEntries().at(0).GetAddrPort().value()}; + BOOST_CHECK(service.IsValid()); + switch (type) { + case ExpectedType::CJDNS: + BOOST_CHECK(service.IsCJDNS()); + break; + } // no default case, so the compiler can warn about missing cases + } } BOOST_AUTO_TEST_CASE(netinfo_ser) From 7b0de72e8e88cf5bee5c9c21eaa81af1c5f451c5 Mon Sep 17 00:00:00 2001 From: Kittywhiskers Van Gogh <63189531+kwvg@users.noreply.github.com> Date: Sun, 10 Aug 2025 19:29:23 +0000 Subject: [PATCH 049/656] evo: recognize and store I2P and onion domains in `ExtNetInfo` --- src/evo/deterministicmns.h | 12 +++++++++++- src/evo/netinfo.cpp | 35 +++++++++++++++++++++++++++------- src/test/evo_netinfo_tests.cpp | 26 +++++++++++++++++++++++-- 3 files changed, 63 insertions(+), 10 deletions(-) diff --git a/src/evo/deterministicmns.h b/src/evo/deterministicmns.h index 12643e5f1f1a..0353c46634fc 100644 --- a/src/evo/deterministicmns.h +++ b/src/evo/deterministicmns.h @@ -442,7 +442,17 @@ class CDeterministicMNList DMNL_NO_TEMPLATE(NetInfoInterface); DMNL_NO_TEMPLATE(std::shared_ptr); #undef DMNL_NO_TEMPLATE - return ::SerializeHash(v); + int ser_version{PROTOCOL_VERSION}; + if constexpr (std::is_same_v, CService>) { + // Special handling is required if we're using addresses that can only be (de)serialized using + // ADDRv2. Without this step, the address gets truncated, the hashmap gets contaminated with + // an invalid entry and subsequent attempts at registering ADDRv2 entries get blocked. We cannot + // apply this treatment ADDRv1 compatible addresses for backwards compatibility with the existing map. 
+ if (!v.IsAddrV1Compatible()) { + ser_version |= ADDRV2_FORMAT; + } + } + return ::SerializeHash(v, /*nType=*/SER_GETHASH, /*nVersion=*/ser_version); } template [[nodiscard]] bool AddUniqueProperty(const CDeterministicMN& dmn, const T& v) diff --git a/src/evo/netinfo.cpp b/src/evo/netinfo.cpp index 0e44affa85df..3a8a920d46eb 100644 --- a/src/evo/netinfo.cpp +++ b/src/evo/netinfo.cpp @@ -41,6 +41,7 @@ static constexpr std::array TLDS_BAD{ // RFC 6762, Appendix G ".corp", ".home", ".internal", ".intranet", ".lan", ".private", }; +static constexpr std::array TLDS_PRIVACY{".i2p", ".onion"}; bool MatchCharsFilter(std::string_view input, std::string_view filter) { @@ -449,14 +450,22 @@ NetInfoStatus ExtNetInfo::ValidateService(const CService& service) if (!service.IsValid()) { return NetInfoStatus::BadAddress; } - if (!service.IsCJDNS() && !service.IsIPv4() && !service.IsIPv6()) { + if (!service.IsCJDNS() && !service.IsI2P() && !service.IsIPv4() && !service.IsIPv6() && !service.IsTor()) { return NetInfoStatus::BadType; } if (Params().RequireRoutableExternalIP() && !service.IsRoutable()) { return NetInfoStatus::NotRoutable; } - if (IsBadPort(service.GetPort()) || service.GetPort() == 0) { - return NetInfoStatus::BadPort; + const uint16_t service_port{service.GetPort()}; + if (service.IsI2P()) { + if (service_port != I2P_SAM31_PORT) { + // I2P SAM 3.1 and earlier don't support arbitrary ports + return NetInfoStatus::BadPort; + } + } else { + if (service_port == 0 || IsBadPort(service_port)) { + return NetInfoStatus::BadPort; + } } return NetInfoStatus::Success; @@ -472,7 +481,7 @@ NetInfoStatus ExtNetInfo::ValidateDomainPort(const DomainPort& domain) return NetInfoStatus::BadPort; } const std::string& addr{domain.ToStringAddr()}; - if (MatchSuffix(addr, TLDS_BAD)) { + if (MatchSuffix(addr, TLDS_BAD) || MatchSuffix(addr, TLDS_PRIVACY)) { return NetInfoStatus::BadInput; } if (const auto labels{SplitString(addr, '.')}; !MatchCharsFilter(labels.at(labels.size() - 1), 
SAFE_CHARS_ALPHA)) { @@ -500,15 +509,27 @@ NetInfoStatus ExtNetInfo::AddEntry(const NetInfoPurpose purpose, const std::stri return NetInfoStatus::BadInput; } - // Not IP:port safe but domain safe, treat as domain. - if (DomainPort domain; domain.Set(addr, port) == DomainPort::Status::Success) { + // Not IP:port safe but domain safe + if (MatchSuffix(addr, TLDS_PRIVACY)) { + // Special domain, try storing it as CService + CNetAddr netaddr; + if (netaddr.SetSpecial(addr)) { + const CService service{netaddr, port}; + const auto ret{ValidateService(service)}; + if (ret == NetInfoStatus::Success) { + return ProcessCandidate(purpose, NetInfoEntry{service}); + } + return ret; /* ValidateService() failed */ + } + } else if (DomainPort domain; domain.Set(addr, port) == DomainPort::Status::Success) { + // Regular domain const auto ret{ValidateDomainPort(domain)}; if (ret == NetInfoStatus::Success) { return ProcessCandidate(purpose, NetInfoEntry{domain}); } return ret; /* ValidateDomainPort() failed */ } - return NetInfoStatus::BadInput; /* DomainPort::Set() failed */ + return NetInfoStatus::BadInput; /* CNetAddr::SetSpecial() or DomainPort::Set() failed */ } // IP:port safe, try to parse it as IP:port diff --git a/src/test/evo_netinfo_tests.cpp b/src/test/evo_netinfo_tests.cpp index f334b83aa240..a1df594c820b 100644 --- a/src/test/evo_netinfo_tests.cpp +++ b/src/test/evo_netinfo_tests.cpp @@ -40,12 +40,16 @@ static const std::vector addr_vals_main{ // - Non-IPv4 addresses are prohibited in MnNetInfo // - Any valid BIP155 address is allowed in ExtNetInfo {{NetInfoPurpose::CORE_P2P, "[2606:4700:4700::1111]:9999"}, NetInfoStatus::BadInput, NetInfoStatus::Success}, - // Domains are not allowed for Core P2P or Platform P2P + // - MnNetInfo doesn't allow storing anything except a Core P2P address + // - Privacy network domains are allowed in ExtNetInfo but internet domains are not {{NetInfoPurpose::CORE_P2P, "example.com:9999"}, NetInfoStatus::BadInput, NetInfoStatus::BadInput}, + 
{{NetInfoPurpose::CORE_P2P, "pg6mmjiyjmcrsslvykfwnntlaru7p5svn6y2ymmju6nubxndf4pscryd.onion:9999"}, NetInfoStatus::BadInput, NetInfoStatus::Success}, {{NetInfoPurpose::PLATFORM_P2P, "example.com:9999"}, NetInfoStatus::MaxLimit, NetInfoStatus::BadInput}, + {{NetInfoPurpose::PLATFORM_P2P, "pg6mmjiyjmcrsslvykfwnntlaru7p5svn6y2ymmju6nubxndf4pscryd.onion:9999"}, NetInfoStatus::MaxLimit, NetInfoStatus::Success}, // - MnNetInfo doesn't allow storing anything except a Core P2P address - // - ExtNetInfo can store Platform HTTPS addresses *as domains* + // - ExtNetInfo can store Platform HTTPS addresses *as domains* alongside privacy network domains {{NetInfoPurpose::PLATFORM_HTTPS, "example.com:9999"}, NetInfoStatus::MaxLimit, NetInfoStatus::Success}, + {{NetInfoPurpose::PLATFORM_HTTPS, "pg6mmjiyjmcrsslvykfwnntlaru7p5svn6y2ymmju6nubxndf4pscryd.onion:9999"}, NetInfoStatus::MaxLimit, NetInfoStatus::Success}, // Incorrect IPv4 address {{NetInfoPurpose::CORE_P2P, "1.1.1.256:9999"}, NetInfoStatus::BadInput, NetInfoStatus::BadInput}, // Missing address @@ -143,10 +147,20 @@ static const std::vector addr_vals_reg{ enum class ExpectedType : uint8_t { CJDNS, + I2P, + Tor, }; static const std::vector> privacy_addr_vals{ {ExpectedType::CJDNS, "[fc00:3344:5566:7788:9900:aabb:ccdd:eeff]:9998", NetInfoStatus::Success}, + // ExtNetInfo can store I2P addresses as long as it uses port 0 + {ExpectedType::I2P, "udhdrtrcetjm5sxzskjyr5ztpeszydbh4dpl3pl4utgqqw2v4jna.b32.i2p:0", NetInfoStatus::Success}, + // ExtNetInfo can store onion addresses + {ExpectedType::Tor, "pg6mmjiyjmcrsslvykfwnntlaru7p5svn6y2ymmju6nubxndf4pscryd.onion:9998", NetInfoStatus::Success}, + // ExtNetInfo can store I2P addresses but non-zero ports are not allowed + {ExpectedType::I2P, "udhdrtrcetjm5sxzskjyr5ztpeszydbh4dpl3pl4utgqqw2v4jna.b32.i2p:9998", NetInfoStatus::BadPort}, + // ExtNetInfo can store onion addresses but zero ports are not allowed + {ExpectedType::Tor, 
"pg6mmjiyjmcrsslvykfwnntlaru7p5svn6y2ymmju6nubxndf4pscryd.onion:0", NetInfoStatus::BadPort}, }; BOOST_FIXTURE_TEST_CASE(mnnetinfo_rules_reg, RegTestingSetup) { TestMnNetInfo(addr_vals_reg); } @@ -206,6 +220,8 @@ BOOST_FIXTURE_TEST_CASE(extnetinfo_rules_reg, RegTestingSetup) {{NetInfoPurpose::PLATFORM_HTTPS, "example.123:443"}, NetInfoStatus::MaxLimit, NetInfoStatus::BadInput}, // .local is a prohibited TLD {{NetInfoPurpose::PLATFORM_HTTPS, "somebodys-macbook-pro.local:9998"}, NetInfoStatus::MaxLimit, NetInfoStatus::BadInput}, + // DomainPort isn't used for storing privacy network TLDs like .onion + {{NetInfoPurpose::PLATFORM_HTTPS, "pg6mmjiyjmcrsslvykfwnntlaru7p5svn6y2ymmju6nubxndf4pscryd:9998"}, NetInfoStatus::MaxLimit, NetInfoStatus::BadInput}, }; TestExtNetInfo(domain_vals); } @@ -226,6 +242,12 @@ BOOST_FIXTURE_TEST_CASE(extnetinfo_rules_reg, RegTestingSetup) case ExpectedType::CJDNS: BOOST_CHECK(service.IsCJDNS()); break; + case ExpectedType::I2P: + BOOST_CHECK(service.IsI2P()); + break; + case ExpectedType::Tor: + BOOST_CHECK(service.IsTor()); + break; } // no default case, so the compiler can warn about missing cases } } From 2ba86f91c56ba69bf218c37fc9d2ae8fa9f8f80e Mon Sep 17 00:00:00 2001 From: Kittywhiskers Van Gogh <63189531+kwvg@users.noreply.github.com> Date: Sun, 10 Aug 2025 13:09:42 +0000 Subject: [PATCH 050/656] test: use domains in functional test, add test for unique map checking --- test/functional/rpc_netinfo.py | 65 ++++++++++++++++++++++++++++++++++ 1 file changed, 65 insertions(+) diff --git a/test/functional/rpc_netinfo.py b/test/functional/rpc_netinfo.py index 5852fa4e40cc..8fb9cb2e6448 100755 --- a/test/functional/rpc_netinfo.py +++ b/test/functional/rpc_netinfo.py @@ -35,6 +35,21 @@ PROTXVER_BASIC = 2 PROTXVER_EXTADDR = 3 +# Sample domains +DOMAINS_CLR = [ + "server-1.example.com", + "server-2.example.com", +] +DOMAINS_TOR = [ + "kpgvmscirrdqpekbqjsvw5teanhatztpp2gl6eee4zkowvwfxwenqaid.onion", + 
"pg6mmjiyjmcrsslvykfwnntlaru7p5svn6y2ymmju6nubxndf4pscryd.onion", +] +DOMAINS_I2P = [ + "c4gfnttsuwqomiygupdqqqyy5y5emnk5c73hrfvatri67prd7vyq.b32.i2p", + "udhdrtrcetjm5sxzskjyr5ztpeszydbh4dpl3pl4utgqqw2v4jna.b32.i2p", + "ukeu3k5oycgaauneqgtnvselmt4yemvoilkln7jpvamvfx7dnkdq.b32.i2p", +] + class EvoNode: mn: MasternodeInfo node: TestNode @@ -191,6 +206,12 @@ def run_test(self): self.test_empty_fields() self.log.info("Test output masternode address fields for consistency (post-fork)") self.test_shims() + # Need to destroy masternodes as the next test will be re-creating them + self.node_evo.destroy_mn(self) + self.node_two.destroy_mn(self) + self.reconnect_nodes() + self.log.info("Test unique properties map duplication checks") + self.test_uniqueness() def test_validation_common(self): # Arrays of addresses with invalid inputs get refused @@ -326,6 +347,15 @@ def test_validation_extended(self): self.node_evo.register_mn(self, False, f"127.0.0.1:{self.node_evo.mn.nodePort}", DEFAULT_PORT_PLATFORM_P2P, [f"127.0.0.1:{DEFAULT_PORT_PLATFORM_HTTP}"])])[0]['allowed'] + # coreP2PAddrs and platformP2PAddrs accept privacy network domains and platformHTTPSAddrs additionally supports internet domains + # Note: I2P entries cannot be differentiated by port, they must always use port 0 + assert self.node_evo.node.testmempoolaccept([ + self.node_evo.register_mn(self, False, + [f"127.0.0.1:{self.node_evo.mn.nodePort}", f"{DOMAINS_TOR[0]}:{self.node_evo.mn.nodePort}", f"{DOMAINS_I2P[0]}:0"], + [f"127.0.0.1:{DEFAULT_PORT_PLATFORM_P2P}", f"{DOMAINS_TOR[0]}:{DEFAULT_PORT_PLATFORM_P2P}", f"{DOMAINS_I2P[1]}:0"], + [f"127.0.0.1:{DEFAULT_PORT_PLATFORM_HTTP}", f"{DOMAINS_TOR[0]}:{DEFAULT_PORT_PLATFORM_HTTP}", f"{DOMAINS_I2P[2]}:0", + f"{DOMAINS_CLR[0]}:{DEFAULT_PORT_PLATFORM_HTTP}"] )])[0]['allowed'] + # Port numbers may not be wrapped in arrays, either as integers or strings self.node_evo.register_mn(self, False, f"127.0.0.1:{self.node_evo.mn.nodePort}", [DEFAULT_PORT_PLATFORM_P2P], 
DEFAULT_PORT_PLATFORM_HTTP, -8, "Invalid param for platformP2PAddrs[0], must be string") @@ -511,5 +541,40 @@ def test_shims(self): self.node_evo.set_active_state(self, False) self.reconnect_nodes() + def test_uniqueness(self): + # Empty registrations are not registered as conflicts + self.node_evo.register_mn(self, True, "", "", "") + self.node_two.register_mn(self, True, "", "", "") + + # Validate that the unique properties map correctly recognizes entries as duplicates + self.node_evo.update_mn(self, True, + [f"127.0.0.1:{self.node_evo.mn.nodePort}", f"{DOMAINS_TOR[0]}:{self.node_evo.mn.nodePort}"], + [f"127.0.0.1:{DEFAULT_PORT_PLATFORM_P2P}", f"{DOMAINS_I2P[0]}:0"], + [f"127.0.0.1:{DEFAULT_PORT_PLATFORM_HTTP}", f"{DOMAINS_CLR[0]}:{DEFAULT_PORT_PLATFORM_HTTP}"]) + + def update_node_two(self, duplicate_addr = None, duplicate_tor = None, duplicate_i2p = None, duplicate_domain = None): + args = [ + self, True, + [duplicate_addr or f"127.0.0.2:{self.node_two.mn.nodePort}", duplicate_tor or f"{DOMAINS_TOR[1]}:{self.node_two.mn.nodePort}"], + [f"127.0.0.2:{DEFAULT_PORT_PLATFORM_P2P}", duplicate_i2p or f"{DOMAINS_I2P[1]}:0"], + [f"127.0.0.2:{DEFAULT_PORT_PLATFORM_HTTP}", duplicate_domain or f"{DOMAINS_CLR[1]}:{DEFAULT_PORT_PLATFORM_HTTP}"] + ] + if duplicate_addr or duplicate_tor or duplicate_i2p or duplicate_domain: + args += [-1, "bad-protx-dup-netinfo-entry"] + self.node_two.update_mn(*args) + + # Check for detection of duplicate IP:addr (CService) + update_node_two(self, duplicate_addr=f"127.0.0.1:{self.node_evo.mn.nodePort}") + + # Check for detection of duplicate privacy addr (CService) + update_node_two(self, duplicate_tor=f"{DOMAINS_TOR[0]}:{self.node_evo.mn.nodePort}") + update_node_two(self, duplicate_i2p=f"{DOMAINS_I2P[0]}:0") + + # Check for detection of duplicate internet addr (DomainPort) + update_node_two(self, duplicate_domain=f"{DOMAINS_CLR[0]}:{DEFAULT_PORT_PLATFORM_HTTP}") + + # All non-duplicate entries should still succeed + update_node_two(self) + 
if __name__ == "__main__": NetInfoTest().main() From 739d9f1cd92f6be50998593f0c53bfaea7f583f8 Mon Sep 17 00:00:00 2001 From: pasta Date: Thu, 2 Oct 2025 19:52:31 -0500 Subject: [PATCH 051/656] refactor: allow wallet to select fresh change address to prevent address reuse in coinjoin createdenoms --- src/coinjoin/util.cpp | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/coinjoin/util.cpp b/src/coinjoin/util.cpp index dd4c27281217..1d6a503f71b3 100644 --- a/src/coinjoin/util.cpp +++ b/src/coinjoin/util.cpp @@ -125,8 +125,8 @@ CTransactionBuilder::CTransactionBuilder(CWallet& wallet, const CompactTallyItem coinControl.m_discard_feerate = ::GetDiscardRate(m_wallet); // Generate a feerate which will be used by calculations of this class and also by CWallet::CreateTransaction coinControl.m_feerate = std::max(GetRequiredFeeRate(m_wallet), m_wallet.m_pay_tx_fee); - // Change always goes back to origin - coinControl.destChange = tallyItemIn.txdest; + // Do not force change to go back to the origin address; let the wallet + // select a fresh change destination to avoid address reuse. // Only allow tallyItems inputs for tx creation coinControl.m_allow_other_inputs = false; // Create dummy tx to calculate the exact required fees upfront for accurate amount and fee calculations From 6ca08f6846fa4ed920dbc39e07a74848bbd8da1e Mon Sep 17 00:00:00 2001 From: pasta Date: Thu, 2 Oct 2025 21:30:14 -0500 Subject: [PATCH 052/656] docs: add release notes for CoinJoin change address improvement in wallet --- doc/release-notes-6870.md | 8 ++++++++ 1 file changed, 8 insertions(+) create mode 100644 doc/release-notes-6870.md diff --git a/doc/release-notes-6870.md b/doc/release-notes-6870.md new file mode 100644 index 000000000000..8d2fa9609e69 --- /dev/null +++ b/doc/release-notes-6870.md @@ -0,0 +1,8 @@ +Wallet +------ + +- CoinJoin denomination creation now uses a fresh change address instead of + reusing the source address. 
This avoids address/public key reuse and aligns + with wallet best practices. (#6870) + + From d6e9d97da995c74b85597ef976df83857a6ca125 Mon Sep 17 00:00:00 2001 From: fanquake Date: Fri, 21 Apr 2023 11:40:09 +0100 Subject: [PATCH 053/656] Merge bitcoin/bitcoin#27464: fuzz: re-enable prioritisetransaction & analyzepsbt RPC faa7144d3cf41e6410d942a3c485982ee65b3c6e fuzz: re-enable prioritisetransaction & analyzepsbt RPC (MarcoFalke) Pull request description: The linked issue seems fixed, so it should be fine to re-enable ACKs for top commit: dergoegge: utACK faa7144d3cf41e6410d942a3c485982ee65b3c6e Tree-SHA512: a681c726fceacc27ab5a03d455c7808d33f3cb11fe7d253d455526568af840b29f0c3c1d97c54785ef9277e7891a3aa742ac73ccd3cf115b7606eba50864aaa9 --- src/test/fuzz/rpc.cpp | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/src/test/fuzz/rpc.cpp b/src/test/fuzz/rpc.cpp index 892823dea067..c959523a1681 100644 --- a/src/test/fuzz/rpc.cpp +++ b/src/test/fuzz/rpc.cpp @@ -70,7 +70,6 @@ const std::vector RPC_COMMANDS_NOT_SAFE_FOR_FUZZING{ "addconnection", // avoid DNS lookups "addnode", // avoid DNS lookups "addpeeraddress", // avoid DNS lookups - "analyzepsbt", // avoid signed integer overflow in CFeeRate::GetFee(unsigned long) (https://github.com/bitcoin/bitcoin/issues/20607) "dumptxoutset", // avoid writing to disk "dumpwallet", // avoid writing to disk "echoipc", // avoid assertion failure (Assertion `"EnsureAnyNodeContext(request.context).init" && check' failed.) 
@@ -79,7 +78,6 @@ const std::vector RPC_COMMANDS_NOT_SAFE_FOR_FUZZING{ "gettxoutproof", // avoid prohibitively slow execution "importwallet", // avoid reading from disk "loadwallet", // avoid reading from disk - "prioritisetransaction", // avoid signed integer overflow in CTxMemPool::PrioritiseTransaction(uint256 const&, long const&) (https://github.com/bitcoin/bitcoin/issues/20626) "savemempool", // disabled as a precautionary measure: may take a file path argument in the future "setban", // avoid DNS lookups "stop", // avoid shutdown state @@ -87,6 +85,7 @@ const std::vector RPC_COMMANDS_NOT_SAFE_FOR_FUZZING{ // RPC commands which are safe for fuzzing. const std::vector RPC_COMMANDS_SAFE_FOR_FUZZING{ + "analyzepsbt", "clearbanned", "combinepsbt", "combinerawtransaction", @@ -138,6 +137,7 @@ const std::vector RPC_COMMANDS_SAFE_FOR_FUZZING{ "getrpcinfo", "gettxout", "gettxoutsetinfo", + "gettxspendingprevout", "help", "invalidateblock", "joinpsbts", @@ -146,6 +146,7 @@ const std::vector RPC_COMMANDS_SAFE_FOR_FUZZING{ "mockscheduler", "ping", "preciousblock", + "prioritisetransaction", "pruneblockchain", "reconsiderblock", "scantxoutset", From df3de12af424ed57c04d22be400de69852cab036 Mon Sep 17 00:00:00 2001 From: Andrew Chow Date: Tue, 21 Mar 2023 13:42:14 -0400 Subject: [PATCH 054/656] Merge bitcoin/bitcoin#27278: Log new headers 2c3a90f663a61ee147d785167a2902494d81d34d log: on new valid header (James O'Beirne) e5ce8576349d404c466b2f4cab1ca7bf920904b2 log: net: new header over cmpctblock (James O'Beirne) Pull request description: Alternate to #27276. Devs were [suprised to realize](https://twitter.com/jamesob/status/1637237917201383425) last night that we don't have definitive logging for when a given header was first received. This logs to the main stream when new headers are received outside of IBD, as well as when headers come in over cmpctblocks. 
The rationale of not hiding these under log categories is that they may be useful to have widely available when debugging strange network activity, and the marginal volume is modest. ACKs for top commit: dergoegge: Code review ACK 2c3a90f663a61ee147d785167a2902494d81d34d achow101: ACK 2c3a90f663a61ee147d785167a2902494d81d34d Sjors: tACK 2c3a90f663a61ee147d785167a2902494d81d34d josibake: ACK https://github.com/bitcoin/bitcoin/pull/27278/commits/2c3a90f663a61ee147d785167a2902494d81d34d Tree-SHA512: 49fdcbe07799c8adc24143d7e5054a0c93fef120d2e9d5fddbd3b119550d895e2985be6ac10dd1825ea23a6fa5479c1b76d5518c136fbd983fa76c0d39dc354f --- src/net_processing.cpp | 16 +++++++++++----- src/validation.cpp | 18 +++++++++++++++++- 2 files changed, 28 insertions(+), 6 deletions(-) diff --git a/src/net_processing.cpp b/src/net_processing.cpp index 3840ed5e5748..5605f533045e 100644 --- a/src/net_processing.cpp +++ b/src/net_processing.cpp @@ -4610,6 +4610,7 @@ void PeerManagerImpl::ProcessMessage( vRecv >> cmpctblock; bool received_new_header = false; + const auto blockhash = cmpctblock.header.GetHash(); { LOCK(cs_main); @@ -4623,7 +4624,7 @@ void PeerManagerImpl::ProcessMessage( return; } - if (!m_chainman.m_blockman.LookupBlockIndex(cmpctblock.header.GetHash())) { + if (!m_chainman.m_blockman.LookupBlockIndex(blockhash)) { received_new_header = true; } } @@ -4637,6 +4638,11 @@ void PeerManagerImpl::ProcessMessage( } } + if (received_new_header) { + LogPrintfCategory(BCLog::NET, "Saw new cmpctblock header hash=%s peer=%d\n", + blockhash.ToString(), pfrom.GetId()); + } + // When we succeed in decoding a block's txids from a cmpctblock // message we typically jump to the BLOCKTXN handling code, with a // dummy (empty) BLOCKTXN message, to re-use the logic there in @@ -4679,7 +4685,7 @@ void PeerManagerImpl::ProcessMessage( // We requested this block for some reason, but our mempool will probably be useless // so we just grab the block via normal getdata std::vector vInv(1); - vInv[0] = 
CInv(MSG_BLOCK, cmpctblock.header.GetHash()); + vInv[0] = CInv(MSG_BLOCK, blockhash); m_connman.PushMessage(&pfrom, msgMaker.Make(NetMsgType::GETDATA, vInv)); } return; @@ -4714,7 +4720,7 @@ void PeerManagerImpl::ProcessMessage( } else if (status == READ_STATUS_FAILED) { // Duplicate txindexes, the block is now in-flight, so just request it std::vector vInv(1); - vInv[0] = CInv(MSG_BLOCK, cmpctblock.header.GetHash()); + vInv[0] = CInv(MSG_BLOCK, blockhash); m_connman.PushMessage(&pfrom, msgMaker.Make(NetMsgType::GETDATA, vInv)); return; } @@ -4727,7 +4733,7 @@ void PeerManagerImpl::ProcessMessage( if (req.indexes.empty()) { // Dirty hack to jump to BLOCKTXN code (TODO: move message handling into their own functions) BlockTransactions txn; - txn.blockhash = cmpctblock.header.GetHash(); + txn.blockhash = blockhash; blockTxnMsg << txn; fProcessBLOCKTXN = true; } else { @@ -4757,7 +4763,7 @@ void PeerManagerImpl::ProcessMessage( // We requested this block, but its far into the future, so our // mempool will probably be useless - request the block normally std::vector vInv(1); - vInv[0] = CInv(MSG_BLOCK, cmpctblock.header.GetHash()); + vInv[0] = CInv(MSG_BLOCK, blockhash); m_connman.PushMessage(&pfrom, msgMaker.Make(NetMsgType::GETDATA, vInv)); return; } else { diff --git a/src/validation.cpp b/src/validation.cpp index 47f22053b226..ffc92634ad07 100644 --- a/src/validation.cpp +++ b/src/validation.cpp @@ -4217,9 +4217,25 @@ bool ChainstateManager::AcceptBlockHeader(const CBlockHeader& block, BlockValida if (ppindex) *ppindex = pindex; + // Since this is the earliest point at which we have determined that a + // header is both new and valid, log here. + // + // These messages are valuable for detecting potential selfish mining behavior; + // if multiple displacing headers are seen near simultaneously across many + // nodes in the network, this might be an indication of selfish mining. 
Having + // this log by default when not in IBD ensures broad availability of this data + // in case investigation is merited. + const auto msg = strprintf( + "Saw new header hash=%s height=%d", hash.ToString(), pindex->nHeight); + + if (ActiveChainstate().IsInitialBlockDownload()) { + LogPrintLevel(BCLog::VALIDATION, BCLog::Level::Debug, "%s\n", msg); + } else { + LogPrintf("%s\n", msg); + } + // Notify external listeners about accepted block header GetMainSignals().AcceptedBlockHeader(pindex); - return true; } From d4044d45c76e968b398b8339c35e1091426de30d Mon Sep 17 00:00:00 2001 From: pasta Date: Fri, 3 Oct 2025 09:29:54 -0500 Subject: [PATCH 055/656] feat: introduce -coinjoinfreshchange option to enhance CoinJoin change address handling Added a new command-line option `-coinjoinfreshchange` to allow users to control whether change from CoinJoin denomination creation is sent to a fresh change address or back to the source address. This change improves privacy by preventing address reuse. Updated relevant documentation and code to support this feature. --- doc/release-notes-6870.md | 7 ++++--- src/coinjoin/options.cpp | 8 ++++++++ src/coinjoin/options.h | 4 ++++ src/coinjoin/util.cpp | 9 +++++++-- src/wallet/init.cpp | 1 + 5 files changed, 24 insertions(+), 5 deletions(-) diff --git a/doc/release-notes-6870.md b/doc/release-notes-6870.md index 8d2fa9609e69..c8225e49fefb 100644 --- a/doc/release-notes-6870.md +++ b/doc/release-notes-6870.md @@ -1,8 +1,9 @@ Wallet ------ -- CoinJoin denomination creation now uses a fresh change address instead of - reusing the source address. This avoids address/public key reuse and aligns - with wallet best practices. (#6870) +- Add `-coinjoinfreshchange` option to control change destination behavior + during CoinJoin denomination creation. By default (flag unset), change is + sent back to the source address (legacy behavior). When enabled, change is + sent to a fresh change address to avoid address/public key reuse. 
(#6870) diff --git a/src/coinjoin/options.cpp b/src/coinjoin/options.cpp index f6e3e133d0e1..6b5e0acf33d4 100644 --- a/src/coinjoin/options.cpp +++ b/src/coinjoin/options.cpp @@ -61,11 +61,18 @@ void CCoinJoinClientOptions::SetDenomsHardCap(int denoms_hardcap) options.nCoinJoinDenomsHardCap = denoms_hardcap; } +void CCoinJoinClientOptions::SetFreshChange(bool fresh_change) +{ + CCoinJoinClientOptions& options = CCoinJoinClientOptions::Get(); + options.fCoinJoinFreshChange = fresh_change; +} + void CCoinJoinClientOptions::Init() { assert(!CCoinJoinClientOptions::_instance); static CCoinJoinClientOptions instance; instance.fCoinJoinMultiSession = gArgs.GetBoolArg("-coinjoinmultisession", DEFAULT_COINJOIN_MULTISESSION); + instance.fCoinJoinFreshChange = gArgs.GetBoolArg("-coinjoinfreshchange", DEFAULT_COINJOIN_FRESHCHANGE); instance.nCoinJoinSessions = std::min(std::max((int)gArgs.GetIntArg("-coinjoinsessions", DEFAULT_COINJOIN_SESSIONS), MIN_COINJOIN_SESSIONS), MAX_COINJOIN_SESSIONS); instance.nCoinJoinRounds = std::min(std::max((int)gArgs.GetIntArg("-coinjoinrounds", DEFAULT_COINJOIN_ROUNDS), MIN_COINJOIN_ROUNDS), MAX_COINJOIN_ROUNDS); instance.nCoinJoinAmount = std::min(std::max((int)gArgs.GetIntArg("-coinjoinamount", DEFAULT_COINJOIN_AMOUNT), MIN_COINJOIN_AMOUNT), MAX_COINJOIN_AMOUNT); @@ -85,4 +92,5 @@ void CCoinJoinClientOptions::GetJsonInfo(UniValue& obj) obj.pushKV("max_amount", options.nCoinJoinAmount.load()); obj.pushKV("denoms_goal", options.nCoinJoinDenomsGoal.load()); obj.pushKV("denoms_hardcap", options.nCoinJoinDenomsHardCap.load()); + obj.pushKV("fresh_change", options.fCoinJoinFreshChange.load()); } diff --git a/src/coinjoin/options.h b/src/coinjoin/options.h index b5e8c8a23f5a..809f192a0d50 100644 --- a/src/coinjoin/options.h +++ b/src/coinjoin/options.h @@ -31,6 +31,7 @@ static constexpr int DEFAULT_COINJOIN_DENOMS_HARDCAP = 300; static constexpr bool DEFAULT_COINJOIN_AUTOSTART = false; static constexpr bool DEFAULT_COINJOIN_MULTISESSION = false; 
+static constexpr bool DEFAULT_COINJOIN_FRESHCHANGE = false; // How many new denom outputs to create before we consider the "goal" loop in CreateDenominated // a final one and start creating an actual tx. Same limit applies for the "hard cap" part of the algo. @@ -60,6 +61,7 @@ class CCoinJoinClientOptions static int GetAmount() { return CCoinJoinClientOptions::Get().nCoinJoinAmount; } static int GetDenomsGoal() { return CCoinJoinClientOptions::Get().nCoinJoinDenomsGoal; } static int GetDenomsHardCap() { return CCoinJoinClientOptions::Get().nCoinJoinDenomsHardCap; } + static bool GetFreshChange() { return CCoinJoinClientOptions::Get().fCoinJoinFreshChange; } static void SetEnabled(bool fEnabled); static void SetMultiSessionEnabled(bool fEnabled); @@ -68,6 +70,7 @@ class CCoinJoinClientOptions static void SetAmount(CAmount amount); static void SetDenomsGoal(int denoms_goal); static void SetDenomsHardCap(int denoms_hardcap); + static void SetFreshChange(bool fresh_change); static bool IsEnabled() { return CCoinJoinClientOptions::Get().fEnableCoinJoin; } static bool IsMultiSessionEnabled() { return CCoinJoinClientOptions::Get().fCoinJoinMultiSession; } @@ -86,6 +89,7 @@ class CCoinJoinClientOptions std::atomic nCoinJoinDenomsHardCap{DEFAULT_COINJOIN_DENOMS_HARDCAP}; std::atomic fEnableCoinJoin{false}; std::atomic fCoinJoinMultiSession{DEFAULT_COINJOIN_MULTISESSION}; + std::atomic fCoinJoinFreshChange{DEFAULT_COINJOIN_FRESHCHANGE}; CCoinJoinClientOptions() = default; diff --git a/src/coinjoin/util.cpp b/src/coinjoin/util.cpp index 1d6a503f71b3..2858cef6a694 100644 --- a/src/coinjoin/util.cpp +++ b/src/coinjoin/util.cpp @@ -4,6 +4,7 @@ #include #include +#include #include #include #include @@ -125,8 +126,12 @@ CTransactionBuilder::CTransactionBuilder(CWallet& wallet, const CompactTallyItem coinControl.m_discard_feerate = ::GetDiscardRate(m_wallet); // Generate a feerate which will be used by calculations of this class and also by CWallet::CreateTransaction 
coinControl.m_feerate = std::max(GetRequiredFeeRate(m_wallet), m_wallet.m_pay_tx_fee); - // Do not force change to go back to the origin address; let the wallet - // select a fresh change destination to avoid address reuse. + // By default, keep legacy behavior: change goes back to the origin address. + // When -coinjoinfreshchange is enabled, let the wallet select a fresh + // change destination to avoid address reuse. + if (!CCoinJoinClientOptions::GetFreshChange()) { + coinControl.destChange = tallyItemIn.txdest; + } // Only allow tallyItems inputs for tx creation coinControl.m_allow_other_inputs = false; // Create dummy tx to calculate the exact required fees upfront for accurate amount and fee calculations diff --git a/src/wallet/init.cpp b/src/wallet/init.cpp index 4aa657beb8d3..780828f1a5d6 100644 --- a/src/wallet/init.cpp +++ b/src/wallet/init.cpp @@ -105,6 +105,7 @@ void WalletInit::AddWalletOptions(ArgsManager& argsman) const argsman.AddArg("-coinjoinmultisession", strprintf("Enable multiple CoinJoin mixing sessions per block, experimental (0-1, default: %u)", DEFAULT_COINJOIN_MULTISESSION), ArgsManager::ALLOW_ANY, OptionsCategory::WALLET_COINJOIN); argsman.AddArg("-coinjoinrounds=", strprintf("Use N separate masternodes for each denominated input to mix funds (%u-%u, default: %u)", MIN_COINJOIN_ROUNDS, MAX_COINJOIN_ROUNDS, DEFAULT_COINJOIN_ROUNDS), ArgsManager::ALLOW_ANY, OptionsCategory::WALLET_COINJOIN); argsman.AddArg("-coinjoinsessions=", strprintf("Use N separate masternodes in parallel to mix funds (%u-%u, default: %u)", MIN_COINJOIN_SESSIONS, MAX_COINJOIN_SESSIONS, DEFAULT_COINJOIN_SESSIONS), ArgsManager::ALLOW_ANY, OptionsCategory::WALLET_COINJOIN); + argsman.AddArg("-coinjoinfreshchange", strprintf("Send change from denomination creation to a fresh change address instead of the source address (0-1, default: %u)", DEFAULT_COINJOIN_FRESHCHANGE), ArgsManager::ALLOW_ANY, OptionsCategory::WALLET_COINJOIN); #ifdef USE_BDB argsman.AddArg("-dblogsize=", 
strprintf("Flush wallet database activity from memory to disk log every megabytes (default: %u)", DatabaseOptions().max_log_mb), ArgsManager::ALLOW_ANY | ArgsManager::DEBUG_ONLY, OptionsCategory::WALLET_DEBUG_TEST); From 191c79c813fcfb278aedac10c44314b38598d2f7 Mon Sep 17 00:00:00 2001 From: pasta Date: Fri, 3 Oct 2025 09:48:53 -0500 Subject: [PATCH 056/656] refactor: update thread-safety annotations to use EXCLUSIVE_LOCKS_REQUIRED for various methods in meta store and info --- src/masternode/meta.cpp | 14 +++++++------- src/masternode/meta.h | 20 ++++++++++---------- 2 files changed, 17 insertions(+), 17 deletions(-) diff --git a/src/masternode/meta.cpp b/src/masternode/meta.cpp index 783615c1b222..93dda0796f13 100644 --- a/src/masternode/meta.cpp +++ b/src/masternode/meta.cpp @@ -66,7 +66,7 @@ void CMasternodeMetaInfo::RemoveGovernanceObject(const uint256& nGovernanceObjec mapGovernanceObjectsVotedOn.erase(nGovernanceObjectHash); } -CMasternodeMetaInfoPtr CMasternodeMetaMan::GetMetaInfo(const uint256& proTxHash, bool fCreate) LOCKS_EXCLUDED(cs) +CMasternodeMetaInfoPtr CMasternodeMetaMan::GetMetaInfo(const uint256& proTxHash, bool fCreate) EXCLUSIVE_LOCKS_REQUIRED(!cs) { LOCK(cs); auto it = metaInfos.find(proTxHash); @@ -115,7 +115,7 @@ bool CMasternodeMetaMan::AddGovernanceVote(const uint256& proTxHash, const uint2 return true; } -void CMasternodeMetaMan::RemoveGovernanceObject(const uint256& nGovernanceObjectHash) LOCKS_EXCLUDED(cs) +void CMasternodeMetaMan::RemoveGovernanceObject(const uint256& nGovernanceObjectHash) EXCLUSIVE_LOCKS_REQUIRED(!cs) { LOCK(cs); for(const auto& p : metaInfos) { @@ -123,20 +123,20 @@ void CMasternodeMetaMan::RemoveGovernanceObject(const uint256& nGovernanceObject } } -std::vector CMasternodeMetaMan::GetAndClearDirtyGovernanceObjectHashes() LOCKS_EXCLUDED(cs) +std::vector CMasternodeMetaMan::GetAndClearDirtyGovernanceObjectHashes() EXCLUSIVE_LOCKS_REQUIRED(!cs) { std::vector vecTmp; WITH_LOCK(cs, 
vecTmp.swap(vecDirtyGovernanceObjectHashes)); return vecTmp; } -bool CMasternodeMetaMan::AlreadyHavePlatformBan(const uint256& inv_hash) const LOCKS_EXCLUDED(cs) +bool CMasternodeMetaMan::AlreadyHavePlatformBan(const uint256& inv_hash) const EXCLUSIVE_LOCKS_REQUIRED(!cs) { LOCK(cs); return m_seen_platform_bans.exists(inv_hash); } -std::optional CMasternodeMetaMan::GetPlatformBan(const uint256& inv_hash) const LOCKS_EXCLUDED(cs) +std::optional CMasternodeMetaMan::GetPlatformBan(const uint256& inv_hash) const EXCLUSIVE_LOCKS_REQUIRED(!cs) { LOCK(cs); PlatformBanMessage ret; @@ -147,13 +147,13 @@ std::optional CMasternodeMetaMan::GetPlatformBan(const uint2 return ret; } -void CMasternodeMetaMan::RememberPlatformBan(const uint256& inv_hash, PlatformBanMessage&& msg) LOCKS_EXCLUDED(cs) +void CMasternodeMetaMan::RememberPlatformBan(const uint256& inv_hash, PlatformBanMessage&& msg) EXCLUSIVE_LOCKS_REQUIRED(!cs) { LOCK(cs); m_seen_platform_bans.insert(inv_hash, std::move(msg)); } -std::string MasternodeMetaStore::ToString() const LOCKS_EXCLUDED(cs) +std::string MasternodeMetaStore::ToString() const EXCLUSIVE_LOCKS_REQUIRED(!cs) { LOCK(cs); return strprintf("Masternodes: meta infos object count: %d, nDsqCount: %d", metaInfos.size(), nDsqCount); diff --git a/src/masternode/meta.h b/src/masternode/meta.h index 39e4902cf446..8d8df8d0df92 100644 --- a/src/masternode/meta.h +++ b/src/masternode/meta.h @@ -134,7 +134,7 @@ class MasternodeMetaStore public: template - void Serialize(Stream &s) const LOCKS_EXCLUDED(cs) + void Serialize(Stream &s) const EXCLUSIVE_LOCKS_REQUIRED(!cs) { LOCK(cs); std::vector tmpMetaInfo; @@ -145,7 +145,7 @@ class MasternodeMetaStore } template - void Unserialize(Stream &s) LOCKS_EXCLUDED(cs) + void Unserialize(Stream &s) EXCLUSIVE_LOCKS_REQUIRED(!cs) { Clear(); @@ -163,14 +163,14 @@ class MasternodeMetaStore } } - void Clear() LOCKS_EXCLUDED(cs) + void Clear() EXCLUSIVE_LOCKS_REQUIRED(!cs) { LOCK(cs); metaInfos.clear(); } - std::string ToString() 
const LOCKS_EXCLUDED(cs); + std::string ToString() const EXCLUSIVE_LOCKS_REQUIRED(!cs); }; /** @@ -233,7 +233,7 @@ class CMasternodeMetaMan : public MasternodeMetaStore bool IsValid() const { return is_valid; } - CMasternodeMetaInfoPtr GetMetaInfo(const uint256& proTxHash, bool fCreate = true) LOCKS_EXCLUDED(cs); + CMasternodeMetaInfoPtr GetMetaInfo(const uint256& proTxHash, bool fCreate = true) EXCLUSIVE_LOCKS_REQUIRED(!cs); int64_t GetDsqCount() const { return nDsqCount; } int64_t GetDsqThreshold(const uint256& proTxHash, int nMnCount); @@ -242,13 +242,13 @@ class CMasternodeMetaMan : public MasternodeMetaStore void DisallowMixing(const uint256& proTxHash); bool AddGovernanceVote(const uint256& proTxHash, const uint256& nGovernanceObjectHash); - void RemoveGovernanceObject(const uint256& nGovernanceObjectHash) LOCKS_EXCLUDED(cs); + void RemoveGovernanceObject(const uint256& nGovernanceObjectHash) EXCLUSIVE_LOCKS_REQUIRED(!cs); - std::vector GetAndClearDirtyGovernanceObjectHashes() LOCKS_EXCLUDED(cs); + std::vector GetAndClearDirtyGovernanceObjectHashes() EXCLUSIVE_LOCKS_REQUIRED(!cs); - bool AlreadyHavePlatformBan(const uint256& inv_hash) const LOCKS_EXCLUDED(cs); - std::optional GetPlatformBan(const uint256& inv_hash) const LOCKS_EXCLUDED(cs); - void RememberPlatformBan(const uint256& inv_hash, PlatformBanMessage&& msg) LOCKS_EXCLUDED(cs); + bool AlreadyHavePlatformBan(const uint256& inv_hash) const EXCLUSIVE_LOCKS_REQUIRED(!cs); + std::optional GetPlatformBan(const uint256& inv_hash) const EXCLUSIVE_LOCKS_REQUIRED(!cs); + void RememberPlatformBan(const uint256& inv_hash, PlatformBanMessage&& msg) EXCLUSIVE_LOCKS_REQUIRED(!cs); }; #endif // BITCOIN_MASTERNODE_META_H From 4800e2fd9ec80baf74ee3bc0489dff82319001e0 Mon Sep 17 00:00:00 2001 From: pasta Date: Sat, 4 Oct 2025 06:40:16 -0500 Subject: [PATCH 057/656] chore: run clang-format --- src/coinjoin/util.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/coinjoin/util.cpp 
b/src/coinjoin/util.cpp index 2858cef6a694..1436cf728104 100644 --- a/src/coinjoin/util.cpp +++ b/src/coinjoin/util.cpp @@ -2,9 +2,9 @@ // Distributed under the MIT/X11 software license, see the accompanying // file COPYING or http://www.opensource.org/licenses/mit-license.php. +#include #include #include -#include #include #include #include From eb0e14c339656785034e7ee6ac20d2213be64a18 Mon Sep 17 00:00:00 2001 From: Konstantin Akimov Date: Tue, 16 Sep 2025 00:57:05 +0700 Subject: [PATCH 058/656] refactor: rename hard-fork v23 to hard-fork v24 --- src/chainparams.cpp | 64 +++++++++++++------------- src/consensus/params.h | 2 +- src/deploymentinfo.cpp | 2 +- src/evo/creditpool.cpp | 4 +- src/evo/creditpool.h | 2 +- src/evo/deterministicmns.cpp | 12 ++--- src/evo/providertx.cpp | 2 +- src/evo/specialtxman.cpp | 6 +-- src/rpc/blockchain.cpp | 2 +- test/functional/feature_asset_locks.py | 10 ++-- test/functional/rpc_blockchain.py | 4 +- 11 files changed, 55 insertions(+), 55 deletions(-) diff --git a/src/chainparams.cpp b/src/chainparams.cpp index 31d47f4d4d12..48c69ce4a92e 100644 --- a/src/chainparams.cpp +++ b/src/chainparams.cpp @@ -209,14 +209,14 @@ class CMainParams : public CChainParams { consensus.vDeployments[Consensus::DEPLOYMENT_TESTDUMMY].nTimeout = Consensus::BIP9Deployment::NO_TIMEOUT; consensus.vDeployments[Consensus::DEPLOYMENT_TESTDUMMY].min_activation_height = 0; // No activation delay - consensus.vDeployments[Consensus::DEPLOYMENT_V23].bit = 12; - consensus.vDeployments[Consensus::DEPLOYMENT_V23].nStartTime = Consensus::BIP9Deployment::NEVER_ACTIVE; // TODO - consensus.vDeployments[Consensus::DEPLOYMENT_V23].nTimeout = Consensus::BIP9Deployment::NO_TIMEOUT; // TODO - consensus.vDeployments[Consensus::DEPLOYMENT_V23].nWindowSize = 4032; - consensus.vDeployments[Consensus::DEPLOYMENT_V23].nThresholdStart = 3226; // 80% of 4032 - consensus.vDeployments[Consensus::DEPLOYMENT_V23].nThresholdMin = 2420; // 60% of 4032 - 
consensus.vDeployments[Consensus::DEPLOYMENT_V23].nFalloffCoeff = 5; // this corresponds to 10 periods - consensus.vDeployments[Consensus::DEPLOYMENT_V23].useEHF = true; + consensus.vDeployments[Consensus::DEPLOYMENT_V24].bit = 12; + consensus.vDeployments[Consensus::DEPLOYMENT_V24].nStartTime = Consensus::BIP9Deployment::NEVER_ACTIVE; // TODO + consensus.vDeployments[Consensus::DEPLOYMENT_V24].nTimeout = Consensus::BIP9Deployment::NO_TIMEOUT; // TODO + consensus.vDeployments[Consensus::DEPLOYMENT_V24].nWindowSize = 4032; + consensus.vDeployments[Consensus::DEPLOYMENT_V24].nThresholdStart = 3226; // 80% of 4032 + consensus.vDeployments[Consensus::DEPLOYMENT_V24].nThresholdMin = 2420; // 60% of 4032 + consensus.vDeployments[Consensus::DEPLOYMENT_V24].nFalloffCoeff = 5; // this corresponds to 10 periods + consensus.vDeployments[Consensus::DEPLOYMENT_V24].useEHF = true; // The best chain should have at least this much work. consensus.nMinimumChainWork = uint256S("0x00000000000000000000000000000000000000000000aa587876325b0a1080c8"); // 2301632 @@ -407,14 +407,14 @@ class CTestNetParams : public CChainParams { consensus.vDeployments[Consensus::DEPLOYMENT_TESTDUMMY].nTimeout = Consensus::BIP9Deployment::NO_TIMEOUT; consensus.vDeployments[Consensus::DEPLOYMENT_TESTDUMMY].min_activation_height = 0; // No activation delay - consensus.vDeployments[Consensus::DEPLOYMENT_V23].bit = 12; - consensus.vDeployments[Consensus::DEPLOYMENT_V23].nStartTime = Consensus::BIP9Deployment::NEVER_ACTIVE; // TODO - consensus.vDeployments[Consensus::DEPLOYMENT_V23].nTimeout = Consensus::BIP9Deployment::NO_TIMEOUT; - consensus.vDeployments[Consensus::DEPLOYMENT_V23].nWindowSize = 100; - consensus.vDeployments[Consensus::DEPLOYMENT_V23].nThresholdStart = 80; // 80% of 100 - consensus.vDeployments[Consensus::DEPLOYMENT_V23].nThresholdMin = 60; // 60% of 100 - consensus.vDeployments[Consensus::DEPLOYMENT_V23].nFalloffCoeff = 5; // this corresponds to 10 periods - 
consensus.vDeployments[Consensus::DEPLOYMENT_V23].useEHF = true; + consensus.vDeployments[Consensus::DEPLOYMENT_V24].bit = 12; + consensus.vDeployments[Consensus::DEPLOYMENT_V24].nStartTime = Consensus::BIP9Deployment::NEVER_ACTIVE; // TODO + consensus.vDeployments[Consensus::DEPLOYMENT_V24].nTimeout = Consensus::BIP9Deployment::NO_TIMEOUT; + consensus.vDeployments[Consensus::DEPLOYMENT_V24].nWindowSize = 100; + consensus.vDeployments[Consensus::DEPLOYMENT_V24].nThresholdStart = 80; // 80% of 100 + consensus.vDeployments[Consensus::DEPLOYMENT_V24].nThresholdMin = 60; // 60% of 100 + consensus.vDeployments[Consensus::DEPLOYMENT_V24].nFalloffCoeff = 5; // this corresponds to 10 periods + consensus.vDeployments[Consensus::DEPLOYMENT_V24].useEHF = true; // The best chain should have at least this much work. consensus.nMinimumChainWork = uint256S("0x00000000000000000000000000000000000000000000000003472e1b8bd6a3b7"); // 1254997 @@ -580,14 +580,14 @@ class CDevNetParams : public CChainParams { consensus.vDeployments[Consensus::DEPLOYMENT_TESTDUMMY].nTimeout = Consensus::BIP9Deployment::NO_TIMEOUT; consensus.vDeployments[Consensus::DEPLOYMENT_TESTDUMMY].min_activation_height = 0; // No activation delay - consensus.vDeployments[Consensus::DEPLOYMENT_V23].bit = 12; - consensus.vDeployments[Consensus::DEPLOYMENT_V23].nStartTime = 1751328000; // July 1, 2025 - consensus.vDeployments[Consensus::DEPLOYMENT_V23].nTimeout = Consensus::BIP9Deployment::NO_TIMEOUT; - consensus.vDeployments[Consensus::DEPLOYMENT_V23].nWindowSize = 120; - consensus.vDeployments[Consensus::DEPLOYMENT_V23].nThresholdStart = 96; // 80% of 120 - consensus.vDeployments[Consensus::DEPLOYMENT_V23].nThresholdMin = 72; // 60% of 120 - consensus.vDeployments[Consensus::DEPLOYMENT_V23].nFalloffCoeff = 5; // this corresponds to 10 periods - consensus.vDeployments[Consensus::DEPLOYMENT_V23].useEHF = true; + consensus.vDeployments[Consensus::DEPLOYMENT_V24].bit = 12; + 
consensus.vDeployments[Consensus::DEPLOYMENT_V24].nStartTime = 1751328000; // July 1, 2025 + consensus.vDeployments[Consensus::DEPLOYMENT_V24].nTimeout = Consensus::BIP9Deployment::NO_TIMEOUT; + consensus.vDeployments[Consensus::DEPLOYMENT_V24].nWindowSize = 120; + consensus.vDeployments[Consensus::DEPLOYMENT_V24].nThresholdStart = 96; // 80% of 120 + consensus.vDeployments[Consensus::DEPLOYMENT_V24].nThresholdMin = 72; // 60% of 120 + consensus.vDeployments[Consensus::DEPLOYMENT_V24].nFalloffCoeff = 5; // this corresponds to 10 periods + consensus.vDeployments[Consensus::DEPLOYMENT_V24].useEHF = true; // The best chain should have at least this much work. consensus.nMinimumChainWork = uint256S("0x000000000000000000000000000000000000000000000000000000000000000"); @@ -817,14 +817,14 @@ class CRegTestParams : public CChainParams { consensus.vDeployments[Consensus::DEPLOYMENT_TESTDUMMY].nTimeout = Consensus::BIP9Deployment::NO_TIMEOUT; consensus.vDeployments[Consensus::DEPLOYMENT_TESTDUMMY].min_activation_height = 0; // No activation delay - consensus.vDeployments[Consensus::DEPLOYMENT_V23].bit = 12; - consensus.vDeployments[Consensus::DEPLOYMENT_V23].nStartTime = 0; - consensus.vDeployments[Consensus::DEPLOYMENT_V23].nTimeout = Consensus::BIP9Deployment::NO_TIMEOUT; - consensus.vDeployments[Consensus::DEPLOYMENT_V23].nWindowSize = 250; - consensus.vDeployments[Consensus::DEPLOYMENT_V23].nThresholdStart = 250 / 5 * 4; // 80% of window size - consensus.vDeployments[Consensus::DEPLOYMENT_V23].nThresholdMin = 250 / 5 * 3; // 60% of window size - consensus.vDeployments[Consensus::DEPLOYMENT_V23].nFalloffCoeff = 5; // this corresponds to 10 periods - consensus.vDeployments[Consensus::DEPLOYMENT_V23].useEHF = true; + consensus.vDeployments[Consensus::DEPLOYMENT_V24].bit = 12; + consensus.vDeployments[Consensus::DEPLOYMENT_V24].nStartTime = 0; + consensus.vDeployments[Consensus::DEPLOYMENT_V24].nTimeout = Consensus::BIP9Deployment::NO_TIMEOUT; + 
consensus.vDeployments[Consensus::DEPLOYMENT_V24].nWindowSize = 250; + consensus.vDeployments[Consensus::DEPLOYMENT_V24].nThresholdStart = 250 / 5 * 4; // 80% of window size + consensus.vDeployments[Consensus::DEPLOYMENT_V24].nThresholdMin = 250 / 5 * 3; // 60% of window size + consensus.vDeployments[Consensus::DEPLOYMENT_V24].nFalloffCoeff = 5; // this corresponds to 10 periods + consensus.vDeployments[Consensus::DEPLOYMENT_V24].useEHF = true; // The best chain should have at least this much work. consensus.nMinimumChainWork = uint256S("0x00"); diff --git a/src/consensus/params.h b/src/consensus/params.h index c0457060f2aa..09cbd8c83fa5 100644 --- a/src/consensus/params.h +++ b/src/consensus/params.h @@ -40,7 +40,7 @@ constexpr bool ValidDeployment(BuriedDeployment dep) { return dep <= DEPLOYMENT_ enum DeploymentPos : uint16_t { DEPLOYMENT_TESTDUMMY, - DEPLOYMENT_V23, // Deployment of doubling withdrawal limit, extended addresses + DEPLOYMENT_V24, // Deployment of doubling withdrawal limit, extended addresses // NOTE: Also add new deployments to VersionBitsDeploymentInfo in deploymentinfo.cpp MAX_VERSION_BITS_DEPLOYMENTS }; diff --git a/src/deploymentinfo.cpp b/src/deploymentinfo.cpp index cc84c2777761..856403e32889 100644 --- a/src/deploymentinfo.cpp +++ b/src/deploymentinfo.cpp @@ -12,7 +12,7 @@ const struct VBDeploymentInfo VersionBitsDeploymentInfo[Consensus::MAX_VERSION_B /*.gbt_force =*/ true, }, { - /*.name =*/"v23", + /*.name =*/"v24", /*.gbt_force =*/true, }, }; diff --git a/src/evo/creditpool.cpp b/src/evo/creditpool.cpp index 20ae214d32ff..2517af9567f1 100644 --- a/src/evo/creditpool.cpp +++ b/src/evo/creditpool.cpp @@ -189,8 +189,8 @@ CCreditPool CCreditPoolManager::ConstructCreditPool(const gsl::not_null GetValidatedPayload(const CTransaction& tx, gsl::not /** * Validates potential changes to masternode state version by ProTx transaction version - * @param[in] pindexPrev Previous block index to validate DEPLOYMENT_V23 activation + * @param[in] 
pindexPrev Previous block index to validate DEPLOYMENT_V24 activation * @param[in] tx_type Special transaction type * @param[in] state_version Current masternode state version * @param[in] tx_version Proposed transaction version * @param[out] state This may be set to an Error state if any error occurred processing them - * @returns true if version change is valid or DEPLOYMENT_V23 is not active + * @returns true if version change is valid or DEPLOYMENT_V24 is not active */ bool IsVersionChangeValid(gsl::not_null pindexPrev, const uint16_t tx_type, const uint16_t state_version, const uint16_t tx_version, TxValidationState& state) { - if (!DeploymentActiveAfter(pindexPrev, Params().GetConsensus(), Consensus::DEPLOYMENT_V23)) { - // New restrictions only apply after v23 deployment + if (!DeploymentActiveAfter(pindexPrev, Params().GetConsensus(), Consensus::DEPLOYMENT_V24)) { + // New restrictions only apply after v24 deployment return true; } @@ -1067,10 +1067,10 @@ bool CheckProRegTx(CDeterministicMNManager& dmnman, const CTransaction& tx, gsl: return false; } - const bool is_v23_active{DeploymentActiveAfter(pindexPrev, Params().GetConsensus(), Consensus::DEPLOYMENT_V23)}; + const bool is_v24_active{DeploymentActiveAfter(pindexPrev, Params().GetConsensus(), Consensus::DEPLOYMENT_V24)}; // No longer allow legacy scheme masternode registration - if (is_v23_active && opt_ptx->nVersion < ProTxVersion::BasicBLS) { + if (is_v24_active && opt_ptx->nVersion < ProTxVersion::BasicBLS) { return state.Invalid(TxValidationResult::TX_CONSENSUS, "bad-protx-version-disallowed"); } diff --git a/src/evo/providertx.cpp b/src/evo/providertx.cpp index 6702ede99223..99e046955bf2 100644 --- a/src/evo/providertx.cpp +++ b/src/evo/providertx.cpp @@ -23,7 +23,7 @@ template : DeploymentActiveAfter(pindexPrev, Params().GetConsensus(), Consensus::DEPLOYMENT_V19), is_extaddr_eligible ? 
DeploymentActiveAfter(pindexPrev, Params().GetConsensus(), - Consensus::DEPLOYMENT_V23) + Consensus::DEPLOYMENT_V24) : false); } template uint16_t GetMaxFromDeployment(gsl::not_null pindexPrev, std::optional is_basic_override); diff --git a/src/evo/specialtxman.cpp b/src/evo/specialtxman.cpp index 9fcffe753299..017ee8be6820 100644 --- a/src/evo/specialtxman.cpp +++ b/src/evo/specialtxman.cpp @@ -206,7 +206,7 @@ bool CSpecialTxProcessor::BuildNewListFromBlock(const CBlock& block, gsl::not_nu newList.DecreaseScores(); const bool isMNRewardReallocation{DeploymentActiveAfter(pindexPrev, m_consensus_params, Consensus::DEPLOYMENT_MN_RR)}; - const bool is_v23_deployed{DeploymentActiveAfter(pindexPrev, m_consensus_params, Consensus::DEPLOYMENT_V23)}; + const bool is_v24_deployed{DeploymentActiveAfter(pindexPrev, m_consensus_params, Consensus::DEPLOYMENT_V24)}; // we skip the coinbase for (int i = 1; i < (int)block.vtx.size(); i++) { @@ -315,8 +315,8 @@ bool CSpecialTxProcessor::BuildNewListFromBlock(const CBlock& block, gsl::not_nu } auto newState = std::make_shared(*dmn->pdmnState); - if (is_v23_deployed) { - // Extended addresses support in v23 means that the version can be updated + if (is_v24_deployed) { + // Extended addresses support in v24 means that the version can be updated newState->nVersion = opt_proTx->nVersion; } newState->netInfo = opt_proTx->netInfo; diff --git a/src/rpc/blockchain.cpp b/src/rpc/blockchain.cpp index 89b4ae290df7..ab71b1767627 100644 --- a/src/rpc/blockchain.cpp +++ b/src/rpc/blockchain.cpp @@ -1561,7 +1561,7 @@ RPCHelpMan getblockchaininfo() SoftForkDescPushBack(&tip, softforks, consensusParams, deploy); } for (auto ehf_deploy : { /* sorted by activation block */ - Consensus::DEPLOYMENT_V23, + Consensus::DEPLOYMENT_V24, Consensus::DEPLOYMENT_TESTDUMMY }) { SoftForkDescPushBack(&tip, ehfSignals, softforks, consensusParams, ehf_deploy); } diff --git a/test/functional/feature_asset_locks.py b/test/functional/feature_asset_locks.py index 
5a312167f2eb..d482d1061de4 100755 --- a/test/functional/feature_asset_locks.py +++ b/test/functional/feature_asset_locks.py @@ -266,7 +266,7 @@ def run_test(self): self.test_withdrawal_limits(node_wallet, node, pubkey) self.test_mn_rr(node_wallet, node, pubkey) self.test_withdrawals_fork(node_wallet, node, pubkey) - self.test_v23_fork(node_wallet, node, pubkey) + self.test_v24_fork(node_wallet, node, pubkey) def test_asset_locks(self, node_wallet, node, pubkey): @@ -702,10 +702,10 @@ def test_withdrawals_fork(self, node_wallet, node, pubkey): self.generate(node, 1) self.ensure_tx_is_not_mined(txid_in_block) - def test_v23_fork(self, node_wallet, node, pubkey): - self.log.info("Testing asset unlock after 'v23' activation...") - self.activate_by_name('v23', 750) - self.log.info(f'post-v23 height: {node.getblockcount()} credit: {self.get_credit_pool_balance()}') + def test_v24_fork(self, node_wallet, node, pubkey): + self.log.info("Testing asset unlock after 'v24' activation...") + self.activate_by_name('v24', 750) + self.log.info(f'post-v24 height: {node.getblockcount()} credit: {self.get_credit_pool_balance()}') index = 601 while index < 611: diff --git a/test/functional/rpc_blockchain.py b/test/functional/rpc_blockchain.py index d18c366d0476..6ffaaa75f0f9 100755 --- a/test/functional/rpc_blockchain.py +++ b/test/functional/rpc_blockchain.py @@ -216,12 +216,12 @@ def _test_getblockchaininfo(self): 'v20': { 'type': 'buried', 'active': False, 'height': 412}, 'mn_rr': { 'type': 'buried', 'active': False, 'height': 413}, 'withdrawals': { 'type': 'buried', 'active': False, 'height': 600}, - 'v23': { + 'v24': { 'type': 'bip9', 'bip9': { 'status': 'defined', 'start_time': 0, - 'timeout': 9223372036854775807, # "v23" does not have a timeout so is set to the max int64 value + 'timeout': 9223372036854775807, # "v24" does not have a timeout so is set to the max int64 value 'since': 0, 'min_activation_height': 0, 'ehf': True From 73b757d98f7480ca9b212c88635727b00cfd493f Mon Sep 
17 00:00:00 2001 From: UdjinM6 Date: Thu, 2 Oct 2025 14:45:25 +0300 Subject: [PATCH 059/656] fix: a few missing v23 references --- doc/release-notes-6666.md | 2 +- doc/release-notes-6729.md | 2 +- src/evo/providertx.h | 2 +- test/functional/rpc_netinfo.py | 16 ++++++++-------- 4 files changed, 11 insertions(+), 11 deletions(-) diff --git a/doc/release-notes-6666.md b/doc/release-notes-6666.md index 2043e461c923..e9c3a61f5d38 100644 --- a/doc/release-notes-6666.md +++ b/doc/release-notes-6666.md @@ -3,7 +3,7 @@ Notable Changes * Dash Core has added support for the ability to register multiple addr:port pairs to a masternode and for specifying distinct addresses for platform P2P and platform HTTPS endpoints. The consensus and format changes needed to enable - this capability is referred to as "extended addresses" and is enabled by the deployment of the v23 fork, affecting + this capability is referred to as "extended addresses" and is enabled by the deployment of the v24 fork, affecting new masternode registrations and service updates to basic BLS masternodes. * Operators must upgrade from legacy BLS scheme to basic BLS scheme before utilizing extended address capabilities diff --git a/doc/release-notes-6729.md b/doc/release-notes-6729.md index da7a26fa50ad..15f7bccbcea2 100644 --- a/doc/release-notes-6729.md +++ b/doc/release-notes-6729.md @@ -1,7 +1,7 @@ Notable Changes --------------- -* Dash Core will no longer permit the registration of new legacy scheme masternodes after the deployment of the v23 +* Dash Core will no longer permit the registration of new legacy scheme masternodes after the deployment of the v24 fork. Existing basic scheme masternodes will also be prohibited from downgrading to the legacy scheme after the deployment is active. 
diff --git a/src/evo/providertx.h b/src/evo/providertx.h index f2173b20e086..54c3be787fb6 100644 --- a/src/evo/providertx.h +++ b/src/evo/providertx.h @@ -37,7 +37,7 @@ enum : uint16_t { if (is_extended_addr) { // Requires *both* forks to be active to use extended addresses. is_basic_scheme_active could // be set to false due to RPC specialization, so we must evaluate is_extended_addr *last* to - // avoid accidentally upgrading a legacy BLS node to basic BLS due to v23 activation. + // avoid accidentally upgrading a legacy BLS node to basic BLS due to v24 activation. return ProTxVersion::ExtAddr; } return ProTxVersion::BasicBLS; diff --git a/test/functional/rpc_netinfo.py b/test/functional/rpc_netinfo.py index 5852fa4e40cc..99853ac97af4 100755 --- a/test/functional/rpc_netinfo.py +++ b/test/functional/rpc_netinfo.py @@ -22,8 +22,8 @@ from random import randint from typing import List, Optional -# Height at which BIP9 deployment DEPLOYMENT_V23 is activated -V23_ACTIVATION_THRESHOLD = 100 +# Height at which BIP9 deployment DEPLOYMENT_V24 is activated +V24_ACTIVATION_THRESHOLD = 100 # See CMainParams in src/chainparams.cpp DEFAULT_PORT_MAINNET_CORE_P2P = 9999 # See CRegTestParams in src/chainparams.cpp @@ -137,18 +137,18 @@ class NetInfoTest(BitcoinTestFramework): def set_test_params(self): self.num_nodes = 2 self.extra_args = [[ - "-dip3params=2:2", f"-vbparams=v23:{self.mocktime}:999999999999:{V23_ACTIVATION_THRESHOLD}:10:8:6:5:0" + "-dip3params=2:2", f"-vbparams=v24:{self.mocktime}:999999999999:{V24_ACTIVATION_THRESHOLD}:10:8:6:5:0" ] for _ in range(self.num_nodes)] def skip_test_if_missing_module(self): self.skip_if_no_wallet() - def activate_v23(self): + def activate_v24(self): batch_size: int = 50 - while not softfork_active(self.nodes[0], "v23"): + while not softfork_active(self.nodes[0], "v24"): self.bump_mocktime(batch_size) self.generate(self.nodes[0], batch_size, sync_fun=lambda: self.sync_blocks()) - assert softfork_active(self.nodes[0], "v23") + assert 
softfork_active(self.nodes[0], "v24") def check_netinfo_fields(self, val, core_p2p_port: int, plat_https_port: Optional[int], plat_p2p_port: Optional[int]): assert_equal(val['core_p2p'][0], f"127.0.0.1:{core_p2p_port}") @@ -182,8 +182,8 @@ def run_test(self): self.test_validation_legacy() self.log.info("Test output masternode address fields for consistency (pre-fork)") self.test_fields() - self.log.info("Mine blocks to activate DEPLOYMENT_V23") - self.activate_v23() + self.log.info("Mine blocks to activate DEPLOYMENT_V24") + self.activate_v24() self.log.info("Test input validation for masternode address fields (post-fork)") self.test_validation_common() self.test_validation_extended() From 94839202bbbf83c6026a7a8467dea173d7847fd8 Mon Sep 17 00:00:00 2001 From: Konstantin Akimov Date: Sun, 5 Oct 2025 03:03:03 +0700 Subject: [PATCH 060/656] fix: reference to repo dashcore-detached-sigs to dash-detached-sigs --- doc/release-process.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/doc/release-process.md b/doc/release-process.md index f4fd46e8443d..2390badea11f 100644 --- a/doc/release-process.md +++ b/doc/release-process.md @@ -148,7 +148,7 @@ However if this is done, once the release has been tagged in the dash-detached-s ### Windows and macOS codesigners only: Commit the detached codesign payloads ```sh -pushd ~/dashcore-detached-sigs +pushd ~/dash-detached-sigs # checkout the appropriate branch for this release series git checkout "v${VERSION}" rm -rf * From a60cc1b3e09934335c3634d55102165162cf2e8a Mon Sep 17 00:00:00 2001 From: Konstantin Akimov Date: Sun, 5 Oct 2025 03:12:24 +0700 Subject: [PATCH 061/656] fix: annotation for sendrawtransaction rpc (VBOOL for deprecated IS) --- src/rpc/mempool.cpp | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/src/rpc/mempool.cpp b/src/rpc/mempool.cpp index 165ea861df88..75512672c6fa 100644 --- a/src/rpc/mempool.cpp +++ b/src/rpc/mempool.cpp @@ -58,7 +58,8 @@ RPCHelpMan sendrawtransaction() 
RPCTypeCheck(request.params, { UniValue::VSTR, UniValueType(), // VNUM or VSTR, checked inside AmountFromValue() - UniValue::VBOOL + UniValue::VBOOL, + UniValue::VBOOL, }); CMutableTransaction mtx; From 6883393afc960f1829fada37a7682046c6d699ed Mon Sep 17 00:00:00 2001 From: MacroFake Date: Fri, 6 May 2022 11:11:41 +0200 Subject: [PATCH 062/656] Merge bitcoin/bitcoin#19426: refactor: Change * to & in MutableTransactionSignatureCreator fac6cfc50f65c610f2df9af3ec2efff5eade6661 refactor: Change * to & in MutableTransactionSignatureCreator (MarcoFalke) Pull request description: The `MutableTransactionSignatureCreator` constructor takes in a pointer to a mutable transaction. This is problematic for several reasons: * It would be undefined behaviour to pass in a nullptr because for signature creation, the memory of the mutable transaction is accessed * No caller currently passes in a nullptr, so passing a reference as a pointer is confusing Fix all issues by replacing `*` with `&` in `MutableTransactionSignatureCreator` ACKs for top commit: theStack: Code-review ACK fac6cfc50f65c610f2df9af3ec2efff5eade6661 jonatack: ACK fac6cfc50f65c610f2df9af3ec2efff5eade6661 Tree-SHA512: d84296b030bd4fa2709e5adbfe43a5f8377d218957d844af69a819893252af671df7f00004f5ba601a0bd70f3c1c2e58c4f00e75684da663f28432bb5c89fb86 --- src/bitcoin-tx.cpp | 2 +- src/psbt.cpp | 4 ++-- src/rpc/rawtransaction.cpp | 2 +- src/script/sign.cpp | 18 +++++++++--------- src/script/sign.h | 10 ++++++---- src/test/fuzz/script_sign.cpp | 2 +- src/test/script_tests.cpp | 2 +- 7 files changed, 21 insertions(+), 19 deletions(-) diff --git a/src/bitcoin-tx.cpp b/src/bitcoin-tx.cpp index b97ec594abeb..9c9ccdae59dc 100644 --- a/src/bitcoin-tx.cpp +++ b/src/bitcoin-tx.cpp @@ -627,7 +627,7 @@ static void MutateTxSign(CMutableTransaction& tx, const std::string& flagStr) SignatureData sigdata = DataFromTransaction(mergedTx, i, coin.out); // Only sign SIGHASH_SINGLE if there's a corresponding output: if (!fHashSingle || (i < 
mergedTx.vout.size())) - ProduceSignature(keystore, MutableTransactionSignatureCreator(&mergedTx, i, amount, nHashType), prevPubKey, sigdata); + ProduceSignature(keystore, MutableTransactionSignatureCreator(mergedTx, i, amount, nHashType), prevPubKey, sigdata); UpdateInput(txin, sigdata); } diff --git a/src/psbt.cpp b/src/psbt.cpp index 90285c414ee8..bea409f2c281 100644 --- a/src/psbt.cpp +++ b/src/psbt.cpp @@ -205,7 +205,7 @@ void UpdatePSBTOutput(const SigningProvider& provider, PartiallySignedTransactio // Construct a would-be spend of this output, to update sigdata with. // Note that ProduceSignature is used to fill in metadata (not actual signatures), // so provider does not need to provide any private keys (it can be a HidingSigningProvider). - MutableTransactionSignatureCreator creator(&tx, /*input_idx=*/0, out.nValue, SIGHASH_ALL); + MutableTransactionSignatureCreator creator(tx, /*input_idx=*/0, out.nValue, SIGHASH_ALL); ProduceSignature(provider, creator, out.scriptPubKey, sigdata); // Put redeem_script, key paths, into PSBTOutput. 
@@ -267,7 +267,7 @@ bool SignPSBTInput(const SigningProvider& provider, PartiallySignedTransaction& if (txdata == nullptr) { sig_complete = ProduceSignature(provider, DUMMY_SIGNATURE_CREATOR, utxo.scriptPubKey, sigdata); } else { - MutableTransactionSignatureCreator creator(&tx, index, utxo.nValue, txdata, sighash); + MutableTransactionSignatureCreator creator(tx, index, utxo.nValue, txdata, sighash); sig_complete = ProduceSignature(provider, creator, utxo.scriptPubKey, sigdata); } diff --git a/src/rpc/rawtransaction.cpp b/src/rpc/rawtransaction.cpp index 48869e1f27df..5cbef3d8793f 100644 --- a/src/rpc/rawtransaction.cpp +++ b/src/rpc/rawtransaction.cpp @@ -897,7 +897,7 @@ static RPCHelpMan combinerawtransaction() sigdata.MergeSignatureData(DataFromTransaction(txv, i, coin.out)); } } - ProduceSignature(DUMMY_SIGNING_PROVIDER, MutableTransactionSignatureCreator(&mergedTx, i, coin.out.nValue, 1), coin.out.scriptPubKey, sigdata); + ProduceSignature(DUMMY_SIGNING_PROVIDER, MutableTransactionSignatureCreator(mergedTx, i, coin.out.nValue, 1), coin.out.scriptPubKey, sigdata); UpdateInput(txin, sigdata); } diff --git a/src/script/sign.cpp b/src/script/sign.cpp index d58286fbabf7..65ae061466ad 100644 --- a/src/script/sign.cpp +++ b/src/script/sign.cpp @@ -17,16 +17,16 @@ typedef std::vector valtype; -MutableTransactionSignatureCreator::MutableTransactionSignatureCreator(const CMutableTransaction* tx, unsigned int input_idx, const CAmount& amount, int hash_type) - : txTo{tx}, nIn{input_idx}, nHashType{hash_type}, amount{amount}, checker{txTo, nIn, amount, MissingDataBehavior::FAIL}, +MutableTransactionSignatureCreator::MutableTransactionSignatureCreator(const CMutableTransaction& tx, unsigned int input_idx, const CAmount& amount, int hash_type) + : m_txto{tx}, nIn{input_idx}, nHashType{hash_type}, amount{amount}, checker{&m_txto, nIn, amount, MissingDataBehavior::FAIL}, m_txdata(nullptr) { } -MutableTransactionSignatureCreator::MutableTransactionSignatureCreator(const 
CMutableTransaction* tx, unsigned int input_idx, const CAmount& amount, const PrecomputedTransactionData* txdata, int hash_type) - : txTo{tx}, nIn{input_idx}, nHashType{hash_type}, amount{amount}, - checker{txdata ? MutableTransactionSignatureChecker{txTo, nIn, amount, *txdata, MissingDataBehavior::FAIL} : - MutableTransactionSignatureChecker{txTo, nIn, amount, MissingDataBehavior::FAIL}}, +MutableTransactionSignatureCreator::MutableTransactionSignatureCreator(const CMutableTransaction& tx, unsigned int input_idx, const CAmount& amount, const PrecomputedTransactionData* txdata, int hash_type) + : m_txto{tx}, nIn{input_idx}, nHashType{hash_type}, amount{amount}, + checker{txdata ? MutableTransactionSignatureChecker{&m_txto, nIn, amount, *txdata, MissingDataBehavior::FAIL} : + MutableTransactionSignatureChecker{&m_txto, nIn, amount, MissingDataBehavior::FAIL}}, m_txdata(txdata) { } @@ -37,7 +37,7 @@ bool MutableTransactionSignatureCreator::CreateSig(const SigningProvider& provid if (!provider.GetKey(address, key)) return false; - uint256 hash = SignatureHash(scriptCode, *txTo, nIn, nHashType, amount, sigversion, m_txdata); + uint256 hash = SignatureHash(scriptCode, m_txto, nIn, nHashType, amount, sigversion, m_txdata); if (!key.Sign(hash, vchSig)) return false; vchSig.push_back((unsigned char)nHashType); @@ -324,7 +324,7 @@ bool SignSignature(const SigningProvider &provider, const CScript& fromPubKey, C { assert(nIn < txTo.vin.size()); - MutableTransactionSignatureCreator creator(&txTo, nIn, amount, nHashType); + MutableTransactionSignatureCreator creator(txTo, nIn, amount, nHashType); SignatureData sigdata; bool ret = ProduceSignature(provider, creator, fromPubKey, sigdata); @@ -437,7 +437,7 @@ bool SignTransaction(CMutableTransaction& mtx, const SigningProvider* keystore, SignatureData sigdata = DataFromTransaction(mtx, i, coin->second.out); // Only sign SIGHASH_SINGLE if there's a corresponding output: if (!fHashSingle || (i < mtx.vout.size())) { - 
ProduceSignature(*keystore, MutableTransactionSignatureCreator(&mtx, i, amount, &txdata, nHashType), prevPubKey, sigdata); + ProduceSignature(*keystore, MutableTransactionSignatureCreator(mtx, i, amount, &txdata, nHashType), prevPubKey, sigdata); } UpdateInput(txin, sigdata); diff --git a/src/script/sign.h b/src/script/sign.h index e271c5742dc4..b449f9ee09e1 100644 --- a/src/script/sign.h +++ b/src/script/sign.h @@ -6,6 +6,7 @@ #ifndef BITCOIN_SCRIPT_SIGN_H #define BITCOIN_SCRIPT_SIGN_H +#include #include #include #include @@ -33,8 +34,9 @@ class BaseSignatureCreator { }; /** A signature creator for transactions. */ -class MutableTransactionSignatureCreator : public BaseSignatureCreator { - const CMutableTransaction* txTo; +class MutableTransactionSignatureCreator : public BaseSignatureCreator +{ + const CMutableTransaction& m_txto; unsigned int nIn; int nHashType; CAmount amount; @@ -42,8 +44,8 @@ class MutableTransactionSignatureCreator : public BaseSignatureCreator { const PrecomputedTransactionData* m_txdata; public: - MutableTransactionSignatureCreator(const CMutableTransaction* txToIn, unsigned int input_idx, const CAmount& amountIn, int nHashTypeIn = SIGHASH_ALL); - MutableTransactionSignatureCreator(const CMutableTransaction* txToIn, unsigned int input_idx, const CAmount& amountIn, const PrecomputedTransactionData* txdata, int nHashTypeIn = SIGHASH_ALL); + MutableTransactionSignatureCreator(const CMutableTransaction& tx LIFETIMEBOUND, unsigned int input_idx, const CAmount& amountIn, int nHashTypeIn = SIGHASH_ALL); + MutableTransactionSignatureCreator(const CMutableTransaction& tx LIFETIMEBOUND, unsigned int input_idx, const CAmount& amountIn, const PrecomputedTransactionData* txdata, int nHashTypeIn = SIGHASH_ALL); const BaseSignatureChecker& Checker() const override { return checker; } bool CreateSig(const SigningProvider& provider, std::vector& vchSig, const CKeyID& keyid, const CScript& scriptCode, SigVersion sigversion) const override; }; diff --git 
a/src/test/fuzz/script_sign.cpp b/src/test/fuzz/script_sign.cpp index 182e799840b4..c7206e9ca160 100644 --- a/src/test/fuzz/script_sign.cpp +++ b/src/test/fuzz/script_sign.cpp @@ -110,7 +110,7 @@ FUZZ_TARGET(script_sign, .init = initialize_script_sign) } if (n_in < script_tx_to.vin.size()) { (void)SignSignature(provider, ConsumeScript(fuzzed_data_provider), script_tx_to, n_in, ConsumeMoney(fuzzed_data_provider), fuzzed_data_provider.ConsumeIntegral()); - MutableTransactionSignatureCreator signature_creator{&tx_to, n_in, ConsumeMoney(fuzzed_data_provider), fuzzed_data_provider.ConsumeIntegral()}; + MutableTransactionSignatureCreator signature_creator{tx_to, n_in, ConsumeMoney(fuzzed_data_provider), fuzzed_data_provider.ConsumeIntegral()}; std::vector vch_sig; CKeyID address; if (fuzzed_data_provider.ConsumeBool()) { diff --git a/src/test/script_tests.cpp b/src/test/script_tests.cpp index 9407dc80d5de..043bb1b18d8d 100644 --- a/src/test/script_tests.cpp +++ b/src/test/script_tests.cpp @@ -1090,7 +1090,7 @@ SignatureData CombineSignatures(const CTxOut& txout, const CMutableTransaction& SignatureData data; data.MergeSignatureData(scriptSig1); data.MergeSignatureData(scriptSig2); - ProduceSignature(DUMMY_SIGNING_PROVIDER, MutableTransactionSignatureCreator(&tx, 0, txout.nValue), txout.scriptPubKey, data); + ProduceSignature(DUMMY_SIGNING_PROVIDER, MutableTransactionSignatureCreator(tx, 0, txout.nValue), txout.scriptPubKey, data); return data; } From 3b0a0ea13e209b52617ee2606c383f6f0e957dec Mon Sep 17 00:00:00 2001 From: fanquake Date: Mon, 9 May 2022 10:56:11 +0100 Subject: [PATCH 063/656] Merge bitcoin/bitcoin#25078: doc: Shorten explanation of "maintainers" fa32ced49cf651b48e0a9cc165e45a27505a461f doc: Shorten explanation of "maintainers" (MacroFake) Pull request description: GitHub has an extensive documentation about permissions ( 
https://docs.github.com/en/organizations/managing-access-to-your-organizations-repositories/repository-roles-for-an-organization#permissions-for-each-role ), so I don't think we should be trying to mirror them here. Specifically, this pull makes three changes: * Clarify that all "merge maintainers" can merge pull requests. Obviously, while GitHub users with the `Maintain` permission can not force push to protected branches, and GitHub users with the `Admin` permission can, I don't think this is worthy to mention in the contribution guidelines. During the whole time I was working on the project, I think this permission was only used once or twice, when I accidentally pushed an unsigned draft commit directly to `master`. See https://bitcoin-irc.chaincode.com/bitcoin-core-dev/2016-06-13#473584 . One could argue that there should be a list of maintainers in the doc. Though, as there is already a list of keys for verify-commits, this seems like unnecessary overhead. * Clarify that the release process is executed collectively by the developers. For example, release process code changes that are reproducible can be done by anyone without permission. Also, detached signatures are created by several people (see for example https://github.com/bitcoin-core/bitcoin-detached-sigs/commits/23.0), which (I believe) are also separate from the people that can push the binaries to the `bin` folder, which again are separate from the people who can release the snap/flatpak package. * Clarify that moderation is also done collectively by people with `Triage`, `Write`, `Maintain`, and `Admin` permission. I think it is fine to refer to everyone in that group as "maintainers", or at least don't clarify it further, as any attempt at that would start to duplicate GitHub docs. 
ACKs for top commit: laanwj: ACK fa32ced49cf651b48e0a9cc165e45a27505a461f prusnak: Approach ACK fa32ced49cf651b48e0a9cc165e45a27505a461f fanquake: ACK fa32ced49cf651b48e0a9cc165e45a27505a461f Tree-SHA512: ed87c2e538a32ff1611208a7262425160a4340a3112a1b2712d7e9a550fa191ddbebea0d8e45d3e578ead02d5ef17bddcaab3f6ee876f9018a5acbc65ffd0e1c --- CONTRIBUTING.md | 9 ++++----- 1 file changed, 4 insertions(+), 5 deletions(-) diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 4f14019e9658..25d0874c0af9 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -10,10 +10,9 @@ First, in terms of structure, there is no particular concept of "Dash Core developers" in the sense of privileged people. Open source often naturally revolves around a meritocracy where contributors earn trust from the developer community over time. Nevertheless, some hierarchy is necessary for practical -purposes. As such, there are repository "maintainers" who are responsible for -merging pull requests, as well as a "lead maintainer" who is responsible for the -[release cycle](/doc/release-process.md) as well as overall merging, moderation -and appointment of maintainers. +purposes. As such, there are repository maintainers who are responsible for +merging pull requests, the [release cycle](/doc/release-process.md), and +moderation. Getting Started --------------- @@ -272,7 +271,7 @@ projects such as libsecp256k1), and is not to be confused with overall Dash Network Protocol consensus changes. Whether a pull request is merged into Dash Core rests with the project merge -maintainers and ultimately the project lead. +maintainers. 
Maintainers will take into consideration if a patch is in line with the general principles of the project; meets the minimum standards for inclusion; and will From 2f301f3b55a576788a4c713a5b838969ed4f1f54 Mon Sep 17 00:00:00 2001 From: fanquake Date: Wed, 11 May 2022 07:19:37 +0100 Subject: [PATCH 064/656] Merge bitcoin/bitcoin#25090: doc: Explain Bitcoin Core instead of Bitcoin in README.md BACKPORT NOTE: description is edited, not backported exactly the same. faeb5b59a098578b3e8c552d35b5ba02b12af14d doc: Explain Bitcoin Core in README.md (MacroFake) Pull request description: Currently the README doesn't explain what Bitcoin Core is. Fix that. Further reading / Inspired by: * https://github.com/bitcoin/bitcoin/pull/25012 * https://github.com/bitcoin-core/bitcoincore.org/pull/783 * https://github.com/bitcoin-core/bitcoincore.org/pull/784 ACKs for top commit: laanwj: re-ACK faeb5b59a098578b3e8c552d35b5ba02b12af14d brunoerg: ACK faeb5b59a098578b3e8c552d35b5ba02b12af14d 1440000bytes: ACK https://github.com/bitcoin/bitcoin/pull/25090/commits/faeb5b59a098578b3e8c552d35b5ba02b12af14d w0xlt: ACK https://github.com/bitcoin/bitcoin/pull/25090/commits/faeb5b59a098578b3e8c552d35b5ba02b12af14d Tree-SHA512: f9a9460853487a46ba0219d26cefa1fcf8d650deb3c2656737a54648016af0cdac58c5d4641a390be8c05f3e78185bd99801e239fcb87d410c4df31f61bc7016 --- README.md | 14 +++++++++----- 1 file changed, 9 insertions(+), 5 deletions(-) diff --git a/README.md b/README.md index 11875daf7a11..edb28d9739c8 100644 --- a/README.md +++ b/README.md @@ -10,15 +10,19 @@ https://www.dash.org For an immediately usable, binary version of the Dash Core software, see https://www.dash.org/downloads/. -Further information about Dash Core is available in [./doc/](/doc). +Dash Core connects to the Dash peer-to-peer network to download and fully +validate blocks and transactions. It also includes a wallet and graphical user +interface, which can be optionally built. 
+ +Further information about Dash Core is available in the [doc folder](/doc). What is Dash? ------------- -Dash is an experimental digital currency that enables instant, private -payments to anyone, anywhere in the world. Dash uses peer-to-peer technology -to operate with no central authority: managing transactions and issuing money -are carried out collectively by the network. Dash Core is the name of the open +Dash is a digital currency that enables instant, private payments to anyone, +anywhere in the world. Dash uses peer-to-peer technology to operate with +no central authority: managing transactions and issuing money are carried out +collectively by the network. Dash Core is the name of the open source software which enables the use of this currency. From 3437dfd1e8b924ffcd02b489b55e2b65d288f2c8 Mon Sep 17 00:00:00 2001 From: fanquake Date: Mon, 16 May 2022 14:25:59 +0100 Subject: [PATCH 065/656] Merge bitcoin/bitcoin#25095: rpc: Fix implicit-integer-sign-change in gettxout fa347a906685df1d44cafa3e6cc7fdd2ace68ff5 rpc: Fix implicit-integer-sign-change in gettxout (MacroFake) Pull request description: ACKs for top commit: theStack: Code-review ACK fa347a906685df1d44cafa3e6cc7fdd2ace68ff5 Tree-SHA512: 2a1128f714119b6b6cfeb20ee59c4f46404d5a83942253c73de64b0289a7d41e4437cf77d19b1cf623e2dd15fbaa1ec7acd03cc5d6dde41b3c7d06a082073ea1 --- src/rpc/blockchain.cpp | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/src/rpc/blockchain.cpp b/src/rpc/blockchain.cpp index 89b4ae290df7..e64a9cff211d 100644 --- a/src/rpc/blockchain.cpp +++ b/src/rpc/blockchain.cpp @@ -1298,8 +1298,7 @@ static RPCHelpMan gettxout() UniValue ret(UniValue::VOBJ); uint256 hash(ParseHashV(request.params[0], "txid")); - int n = request.params[1].getInt(); - COutPoint out(hash, n); + COutPoint out{hash, request.params[1].getInt()}; bool fMempool = true; if (!request.params[2].isNull()) fMempool = request.params[2].get_bool(); From cfae1e6025b86e3da265fd38d35d56d67ec6e83d Mon Sep 17 
00:00:00 2001 From: pasta Date: Mon, 6 Oct 2025 09:59:20 -0400 Subject: [PATCH 066/656] refactor: update CoinJoin change address handling to respect wallet's avoid_reuse setting Modified CoinJoin denomination creation to utilize the wallet's "avoid_reuse" setting. When enabled, change is sent to a fresh change address to prevent address reuse; otherwise, it defaults to the legacy behavior of returning change to the source address. Removed the now obsolete `-coinjoinfreshchange` option and updated relevant documentation. --- doc/release-notes-6870.md | 8 ++++---- src/coinjoin/options.cpp | 7 ------- src/coinjoin/options.h | 4 ---- src/coinjoin/util.cpp | 8 +++++--- src/wallet/init.cpp | 2 +- 5 files changed, 10 insertions(+), 19 deletions(-) diff --git a/doc/release-notes-6870.md b/doc/release-notes-6870.md index c8225e49fefb..dd2eede82adc 100644 --- a/doc/release-notes-6870.md +++ b/doc/release-notes-6870.md @@ -1,9 +1,9 @@ Wallet ------ -- Add `-coinjoinfreshchange` option to control change destination behavior - during CoinJoin denomination creation. By default (flag unset), change is - sent back to the source address (legacy behavior). When enabled, change is - sent to a fresh change address to avoid address/public key reuse. (#6870) +- CoinJoin denomination creation now respects the wallet's "avoid_reuse" + setting. When the wallet has `avoid_reuse` enabled, change is sent to a + fresh change address to avoid address/public key reuse. Otherwise, change + goes back to the source address (legacy behavior). 
(#6870) diff --git a/src/coinjoin/options.cpp b/src/coinjoin/options.cpp index 6b5e0acf33d4..887848e62935 100644 --- a/src/coinjoin/options.cpp +++ b/src/coinjoin/options.cpp @@ -61,18 +61,12 @@ void CCoinJoinClientOptions::SetDenomsHardCap(int denoms_hardcap) options.nCoinJoinDenomsHardCap = denoms_hardcap; } -void CCoinJoinClientOptions::SetFreshChange(bool fresh_change) -{ - CCoinJoinClientOptions& options = CCoinJoinClientOptions::Get(); - options.fCoinJoinFreshChange = fresh_change; -} void CCoinJoinClientOptions::Init() { assert(!CCoinJoinClientOptions::_instance); static CCoinJoinClientOptions instance; instance.fCoinJoinMultiSession = gArgs.GetBoolArg("-coinjoinmultisession", DEFAULT_COINJOIN_MULTISESSION); - instance.fCoinJoinFreshChange = gArgs.GetBoolArg("-coinjoinfreshchange", DEFAULT_COINJOIN_FRESHCHANGE); instance.nCoinJoinSessions = std::min(std::max((int)gArgs.GetIntArg("-coinjoinsessions", DEFAULT_COINJOIN_SESSIONS), MIN_COINJOIN_SESSIONS), MAX_COINJOIN_SESSIONS); instance.nCoinJoinRounds = std::min(std::max((int)gArgs.GetIntArg("-coinjoinrounds", DEFAULT_COINJOIN_ROUNDS), MIN_COINJOIN_ROUNDS), MAX_COINJOIN_ROUNDS); instance.nCoinJoinAmount = std::min(std::max((int)gArgs.GetIntArg("-coinjoinamount", DEFAULT_COINJOIN_AMOUNT), MIN_COINJOIN_AMOUNT), MAX_COINJOIN_AMOUNT); @@ -92,5 +86,4 @@ void CCoinJoinClientOptions::GetJsonInfo(UniValue& obj) obj.pushKV("max_amount", options.nCoinJoinAmount.load()); obj.pushKV("denoms_goal", options.nCoinJoinDenomsGoal.load()); obj.pushKV("denoms_hardcap", options.nCoinJoinDenomsHardCap.load()); - obj.pushKV("fresh_change", options.fCoinJoinFreshChange.load()); } diff --git a/src/coinjoin/options.h b/src/coinjoin/options.h index 809f192a0d50..b5e8c8a23f5a 100644 --- a/src/coinjoin/options.h +++ b/src/coinjoin/options.h @@ -31,7 +31,6 @@ static constexpr int DEFAULT_COINJOIN_DENOMS_HARDCAP = 300; static constexpr bool DEFAULT_COINJOIN_AUTOSTART = false; static constexpr bool DEFAULT_COINJOIN_MULTISESSION = false; 
-static constexpr bool DEFAULT_COINJOIN_FRESHCHANGE = false; // How many new denom outputs to create before we consider the "goal" loop in CreateDenominated // a final one and start creating an actual tx. Same limit applies for the "hard cap" part of the algo. @@ -61,7 +60,6 @@ class CCoinJoinClientOptions static int GetAmount() { return CCoinJoinClientOptions::Get().nCoinJoinAmount; } static int GetDenomsGoal() { return CCoinJoinClientOptions::Get().nCoinJoinDenomsGoal; } static int GetDenomsHardCap() { return CCoinJoinClientOptions::Get().nCoinJoinDenomsHardCap; } - static bool GetFreshChange() { return CCoinJoinClientOptions::Get().fCoinJoinFreshChange; } static void SetEnabled(bool fEnabled); static void SetMultiSessionEnabled(bool fEnabled); @@ -70,7 +68,6 @@ class CCoinJoinClientOptions static void SetAmount(CAmount amount); static void SetDenomsGoal(int denoms_goal); static void SetDenomsHardCap(int denoms_hardcap); - static void SetFreshChange(bool fresh_change); static bool IsEnabled() { return CCoinJoinClientOptions::Get().fEnableCoinJoin; } static bool IsMultiSessionEnabled() { return CCoinJoinClientOptions::Get().fCoinJoinMultiSession; } @@ -89,7 +86,6 @@ class CCoinJoinClientOptions std::atomic nCoinJoinDenomsHardCap{DEFAULT_COINJOIN_DENOMS_HARDCAP}; std::atomic fEnableCoinJoin{false}; std::atomic fCoinJoinMultiSession{DEFAULT_COINJOIN_MULTISESSION}; - std::atomic fCoinJoinFreshChange{DEFAULT_COINJOIN_FRESHCHANGE}; CCoinJoinClientOptions() = default; diff --git a/src/coinjoin/util.cpp b/src/coinjoin/util.cpp index 1436cf728104..808d3814b659 100644 --- a/src/coinjoin/util.cpp +++ b/src/coinjoin/util.cpp @@ -10,6 +10,7 @@ #include #include #include +#include #include @@ -126,10 +127,11 @@ CTransactionBuilder::CTransactionBuilder(CWallet& wallet, const CompactTallyItem coinControl.m_discard_feerate = ::GetDiscardRate(m_wallet); // Generate a feerate which will be used by calculations of this class and also by CWallet::CreateTransaction 
coinControl.m_feerate = std::max(GetRequiredFeeRate(m_wallet), m_wallet.m_pay_tx_fee); - // By default, keep legacy behavior: change goes back to the origin address. - // When -coinjoinfreshchange is enabled, let the wallet select a fresh + // If wallet does not have the avoid-reuse feature enabled, keep legacy + // behavior: force change to go back to the origin address. When + // WALLET_FLAG_AVOID_REUSE is enabled, let the wallet select a fresh // change destination to avoid address reuse. - if (!CCoinJoinClientOptions::GetFreshChange()) { + if (!m_wallet.IsWalletFlagSet(wallet::WALLET_FLAG_AVOID_REUSE)) { coinControl.destChange = tallyItemIn.txdest; } // Only allow tallyItems inputs for tx creation diff --git a/src/wallet/init.cpp b/src/wallet/init.cpp index 780828f1a5d6..f2ba25db56f0 100644 --- a/src/wallet/init.cpp +++ b/src/wallet/init.cpp @@ -105,7 +105,7 @@ void WalletInit::AddWalletOptions(ArgsManager& argsman) const argsman.AddArg("-coinjoinmultisession", strprintf("Enable multiple CoinJoin mixing sessions per block, experimental (0-1, default: %u)", DEFAULT_COINJOIN_MULTISESSION), ArgsManager::ALLOW_ANY, OptionsCategory::WALLET_COINJOIN); argsman.AddArg("-coinjoinrounds=", strprintf("Use N separate masternodes for each denominated input to mix funds (%u-%u, default: %u)", MIN_COINJOIN_ROUNDS, MAX_COINJOIN_ROUNDS, DEFAULT_COINJOIN_ROUNDS), ArgsManager::ALLOW_ANY, OptionsCategory::WALLET_COINJOIN); argsman.AddArg("-coinjoinsessions=", strprintf("Use N separate masternodes in parallel to mix funds (%u-%u, default: %u)", MIN_COINJOIN_SESSIONS, MAX_COINJOIN_SESSIONS, DEFAULT_COINJOIN_SESSIONS), ArgsManager::ALLOW_ANY, OptionsCategory::WALLET_COINJOIN); - argsman.AddArg("-coinjoinfreshchange", strprintf("Send change from denomination creation to a fresh change address instead of the source address (0-1, default: %u)", DEFAULT_COINJOIN_FRESHCHANGE), ArgsManager::ALLOW_ANY, OptionsCategory::WALLET_COINJOIN); + #ifdef USE_BDB argsman.AddArg("-dblogsize=", 
strprintf("Flush wallet database activity from memory to disk log every megabytes (default: %u)", DatabaseOptions().max_log_mb), ArgsManager::ALLOW_ANY | ArgsManager::DEBUG_ONLY, OptionsCategory::WALLET_DEBUG_TEST); From efc689ccda836eb6e4542f9972ebc09d7ca21ab1 Mon Sep 17 00:00:00 2001 From: PastaPastaPasta <6443210+PastaPastaPasta@users.noreply.github.com> Date: Mon, 6 Oct 2025 13:02:49 -0400 Subject: [PATCH 067/656] Apply suggestions from code review Co-authored-by: UdjinM6 --- src/coinjoin/options.cpp | 1 - src/coinjoin/util.cpp | 1 - src/wallet/init.cpp | 1 - 3 files changed, 3 deletions(-) diff --git a/src/coinjoin/options.cpp b/src/coinjoin/options.cpp index 887848e62935..f6e3e133d0e1 100644 --- a/src/coinjoin/options.cpp +++ b/src/coinjoin/options.cpp @@ -61,7 +61,6 @@ void CCoinJoinClientOptions::SetDenomsHardCap(int denoms_hardcap) options.nCoinJoinDenomsHardCap = denoms_hardcap; } - void CCoinJoinClientOptions::Init() { assert(!CCoinJoinClientOptions::_instance); diff --git a/src/coinjoin/util.cpp b/src/coinjoin/util.cpp index 808d3814b659..a494b8755d0d 100644 --- a/src/coinjoin/util.cpp +++ b/src/coinjoin/util.cpp @@ -2,7 +2,6 @@ // Distributed under the MIT/X11 software license, see the accompanying // file COPYING or http://www.opensource.org/licenses/mit-license.php. 
-#include #include #include #include diff --git a/src/wallet/init.cpp b/src/wallet/init.cpp index f2ba25db56f0..4aa657beb8d3 100644 --- a/src/wallet/init.cpp +++ b/src/wallet/init.cpp @@ -105,7 +105,6 @@ void WalletInit::AddWalletOptions(ArgsManager& argsman) const argsman.AddArg("-coinjoinmultisession", strprintf("Enable multiple CoinJoin mixing sessions per block, experimental (0-1, default: %u)", DEFAULT_COINJOIN_MULTISESSION), ArgsManager::ALLOW_ANY, OptionsCategory::WALLET_COINJOIN); argsman.AddArg("-coinjoinrounds=", strprintf("Use N separate masternodes for each denominated input to mix funds (%u-%u, default: %u)", MIN_COINJOIN_ROUNDS, MAX_COINJOIN_ROUNDS, DEFAULT_COINJOIN_ROUNDS), ArgsManager::ALLOW_ANY, OptionsCategory::WALLET_COINJOIN); argsman.AddArg("-coinjoinsessions=", strprintf("Use N separate masternodes in parallel to mix funds (%u-%u, default: %u)", MIN_COINJOIN_SESSIONS, MAX_COINJOIN_SESSIONS, DEFAULT_COINJOIN_SESSIONS), ArgsManager::ALLOW_ANY, OptionsCategory::WALLET_COINJOIN); - #ifdef USE_BDB argsman.AddArg("-dblogsize=", strprintf("Flush wallet database activity from memory to disk log every megabytes (default: %u)", DatabaseOptions().max_log_mb), ArgsManager::ALLOW_ANY | ArgsManager::DEBUG_ONLY, OptionsCategory::WALLET_DEBUG_TEST); From 5d07b9c73a3f3f32dee86f5e104b9bc25ddfce0f Mon Sep 17 00:00:00 2001 From: Konstantin Akimov Date: Sun, 5 Oct 2025 04:12:28 +0700 Subject: [PATCH 068/656] feat: rewrite lint-cppcheck-dash from bash to python --- test/lint/lint-cppcheck-dash.py | 147 ++++++++++++++++++++++++++++++++ test/lint/lint-cppcheck-dash.sh | 108 ----------------------- 2 files changed, 147 insertions(+), 108 deletions(-) create mode 100755 test/lint/lint-cppcheck-dash.py delete mode 100755 test/lint/lint-cppcheck-dash.sh diff --git a/test/lint/lint-cppcheck-dash.py b/test/lint/lint-cppcheck-dash.py new file mode 100755 index 000000000000..798d1fb48e14 --- /dev/null +++ b/test/lint/lint-cppcheck-dash.py @@ -0,0 +1,147 @@ +#!/usr/bin/env 
python3 +# +# Copyright (c) 2019 The Bitcoin Core developers +# Copyright (c) 2025 The Dash Core developers +# Distributed under the MIT software license, see the accompanying +# file COPYING or http://www.opensource.org/licenses/mit-license.php. +# +# Run cppcheck for dash specific files + +import multiprocessing +import os +import re +import subprocess +import sys + +os.environ['LC_ALL'] = 'C' + +ENABLED_CHECKS = ( + "Class '.*' has a constructor with 1 argument that is not explicit.", + "Struct '.*' has a constructor with 1 argument that is not explicit.", + "Function parameter '.*' should be passed by const reference.", + "Comparison of modulo result is predetermined", + "Local variable '.*' shadows outer argument", + "Redundant initialization for '.*'. The initialized value is overwritten before it is read.", + "Dereferencing '.*' after it is deallocated / released", + "The scope of the variable '.*' can be reduced.", + "Parameter '.*' can be declared with const", + "Variable '.*' can be declared with const", + "Variable '.*' is assigned a value that is never used.", + "Unused variable", + "The function '.*' overrides a function in a base class but is not marked with a 'override' specifier.", + # Enable to catch all warnings + # ".*", +) + +IGNORED_WARNINGS = ( + "src/bls/bls.h:.* Struct 'CBLSIdImplicit' has a constructor with 1 argument that is not explicit.", + "src/rpc/masternode.cpp:.*:21: warning: Consider using std::copy algorithm instead of a raw loop.", # UniValue doesn't support std::copy + "src/cachemultimap.h:.*: warning: Variable 'mapIt' can be declared as reference to const", + "src/evo/simplifiedmns.cpp:.*:20: warning: Consider using std::copy algorithm instead of a raw loop.", + "src/llmq/commitment.cpp.* warning: Consider using std::all_of or std::none_of algorithm instead of a raw loop. 
[useStlAlgorithm]", + "src/rpc/.*cpp:.*: note: Function pointer used here.", + "src/masternode/sync.cpp:.*: warning: Variable 'pnode' can be declared as pointer to const [constVariableReference]", + "src/wallet/bip39.cpp.*: warning: The scope of the variable 'ssCurrentWord' can be reduced. [variableScope]", + "src/.*:.*: warning: Local variable '_' shadows outer function [shadowFunction]", + + "src/stacktraces.cpp:.*: .*: Parameter 'info' can be declared as pointer to const", + "src/stacktraces.cpp:.*: note: You might need to cast the function pointer here", + + "[note|warning]: Return value 'state.Invalid(.*)' is always false", + "note: Calling function 'Invalid' returns 0", + "note: Shadow variable", + + # General catchall, for some reason any value named 'hash' is viewed as never used. + "Variable 'hash' is assigned a value that is never used.", + + # The following can be useful to ignore when the catch all is used + # "Consider performing initialization in initialization list.", + "Consider using std::transform algorithm instead of a raw loop.", + "Consider using std::accumulate algorithm instead of a raw loop.", + "Consider using std::any_of algorithm instead of a raw loop.", + "Consider using std::copy_if algorithm instead of a raw loop.", + # "Consider using std::count_if algorithm instead of a raw loop.", + # "Consider using std::find_if algorithm instead of a raw loop.", + # "Member variable '.*' is not initialized in the constructor.", + + "unusedFunction", + "unknownMacro", + "unusedStructMember", +) + +def main(): + warnings = [] + exit_code = 0 + + try: + subprocess.check_output(['cppcheck', '--version']) + except FileNotFoundError: + print("Skipping cppcheck linting since cppcheck is not installed.") + sys.exit(0) + + with open('test/util/data/non-backported.txt', 'r', encoding='utf-8') as f: + patterns = [line.strip() for line in f if line.strip()] + + files_output = subprocess.check_output(['git', 'ls-files', '--'] + patterns, 
universal_newlines=True, encoding="utf8") + files = [f.strip() for f in files_output.splitlines() if f.strip()] + + enabled_regexp = '|'.join(ENABLED_CHECKS) + ignored_regexp = '|'.join(IGNORED_WARNINGS) + files_regexp = '|'.join(re.escape(f) for f in files) + + script_dir = os.path.dirname(os.path.abspath(__file__)) + cache_dir = os.environ.get('CACHE_DIR') + if cache_dir: + cppcheck_dir = os.path.join(cache_dir, 'cppcheck') + else: + cppcheck_dir = os.path.join(script_dir, '.cppcheck') + os.makedirs(cppcheck_dir, exist_ok=True) + + cppcheck_cmd = [ + 'cppcheck', + '--enable=all', + '--inline-suppr', + '--suppress=missingIncludeSystem', + f'--cppcheck-build-dir={cppcheck_dir}', + '-j', str(multiprocessing.cpu_count()), + '--language=c++', + '--std=c++20', + '--template=gcc', + '-D__cplusplus', + '-DENABLE_WALLET', + '-DCLIENT_VERSION_BUILD', + '-DCLIENT_VERSION_IS_RELEASE', + '-DCLIENT_VERSION_MAJOR', + '-DCLIENT_VERSION_MINOR', + '-DCOPYRIGHT_YEAR', + '-DDEBUG', + '-DUSE_EPOLL', + '-DCHAR_BIT=8', + '-I', 'src/', + '-q', + ] + files + + dependencies_output = subprocess.run( + cppcheck_cmd, + stdout=subprocess.PIPE, + stderr=subprocess.STDOUT, + text=True, + ) + + unique_sorted_lines = sorted(set(dependencies_output.stdout.splitlines())) + for line in unique_sorted_lines: + if re.search(enabled_regexp, line) and not re.search(ignored_regexp, line) and re.search(files_regexp, line): + warnings.append(line) + + if warnings: + print('\n'.join(warnings)) + print() + print("Advice not applicable in this specific case? 
Add an exception by updating") + print(f"IGNORED_WARNINGS in {__file__}") + # Uncomment to enforce the linter / comment to run locally + exit_code = 1 + + sys.exit(exit_code) + +if __name__ == "__main__": + main() diff --git a/test/lint/lint-cppcheck-dash.sh b/test/lint/lint-cppcheck-dash.sh deleted file mode 100755 index c396142abff3..000000000000 --- a/test/lint/lint-cppcheck-dash.sh +++ /dev/null @@ -1,108 +0,0 @@ -#!/usr/bin/env bash -# -# Copyright (c) 2019 The Bitcoin Core developers -# Distributed under the MIT software license, see the accompanying -# file COPYING or http://www.opensource.org/licenses/mit-license.php. -# - -export LC_ALL=C - -ENABLED_CHECKS=( - "Class '.*' has a constructor with 1 argument that is not explicit." - "Struct '.*' has a constructor with 1 argument that is not explicit." - "Function parameter '.*' should be passed by const reference." - "Comparison of modulo result is predetermined" - "Local variable '.*' shadows outer argument" - "Redundant initialization for '.*'. The initialized value is overwritten before it is read." - "Dereferencing '.*' after it is deallocated / released" - "The scope of the variable '.*' can be reduced." - "Parameter '.*' can be declared with const" - "Variable '.*' can be declared with const" - "Variable '.*' is assigned a value that is never used." - "Unused variable" - "The function '.*' overrides a function in a base class but is not marked with a 'override' specifier." -# Enable to catch all warnings - ".*" -) - -IGNORED_WARNINGS=( - "src/bls/bls.h:.* Struct 'CBLSIdImplicit' has a constructor with 1 argument that is not explicit." - "src/rpc/masternode.cpp:.*:21: warning: Consider using std::copy algorithm instead of a raw loop." # UniValue doesn't support std::copy - "src/cachemultimap.h:.*: warning: Variable 'mapIt' can be declared as reference to const" - "src/evo/simplifiedmns.cpp:.*:20: warning: Consider using std::copy algorithm instead of a raw loop." 
- "src/llmq/commitment.cpp.* warning: Consider using std::all_of or std::none_of algorithm instead of a raw loop. \[useStlAlgorithm\]" - "src/rpc/.*cpp:.*: note: Function pointer used here." - "src/masternode/sync.cpp:.*: warning: Variable 'pnode' can be declared as pointer to const \[constVariableReference\]" - "src/wallet/bip39.cpp.*: warning: The scope of the variable 'ssCurrentWord' can be reduced. \[variableScope\]" - "src/.*:.*: warning: Local variable '_' shadows outer function \[shadowFunction\]" - - "src/stacktraces.cpp:.*: .*: Parameter 'info' can be declared as pointer to const" - "src/stacktraces.cpp:.*: note: You might need to cast the function pointer here" - - "[note|warning]: Return value 'state.Invalid(.*)' is always false" - "note: Calling function 'Invalid' returns 0" - "note: Shadow variable" - -# General catchall, for some reason any value named 'hash' is viewed as never used. - "Variable 'hash' is assigned a value that is never used." - -# The following can be useful to ignore when the catch all is used -# "Consider performing initialization in initialization list." - "Consider using std::transform algorithm instead of a raw loop." - "Consider using std::accumulate algorithm instead of a raw loop." - "Consider using std::any_of algorithm instead of a raw loop." - "Consider using std::copy_if algorithm instead of a raw loop." -# "Consider using std::count_if algorithm instead of a raw loop." -# "Consider using std::find_if algorithm instead of a raw loop." -# "Member variable '.*' is not initialized in the constructor." - - "unusedFunction" - "unknownMacro" - "unusedStructMember" -) - -# We should attempt to update this with all dash specific code -# shellcheck disable=SC2046 -FILES=$(git ls-files -- $(cat test/util/data/non-backported.txt)) - - -if ! command -v cppcheck > /dev/null; then - echo "Skipping cppcheck linting since cppcheck is not installed." 
- exit 0 -fi - -function join_array { - local IFS="$1" - shift - echo "$*" -} - -ENABLED_CHECKS_REGEXP=$(join_array "|" "${ENABLED_CHECKS[@]}") -IGNORED_WARNINGS_REGEXP=$(join_array "|" "${IGNORED_WARNINGS[@]}") -FILES_REGEXP=$(join_array "|" "${FILES[@]}") -SCRIPT_DIR=$( cd -- "$( dirname -- "${BASH_SOURCE[0]}" )" &> /dev/null && pwd ) -# Check if CACHE_DIR is set and non-empty, otherwise use default .cppcheck/ directory -if [[ -n "$CACHE_DIR" ]]; then - CPPCHECK_DIR=$CACHE_DIR/cppcheck/ -else - CPPCHECK_DIR=$SCRIPT_DIR/.cppcheck/ -fi -if [ ! -d "$CPPCHECK_DIR" ] -then - mkdir -p "$CPPCHECK_DIR" -fi -WARNINGS=$(echo "${FILES}" | \ - xargs cppcheck --enable=all --inline-suppr --suppress=missingIncludeSystem --cppcheck-build-dir="$CPPCHECK_DIR" -j "$(getconf _NPROCESSORS_ONLN)" --language=c++ --std=c++20 --template=gcc -D__cplusplus -DENABLE_WALLET -DCLIENT_VERSION_BUILD -DCLIENT_VERSION_IS_RELEASE -DCLIENT_VERSION_MAJOR -DCLIENT_VERSION_MINOR -DCOPYRIGHT_YEAR -DDEBUG -DUSE_EPOLL -DCHAR_BIT=8 -I src/ -q 2>&1 | sort -u | \ - grep -E "${ENABLED_CHECKS_REGEXP}" | \ - grep -vE "${IGNORED_WARNINGS_REGEXP}" | \ - grep -E "${FILES_REGEXP}") - -if [[ ${WARNINGS} != "" ]]; then - echo "${WARNINGS}" - echo - echo "Advice not applicable in this specific case? 
Add an exception by updating" - echo "IGNORED_WARNINGS in $0" - # Uncomment to enforce the linter / comment to run locally - exit 1 -fi -exit 0 From 48a02e2d29e0fdfcd550dae777cb471b902e162e Mon Sep 17 00:00:00 2001 From: Konstantin Akimov Date: Mon, 6 Oct 2025 14:30:38 +0700 Subject: [PATCH 069/656] refactor: add missing explicit for NetInfoEntry --- src/evo/netinfo.h | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/evo/netinfo.h b/src/evo/netinfo.h index 3ea51992b79e..28d92364481b 100644 --- a/src/evo/netinfo.h +++ b/src/evo/netinfo.h @@ -195,13 +195,13 @@ class NetInfoEntry public: NetInfoEntry() = default; - NetInfoEntry(const DomainPort& domain) + explicit NetInfoEntry(const DomainPort& domain) { if (!domain.IsValid()) return; m_type = NetInfoType::Domain; m_data = domain; } - NetInfoEntry(const CService& service) + explicit NetInfoEntry(const CService& service) { if (!service.IsValid()) return; m_type = NetInfoType::Service; From f95ba2091bc7822c6e4cd0f4014ddac739122a5e Mon Sep 17 00:00:00 2001 From: Konstantin Akimov Date: Mon, 6 Oct 2025 14:31:05 +0700 Subject: [PATCH 070/656] refactor: reduce scope for `ki` variable, replace assigning to clear() for secure string --- src/wallet/bip39.cpp | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/src/wallet/bip39.cpp b/src/wallet/bip39.cpp index 40d7fd0b70d9..39b4f244975c 100644 --- a/src/wallet/bip39.cpp +++ b/src/wallet/bip39.cpp @@ -96,12 +96,11 @@ bool CMnemonic::Check(const SecureString& mnemonic) SecureString ssCurrentWord; SecureVector bits(32 + 1); - - uint32_t ki, nBitsCount{}; + uint32_t nBitsCount{}; for (size_t i = 0; i < mnemonic.size(); ++i) { - ssCurrentWord = ""; + ssCurrentWord.resize(0); // we resize ssCurrentWord instead recreating to avoid new allocations while (i + ssCurrentWord.size() < mnemonic.size() && mnemonic[i + ssCurrentWord.size()] != ' ') { if (ssCurrentWord.size() >= 9) { return false; @@ -115,7 +114,7 @@ bool CMnemonic::Check(const 
SecureString& mnemonic) return false; } if (ssCurrentWord == wordlist[nWordIndex]) { // word found on index nWordIndex - for (ki = 0; ki < 11; ki++) { + for (uint32_t ki = 0; ki < 11; ki++) { if (nWordIndex & (1 << (10 - ki))) { bits[nBitsCount / 8] |= 1 << (7 - (nBitsCount % 8)); } From 8f70685abc671f97cc4bc61dee88b165e56e8d44 Mon Sep 17 00:00:00 2001 From: MacroFake Date: Tue, 7 Jun 2022 10:37:29 +0200 Subject: [PATCH 071/656] Merge bitcoin/bitcoin#25288: test: Reliably don't start itself (lint-all.py runs all tests twice) BACKPORT NOTE: the only left-over change that had not been done previously is replacing the mask "lint-*" with "lint-*.py", which is done here f26a496dfd0a7ce3833a10075027d7d5b0345e32 test: clean up all-lint.py (Martin Leitner-Ankerl) 64d72c4c8734b9dd45cb61cb2c2baf98766b0163 test: rename lint-all.py to all-lint.py (Martin Leitner-Ankerl) Pull request description: When running `./test/lint/lint-all.py`, the script runs all tests but also calls itself because the comparison with `__file__` doesn't work. 
Comparing resolved paths gives reliable comparison, and lint-all.py doesn't call itself any more ACKs for top commit: laanwj: Code review ACK f26a496dfd0a7ce3833a10075027d7d5b0345e32 Tree-SHA512: b44abdd685f7b48a6a9f48e96d97138b635c31c1c7ab543cb5636b5f49690ccd56fa6fec01ae7fcc16af01a613372ee77632f70c32059919b373aa8051953791 --- test/lint/all-lint.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/test/lint/all-lint.py b/test/lint/all-lint.py index 40274fcc41e4..e8d7299f2a1d 100755 --- a/test/lint/all-lint.py +++ b/test/lint/all-lint.py @@ -15,7 +15,7 @@ exit_code = 0 mod_path = Path(__file__).parent -lints = glob(f"{mod_path}/lint-*") +lints = glob(f"{mod_path}/lint-*.py") if which("parallel") and which("column"): logfile = "parallel_out.log" command = ["parallel", "--jobs", "100%", "--will-cite", "--joblog", logfile, ":::"] + lints From a9218b1de78544b3f2d6dcb92a05b074ec91b1f7 Mon Sep 17 00:00:00 2001 From: Kittywhiskers Van Gogh <63189531+kwvg@users.noreply.github.com> Date: Mon, 6 Oct 2025 21:00:58 +0000 Subject: [PATCH 072/656] rpc: introduce `GetJsonHelp()` to provide help text for Dash RPC objects Currently, the most reliable source of RPC help text is the published documentation. This is fine for releases as the whole set of changes undergo evaluation but a problem for nightlies or documenting behavior changes or deprecations in-source. --- src/evo/core_write.cpp | 113 ++++++++++++++++++++++++++++++++++++- src/evo/providertx.h | 9 ++- src/rpc/rawtransaction.cpp | 14 +++-- 3 files changed, 126 insertions(+), 10 deletions(-) diff --git a/src/evo/core_write.cpp b/src/evo/core_write.cpp index 574910214294..c8a85636a213 100644 --- a/src/evo/core_write.cpp +++ b/src/evo/core_write.cpp @@ -2,9 +2,6 @@ // Distributed under the MIT software license, see the accompanying // file COPYING or http://www.opensource.org/licenses/mit-license.php. 
-#include -#include - #include #include #include @@ -15,8 +12,55 @@ #include #include +#include +#include +#include +#include + #include +#include +#include + +namespace { +RPCResult GetRpcResult(const std::string& key, bool optional = false) +{ +#define RESULT_MAP_ENTRY(type, name, desc) {name, {type, name, optional, desc}} + const std::map result_map{{ + {"addresses", + {RPCResult::Type::OBJ, "addresses", optional, "Network addresses of the masternode", + { + {RPCResult::Type::ARR, "core_p2p", /*optional=*/true, "Addresses used for protocol P2P", + {{RPCResult::Type::STR, "address", ""}}}, + {RPCResult::Type::ARR, "platform_p2p", /*optional=*/true, "Addresses used for Platform P2P", + {{RPCResult::Type::STR, "address", ""}}}, + {RPCResult::Type::ARR, "platform_https", /*optional=*/true, "Addresses used for Platform HTTPS API", + {{RPCResult::Type::STR, "address", ""}}}, + }}}, + RESULT_MAP_ENTRY(RPCResult::Type::STR_HEX, "inputsHash", "Hash of all the outpoints of the transaction inputs"), + RESULT_MAP_ENTRY(RPCResult::Type::STR, "operatorPayoutAddress", "Dash address used for operator reward payments"), + RESULT_MAP_ENTRY(RPCResult::Type::STR, "payoutAddress", "Dash address used for masternode reward payments"), + RESULT_MAP_ENTRY(RPCResult::Type::NUM, "platformHTTPPort", "(DEPRECATED) TCP port of Platform HTTP API"), + RESULT_MAP_ENTRY(RPCResult::Type::STR_HEX, "platformNodeID", "Node ID derived from P2P public key for Platform P2P"), + RESULT_MAP_ENTRY(RPCResult::Type::NUM, "platformP2PPort", "(DEPRECATED) TCP port of Platform P2P"), + RESULT_MAP_ENTRY(RPCResult::Type::STR_HEX, "proTxHash", "Hash of the masternode's initial ProRegTx"), + RESULT_MAP_ENTRY(RPCResult::Type::STR, "pubKeyOperator", "BLS public key used for operator signing"), + RESULT_MAP_ENTRY(RPCResult::Type::STR, "service", "(DEPRECATED) IP address and port of the masternode"), + RESULT_MAP_ENTRY(RPCResult::Type::NUM, "type", "Masternode type"), + RESULT_MAP_ENTRY(RPCResult::Type::NUM, 
"version", "Special transaction version"), + RESULT_MAP_ENTRY(RPCResult::Type::STR, "votingAddress", "Dash address used for voting"), + }}; +#undef RESULT_MAP_ENTRY + + if (const auto it = result_map.find(key); it != result_map.end()) { + return it->second; + } + + throw NonFatalCheckError(strprintf("Requested invalid RPCResult for nonexistent key \"%s\"", key).c_str(), + __FILE__, __LINE__, __func__); +} +} // anonymous namespace + [[nodiscard]] UniValue CAssetLockPayload::ToJson() const { UniValue outputs(UniValue::VARR); @@ -65,6 +109,28 @@ return ret; } +[[nodiscard]] RPCResult CProRegTx::GetJsonHelp(const std::string& key, bool optional) +{ + return {RPCResult::Type::OBJ, key, optional, key.empty() ? "" : "The masternode registration special transaction", + { + GetRpcResult("version"), + GetRpcResult("type"), + {RPCResult::Type::STR_HEX, "collateralHash", "Collateral transaction hash"}, + {RPCResult::Type::NUM, "collateralIndex", "Collateral transaction output index"}, + GetRpcResult("service"), + GetRpcResult("addresses"), + {RPCResult::Type::STR, "ownerAddress", "Dash address used for payee updates and proposal voting"}, + GetRpcResult("votingAddress"), + GetRpcResult("payoutAddress", /*optional=*/true), + GetRpcResult("pubKeyOperator"), + {RPCResult::Type::NUM, "operatorReward", "Fraction in %% of reward shared with the operator between 0 and 10000"}, + GetRpcResult("platformNodeID", /*optional=*/true), + GetRpcResult("platformP2PPort", /*optional=*/true), + GetRpcResult("platformHTTPPort", /*optional=*/true), + GetRpcResult("inputsHash"), + }}; +} + [[nodiscard]] UniValue CProRegTx::ToJson() const { UniValue ret(UniValue::VOBJ); @@ -90,6 +156,19 @@ return ret; } +[[nodiscard]] RPCResult CProUpRegTx::GetJsonHelp(const std::string& key, bool optional) +{ + return {RPCResult::Type::OBJ, key, optional, key.empty() ? 
"" : "The masternode update registrar special transaction", + { + GetRpcResult("version"), + GetRpcResult("proTxHash"), + GetRpcResult("votingAddress"), + GetRpcResult("payoutAddress", /*optional=*/true), + GetRpcResult("pubKeyOperator"), + GetRpcResult("inputsHash"), + }}; +} + [[nodiscard]] UniValue CProUpRegTx::ToJson() const { UniValue ret(UniValue::VOBJ); @@ -104,6 +183,17 @@ return ret; } +[[nodiscard]] RPCResult CProUpRevTx::GetJsonHelp(const std::string& key, bool optional) +{ + return {RPCResult::Type::OBJ, key, optional, key.empty() ? "" : "The masternode operator revocation special transaction", + { + GetRpcResult("version"), + GetRpcResult("proTxHash"), + {RPCResult::Type::NUM, "reason", "Reason for masternode service revocation"}, + GetRpcResult("inputsHash", /*optional=*/true), + }}; +} + [[nodiscard]] UniValue CProUpRevTx::ToJson() const { UniValue ret(UniValue::VOBJ); @@ -114,6 +204,23 @@ return ret; } +[[nodiscard]] RPCResult CProUpServTx::GetJsonHelp(const std::string& key, bool optional) +{ + return {RPCResult::Type::OBJ, key, optional, key.empty() ? 
"" : "The masternode update service special transaction", + { + GetRpcResult("version"), + GetRpcResult("type"), + GetRpcResult("proTxHash"), + GetRpcResult("service"), + GetRpcResult("addresses"), + GetRpcResult("operatorPayoutAddress", /*optional=*/true), + GetRpcResult("platformNodeID", /*optional=*/true), + GetRpcResult("platformP2PPort", /*optional=*/true), + GetRpcResult("platformHTTPPort", /*optional=*/true), + GetRpcResult("inputsHash"), + }}; +} + [[nodiscard]] UniValue CProUpServTx::ToJson() const { UniValue ret(UniValue::VOBJ); diff --git a/src/evo/providertx.h b/src/evo/providertx.h index f2173b20e086..b4a95dd820a6 100644 --- a/src/evo/providertx.h +++ b/src/evo/providertx.h @@ -6,22 +6,23 @@ #define BITCOIN_EVO_PROVIDERTX_H #include +#include #include #include #include #include -#include #include #include #include -#include #include +#include #include class CBlockIndex; class TxValidationState; +struct RPCResult; namespace ProTxVersion { enum : uint16_t { @@ -120,6 +121,7 @@ class CProRegTx std::string ToString() const; + [[nodiscard]] static RPCResult GetJsonHelp(const std::string& key, bool optional); [[nodiscard]] UniValue ToJson() const; bool IsTriviallyValid(gsl::not_null pindexPrev, TxValidationState& state) const; @@ -180,6 +182,7 @@ class CProUpServTx std::string ToString() const; + [[nodiscard]] static RPCResult GetJsonHelp(const std::string& key, bool optional); [[nodiscard]] UniValue ToJson() const; bool IsTriviallyValid(gsl::not_null pindexPrev, TxValidationState& state) const; @@ -226,6 +229,7 @@ class CProUpRegTx std::string ToString() const; + [[nodiscard]] static RPCResult GetJsonHelp(const std::string& key, bool optional); [[nodiscard]] UniValue ToJson() const; bool IsTriviallyValid(gsl::not_null pindexPrev, TxValidationState& state) const; @@ -275,6 +279,7 @@ class CProUpRevTx std::string ToString() const; + [[nodiscard]] static RPCResult GetJsonHelp(const std::string& key, bool optional); [[nodiscard]] UniValue ToJson() const; bool 
IsTriviallyValid(gsl::not_null pindexPrev, TxValidationState& state) const; diff --git a/src/rpc/rawtransaction.cpp b/src/rpc/rawtransaction.cpp index 48869e1f27df..d0e61b5e8029 100644 --- a/src/rpc/rawtransaction.cpp +++ b/src/rpc/rawtransaction.cpp @@ -12,7 +12,6 @@ #include #include #include -#include #include #include #include @@ -27,7 +26,6 @@ #include #include #include -#include #include #include #include @@ -46,17 +44,19 @@ #include #include #include -#include #include #include +#include #include -#include #include +#include #include +#include +#include +#include #include -#include #include @@ -168,6 +168,10 @@ static std::vector DecodeTxDoc(const std::string& txid_field_doc) }}, {RPCResult::Type::NUM, "extraPayloadSize", /*optional=*/true, "Size of DIP2 extra payload. Only present if it's a special TX"}, {RPCResult::Type::STR_HEX, "extraPayload", /*optional=*/true, "Hex-encoded DIP2 extra payload data. Only present if it's a special TX"}, + CProRegTx::GetJsonHelp(/*key=*/"proRegTx", /*optional=*/true), + CProUpServTx::GetJsonHelp(/*key=*/"proUpServTx", /*optional=*/true), + CProUpRegTx::GetJsonHelp(/*key=*/"proUpRegTx", /*optional=*/true), + CProUpRevTx::GetJsonHelp(/*key=*/"proUpRevTx", /*optional=*/true), }; } From 9de8c9cc8f95da96d532f01fd6498b4e64423055 Mon Sep 17 00:00:00 2001 From: Kittywhiskers Van Gogh <63189531+kwvg@users.noreply.github.com> Date: Sat, 4 Oct 2025 13:41:31 +0000 Subject: [PATCH 073/656] trivial: add missing headers Header resorting in the previous commit revealed some headers that were transiently included were no longer available, this should resolve that. 
--- src/rpc/rawtransaction.cpp | 2 ++ 1 file changed, 2 insertions(+) diff --git a/src/rpc/rawtransaction.cpp b/src/rpc/rawtransaction.cpp index d0e61b5e8029..25b96b60200c 100644 --- a/src/rpc/rawtransaction.cpp +++ b/src/rpc/rawtransaction.cpp @@ -46,7 +46,9 @@ #include #include +#include #include +#include #include #include #include From 7219a293417f4d2738ef646811450d71a19fbb1e Mon Sep 17 00:00:00 2001 From: Kittywhiskers Van Gogh <63189531+kwvg@users.noreply.github.com> Date: Mon, 6 Oct 2025 20:35:19 +0000 Subject: [PATCH 074/656] rpc: `GetJsonHelp()` defs for `CbTx`, `SimplifiedMNList`, final commitm. --- src/evo/cbtx.h | 5 +++ src/evo/core_write.cpp | 76 ++++++++++++++++++++++++++++++++++++++ src/evo/simplifiedmns.h | 9 ++++- src/evo/smldiff.h | 6 ++- src/llmq/commitment.h | 15 ++++---- src/rpc/blockchain.cpp | 8 +--- src/rpc/evo.cpp | 2 +- src/rpc/rawtransaction.cpp | 3 ++ 8 files changed, 105 insertions(+), 19 deletions(-) diff --git a/src/evo/cbtx.h b/src/evo/cbtx.h index 482d880dd522..da6d92f802a8 100644 --- a/src/evo/cbtx.h +++ b/src/evo/cbtx.h @@ -6,16 +6,20 @@ #define BITCOIN_EVO_CBTX_H #include + #include + #include #include +#include class BlockValidationState; class CBlock; class CBlockIndex; class CDeterministicMNList; class TxValidationState; +struct RPCResult; namespace llmq { class CQuorumBlockProcessor; @@ -57,6 +61,7 @@ class CCbTx } + [[nodiscard]] static RPCResult GetJsonHelp(const std::string& key, bool optional); std::string ToString() const; [[nodiscard]] UniValue ToJson() const; diff --git a/src/evo/core_write.cpp b/src/evo/core_write.cpp index c8a85636a213..4d2d72af45a0 100644 --- a/src/evo/core_write.cpp +++ b/src/evo/core_write.cpp @@ -37,7 +37,10 @@ RPCResult GetRpcResult(const std::string& key, bool optional = false) {RPCResult::Type::ARR, "platform_https", /*optional=*/true, "Addresses used for Platform HTTPS API", {{RPCResult::Type::STR, "address", ""}}}, }}}, + RESULT_MAP_ENTRY(RPCResult::Type::NUM, "height", "Block height"), 
RESULT_MAP_ENTRY(RPCResult::Type::STR_HEX, "inputsHash", "Hash of all the outpoints of the transaction inputs"), + RESULT_MAP_ENTRY(RPCResult::Type::STR_HEX, "merkleRootMNList", "Merkle root of the masternode list"), + RESULT_MAP_ENTRY(RPCResult::Type::STR_HEX, "merkleRootQuorums", "Merkle root of the quorum list"), RESULT_MAP_ENTRY(RPCResult::Type::STR, "operatorPayoutAddress", "Dash address used for operator reward payments"), RESULT_MAP_ENTRY(RPCResult::Type::STR, "payoutAddress", "Dash address used for masternode reward payments"), RESULT_MAP_ENTRY(RPCResult::Type::NUM, "platformHTTPPort", "(DEPRECATED) TCP port of Platform HTTP API"), @@ -92,6 +95,20 @@ RPCResult GetRpcResult(const std::string& key, bool optional = false) return ret; } +[[nodiscard]] RPCResult CCbTx::GetJsonHelp(const std::string& key, bool optional) +{ + return {RPCResult::Type::OBJ, key, optional, key.empty() ? "" : "The coinbase special transaction", + { + GetRpcResult("version"), + GetRpcResult("height"), + GetRpcResult("merkleRootMNList"), + GetRpcResult("merkleRootQuorums", /*optional=*/true), + {RPCResult::Type::NUM, "bestCLHeightDiff", /*optional=*/true, "Blocks between the current block and the last known block with a ChainLock"}, + {RPCResult::Type::STR_HEX, "bestCLSignature", /*optional=*/true, "Best ChainLock signature known by the miner"}, + {RPCResult::Type::NUM, "creditPoolBalance", /*optional=*/true, "Balance in the Platform credit pool"}, + }}; +} + [[nodiscard]] UniValue CCbTx::ToJson() const { UniValue ret(UniValue::VOBJ); @@ -249,6 +266,16 @@ RPCResult GetRpcResult(const std::string& key, bool optional = false) return ret; } +[[nodiscard]] RPCResult llmq::CFinalCommitmentTxPayload::GetJsonHelp(const std::string& key, bool optional) +{ + return {RPCResult::Type::OBJ, key, optional, key.empty() ? 
"" : "The quorum commitment special transaction", + { + GetRpcResult("version"), + GetRpcResult("height"), + // TODO: Add RPCResult for llmq::CFinalCommitment + }}; +} + [[nodiscard]] UniValue llmq::CFinalCommitmentTxPayload::ToJson() const { UniValue ret(UniValue::VOBJ); @@ -258,6 +285,26 @@ RPCResult GetRpcResult(const std::string& key, bool optional = false) return ret; } +[[nodiscard]] RPCResult CSimplifiedMNListEntry::GetJsonHelp(const std::string& key, bool optional) +{ + return {RPCResult::Type::OBJ, key, optional, key.empty() ? "" : "The simplified masternode list entry", + { + {RPCResult::Type::NUM, "nVersion", "Version of the entry"}, + {RPCResult::Type::NUM, "nType", "Masternode type"}, + {RPCResult::Type::STR_HEX, "proRegTxHash", "Hash of the ProRegTx identifying the masternode"}, + {RPCResult::Type::STR_HEX, "confirmedHash", "Hash of the block where the masternode was confirmed"}, + GetRpcResult("service"), + GetRpcResult("addresses"), + GetRpcResult("pubKeyOperator"), + GetRpcResult("votingAddress"), + {RPCResult::Type::BOOL, "isValid", "Returns true if the masternode is not Proof-of-Service banned"}, + GetRpcResult("platformHTTPPort", /*optional=*/true), + GetRpcResult("platformNodeID", /*optional=*/true), + GetRpcResult("payoutAddress", /*optional=*/true), + GetRpcResult("operatorPayoutAddress", /*optional=*/true), + }}; +} + [[nodiscard]] UniValue CSimplifiedMNListEntry::ToJson(bool extended) const { UniValue obj(UniValue::VOBJ); @@ -287,6 +334,35 @@ RPCResult GetRpcResult(const std::string& key, bool optional = false) return obj; } +[[nodiscard]] RPCResult CSimplifiedMNListDiff::GetJsonHelp(const std::string& key, bool optional) +{ + return {RPCResult::Type::OBJ, key, optional, key.empty() ? 
"" : "The simplified masternode list diff", + { + {RPCResult::Type::NUM, "nVersion", "Version of the diff"}, + {RPCResult::Type::STR_HEX, "baseBlockHash", "Hash of the base block"}, + {RPCResult::Type::STR_HEX, "blockHash", "Hash of the ending block"}, + {RPCResult::Type::STR_HEX, "cbTxMerkleTree", "Coinbase transaction merkle tree"}, + {RPCResult::Type::STR_HEX, "cbTx", "Coinbase raw transaction"}, + {RPCResult::Type::ARR, "deletedMNs", "ProRegTx hashes of deleted masternodes", + {{RPCResult::Type::STR_HEX, "hash", ""}}}, + {RPCResult::Type::ARR, "mnList", "Masternode list details", + {CSimplifiedMNListEntry::GetJsonHelp(/*key=*/"", /*optional=*/false)}}, + {RPCResult::Type::ARR, "deletedQuorums", "Deleted quorums", + {{RPCResult::Type::OBJ, "", "", { + {RPCResult::Type::NUM, "llmqType", "Quorum type"}, + {RPCResult::Type::STR_HEX, "quorumHash", "Hash of the quorum"}, + }}}}, + {RPCResult::Type::ARR, "newQuorums", "New quorums"}, // TODO: Add definition for llmq::CFinalCommitment + GetRpcResult("merkleRootMNList", /*optional=*/true), + GetRpcResult("merkleRootQuorums", /*optional=*/true), + {RPCResult::Type::ARR, "quorumsCLSigs", "ChainLock signature details", { + {RPCResult::Type::OBJ, "", "", { + {RPCResult::Type::ARR, "", "Array of quorum indices, keyed by BLS signature", { + {RPCResult::Type::NUM, "", "Quorum index"} + }}}}}}, + }}; +} + [[nodiscard]] UniValue CSimplifiedMNListDiff::ToJson(bool extended) const { UniValue obj(UniValue::VOBJ); diff --git a/src/evo/simplifiedmns.h b/src/evo/simplifiedmns.h index 9153d0b025e1..cbc1e9434fe4 100644 --- a/src/evo/simplifiedmns.h +++ b/src/evo/simplifiedmns.h @@ -9,15 +9,19 @@ #include #include #include -#include +#include + #include #include #include -#include + +#include #include #include +struct RPCResult; + class UniValue; class CSimplifiedMNListEntry @@ -95,6 +99,7 @@ class CSimplifiedMNListEntry uint256 CalcHash() const; + [[nodiscard]] static RPCResult GetJsonHelp(const std::string& key, bool optional); 
std::string ToString() const; [[nodiscard]] UniValue ToJson(bool extended = false) const; }; diff --git a/src/evo/smldiff.h b/src/evo/smldiff.h index 5296ddf4941d..0154403d840b 100644 --- a/src/evo/smldiff.h +++ b/src/evo/smldiff.h @@ -5,12 +5,12 @@ #ifndef BITCOIN_EVO_SMLDIFF_H #define BITCOIN_EVO_SMLDIFF_H -#include - #include #include #include #include +#include + #include #include #include @@ -21,6 +21,7 @@ class CBlockIndex; class CDeterministicMNManager; class UniValue; class ChainstateManager; +struct RPCResult; namespace llmq { class CFinalCommitment; @@ -85,6 +86,7 @@ class CSimplifiedMNListDiff const llmq::CQuorumBlockProcessor& quorum_block_processor); bool BuildQuorumChainlockInfo(const llmq::CQuorumManager& qman, const CBlockIndex* blockIndex); + [[nodiscard]] static RPCResult GetJsonHelp(const std::string& key, bool optional); [[nodiscard]] UniValue ToJson(bool extended = false) const; }; diff --git a/src/llmq/commitment.h b/src/llmq/commitment.h index 46d663d55105..69b9507a5e81 100644 --- a/src/llmq/commitment.h +++ b/src/llmq/commitment.h @@ -5,15 +5,15 @@ #ifndef BITCOIN_LLMQ_COMMITMENT_H #define BITCOIN_LLMQ_COMMITMENT_H -#include -#include -#include - #include #include #include #include +#include +#include +#include + #include #include @@ -29,14 +29,14 @@ class ChainstateManager; class TxValidationState; template class CCheckQueueControl; +struct RPCResult; -namespace llmq -{ +namespace llmq { class CQuorumSnapshotManager; - namespace utils { struct BlsCheck; } // namespace utils + // This message is an aggregation of all received premature commitments and only valid if // enough (>=threshold) premature commitments were aggregated // This is mined on-chain as part of TRANSACTION_QUORUM_COMMITMENT @@ -175,6 +175,7 @@ class CFinalCommitmentTxPayload READWRITE(obj.nVersion, obj.nHeight, obj.commitment); } + [[nodiscard]] static RPCResult GetJsonHelp(const std::string& key, bool optional); [[nodiscard]] UniValue ToJson() const; }; diff --git 
a/src/rpc/blockchain.cpp b/src/rpc/blockchain.cpp index 89b4ae290df7..4ca4f63ef1a2 100644 --- a/src/rpc/blockchain.cpp +++ b/src/rpc/blockchain.cpp @@ -922,13 +922,7 @@ static RPCHelpMan getblock() {RPCResult::Type::NUM, "size", "The block size"}, {RPCResult::Type::ARR, "tx", "The transaction ids", {{RPCResult::Type::STR_HEX, "", "The transaction id"}}}, - {RPCResult::Type::OBJ, "cbTx", "The coinbase special transaction", - { - {RPCResult::Type::NUM, "version", "The coinbase special transaction version"}, - {RPCResult::Type::NUM, "height", "The block height"}, - {RPCResult::Type::STR_HEX, "merkleRootMNList", "The merkle root of the masternode list"}, - {RPCResult::Type::STR_HEX, "merkleRootQuorums", "The merkle root of the quorum list"}, - }}, + CCbTx::GetJsonHelp(/*key=*/"cbTx", /*optional=*/true), }}, RPCResult{"for verbosity = 2", RPCResult::Type::OBJ, "", "", diff --git a/src/rpc/evo.cpp b/src/rpc/evo.cpp index cc7052b453c3..9907f19153f8 100644 --- a/src/rpc/evo.cpp +++ b/src/rpc/evo.cpp @@ -1586,7 +1586,7 @@ static RPCHelpMan protx_diff() {"block", RPCArg::Type::NUM, RPCArg::Optional::NO, "The ending block height."}, {"extended", RPCArg::Type::BOOL, RPCArg::Optional::OMITTED, "Show additional fields."}, }, - RPCResults{}, + CSimplifiedMNListDiff::GetJsonHelp(/*key=*/"", /*optional=*/false), RPCExamples{""}, [&](const RPCHelpMan& self, const JSONRPCRequest& request) -> UniValue { diff --git a/src/rpc/rawtransaction.cpp b/src/rpc/rawtransaction.cpp index 25b96b60200c..81d031525939 100644 --- a/src/rpc/rawtransaction.cpp +++ b/src/rpc/rawtransaction.cpp @@ -53,6 +53,7 @@ #include #include #include +#include #include #include #include @@ -174,6 +175,8 @@ static std::vector DecodeTxDoc(const std::string& txid_field_doc) CProUpServTx::GetJsonHelp(/*key=*/"proUpServTx", /*optional=*/true), CProUpRegTx::GetJsonHelp(/*key=*/"proUpRegTx", /*optional=*/true), CProUpRevTx::GetJsonHelp(/*key=*/"proUpRevTx", /*optional=*/true), + CCbTx::GetJsonHelp(/*key=*/"cbTx", 
/*optional=*/true), + llmq::CFinalCommitmentTxPayload::GetJsonHelp(/*key=*/"qcTx", /*optional=*/true), }; } From 73da03e241f6412eed6dab575d70dd27e7bb3643 Mon Sep 17 00:00:00 2001 From: Kittywhiskers Van Gogh <63189531+kwvg@users.noreply.github.com> Date: Sat, 4 Oct 2025 16:12:33 +0000 Subject: [PATCH 075/656] refactor: move LLMQ-specific `ToJson()` definitions to separate file Keeping true to separation of concerns --- src/Makefile.am | 1 + src/core_io.h | 14 +++-- src/evo/core_write.cpp | 23 +------- src/llmq/commitment.h | 18 +----- src/llmq/core_write.cpp | 127 ++++++++++++++++++++++++++++++++++++++++ src/llmq/signing.cpp | 13 ---- src/llmq/signing.h | 2 +- src/llmq/snapshot.cpp | 61 ------------------- 8 files changed, 140 insertions(+), 119 deletions(-) create mode 100644 src/llmq/core_write.cpp diff --git a/src/Makefile.am b/src/Makefile.am index d6386416c7ef..9ad66b87a276 100644 --- a/src/Makefile.am +++ b/src/Makefile.am @@ -906,6 +906,7 @@ libbitcoin_common_a_SOURCES = \ init/common.cpp \ key.cpp \ key_io.cpp \ + llmq/core_write.cpp \ merkleblock.cpp \ net_types.cpp \ netaddress.cpp \ diff --git a/src/core_io.h b/src/core_io.h index 00cec54e27e7..d3f424d1363f 100644 --- a/src/core_io.h +++ b/src/core_io.h @@ -5,8 +5,8 @@ #ifndef BITCOIN_CORE_IO_H #define BITCOIN_CORE_IO_H -#include #include +#include #include #include @@ -15,12 +15,13 @@ class CBlock; class CBlockHeader; class CScript; class CTransaction; -struct CMutableTransaction; -class uint256; -class UniValue; class CTxUndo; - +class uint256; +struct CMutableTransaction; struct CSpentIndexTxInfo; +struct RPCResult; + +class UniValue; /** * Verbose level for block's transaction @@ -57,4 +58,7 @@ std::string SighashToStr(unsigned char sighash_type); void ScriptToUniv(const CScript& script, UniValue& out, bool include_hex = true, bool include_address = false); void TxToUniv(const CTransaction& tx, const uint256& block_hash, UniValue& entry, bool include_hex = true, int serialize_flags = 0, const 
CTxUndo* txundo = nullptr, TxVerbosity verbosity = TxVerbosity::SHOW_DETAILS, const CSpentIndexTxInfo* ptxSpentInfo = nullptr); +// evo/core_write.cpp +RPCResult GetRpcResult(const std::string& key, bool optional = false); + #endif // BITCOIN_CORE_IO_H diff --git a/src/evo/core_write.cpp b/src/evo/core_write.cpp index 4d2d72af45a0..1cccced0fd9b 100644 --- a/src/evo/core_write.cpp +++ b/src/evo/core_write.cpp @@ -22,8 +22,7 @@ #include #include -namespace { -RPCResult GetRpcResult(const std::string& key, bool optional = false) +RPCResult GetRpcResult(const std::string& key, bool optional) { #define RESULT_MAP_ENTRY(type, name, desc) {name, {type, name, optional, desc}} const std::map result_map{{ @@ -62,7 +61,6 @@ RPCResult GetRpcResult(const std::string& key, bool optional = false) throw NonFatalCheckError(strprintf("Requested invalid RPCResult for nonexistent key \"%s\"", key).c_str(), __FILE__, __LINE__, __func__); } -} // anonymous namespace [[nodiscard]] UniValue CAssetLockPayload::ToJson() const { @@ -266,25 +264,6 @@ RPCResult GetRpcResult(const std::string& key, bool optional = false) return ret; } -[[nodiscard]] RPCResult llmq::CFinalCommitmentTxPayload::GetJsonHelp(const std::string& key, bool optional) -{ - return {RPCResult::Type::OBJ, key, optional, key.empty() ? "" : "The quorum commitment special transaction", - { - GetRpcResult("version"), - GetRpcResult("height"), - // TODO: Add RPCResult for llmq::CFinalCommitment - }}; -} - -[[nodiscard]] UniValue llmq::CFinalCommitmentTxPayload::ToJson() const -{ - UniValue ret(UniValue::VOBJ); - ret.pushKV("version", nVersion); - ret.pushKV("height", nHeight); - ret.pushKV("commitment", commitment.ToJson()); - return ret; -} - [[nodiscard]] RPCResult CSimplifiedMNListEntry::GetJsonHelp(const std::string& key, bool optional) { return {RPCResult::Type::OBJ, key, optional, key.empty() ? 
"" : "The simplified masternode list entry", diff --git a/src/llmq/commitment.h b/src/llmq/commitment.h index 69b9507a5e81..54b96c19bc96 100644 --- a/src/llmq/commitment.h +++ b/src/llmq/commitment.h @@ -130,23 +130,7 @@ class CFinalCommitment return true; } - [[nodiscard]] UniValue ToJson() const - { - UniValue obj(UniValue::VOBJ); - obj.pushKV("version", nVersion); - obj.pushKV("llmqType", ToUnderlying(llmqType)); - obj.pushKV("quorumHash", quorumHash.ToString()); - obj.pushKV("quorumIndex", quorumIndex); - obj.pushKV("signersCount", CountSigners()); - obj.pushKV("signers", BitsVectorToHexStr(signers)); - obj.pushKV("validMembersCount", CountValidMembers()); - obj.pushKV("validMembers", BitsVectorToHexStr(validMembers)); - obj.pushKV("quorumPublicKey", quorumPublicKey.ToString(nVersion == LEGACY_BLS_NON_INDEXED_QUORUM_VERSION || nVersion == LEGACY_BLS_INDEXED_QUORUM_VERSION)); - obj.pushKV("quorumVvecHash", quorumVvecHash.ToString()); - obj.pushKV("quorumSig", quorumSig.ToString(nVersion == LEGACY_BLS_NON_INDEXED_QUORUM_VERSION || nVersion == LEGACY_BLS_INDEXED_QUORUM_VERSION)); - obj.pushKV("membersSig", membersSig.ToString(nVersion == LEGACY_BLS_NON_INDEXED_QUORUM_VERSION || nVersion == LEGACY_BLS_INDEXED_QUORUM_VERSION)); - return obj; - } + [[nodiscard]] UniValue ToJson() const; private: static std::string BitsVectorToHexStr(const std::vector& vBits) diff --git a/src/llmq/core_write.cpp b/src/llmq/core_write.cpp new file mode 100644 index 000000000000..47b92e1d9daa --- /dev/null +++ b/src/llmq/core_write.cpp @@ -0,0 +1,127 @@ +// Copyright (c) 2018-2025 The Dash Core developers +// Distributed under the MIT software license, see the accompanying +// file COPYING or http://www.opensource.org/licenses/mit-license.php. 
+ +#include +#include +#include + +#include +#include + +#include + +#include +#include + +namespace llmq { +[[nodiscard]] UniValue CFinalCommitment::ToJson() const +{ + UniValue obj(UniValue::VOBJ); + obj.pushKV("version", nVersion); + obj.pushKV("llmqType", ToUnderlying(llmqType)); + obj.pushKV("quorumHash", quorumHash.ToString()); + obj.pushKV("quorumIndex", quorumIndex); + obj.pushKV("signersCount", CountSigners()); + obj.pushKV("signers", BitsVectorToHexStr(signers)); + obj.pushKV("validMembersCount", CountValidMembers()); + obj.pushKV("validMembers", BitsVectorToHexStr(validMembers)); + obj.pushKV("quorumPublicKey", quorumPublicKey.ToString(nVersion == LEGACY_BLS_NON_INDEXED_QUORUM_VERSION || nVersion == LEGACY_BLS_INDEXED_QUORUM_VERSION)); + obj.pushKV("quorumVvecHash", quorumVvecHash.ToString()); + obj.pushKV("quorumSig", quorumSig.ToString(nVersion == LEGACY_BLS_NON_INDEXED_QUORUM_VERSION || nVersion == LEGACY_BLS_INDEXED_QUORUM_VERSION)); + obj.pushKV("membersSig", membersSig.ToString(nVersion == LEGACY_BLS_NON_INDEXED_QUORUM_VERSION || nVersion == LEGACY_BLS_INDEXED_QUORUM_VERSION)); + return obj; +} + +[[nodiscard]] RPCResult CFinalCommitmentTxPayload::GetJsonHelp(const std::string& key, bool optional) +{ + return {RPCResult::Type::OBJ, key, optional, key.empty() ? 
"" : "The quorum commitment special transaction", + { + GetRpcResult("version"), + GetRpcResult("height"), + // TODO: Add RPCResult for llmq::CFinalCommitment + }}; +} + +[[nodiscard]] UniValue CFinalCommitmentTxPayload::ToJson() const +{ + UniValue ret(UniValue::VOBJ); + ret.pushKV("version", nVersion); + ret.pushKV("height", nHeight); + ret.pushKV("commitment", commitment.ToJson()); + return ret; +} + +[[nodiscard]] UniValue CQuorumRotationInfo::ToJson() const +{ + UniValue obj(UniValue::VOBJ); + obj.pushKV("extraShare", extraShare); + + obj.pushKV("quorumSnapshotAtHMinusC", quorumSnapshotAtHMinusC.ToJson()); + obj.pushKV("quorumSnapshotAtHMinus2C", quorumSnapshotAtHMinus2C.ToJson()); + obj.pushKV("quorumSnapshotAtHMinus3C", quorumSnapshotAtHMinus3C.ToJson()); + + if (extraShare) { + obj.pushKV("quorumSnapshotAtHMinus4C", quorumSnapshotAtHMinus4C.ToJson()); + } + + obj.pushKV("mnListDiffTip", mnListDiffTip.ToJson()); + obj.pushKV("mnListDiffH", mnListDiffH.ToJson()); + obj.pushKV("mnListDiffAtHMinusC", mnListDiffAtHMinusC.ToJson()); + obj.pushKV("mnListDiffAtHMinus2C", mnListDiffAtHMinus2C.ToJson()); + obj.pushKV("mnListDiffAtHMinus3C", mnListDiffAtHMinus3C.ToJson()); + + if (extraShare) { + obj.pushKV("mnListDiffAtHMinus4C", mnListDiffAtHMinus4C.ToJson()); + } + UniValue hqclists(UniValue::VARR); + for (const auto& qc : lastCommitmentPerIndex) { + hqclists.push_back(qc.ToJson()); + } + obj.pushKV("lastCommitmentPerIndex", hqclists); + + UniValue snapshotlist(UniValue::VARR); + for (const auto& snap : quorumSnapshotList) { + snapshotlist.push_back(snap.ToJson()); + } + obj.pushKV("quorumSnapshotList", snapshotlist); + + UniValue mnlistdifflist(UniValue::VARR); + for (const auto& mnlist : mnListDiffList) { + mnlistdifflist.push_back(mnlist.ToJson()); + } + obj.pushKV("mnListDiffList", mnlistdifflist); + return obj; +} + +[[nodiscard]] UniValue CQuorumSnapshot::ToJson() const +{ + UniValue obj(UniValue::VOBJ); + UniValue activeQ(UniValue::VARR); + for (const bool h 
: activeQuorumMembers) { + // cppcheck-suppress useStlAlgorithm + activeQ.push_back(h); + } + obj.pushKV("activeQuorumMembers", activeQ); + obj.pushKV("mnSkipListMode", mnSkipListMode); + UniValue skipList(UniValue::VARR); + for (const auto& h : mnSkipList) { + // cppcheck-suppress useStlAlgorithm + skipList.push_back(h); + } + obj.pushKV("mnSkipList", skipList); + return obj; +} + +[[nodiscard]] UniValue CRecoveredSig::ToJson() const +{ + UniValue ret(UniValue::VOBJ); + ret.pushKV("llmqType", ToUnderlying(llmqType)); + ret.pushKV("quorumHash", quorumHash.ToString()); + ret.pushKV("id", id.ToString()); + ret.pushKV("msgHash", msgHash.ToString()); + ret.pushKV("sig", sig.Get().ToString()); + ret.pushKV("hash", sig.Get().GetHash().ToString()); + return ret; +} +} // namespace llmq diff --git a/src/llmq/signing.cpp b/src/llmq/signing.cpp index f66effa2d02d..55c0ab19db25 100644 --- a/src/llmq/signing.cpp +++ b/src/llmq/signing.cpp @@ -27,19 +27,6 @@ namespace llmq { -UniValue CRecoveredSig::ToJson() const -{ - UniValue ret(UniValue::VOBJ); - ret.pushKV("llmqType", ToUnderlying(llmqType)); - ret.pushKV("quorumHash", quorumHash.ToString()); - ret.pushKV("id", id.ToString()); - ret.pushKV("msgHash", msgHash.ToString()); - ret.pushKV("sig", sig.Get().ToString()); - ret.pushKV("hash", sig.Get().GetHash().ToString()); - return ret; -} - - CRecoveredSigsDb::CRecoveredSigsDb(bool fMemory, bool fWipe) : db(std::make_unique(fMemory ? 
"" : (gArgs.GetDataDirNet() / "llmq/recsigdb"), 8 << 20, fMemory, fWipe)) { diff --git a/src/llmq/signing.h b/src/llmq/signing.h index b835079d5b19..1097240c590e 100644 --- a/src/llmq/signing.h +++ b/src/llmq/signing.h @@ -106,7 +106,7 @@ class CRecoveredSig : virtual public CSigBase return hash; } - UniValue ToJson() const; + [[nodiscard]] UniValue ToJson() const; }; class CRecoveredSigsDb diff --git a/src/llmq/snapshot.cpp b/src/llmq/snapshot.cpp index 3544177dc575..3e6ccd6955b5 100644 --- a/src/llmq/snapshot.cpp +++ b/src/llmq/snapshot.cpp @@ -17,67 +17,6 @@ namespace llmq { static const std::string DB_QUORUM_SNAPSHOT = "llmq_S"; -UniValue CQuorumSnapshot::ToJson() const -{ - UniValue obj(UniValue::VOBJ); - UniValue activeQ(UniValue::VARR); - for (const bool h : activeQuorumMembers) { - // cppcheck-suppress useStlAlgorithm - activeQ.push_back(h); - } - obj.pushKV("activeQuorumMembers", activeQ); - obj.pushKV("mnSkipListMode", mnSkipListMode); - UniValue skipList(UniValue::VARR); - for (const auto& h : mnSkipList) { - // cppcheck-suppress useStlAlgorithm - skipList.push_back(h); - } - obj.pushKV("mnSkipList", skipList); - return obj; -} - -UniValue CQuorumRotationInfo::ToJson() const -{ - UniValue obj(UniValue::VOBJ); - obj.pushKV("extraShare", extraShare); - - obj.pushKV("quorumSnapshotAtHMinusC", quorumSnapshotAtHMinusC.ToJson()); - obj.pushKV("quorumSnapshotAtHMinus2C", quorumSnapshotAtHMinus2C.ToJson()); - obj.pushKV("quorumSnapshotAtHMinus3C", quorumSnapshotAtHMinus3C.ToJson()); - - if (extraShare) { - obj.pushKV("quorumSnapshotAtHMinus4C", quorumSnapshotAtHMinus4C.ToJson()); - } - - obj.pushKV("mnListDiffTip", mnListDiffTip.ToJson()); - obj.pushKV("mnListDiffH", mnListDiffH.ToJson()); - obj.pushKV("mnListDiffAtHMinusC", mnListDiffAtHMinusC.ToJson()); - obj.pushKV("mnListDiffAtHMinus2C", mnListDiffAtHMinus2C.ToJson()); - obj.pushKV("mnListDiffAtHMinus3C", mnListDiffAtHMinus3C.ToJson()); - - if (extraShare) { - obj.pushKV("mnListDiffAtHMinus4C", 
mnListDiffAtHMinus4C.ToJson()); - } - UniValue hqclists(UniValue::VARR); - for (const auto& qc : lastCommitmentPerIndex) { - hqclists.push_back(qc.ToJson()); - } - obj.pushKV("lastCommitmentPerIndex", hqclists); - - UniValue snapshotlist(UniValue::VARR); - for (const auto& snap : quorumSnapshotList) { - snapshotlist.push_back(snap.ToJson()); - } - obj.pushKV("quorumSnapshotList", snapshotlist); - - UniValue mnlistdifflist(UniValue::VARR); - for (const auto& mnlist : mnListDiffList) { - mnlistdifflist.push_back(mnlist.ToJson()); - } - obj.pushKV("mnListDiffList", mnlistdifflist); - return obj; -} - bool BuildQuorumRotationInfo(CDeterministicMNManager& dmnman, CQuorumSnapshotManager& qsnapman, const ChainstateManager& chainman, const CQuorumManager& qman, const CQuorumBlockProcessor& qblockman, const CGetQuorumRotationInfo& request, From ddc698bd3df670d6245d80d2bac353e216e431ce Mon Sep 17 00:00:00 2001 From: Kittywhiskers Van Gogh <63189531+kwvg@users.noreply.github.com> Date: Sun, 5 Oct 2025 12:08:56 +0000 Subject: [PATCH 076/656] refactor: move pending `evo/`-specific definitions to `core_write.cpp` We exclude the following: - `evo/netinfo.{cpp,h}` (already a part of `libbitcoin_common`) as it as an inheritance structure and the help text for it is relatively uncomplicated. - `CDeterministicMN::ToJson() `and `CDeterministicMNStateDiff::ToJson()` as it relies on `g_txindex` that is part of `libbitcoin_node` so it cannot be part of `evo/core_write.cpp`. 
--- src/evo/core_write.cpp | 156 ++++++++++++++++++++++++++--------------- src/evo/dmnstate.cpp | 32 --------- src/evo/mnhftx.h | 9 +-- 3 files changed, 100 insertions(+), 97 deletions(-) diff --git a/src/evo/core_write.cpp b/src/evo/core_write.cpp index 1cccced0fd9b..673ddf70e32b 100644 --- a/src/evo/core_write.cpp +++ b/src/evo/core_write.cpp @@ -4,6 +4,7 @@ #include #include +#include #include #include #include @@ -124,6 +125,38 @@ RPCResult GetRpcResult(const std::string& key, bool optional) return ret; } +[[nodiscard]] UniValue CDeterministicMNState::ToJson(MnType nType) const +{ + UniValue obj(UniValue::VOBJ); + obj.pushKV("version", nVersion); + obj.pushKV("service", netInfo->GetPrimary().ToStringAddrPort()); + obj.pushKV("addresses", GetNetInfoWithLegacyFields(*this, nType)); + obj.pushKV("registeredHeight", nRegisteredHeight); + obj.pushKV("lastPaidHeight", nLastPaidHeight); + obj.pushKV("consecutivePayments", nConsecutivePayments); + obj.pushKV("PoSePenalty", nPoSePenalty); + obj.pushKV("PoSeRevivedHeight", nPoSeRevivedHeight); + obj.pushKV("PoSeBanHeight", nPoSeBanHeight); + obj.pushKV("revocationReason", nRevocationReason); + obj.pushKV("ownerAddress", EncodeDestination(PKHash(keyIDOwner))); + obj.pushKV("votingAddress", EncodeDestination(PKHash(keyIDVoting))); + if (nType == MnType::Evo) { + obj.pushKV("platformNodeID", platformNodeID.ToString()); + obj.pushKV("platformP2PPort", GetPlatformPort(*this)); + obj.pushKV("platformHTTPPort", GetPlatformPort(*this)); + } + + CTxDestination dest; + if (ExtractDestination(scriptPayout, dest)) { + obj.pushKV("payoutAddress", EncodeDestination(dest)); + } + obj.pushKV("pubKeyOperator", pubKeyOperator.ToString()); + if (ExtractDestination(scriptOperatorPayout, dest)) { + obj.pushKV("operatorPayoutAddress", EncodeDestination(dest)); + } + return obj; +} + [[nodiscard]] RPCResult CProRegTx::GetJsonHelp(const std::string& key, bool optional) { return {RPCResult::Type::OBJ, key, optional, key.empty() ? 
"" : "The masternode registration special transaction", @@ -256,63 +289,6 @@ RPCResult GetRpcResult(const std::string& key, bool optional) return ret; } -[[nodiscard]] UniValue MNHFTxPayload::ToJson() const -{ - UniValue ret(UniValue::VOBJ); - ret.pushKV("version", nVersion); - ret.pushKV("signal", signal.ToJson()); - return ret; -} - -[[nodiscard]] RPCResult CSimplifiedMNListEntry::GetJsonHelp(const std::string& key, bool optional) -{ - return {RPCResult::Type::OBJ, key, optional, key.empty() ? "" : "The simplified masternode list entry", - { - {RPCResult::Type::NUM, "nVersion", "Version of the entry"}, - {RPCResult::Type::NUM, "nType", "Masternode type"}, - {RPCResult::Type::STR_HEX, "proRegTxHash", "Hash of the ProRegTx identifying the masternode"}, - {RPCResult::Type::STR_HEX, "confirmedHash", "Hash of the block where the masternode was confirmed"}, - GetRpcResult("service"), - GetRpcResult("addresses"), - GetRpcResult("pubKeyOperator"), - GetRpcResult("votingAddress"), - {RPCResult::Type::BOOL, "isValid", "Returns true if the masternode is not Proof-of-Service banned"}, - GetRpcResult("platformHTTPPort", /*optional=*/true), - GetRpcResult("platformNodeID", /*optional=*/true), - GetRpcResult("payoutAddress", /*optional=*/true), - GetRpcResult("operatorPayoutAddress", /*optional=*/true), - }}; -} - -[[nodiscard]] UniValue CSimplifiedMNListEntry::ToJson(bool extended) const -{ - UniValue obj(UniValue::VOBJ); - obj.pushKV("nVersion", nVersion); - obj.pushKV("nType", ToUnderlying(nType)); - obj.pushKV("proRegTxHash", proRegTxHash.ToString()); - obj.pushKV("confirmedHash", confirmedHash.ToString()); - obj.pushKV("service", netInfo->GetPrimary().ToStringAddrPort()); - obj.pushKV("addresses", GetNetInfoWithLegacyFields(*this, nType)); - obj.pushKV("pubKeyOperator", pubKeyOperator.ToString()); - obj.pushKV("votingAddress", EncodeDestination(PKHash(keyIDVoting))); - obj.pushKV("isValid", isValid); - if (nType == MnType::Evo) { - obj.pushKV("platformHTTPPort", 
GetPlatformPort(*this)); - obj.pushKV("platformNodeID", platformNodeID.ToString()); - } - - if (extended) { - CTxDestination dest; - if (ExtractDestination(scriptPayout, dest)) { - obj.pushKV("payoutAddress", EncodeDestination(dest)); - } - if (ExtractDestination(scriptOperatorPayout, dest)) { - obj.pushKV("operatorPayoutAddress", EncodeDestination(dest)); - } - } - return obj; -} - [[nodiscard]] RPCResult CSimplifiedMNListDiff::GetJsonHelp(const std::string& key, bool optional) { return {RPCResult::Type::OBJ, key, optional, key.empty() ? "" : "The simplified masternode list diff", @@ -404,3 +380,69 @@ RPCResult GetRpcResult(const std::string& key, bool optional) obj.pushKV("quorumsCLSigs", quorumsCLSigsArr); return obj; } + +[[nodiscard]] RPCResult CSimplifiedMNListEntry::GetJsonHelp(const std::string& key, bool optional) +{ + return {RPCResult::Type::OBJ, key, optional, key.empty() ? "" : "The simplified masternode list entry", + { + {RPCResult::Type::NUM, "nVersion", "Version of the entry"}, + {RPCResult::Type::NUM, "nType", "Masternode type"}, + {RPCResult::Type::STR_HEX, "proRegTxHash", "Hash of the ProRegTx identifying the masternode"}, + {RPCResult::Type::STR_HEX, "confirmedHash", "Hash of the block where the masternode was confirmed"}, + GetRpcResult("service"), + GetRpcResult("addresses"), + GetRpcResult("pubKeyOperator"), + GetRpcResult("votingAddress"), + {RPCResult::Type::BOOL, "isValid", "Returns true if the masternode is not Proof-of-Service banned"}, + GetRpcResult("platformHTTPPort", /*optional=*/true), + GetRpcResult("platformNodeID", /*optional=*/true), + GetRpcResult("payoutAddress", /*optional=*/true), + GetRpcResult("operatorPayoutAddress", /*optional=*/true), + }}; +} + +[[nodiscard]] UniValue CSimplifiedMNListEntry::ToJson(bool extended) const +{ + UniValue obj(UniValue::VOBJ); + obj.pushKV("nVersion", nVersion); + obj.pushKV("nType", ToUnderlying(nType)); + obj.pushKV("proRegTxHash", proRegTxHash.ToString()); + obj.pushKV("confirmedHash", 
confirmedHash.ToString()); + obj.pushKV("service", netInfo->GetPrimary().ToStringAddrPort()); + obj.pushKV("addresses", GetNetInfoWithLegacyFields(*this, nType)); + obj.pushKV("pubKeyOperator", pubKeyOperator.ToString()); + obj.pushKV("votingAddress", EncodeDestination(PKHash(keyIDVoting))); + obj.pushKV("isValid", isValid); + if (nType == MnType::Evo) { + obj.pushKV("platformHTTPPort", GetPlatformPort(*this)); + obj.pushKV("platformNodeID", platformNodeID.ToString()); + } + + if (extended) { + CTxDestination dest; + if (ExtractDestination(scriptPayout, dest)) { + obj.pushKV("payoutAddress", EncodeDestination(dest)); + } + if (ExtractDestination(scriptOperatorPayout, dest)) { + obj.pushKV("operatorPayoutAddress", EncodeDestination(dest)); + } + } + return obj; +} + +[[nodiscard]] UniValue MNHFTx::ToJson() const +{ + UniValue obj(UniValue::VOBJ); + obj.pushKV("versionBit", versionBit); + obj.pushKV("quorumHash", quorumHash.ToString()); + obj.pushKV("sig", sig.ToString()); + return obj; +} + +[[nodiscard]] UniValue MNHFTxPayload::ToJson() const +{ + UniValue ret(UniValue::VOBJ); + ret.pushKV("version", nVersion); + ret.pushKV("signal", signal.ToJson()); + return ret; +} diff --git a/src/evo/dmnstate.cpp b/src/evo/dmnstate.cpp index c9d9a9e71948..54d463db3d13 100644 --- a/src/evo/dmnstate.cpp +++ b/src/evo/dmnstate.cpp @@ -32,38 +32,6 @@ std::string CDeterministicMNState::ToString() const EncodeDestination(PKHash(keyIDVoting)), netInfo->ToString(), payoutAddress, operatorPayoutAddress); } -UniValue CDeterministicMNState::ToJson(MnType nType) const -{ - UniValue obj(UniValue::VOBJ); - obj.pushKV("version", nVersion); - obj.pushKV("service", netInfo->GetPrimary().ToStringAddrPort()); - obj.pushKV("addresses", GetNetInfoWithLegacyFields(*this, nType)); - obj.pushKV("registeredHeight", nRegisteredHeight); - obj.pushKV("lastPaidHeight", nLastPaidHeight); - obj.pushKV("consecutivePayments", nConsecutivePayments); - obj.pushKV("PoSePenalty", nPoSePenalty); - 
obj.pushKV("PoSeRevivedHeight", nPoSeRevivedHeight); - obj.pushKV("PoSeBanHeight", nPoSeBanHeight); - obj.pushKV("revocationReason", nRevocationReason); - obj.pushKV("ownerAddress", EncodeDestination(PKHash(keyIDOwner))); - obj.pushKV("votingAddress", EncodeDestination(PKHash(keyIDVoting))); - if (nType == MnType::Evo) { - obj.pushKV("platformNodeID", platformNodeID.ToString()); - obj.pushKV("platformP2PPort", GetPlatformPort(*this)); - obj.pushKV("platformHTTPPort", GetPlatformPort(*this)); - } - - CTxDestination dest; - if (ExtractDestination(scriptPayout, dest)) { - obj.pushKV("payoutAddress", EncodeDestination(dest)); - } - obj.pushKV("pubKeyOperator", pubKeyOperator.ToString()); - if (ExtractDestination(scriptOperatorPayout, dest)) { - obj.pushKV("operatorPayoutAddress", EncodeDestination(dest)); - } - return obj; -} - UniValue CDeterministicMNStateDiff::ToJson(MnType nType) const { UniValue obj(UniValue::VOBJ); diff --git a/src/evo/mnhftx.h b/src/evo/mnhftx.h index 8adda3655368..f3d995be652d 100644 --- a/src/evo/mnhftx.h +++ b/src/evo/mnhftx.h @@ -50,14 +50,7 @@ class MNHFTx std::string ToString() const; - [[nodiscard]] UniValue ToJson() const - { - UniValue obj(UniValue::VOBJ); - obj.pushKV("versionBit", versionBit); - obj.pushKV("quorumHash", quorumHash.ToString()); - obj.pushKV("sig", sig.ToString()); - return obj; - } + [[nodiscard]] UniValue ToJson() const; }; class MNHFTxPayload From ae4e05c14792cb90be1c2f7ea978d4eb131107e4 Mon Sep 17 00:00:00 2001 From: Kittywhiskers Van Gogh <63189531+kwvg@users.noreply.github.com> Date: Mon, 6 Oct 2025 20:24:20 +0000 Subject: [PATCH 077/656] rpc: `GetJsonHelp()` defs for `MNHFTx`, `CAsset{Un,}lock` --- src/evo/assetlocktx.h | 3 +++ src/evo/core_write.cpp | 52 +++++++++++++++++++++++++++++++++++++- src/evo/mnhftx.h | 3 +++ src/rpc/rawtransaction.cpp | 4 +++ 4 files changed, 61 insertions(+), 1 deletion(-) diff --git a/src/evo/assetlocktx.h b/src/evo/assetlocktx.h index 2fc44643fa8f..433dd098ee47 100644 --- 
a/src/evo/assetlocktx.h +++ b/src/evo/assetlocktx.h @@ -17,6 +17,7 @@ class CBlockIndex; class CRangesSet; class TxValidationState; +struct RPCResult; namespace llmq { class CQuorumManager; } // namespace llmq @@ -51,6 +52,7 @@ class CAssetLockPayload std::string ToString() const; + [[nodiscard]] static RPCResult GetJsonHelp(const std::string& key, bool optional); [[nodiscard]] UniValue ToJson() const; // getters @@ -108,6 +110,7 @@ class CAssetUnlockPayload std::string ToString() const; + [[nodiscard]] static RPCResult GetJsonHelp(const std::string& key, bool optional); [[nodiscard]] UniValue ToJson() const; bool VerifySig(const llmq::CQuorumManager& qman, const uint256& msgHash, gsl::not_null pindexTip, TxValidationState& state) const; diff --git a/src/evo/core_write.cpp b/src/evo/core_write.cpp index 673ddf70e32b..1524d24a143f 100644 --- a/src/evo/core_write.cpp +++ b/src/evo/core_write.cpp @@ -48,6 +48,7 @@ RPCResult GetRpcResult(const std::string& key, bool optional) RESULT_MAP_ENTRY(RPCResult::Type::NUM, "platformP2PPort", "(DEPRECATED) TCP port of Platform P2P"), RESULT_MAP_ENTRY(RPCResult::Type::STR_HEX, "proTxHash", "Hash of the masternode's initial ProRegTx"), RESULT_MAP_ENTRY(RPCResult::Type::STR, "pubKeyOperator", "BLS public key used for operator signing"), + RESULT_MAP_ENTRY(RPCResult::Type::STR_HEX, "quorumHash", "Hash of the quorum"), RESULT_MAP_ENTRY(RPCResult::Type::STR, "service", "(DEPRECATED) IP address and port of the masternode"), RESULT_MAP_ENTRY(RPCResult::Type::NUM, "type", "Masternode type"), RESULT_MAP_ENTRY(RPCResult::Type::NUM, "version", "Special transaction version"), @@ -63,6 +64,23 @@ RPCResult GetRpcResult(const std::string& key, bool optional) __FILE__, __LINE__, __func__); } +[[nodiscard]] RPCResult CAssetLockPayload::GetJsonHelp(const std::string& key, bool optional) +{ + return {RPCResult::Type::OBJ, key, optional, key.empty() ? 
"" : "The asset lock special transaction", + { + GetRpcResult("version"), + {RPCResult::Type::ARR, "creditOutputs", "", { + {RPCResult::Type::OBJ, "", "", { + {RPCResult::Type::NUM, "value", "The value in Dash"}, + {RPCResult::Type::NUM, "valueSat", "The value in duffs"}, + {RPCResult::Type::OBJ, "scriptPubKey", "", { + {RPCResult::Type::STR, "asm", "The asm"}, + {RPCResult::Type::STR_HEX, "hex", "The hex"}, + {RPCResult::Type::STR, "type", "The type, eg 'pubkeyhash'"}, + }}}}}} + }}; +} + [[nodiscard]] UniValue CAssetLockPayload::ToJson() const { UniValue outputs(UniValue::VARR); @@ -82,6 +100,19 @@ RPCResult GetRpcResult(const std::string& key, bool optional) return ret; } +[[nodiscard]] RPCResult CAssetUnlockPayload::GetJsonHelp(const std::string& key, bool optional) +{ + return {RPCResult::Type::OBJ, key, optional, key.empty() ? "" : "The asset unlock special transaction", + { + GetRpcResult("version"), + {RPCResult::Type::NUM, "index", "Index of the transaction"}, + {RPCResult::Type::NUM, "fee", "Transaction fee in duffs awarded to the miner"}, + {RPCResult::Type::NUM, "requestedHeight", "Payment chain block height known by Platform when signing the withdrawal"}, + GetRpcResult("quorumHash"), + {RPCResult::Type::STR_HEX, "quorumSig", "BLS signature by a quorum public key"}, + }}; +} + [[nodiscard]] UniValue CAssetUnlockPayload::ToJson() const { UniValue ret(UniValue::VOBJ); @@ -305,7 +336,7 @@ RPCResult GetRpcResult(const std::string& key, bool optional) {RPCResult::Type::ARR, "deletedQuorums", "Deleted quorums", {{RPCResult::Type::OBJ, "", "", { {RPCResult::Type::NUM, "llmqType", "Quorum type"}, - {RPCResult::Type::STR_HEX, "quorumHash", "Hash of the quorum"}, + GetRpcResult("quorumHash"), }}}}, {RPCResult::Type::ARR, "newQuorums", "New quorums"}, // TODO: Add definition for llmq::CFinalCommitment GetRpcResult("merkleRootMNList", /*optional=*/true), @@ -430,6 +461,16 @@ RPCResult GetRpcResult(const std::string& key, bool optional) return obj; } +[[nodiscard]] 
RPCResult MNHFTx::GetJsonHelp(const std::string& key, bool optional) +{ + return {RPCResult::Type::OBJ, key, optional, key.empty() ? "" : "The masternode hard fork payload", + { + {RPCResult::Type::NUM, "versionBit", "Version bit associated with the hard fork"}, + GetRpcResult("quorumHash"), + {RPCResult::Type::STR_HEX, "sig", "BLS signature by a quorum public key"}, + }}; +} + [[nodiscard]] UniValue MNHFTx::ToJson() const { UniValue obj(UniValue::VOBJ); @@ -439,6 +480,15 @@ RPCResult GetRpcResult(const std::string& key, bool optional) return obj; } +[[nodiscard]] RPCResult MNHFTxPayload::GetJsonHelp(const std::string& key, bool optional) +{ + return {RPCResult::Type::OBJ, key, optional, key.empty() ? "" : "The masternode hard fork signal special transaction", + { + GetRpcResult("version"), + MNHFTx::GetJsonHelp(/*key=*/"signal", /*optional=*/false), + }}; +} + [[nodiscard]] UniValue MNHFTxPayload::ToJson() const { UniValue ret(UniValue::VOBJ); diff --git a/src/evo/mnhftx.h b/src/evo/mnhftx.h index f3d995be652d..855607b21698 100644 --- a/src/evo/mnhftx.h +++ b/src/evo/mnhftx.h @@ -26,6 +26,7 @@ class CEvoDB; class CTransaction; class ChainstateManager; class TxValidationState; +struct RPCResult; namespace llmq { class CQuorumManager; } @@ -50,6 +51,7 @@ class MNHFTx std::string ToString() const; + [[nodiscard]] static RPCResult GetJsonHelp(const std::string& key, bool optional); [[nodiscard]] UniValue ToJson() const; }; @@ -80,6 +82,7 @@ class MNHFTxPayload std::string ToString() const; + [[nodiscard]] static RPCResult GetJsonHelp(const std::string& key, bool optional); [[nodiscard]] UniValue ToJson() const; }; diff --git a/src/rpc/rawtransaction.cpp b/src/rpc/rawtransaction.cpp index 81d031525939..77de92f39f67 100644 --- a/src/rpc/rawtransaction.cpp +++ b/src/rpc/rawtransaction.cpp @@ -49,6 +49,7 @@ #include #include #include +#include #include #include #include @@ -177,6 +178,9 @@ static std::vector DecodeTxDoc(const std::string& txid_field_doc) 
CProUpRevTx::GetJsonHelp(/*key=*/"proUpRevTx", /*optional=*/true), CCbTx::GetJsonHelp(/*key=*/"cbTx", /*optional=*/true), llmq::CFinalCommitmentTxPayload::GetJsonHelp(/*key=*/"qcTx", /*optional=*/true), + MNHFTxPayload::GetJsonHelp(/*key=*/"mnhfTx", /*optional=*/true), + CAssetLockPayload::GetJsonHelp(/*key=*/"assetLockTx", /*optional=*/true), + CAssetUnlockPayload::GetJsonHelp(/*key=*/"assetUnlockTx", /*optional=*/true), }; } From 1be22261cc06dd46d3549c40c863615f5fa47ad7 Mon Sep 17 00:00:00 2001 From: Kittywhiskers Van Gogh <63189531+kwvg@users.noreply.github.com> Date: Mon, 6 Oct 2025 15:52:22 +0000 Subject: [PATCH 078/656] rpc: `GetJsonHelp()` defs for `Quorum{Rotation,Snapshot}`,`RecoveredSig` --- src/evo/core_write.cpp | 9 +++-- src/llmq/commitment.h | 1 + src/llmq/core_write.cpp | 75 ++++++++++++++++++++++++++++++++++++++++- src/llmq/signing.h | 3 ++ src/llmq/snapshot.h | 4 +++ src/rpc/quorums.cpp | 5 +-- 6 files changed, 91 insertions(+), 6 deletions(-) diff --git a/src/evo/core_write.cpp b/src/evo/core_write.cpp index 1524d24a143f..63a988f2ec32 100644 --- a/src/evo/core_write.cpp +++ b/src/evo/core_write.cpp @@ -39,6 +39,7 @@ RPCResult GetRpcResult(const std::string& key, bool optional) }}}, RESULT_MAP_ENTRY(RPCResult::Type::NUM, "height", "Block height"), RESULT_MAP_ENTRY(RPCResult::Type::STR_HEX, "inputsHash", "Hash of all the outpoints of the transaction inputs"), + RESULT_MAP_ENTRY(RPCResult::Type::NUM, "llmqType", "Quorum type"), RESULT_MAP_ENTRY(RPCResult::Type::STR_HEX, "merkleRootMNList", "Merkle root of the masternode list"), RESULT_MAP_ENTRY(RPCResult::Type::STR_HEX, "merkleRootQuorums", "Merkle root of the quorum list"), RESULT_MAP_ENTRY(RPCResult::Type::STR, "operatorPayoutAddress", "Dash address used for operator reward payments"), @@ -49,6 +50,7 @@ RPCResult GetRpcResult(const std::string& key, bool optional) RESULT_MAP_ENTRY(RPCResult::Type::STR_HEX, "proTxHash", "Hash of the masternode's initial ProRegTx"), 
RESULT_MAP_ENTRY(RPCResult::Type::STR, "pubKeyOperator", "BLS public key used for operator signing"), RESULT_MAP_ENTRY(RPCResult::Type::STR_HEX, "quorumHash", "Hash of the quorum"), + RESULT_MAP_ENTRY(RPCResult::Type::STR_HEX, "quorumSig", "BLS recovered threshold signature of quorum"), RESULT_MAP_ENTRY(RPCResult::Type::STR, "service", "(DEPRECATED) IP address and port of the masternode"), RESULT_MAP_ENTRY(RPCResult::Type::NUM, "type", "Masternode type"), RESULT_MAP_ENTRY(RPCResult::Type::NUM, "version", "Special transaction version"), @@ -109,7 +111,7 @@ RPCResult GetRpcResult(const std::string& key, bool optional) {RPCResult::Type::NUM, "fee", "Transaction fee in duffs awarded to the miner"}, {RPCResult::Type::NUM, "requestedHeight", "Payment chain block height known by Platform when signing the withdrawal"}, GetRpcResult("quorumHash"), - {RPCResult::Type::STR_HEX, "quorumSig", "BLS signature by a quorum public key"}, + GetRpcResult("quorumSig"), }}; } @@ -335,10 +337,11 @@ RPCResult GetRpcResult(const std::string& key, bool optional) {CSimplifiedMNListEntry::GetJsonHelp(/*key=*/"", /*optional=*/false)}}, {RPCResult::Type::ARR, "deletedQuorums", "Deleted quorums", {{RPCResult::Type::OBJ, "", "", { - {RPCResult::Type::NUM, "llmqType", "Quorum type"}, + GetRpcResult("llmqType"), GetRpcResult("quorumHash"), }}}}, - {RPCResult::Type::ARR, "newQuorums", "New quorums"}, // TODO: Add definition for llmq::CFinalCommitment + {RPCResult::Type::ARR, "newQuorums", "New quorums", + {llmq::CFinalCommitment::GetJsonHelp(/*key=*/"", /*optional=*/false)}}, GetRpcResult("merkleRootMNList", /*optional=*/true), GetRpcResult("merkleRootQuorums", /*optional=*/true), {RPCResult::Type::ARR, "quorumsCLSigs", "ChainLock signature details", { diff --git a/src/llmq/commitment.h b/src/llmq/commitment.h index 54b96c19bc96..60cf499d5a2a 100644 --- a/src/llmq/commitment.h +++ b/src/llmq/commitment.h @@ -130,6 +130,7 @@ class CFinalCommitment return true; } + [[nodiscard]] static RPCResult 
GetJsonHelp(const std::string& key, bool optional); [[nodiscard]] UniValue ToJson() const; private: diff --git a/src/llmq/core_write.cpp b/src/llmq/core_write.cpp index 47b92e1d9daa..8a8137ed7072 100644 --- a/src/llmq/core_write.cpp +++ b/src/llmq/core_write.cpp @@ -15,6 +15,25 @@ #include namespace llmq { +[[nodiscard]] RPCResult CFinalCommitment::GetJsonHelp(const std::string& key, bool optional) +{ + return {RPCResult::Type::OBJ, key, optional, key.empty() ? "" : "The quorum commitment payload", + { + {RPCResult::Type::NUM, "version", "Quorum commitment payload version"}, + GetRpcResult("llmqType"), + GetRpcResult("quorumHash"), + {RPCResult::Type::NUM, "quorumIndex", "Index of the quorum"}, + {RPCResult::Type::NUM, "signersCount", "Number of signers for the quorum"}, + {RPCResult::Type::STR_HEX, "signers", "Bitset representing the aggregated signers"}, + {RPCResult::Type::NUM, "validMembersCount", "Number of valid members in the quorum"}, + {RPCResult::Type::STR_HEX, "validMembers", "Bitset of valid members"}, + {RPCResult::Type::STR_HEX, "quorumPublicKey", "BLS public key of the quorum"}, + {RPCResult::Type::STR_HEX, "quorumVvecHash", "Hash of the quorum verification vector"}, + GetRpcResult("quorumSig"), + {RPCResult::Type::STR_HEX, "membersSig", "BLS signature from all included commitments"}, + }}; +} + [[nodiscard]] UniValue CFinalCommitment::ToJson() const { UniValue obj(UniValue::VOBJ); @@ -39,7 +58,7 @@ namespace llmq { { GetRpcResult("version"), GetRpcResult("height"), - // TODO: Add RPCResult for llmq::CFinalCommitment + CFinalCommitment::GetJsonHelp(/*key=*/"commitment", /*optional=*/false), }}; } @@ -52,6 +71,33 @@ namespace llmq { return ret; } +[[nodiscard]] RPCResult CQuorumRotationInfo::GetJsonHelp(const std::string& key, bool optional) +{ + return {RPCResult::Type::OBJ, key, optional, key.empty() ? 
"" : "The quorum rotation", + { + {RPCResult::Type::BOOL, "extraShare", "Returns true if an extra share is returned"}, + CQuorumSnapshot::GetJsonHelp(/*key=*/"quorumSnapshotAtHMinusC", /*optional=*/false), + CQuorumSnapshot::GetJsonHelp(/*key=*/"quorumSnapshotAtHMinus2C", /*optional=*/false), + CQuorumSnapshot::GetJsonHelp(/*key=*/"quorumSnapshotAtHMinus3C", /*optional=*/false), + CQuorumSnapshot::GetJsonHelp(/*key=*/"quorumSnapshotAtHMinus4C", /*optional=*/true), + CSimplifiedMNListDiff::GetJsonHelp(/*key=*/"mnListDiffTip", /*optional=*/false), + CSimplifiedMNListDiff::GetJsonHelp(/*key=*/"mnListDiffH", /*optional=*/false), + CSimplifiedMNListDiff::GetJsonHelp(/*key=*/"mnListDiffAtHMinusC", /*optional=*/false), + CSimplifiedMNListDiff::GetJsonHelp(/*key=*/"mnListDiffAtHMinus2C", /*optional=*/false), + CSimplifiedMNListDiff::GetJsonHelp(/*key=*/"mnListDiffAtHMinus3C", /*optional=*/false), + CSimplifiedMNListDiff::GetJsonHelp(/*key=*/"mnListDiffAtHMinus4C", /*optional=*/true), + {RPCResult::Type::ARR, "lastCommitmentPerIndex", "Most recent commitment for each quorumIndex", { + CFinalCommitment::GetJsonHelp(/*key=*/"", /*optional=*/false), + }}, + {RPCResult::Type::ARR, "quorumSnapshotList", "Snapshots required to reconstruct the quorums built at h' in lastCommitmentPerIndex", { + CQuorumSnapshot::GetJsonHelp(/*key=*/"", /*optional=*/false), + }}, + {RPCResult::Type::ARR, "mnListDiffList", "MnListDiffs required to calculate older quorums", { + CSimplifiedMNListDiff::GetJsonHelp(/*key=*/"", /*optional=*/false), + }}, + }}; +} + [[nodiscard]] UniValue CQuorumRotationInfo::ToJson() const { UniValue obj(UniValue::VOBJ); @@ -94,6 +140,20 @@ namespace llmq { return obj; } +[[nodiscard]] RPCResult CQuorumSnapshot::GetJsonHelp(const std::string& key, bool optional) +{ + return {RPCResult::Type::OBJ, key, optional, key.empty() ? 
"" : "The quorum snapshot", + { + {RPCResult::Type::ARR, "activeQuorumMembers", "Bitset of nodes already in quarters at the start of cycle", { + {RPCResult::Type::BOOL, "bit", ""} + }}, + {RPCResult::Type::NUM, "mnSkipListMode", "Mode of the skip list"}, + {RPCResult::Type::ARR, "mnSkipList", "Skiplist at height", { + {RPCResult::Type::NUM, "height", ""} + }}, + }}; +} + [[nodiscard]] UniValue CQuorumSnapshot::ToJson() const { UniValue obj(UniValue::VOBJ); @@ -113,6 +173,19 @@ namespace llmq { return obj; } +[[nodiscard]] RPCResult CRecoveredSig::GetJsonHelp(const std::string& key, bool optional) +{ + return {RPCResult::Type::OBJ, key, optional, key.empty() ? "" : "The recovered signature", + { + GetRpcResult("llmqType"), + GetRpcResult("quorumHash"), + {RPCResult::Type::NUM, "id", "Signing session ID"}, + {RPCResult::Type::STR_HEX, "msgHash", "Hash of message"}, + {RPCResult::Type::STR_HEX, "sig", "BLS signature recovered"}, + {RPCResult::Type::STR_HEX, "hash", "Hash of the BLS signature recovered"}, + }}; +} + [[nodiscard]] UniValue CRecoveredSig::ToJson() const { UniValue ret(UniValue::VOBJ); diff --git a/src/llmq/signing.h b/src/llmq/signing.h index 1097240c590e..2425de093604 100644 --- a/src/llmq/signing.h +++ b/src/llmq/signing.h @@ -30,6 +30,8 @@ class CDBBatch; class CDBWrapper; class CInv; class PeerManager; +struct RPCResult; + class UniValue; namespace llmq { @@ -106,6 +108,7 @@ class CRecoveredSig : virtual public CSigBase return hash; } + [[nodiscard]] static RPCResult GetJsonHelp(const std::string& key, bool optional); [[nodiscard]] UniValue ToJson() const; }; diff --git a/src/llmq/snapshot.h b/src/llmq/snapshot.h index 2d161ed00faf..3b1035a37b12 100644 --- a/src/llmq/snapshot.h +++ b/src/llmq/snapshot.h @@ -19,6 +19,8 @@ class CBlockIndex; class CEvoDB; +struct RPCResult; + class UniValue; namespace llmq { @@ -82,6 +84,7 @@ class CQuorumSnapshot } } + [[nodiscard]] static RPCResult GetJsonHelp(const std::string& key, bool optional); [[nodiscard]] 
UniValue ToJson() const; }; @@ -198,6 +201,7 @@ class CQuorumRotationInfo CQuorumRotationInfo() = default; CQuorumRotationInfo(const CQuorumRotationInfo& dmn) {} + [[nodiscard]] static RPCResult GetJsonHelp(const std::string& key, bool optional); [[nodiscard]] UniValue ToJson() const; }; diff --git a/src/rpc/quorums.cpp b/src/rpc/quorums.cpp index d4776d97e745..a9b3b88d3455 100644 --- a/src/rpc/quorums.cpp +++ b/src/rpc/quorums.cpp @@ -365,6 +365,7 @@ static RPCHelpMan quorum_dkgstatus() std::optional> vfqc = llmq_ctx.quorum_block_processor->GetMineableCommitments(llmq_params, tipHeight); if (vfqc.has_value()) { for (const auto& fqc : vfqc.value()) { + // TODO: Use CFinalCommitment::GetJsonHelp() for fqc minableCommitments.push_back(fqc.ToJson()); } } @@ -651,7 +652,7 @@ static RPCHelpMan quorum_getrecsig() {"id", RPCArg::Type::STR_HEX, RPCArg::Optional::NO, "Request id."}, {"msgHash", RPCArg::Type::STR_HEX, RPCArg::Optional::NO, "Message hash."}, }, - RPCResults{}, + llmq::CRecoveredSig::GetJsonHelp(/*key=*/"", /*optional=*/false), RPCExamples{""}, [&](const RPCHelpMan& self, const JSONRPCRequest& request) -> UniValue { @@ -847,7 +848,7 @@ static RPCHelpMan quorum_rotationinfo() {"baseBlockHash", RPCArg::Type::STR_HEX, RPCArg::Optional::NO, "The block hash"}, }}, }, - RPCResults{}, + llmq::CQuorumRotationInfo::GetJsonHelp(/*key=*/"", /*optional=*/false), RPCExamples{""}, [&](const RPCHelpMan& self, const JSONRPCRequest& request) -> UniValue { From b7eb196545f6f4359bb3afe62f1b61d96f2c0a4e Mon Sep 17 00:00:00 2001 From: Kittywhiskers Van Gogh <63189531+kwvg@users.noreply.github.com> Date: Mon, 6 Oct 2025 20:23:39 +0000 Subject: [PATCH 079/656] rpc: `GetJsonHelp()` defs for `DeterministicMN{,State{,{Diff}}}` --- src/evo/core_write.cpp | 86 ++++++++++++++++++++++++++++++++++++-- src/evo/deterministicmns.h | 6 ++- src/evo/dmnstate.h | 15 +++---- src/rpc/evo.cpp | 2 + src/rpc/masternode.cpp | 1 + 5 files changed, 97 insertions(+), 13 deletions(-) diff --git 
a/src/evo/core_write.cpp b/src/evo/core_write.cpp index 63a988f2ec32..e7b9f3e852d5 100644 --- a/src/evo/core_write.cpp +++ b/src/evo/core_write.cpp @@ -4,6 +4,7 @@ #include #include +#include #include #include #include @@ -37,20 +38,31 @@ RPCResult GetRpcResult(const std::string& key, bool optional) {RPCResult::Type::ARR, "platform_https", /*optional=*/true, "Addresses used for Platform HTTPS API", {{RPCResult::Type::STR, "address", ""}}}, }}}, + RESULT_MAP_ENTRY(RPCResult::Type::STR_HEX, "collateralHash", "Collateral transaction hash"), + RESULT_MAP_ENTRY(RPCResult::Type::NUM, "collateralIndex", "Collateral transaction output index"), + RESULT_MAP_ENTRY(RPCResult::Type::NUM, "consecutivePayments", "Consecutive payments masternode has received in payment cycle"), RESULT_MAP_ENTRY(RPCResult::Type::NUM, "height", "Block height"), RESULT_MAP_ENTRY(RPCResult::Type::STR_HEX, "inputsHash", "Hash of all the outpoints of the transaction inputs"), + RESULT_MAP_ENTRY(RPCResult::Type::NUM, "lastPaidHeight", "Height masternode was last paid"), RESULT_MAP_ENTRY(RPCResult::Type::NUM, "llmqType", "Quorum type"), RESULT_MAP_ENTRY(RPCResult::Type::STR_HEX, "merkleRootMNList", "Merkle root of the masternode list"), RESULT_MAP_ENTRY(RPCResult::Type::STR_HEX, "merkleRootQuorums", "Merkle root of the quorum list"), RESULT_MAP_ENTRY(RPCResult::Type::STR, "operatorPayoutAddress", "Dash address used for operator reward payments"), + RESULT_MAP_ENTRY(RPCResult::Type::NUM, "operatorReward", "Fraction in %% of reward shared with the operator between 0 and 10000"), + RESULT_MAP_ENTRY(RPCResult::Type::STR, "ownerAddress", "Dash address used for payee updates and proposal voting"), RESULT_MAP_ENTRY(RPCResult::Type::STR, "payoutAddress", "Dash address used for masternode reward payments"), RESULT_MAP_ENTRY(RPCResult::Type::NUM, "platformHTTPPort", "(DEPRECATED) TCP port of Platform HTTP API"), RESULT_MAP_ENTRY(RPCResult::Type::STR_HEX, "platformNodeID", "Node ID derived from P2P public key for 
Platform P2P"), RESULT_MAP_ENTRY(RPCResult::Type::NUM, "platformP2PPort", "(DEPRECATED) TCP port of Platform P2P"), + RESULT_MAP_ENTRY(RPCResult::Type::NUM, "PoSeBanHeight", "Height masternode was banned for Proof of Service violations"), + RESULT_MAP_ENTRY(RPCResult::Type::NUM, "PoSePenalty", "Proof of Service penalty score"), + RESULT_MAP_ENTRY(RPCResult::Type::NUM, "PoSeRevivedHeight", "Height masternode recovered from Proof of Service violations"), RESULT_MAP_ENTRY(RPCResult::Type::STR_HEX, "proTxHash", "Hash of the masternode's initial ProRegTx"), RESULT_MAP_ENTRY(RPCResult::Type::STR, "pubKeyOperator", "BLS public key used for operator signing"), RESULT_MAP_ENTRY(RPCResult::Type::STR_HEX, "quorumHash", "Hash of the quorum"), RESULT_MAP_ENTRY(RPCResult::Type::STR_HEX, "quorumSig", "BLS recovered threshold signature of quorum"), + RESULT_MAP_ENTRY(RPCResult::Type::NUM, "registeredHeight", "Height masternode was registered"), + RESULT_MAP_ENTRY(RPCResult::Type::NUM, "revocationReason", "Reason for ProUpRegTx revocation"), RESULT_MAP_ENTRY(RPCResult::Type::STR, "service", "(DEPRECATED) IP address and port of the masternode"), RESULT_MAP_ENTRY(RPCResult::Type::NUM, "type", "Masternode type"), RESULT_MAP_ENTRY(RPCResult::Type::NUM, "version", "Special transaction version"), @@ -158,6 +170,46 @@ RPCResult GetRpcResult(const std::string& key, bool optional) return ret; } +// CDeterministicMN::ToJson() defined in evo/deterministicmns.cpp +[[nodiscard]] RPCResult CDeterministicMN::GetJsonHelp(const std::string& key, bool optional) +{ + return {RPCResult::Type::OBJ, key, optional, key.empty() ? 
"" : "The masternode's details", + { + {RPCResult::Type::STR, "type", "Masternode type"}, + GetRpcResult("proTxHash"), + GetRpcResult("collateralHash"), + GetRpcResult("collateralIndex"), + {RPCResult::Type::STR, "collateralAddress", /*optional=*/true, "Dash address used for collateral"}, + GetRpcResult("operatorReward"), + CDeterministicMNState::GetJsonHelp(/*key=*/"state", /*optional=*/false), + }}; +} + +[[nodiscard]] RPCResult CDeterministicMNState::GetJsonHelp(const std::string& key, bool optional) +{ + return {RPCResult::Type::OBJ, key, optional, key.empty() ? "" : "The masternode state", + { + {RPCResult::Type::NUM, "version", "Version of the masternode state"}, + GetRpcResult("service"), + GetRpcResult("addresses"), + GetRpcResult("registeredHeight"), + GetRpcResult("lastPaidHeight"), + GetRpcResult("consecutivePayments"), + GetRpcResult("PoSePenalty"), + GetRpcResult("PoSeRevivedHeight"), + GetRpcResult("PoSeBanHeight"), + GetRpcResult("revocationReason"), + GetRpcResult("ownerAddress"), + GetRpcResult("votingAddress"), + GetRpcResult("platformNodeID", /*optional=*/true), + GetRpcResult("platformP2PPort", /*optional=*/true), + GetRpcResult("platformHTTPPort", /*optional=*/true), + GetRpcResult("payoutAddress", /*optional=*/true), + GetRpcResult("pubKeyOperator"), + GetRpcResult("operatorPayoutAddress", /*optional=*/true), + }}; +} + [[nodiscard]] UniValue CDeterministicMNState::ToJson(MnType nType) const { UniValue obj(UniValue::VOBJ); @@ -190,21 +242,47 @@ RPCResult GetRpcResult(const std::string& key, bool optional) return obj; } +// CDeterministicMNStateDiff::ToJson() defined in evo/dmnstate.cpp +[[nodiscard]] RPCResult CDeterministicMNStateDiff::GetJsonHelp(const std::string& key, bool optional) +{ + return {RPCResult::Type::OBJ, key, optional, key.empty() ? 
"" : "The masternode state diff", + { + {RPCResult::Type::NUM, "version", "Version of the masternode state diff"}, + GetRpcResult("service", /*optional=*/true), + GetRpcResult("registeredHeight", /*optional=*/true), + GetRpcResult("lastPaidHeight", /*optional=*/true), + GetRpcResult("consecutivePayments", /*optional=*/true), + GetRpcResult("PoSePenalty", /*optional=*/true), + GetRpcResult("PoSeRevivedHeight", /*optional=*/true), + GetRpcResult("PoSeBanHeight", /*optional=*/true), + GetRpcResult("revocationReason", /*optional=*/true), + GetRpcResult("ownerAddress", /*optional=*/true), + GetRpcResult("votingAddress", /*optional=*/true), + GetRpcResult("payoutAddress", /*optional=*/true), + GetRpcResult("operatorPayoutAddress", /*optional=*/true), + GetRpcResult("pubKeyOperator", /*optional=*/true), + GetRpcResult("platformNodeID", /*optional=*/true), + GetRpcResult("platformP2PPort", /*optional=*/true), + GetRpcResult("platformHTTPPort", /*optional=*/true), + GetRpcResult("addresses", /*optional=*/true), + }}; +} + [[nodiscard]] RPCResult CProRegTx::GetJsonHelp(const std::string& key, bool optional) { return {RPCResult::Type::OBJ, key, optional, key.empty() ? 
"" : "The masternode registration special transaction", { GetRpcResult("version"), GetRpcResult("type"), - {RPCResult::Type::STR_HEX, "collateralHash", "Collateral transaction hash"}, - {RPCResult::Type::NUM, "collateralIndex", "Collateral transaction output index"}, + GetRpcResult("collateralHash"), + GetRpcResult("collateralIndex"), GetRpcResult("service"), GetRpcResult("addresses"), - {RPCResult::Type::STR, "ownerAddress", "Dash address used for payee updates and proposal voting"}, + GetRpcResult("ownerAddress"), GetRpcResult("votingAddress"), GetRpcResult("payoutAddress", /*optional=*/true), GetRpcResult("pubKeyOperator"), - {RPCResult::Type::NUM, "operatorReward", "Fraction in %% of reward shared with the operator between 0 and 10000"}, + GetRpcResult("operatorReward"), GetRpcResult("platformNodeID", /*optional=*/true), GetRpcResult("platformP2PPort", /*optional=*/true), GetRpcResult("platformHTTPPort", /*optional=*/true), diff --git a/src/evo/deterministicmns.h b/src/evo/deterministicmns.h index 12643e5f1f1a..6fdf3604e9c3 100644 --- a/src/evo/deterministicmns.h +++ b/src/evo/deterministicmns.h @@ -5,9 +5,8 @@ #ifndef BITCOIN_EVO_DETERMINISTICMNS_H #define BITCOIN_EVO_DETERMINISTICMNS_H -#include - #include +#include #include #include @@ -36,6 +35,7 @@ class CSimplifiedMNList; class CSimplifiedMNListEntry; class CMasternodeMetaMan; class TxValidationState; +struct RPCResult; extern RecursiveMutex cs_main; @@ -86,6 +86,8 @@ class CDeterministicMN [[nodiscard]] CSimplifiedMNListEntry to_sml_entry() const; [[nodiscard]] std::string ToString() const; + + [[nodiscard]] static RPCResult GetJsonHelp(const std::string& key, bool optional); [[nodiscard]] UniValue ToJson() const; }; diff --git a/src/evo/dmnstate.h b/src/evo/dmnstate.h index d63460b2d931..7fa609465002 100644 --- a/src/evo/dmnstate.h +++ b/src/evo/dmnstate.h @@ -19,16 +19,15 @@ #include #include -class CProRegTx; -class UniValue; - class CDeterministicMNState; - -namespace llmq -{ - class 
CFinalCommitment; +class CProRegTx; +struct RPCResult; +namespace llmq { +class CFinalCommitment; } // namespace llmq +class UniValue; + class CDeterministicMNState { private: @@ -150,6 +149,7 @@ class CDeterministicMNState public: std::string ToString() const; + [[nodiscard]] static RPCResult GetJsonHelp(const std::string& key, bool optional); [[nodiscard]] UniValue ToJson(MnType nType) const; }; @@ -243,6 +243,7 @@ class CDeterministicMNStateDiff template CDeterministicMNStateDiff(deserialize_type, Stream& s) { s >> *this; } + [[nodiscard]] static RPCResult GetJsonHelp(const std::string& key, bool optional); [[nodiscard]] UniValue ToJson(MnType nType) const; SERIALIZE_METHODS(CDeterministicMNStateDiff, obj) diff --git a/src/rpc/evo.cpp b/src/rpc/evo.cpp index 9907f19153f8..3e0dcc641636 100644 --- a/src/rpc/evo.cpp +++ b/src/rpc/evo.cpp @@ -1679,6 +1679,7 @@ static RPCHelpMan protx_listdiff() for(const auto& mn : mnDiff.addedMNs) { jaddedMNs.push_back(mn->ToJson()); } + // TODO: Use CDeterministicMN::GetJsonHelp() for mn ret.pushKV("addedMNs", jaddedMNs); UniValue jremovedMNs(UniValue::VARR); @@ -1701,6 +1702,7 @@ static RPCHelpMan protx_listdiff() obj.pushKV(dmn->proTxHash.ToString(), stateDiff.ToJson(dmn->nType)); jupdatedMNs.push_back(obj); } + // TODO: Use CDeterministicMNStateDiff::GetJsonHelp() for stateDiff ret.pushKV("updatedMNs", jupdatedMNs); return ret; diff --git a/src/rpc/masternode.cpp b/src/rpc/masternode.cpp index b87670e3766c..727b20e9ebf5 100644 --- a/src/rpc/masternode.cpp +++ b/src/rpc/masternode.cpp @@ -177,6 +177,7 @@ static RPCHelpMan masternode_status() mnObj.pushKV("type", std::string(GetMnType(dmn->nType).description)); mnObj.pushKV("collateralHash", dmn->collateralOutpoint.hash.ToString()); mnObj.pushKV("collateralIndex", dmn->collateralOutpoint.n); + // TODO: Use CDeterministicMNState::GetJsonHelp() for dmnState mnObj.pushKV("dmnState", dmn->pdmnState->ToJson(dmn->nType)); } mnObj.pushKV("state", node.mn_activeman->GetStateString()); 
From f01930b975f61be2af5c1229df6766d714bdc329 Mon Sep 17 00:00:00 2001 From: Kittywhiskers Van Gogh <63189531+kwvg@users.noreply.github.com> Date: Sun, 5 Oct 2025 12:55:46 +0000 Subject: [PATCH 080/656] chore: remove redundant `[[nodiscard]]` attributions in source file --- src/evo/core_write.cpp | 52 ++++++++++++++++++++--------------------- src/llmq/core_write.cpp | 20 ++++++++-------- 2 files changed, 36 insertions(+), 36 deletions(-) diff --git a/src/evo/core_write.cpp b/src/evo/core_write.cpp index e7b9f3e852d5..eef000b1b2be 100644 --- a/src/evo/core_write.cpp +++ b/src/evo/core_write.cpp @@ -78,7 +78,7 @@ RPCResult GetRpcResult(const std::string& key, bool optional) __FILE__, __LINE__, __func__); } -[[nodiscard]] RPCResult CAssetLockPayload::GetJsonHelp(const std::string& key, bool optional) +RPCResult CAssetLockPayload::GetJsonHelp(const std::string& key, bool optional) { return {RPCResult::Type::OBJ, key, optional, key.empty() ? "" : "The asset lock special transaction", { @@ -95,7 +95,7 @@ RPCResult GetRpcResult(const std::string& key, bool optional) }}; } -[[nodiscard]] UniValue CAssetLockPayload::ToJson() const +UniValue CAssetLockPayload::ToJson() const { UniValue outputs(UniValue::VARR); for (const CTxOut& credit_output : creditOutputs) { @@ -114,7 +114,7 @@ RPCResult GetRpcResult(const std::string& key, bool optional) return ret; } -[[nodiscard]] RPCResult CAssetUnlockPayload::GetJsonHelp(const std::string& key, bool optional) +RPCResult CAssetUnlockPayload::GetJsonHelp(const std::string& key, bool optional) { return {RPCResult::Type::OBJ, key, optional, key.empty() ? 
"" : "The asset unlock special transaction", { @@ -127,7 +127,7 @@ RPCResult GetRpcResult(const std::string& key, bool optional) }}; } -[[nodiscard]] UniValue CAssetUnlockPayload::ToJson() const +UniValue CAssetUnlockPayload::ToJson() const { UniValue ret(UniValue::VOBJ); ret.pushKV("version", nVersion); @@ -139,7 +139,7 @@ RPCResult GetRpcResult(const std::string& key, bool optional) return ret; } -[[nodiscard]] RPCResult CCbTx::GetJsonHelp(const std::string& key, bool optional) +RPCResult CCbTx::GetJsonHelp(const std::string& key, bool optional) { return {RPCResult::Type::OBJ, key, optional, key.empty() ? "" : "The coinbase special transaction", { @@ -153,7 +153,7 @@ RPCResult GetRpcResult(const std::string& key, bool optional) }}; } -[[nodiscard]] UniValue CCbTx::ToJson() const +UniValue CCbTx::ToJson() const { UniValue ret(UniValue::VOBJ); ret.pushKV("version", ToUnderlying(nVersion)); @@ -171,7 +171,7 @@ RPCResult GetRpcResult(const std::string& key, bool optional) } // CDeterministicMN::ToJson() defined in evo/deterministicmns.cpp -[[nodiscard]] RPCResult CDeterministicMN::GetJsonHelp(const std::string& key, bool optional) +RPCResult CDeterministicMN::GetJsonHelp(const std::string& key, bool optional) { return {RPCResult::Type::OBJ, key, optional, key.empty() ? "" : "The masternode's details", { @@ -185,7 +185,7 @@ RPCResult GetRpcResult(const std::string& key, bool optional) }}; } -[[nodiscard]] RPCResult CDeterministicMNState::GetJsonHelp(const std::string& key, bool optional) +RPCResult CDeterministicMNState::GetJsonHelp(const std::string& key, bool optional) { return {RPCResult::Type::OBJ, key, optional, key.empty() ? 
"" : "The masternode state", { @@ -210,7 +210,7 @@ RPCResult GetRpcResult(const std::string& key, bool optional) }}; } -[[nodiscard]] UniValue CDeterministicMNState::ToJson(MnType nType) const +UniValue CDeterministicMNState::ToJson(MnType nType) const { UniValue obj(UniValue::VOBJ); obj.pushKV("version", nVersion); @@ -243,7 +243,7 @@ RPCResult GetRpcResult(const std::string& key, bool optional) } // CDeterministicMNStateDiff::ToJson() defined in evo/dmnstate.cpp -[[nodiscard]] RPCResult CDeterministicMNStateDiff::GetJsonHelp(const std::string& key, bool optional) +RPCResult CDeterministicMNStateDiff::GetJsonHelp(const std::string& key, bool optional) { return {RPCResult::Type::OBJ, key, optional, key.empty() ? "" : "The masternode state diff", { @@ -268,7 +268,7 @@ RPCResult GetRpcResult(const std::string& key, bool optional) }}; } -[[nodiscard]] RPCResult CProRegTx::GetJsonHelp(const std::string& key, bool optional) +RPCResult CProRegTx::GetJsonHelp(const std::string& key, bool optional) { return {RPCResult::Type::OBJ, key, optional, key.empty() ? "" : "The masternode registration special transaction", { @@ -290,7 +290,7 @@ RPCResult GetRpcResult(const std::string& key, bool optional) }}; } -[[nodiscard]] UniValue CProRegTx::ToJson() const +UniValue CProRegTx::ToJson() const { UniValue ret(UniValue::VOBJ); ret.pushKV("version", nVersion); @@ -315,7 +315,7 @@ RPCResult GetRpcResult(const std::string& key, bool optional) return ret; } -[[nodiscard]] RPCResult CProUpRegTx::GetJsonHelp(const std::string& key, bool optional) +RPCResult CProUpRegTx::GetJsonHelp(const std::string& key, bool optional) { return {RPCResult::Type::OBJ, key, optional, key.empty() ? 
"" : "The masternode update registrar special transaction", { @@ -328,7 +328,7 @@ RPCResult GetRpcResult(const std::string& key, bool optional) }}; } -[[nodiscard]] UniValue CProUpRegTx::ToJson() const +UniValue CProUpRegTx::ToJson() const { UniValue ret(UniValue::VOBJ); ret.pushKV("version", nVersion); @@ -342,7 +342,7 @@ RPCResult GetRpcResult(const std::string& key, bool optional) return ret; } -[[nodiscard]] RPCResult CProUpRevTx::GetJsonHelp(const std::string& key, bool optional) +RPCResult CProUpRevTx::GetJsonHelp(const std::string& key, bool optional) { return {RPCResult::Type::OBJ, key, optional, key.empty() ? "" : "The masternode operator revocation special transaction", { @@ -353,7 +353,7 @@ RPCResult GetRpcResult(const std::string& key, bool optional) }}; } -[[nodiscard]] UniValue CProUpRevTx::ToJson() const +UniValue CProUpRevTx::ToJson() const { UniValue ret(UniValue::VOBJ); ret.pushKV("version", nVersion); @@ -363,7 +363,7 @@ RPCResult GetRpcResult(const std::string& key, bool optional) return ret; } -[[nodiscard]] RPCResult CProUpServTx::GetJsonHelp(const std::string& key, bool optional) +RPCResult CProUpServTx::GetJsonHelp(const std::string& key, bool optional) { return {RPCResult::Type::OBJ, key, optional, key.empty() ? "" : "The masternode update service special transaction", { @@ -380,7 +380,7 @@ RPCResult GetRpcResult(const std::string& key, bool optional) }}; } -[[nodiscard]] UniValue CProUpServTx::ToJson() const +UniValue CProUpServTx::ToJson() const { UniValue ret(UniValue::VOBJ); ret.pushKV("version", nVersion); @@ -400,7 +400,7 @@ RPCResult GetRpcResult(const std::string& key, bool optional) return ret; } -[[nodiscard]] RPCResult CSimplifiedMNListDiff::GetJsonHelp(const std::string& key, bool optional) +RPCResult CSimplifiedMNListDiff::GetJsonHelp(const std::string& key, bool optional) { return {RPCResult::Type::OBJ, key, optional, key.empty() ? 
"" : "The simplified masternode list diff", { @@ -430,7 +430,7 @@ RPCResult GetRpcResult(const std::string& key, bool optional) }}; } -[[nodiscard]] UniValue CSimplifiedMNListDiff::ToJson(bool extended) const +UniValue CSimplifiedMNListDiff::ToJson(bool extended) const { UniValue obj(UniValue::VOBJ); @@ -493,7 +493,7 @@ RPCResult GetRpcResult(const std::string& key, bool optional) return obj; } -[[nodiscard]] RPCResult CSimplifiedMNListEntry::GetJsonHelp(const std::string& key, bool optional) +RPCResult CSimplifiedMNListEntry::GetJsonHelp(const std::string& key, bool optional) { return {RPCResult::Type::OBJ, key, optional, key.empty() ? "" : "The simplified masternode list entry", { @@ -513,7 +513,7 @@ RPCResult GetRpcResult(const std::string& key, bool optional) }}; } -[[nodiscard]] UniValue CSimplifiedMNListEntry::ToJson(bool extended) const +UniValue CSimplifiedMNListEntry::ToJson(bool extended) const { UniValue obj(UniValue::VOBJ); obj.pushKV("nVersion", nVersion); @@ -542,7 +542,7 @@ RPCResult GetRpcResult(const std::string& key, bool optional) return obj; } -[[nodiscard]] RPCResult MNHFTx::GetJsonHelp(const std::string& key, bool optional) +RPCResult MNHFTx::GetJsonHelp(const std::string& key, bool optional) { return {RPCResult::Type::OBJ, key, optional, key.empty() ? "" : "The masternode hard fork payload", { @@ -552,7 +552,7 @@ RPCResult GetRpcResult(const std::string& key, bool optional) }}; } -[[nodiscard]] UniValue MNHFTx::ToJson() const +UniValue MNHFTx::ToJson() const { UniValue obj(UniValue::VOBJ); obj.pushKV("versionBit", versionBit); @@ -561,7 +561,7 @@ RPCResult GetRpcResult(const std::string& key, bool optional) return obj; } -[[nodiscard]] RPCResult MNHFTxPayload::GetJsonHelp(const std::string& key, bool optional) +RPCResult MNHFTxPayload::GetJsonHelp(const std::string& key, bool optional) { return {RPCResult::Type::OBJ, key, optional, key.empty() ? 
"" : "The masternode hard fork signal special transaction", { @@ -570,7 +570,7 @@ RPCResult GetRpcResult(const std::string& key, bool optional) }}; } -[[nodiscard]] UniValue MNHFTxPayload::ToJson() const +UniValue MNHFTxPayload::ToJson() const { UniValue ret(UniValue::VOBJ); ret.pushKV("version", nVersion); diff --git a/src/llmq/core_write.cpp b/src/llmq/core_write.cpp index 8a8137ed7072..5ffe3c46f956 100644 --- a/src/llmq/core_write.cpp +++ b/src/llmq/core_write.cpp @@ -15,7 +15,7 @@ #include namespace llmq { -[[nodiscard]] RPCResult CFinalCommitment::GetJsonHelp(const std::string& key, bool optional) +RPCResult CFinalCommitment::GetJsonHelp(const std::string& key, bool optional) { return {RPCResult::Type::OBJ, key, optional, key.empty() ? "" : "The quorum commitment payload", { @@ -34,7 +34,7 @@ namespace llmq { }}; } -[[nodiscard]] UniValue CFinalCommitment::ToJson() const +UniValue CFinalCommitment::ToJson() const { UniValue obj(UniValue::VOBJ); obj.pushKV("version", nVersion); @@ -52,7 +52,7 @@ namespace llmq { return obj; } -[[nodiscard]] RPCResult CFinalCommitmentTxPayload::GetJsonHelp(const std::string& key, bool optional) +RPCResult CFinalCommitmentTxPayload::GetJsonHelp(const std::string& key, bool optional) { return {RPCResult::Type::OBJ, key, optional, key.empty() ? "" : "The quorum commitment special transaction", { @@ -62,7 +62,7 @@ namespace llmq { }}; } -[[nodiscard]] UniValue CFinalCommitmentTxPayload::ToJson() const +UniValue CFinalCommitmentTxPayload::ToJson() const { UniValue ret(UniValue::VOBJ); ret.pushKV("version", nVersion); @@ -71,7 +71,7 @@ namespace llmq { return ret; } -[[nodiscard]] RPCResult CQuorumRotationInfo::GetJsonHelp(const std::string& key, bool optional) +RPCResult CQuorumRotationInfo::GetJsonHelp(const std::string& key, bool optional) { return {RPCResult::Type::OBJ, key, optional, key.empty() ? 
"" : "The quorum rotation", { @@ -98,7 +98,7 @@ namespace llmq { }}; } -[[nodiscard]] UniValue CQuorumRotationInfo::ToJson() const +UniValue CQuorumRotationInfo::ToJson() const { UniValue obj(UniValue::VOBJ); obj.pushKV("extraShare", extraShare); @@ -140,7 +140,7 @@ namespace llmq { return obj; } -[[nodiscard]] RPCResult CQuorumSnapshot::GetJsonHelp(const std::string& key, bool optional) +RPCResult CQuorumSnapshot::GetJsonHelp(const std::string& key, bool optional) { return {RPCResult::Type::OBJ, key, optional, key.empty() ? "" : "The quorum snapshot", { @@ -154,7 +154,7 @@ namespace llmq { }}; } -[[nodiscard]] UniValue CQuorumSnapshot::ToJson() const +UniValue CQuorumSnapshot::ToJson() const { UniValue obj(UniValue::VOBJ); UniValue activeQ(UniValue::VARR); @@ -173,7 +173,7 @@ namespace llmq { return obj; } -[[nodiscard]] RPCResult CRecoveredSig::GetJsonHelp(const std::string& key, bool optional) +RPCResult CRecoveredSig::GetJsonHelp(const std::string& key, bool optional) { return {RPCResult::Type::OBJ, key, optional, key.empty() ? 
"" : "The recovered signature", { @@ -186,7 +186,7 @@ namespace llmq { }}; } -[[nodiscard]] UniValue CRecoveredSig::ToJson() const +UniValue CRecoveredSig::ToJson() const { UniValue ret(UniValue::VOBJ); ret.pushKV("llmqType", ToUnderlying(llmqType)); From 56514c767d6faa01d0076136a3d37bb5158f6125 Mon Sep 17 00:00:00 2001 From: Kittywhiskers Van Gogh <63189531+kwvg@users.noreply.github.com> Date: Sun, 5 Oct 2025 15:08:43 +0000 Subject: [PATCH 081/656] chore: move RpcResult map outside `GetRpcResult()` We are using it extensively and could do with some cleanup --- src/evo/core_write.cpp | 96 +++++++++++++++++++++--------------------- 1 file changed, 49 insertions(+), 47 deletions(-) diff --git a/src/evo/core_write.cpp b/src/evo/core_write.cpp index eef000b1b2be..4e83e8bc4630 100644 --- a/src/evo/core_write.cpp +++ b/src/evo/core_write.cpp @@ -24,56 +24,58 @@ #include #include +namespace { +#define RESULT_MAP_ENTRY(name, type, desc) {name, {type, name, desc}} +const std::map RPCRESULT_MAP{{ + {"addresses", + {RPCResult::Type::OBJ, "addresses", "Network addresses of the masternode", + { + {RPCResult::Type::ARR, "core_p2p", /*optional=*/true, "Addresses used for protocol P2P", + {{RPCResult::Type::STR, "address", ""}}}, + {RPCResult::Type::ARR, "platform_p2p", /*optional=*/true, "Addresses used for Platform P2P", + {{RPCResult::Type::STR, "address", ""}}}, + {RPCResult::Type::ARR, "platform_https", /*optional=*/true, "Addresses used for Platform HTTPS API", + {{RPCResult::Type::STR, "address", ""}}}, + }}}, + RESULT_MAP_ENTRY("collateralHash", RPCResult::Type::STR_HEX, "Collateral transaction hash"), + RESULT_MAP_ENTRY("collateralIndex", RPCResult::Type::NUM, "Collateral transaction output index"), + RESULT_MAP_ENTRY("consecutivePayments", RPCResult::Type::NUM, "Consecutive payments masternode has received in payment cycle"), + RESULT_MAP_ENTRY("height", RPCResult::Type::NUM, "Block height"), + RESULT_MAP_ENTRY("inputsHash", RPCResult::Type::STR_HEX, "Hash of all the 
outpoints of the transaction inputs"), + RESULT_MAP_ENTRY("lastPaidHeight", RPCResult::Type::NUM, "Height masternode was last paid"), + RESULT_MAP_ENTRY("llmqType", RPCResult::Type::NUM, "Quorum type"), + RESULT_MAP_ENTRY("merkleRootMNList", RPCResult::Type::STR_HEX, "Merkle root of the masternode list"), + RESULT_MAP_ENTRY("merkleRootQuorums", RPCResult::Type::STR_HEX, "Merkle root of the quorum list"), + RESULT_MAP_ENTRY("operatorPayoutAddress", RPCResult::Type::STR, "Dash address used for operator reward payments"), + RESULT_MAP_ENTRY("operatorReward", RPCResult::Type::NUM, "Fraction in %% of reward shared with the operator between 0 and 10000"), + RESULT_MAP_ENTRY("ownerAddress", RPCResult::Type::STR, "Dash address used for payee updates and proposal voting"), + RESULT_MAP_ENTRY("payoutAddress", RPCResult::Type::STR, "Dash address used for masternode reward payments"), + RESULT_MAP_ENTRY("platformHTTPPort", RPCResult::Type::NUM, "(DEPRECATED) TCP port of Platform HTTP API"), + RESULT_MAP_ENTRY("platformNodeID", RPCResult::Type::STR_HEX, "Node ID derived from P2P public key for Platform P2P"), + RESULT_MAP_ENTRY("platformP2PPort", RPCResult::Type::NUM, "(DEPRECATED) TCP port of Platform P2P"), + RESULT_MAP_ENTRY("PoSeBanHeight", RPCResult::Type::NUM, "Height masternode was banned for Proof of Service violations"), + RESULT_MAP_ENTRY("PoSePenalty", RPCResult::Type::NUM, "Proof of Service penalty score"), + RESULT_MAP_ENTRY("PoSeRevivedHeight", RPCResult::Type::NUM, "Height masternode recovered from Proof of Service violations"), + RESULT_MAP_ENTRY("proTxHash", RPCResult::Type::STR_HEX, "Hash of the masternode's initial ProRegTx"), + RESULT_MAP_ENTRY("pubKeyOperator", RPCResult::Type::STR, "BLS public key used for operator signing"), + RESULT_MAP_ENTRY("quorumHash", RPCResult::Type::STR_HEX, "Hash of the quorum"), + RESULT_MAP_ENTRY("quorumSig", RPCResult::Type::STR_HEX, "BLS recovered threshold signature of quorum"), + RESULT_MAP_ENTRY("registeredHeight", 
RPCResult::Type::NUM, "Height masternode was registered"), + RESULT_MAP_ENTRY("revocationReason", RPCResult::Type::NUM, "Reason for ProUpRegTx revocation"), + RESULT_MAP_ENTRY("service", RPCResult::Type::STR, "(DEPRECATED) IP address and port of the masternode"), + RESULT_MAP_ENTRY("type", RPCResult::Type::NUM, "Masternode type"), + RESULT_MAP_ENTRY("version", RPCResult::Type::NUM, "Special transaction version"), + RESULT_MAP_ENTRY("votingAddress", RPCResult::Type::STR, "Dash address used for voting"), +}}; +#undef RESULT_MAP_ENTRY +} // anonymous namespace + RPCResult GetRpcResult(const std::string& key, bool optional) { -#define RESULT_MAP_ENTRY(type, name, desc) {name, {type, name, optional, desc}} - const std::map result_map{{ - {"addresses", - {RPCResult::Type::OBJ, "addresses", optional, "Network addresses of the masternode", - { - {RPCResult::Type::ARR, "core_p2p", /*optional=*/true, "Addresses used for protocol P2P", - {{RPCResult::Type::STR, "address", ""}}}, - {RPCResult::Type::ARR, "platform_p2p", /*optional=*/true, "Addresses used for Platform P2P", - {{RPCResult::Type::STR, "address", ""}}}, - {RPCResult::Type::ARR, "platform_https", /*optional=*/true, "Addresses used for Platform HTTPS API", - {{RPCResult::Type::STR, "address", ""}}}, - }}}, - RESULT_MAP_ENTRY(RPCResult::Type::STR_HEX, "collateralHash", "Collateral transaction hash"), - RESULT_MAP_ENTRY(RPCResult::Type::NUM, "collateralIndex", "Collateral transaction output index"), - RESULT_MAP_ENTRY(RPCResult::Type::NUM, "consecutivePayments", "Consecutive payments masternode has received in payment cycle"), - RESULT_MAP_ENTRY(RPCResult::Type::NUM, "height", "Block height"), - RESULT_MAP_ENTRY(RPCResult::Type::STR_HEX, "inputsHash", "Hash of all the outpoints of the transaction inputs"), - RESULT_MAP_ENTRY(RPCResult::Type::NUM, "lastPaidHeight", "Height masternode was last paid"), - RESULT_MAP_ENTRY(RPCResult::Type::NUM, "llmqType", "Quorum type"), - RESULT_MAP_ENTRY(RPCResult::Type::STR_HEX, 
"merkleRootMNList", "Merkle root of the masternode list"), - RESULT_MAP_ENTRY(RPCResult::Type::STR_HEX, "merkleRootQuorums", "Merkle root of the quorum list"), - RESULT_MAP_ENTRY(RPCResult::Type::STR, "operatorPayoutAddress", "Dash address used for operator reward payments"), - RESULT_MAP_ENTRY(RPCResult::Type::NUM, "operatorReward", "Fraction in %% of reward shared with the operator between 0 and 10000"), - RESULT_MAP_ENTRY(RPCResult::Type::STR, "ownerAddress", "Dash address used for payee updates and proposal voting"), - RESULT_MAP_ENTRY(RPCResult::Type::STR, "payoutAddress", "Dash address used for masternode reward payments"), - RESULT_MAP_ENTRY(RPCResult::Type::NUM, "platformHTTPPort", "(DEPRECATED) TCP port of Platform HTTP API"), - RESULT_MAP_ENTRY(RPCResult::Type::STR_HEX, "platformNodeID", "Node ID derived from P2P public key for Platform P2P"), - RESULT_MAP_ENTRY(RPCResult::Type::NUM, "platformP2PPort", "(DEPRECATED) TCP port of Platform P2P"), - RESULT_MAP_ENTRY(RPCResult::Type::NUM, "PoSeBanHeight", "Height masternode was banned for Proof of Service violations"), - RESULT_MAP_ENTRY(RPCResult::Type::NUM, "PoSePenalty", "Proof of Service penalty score"), - RESULT_MAP_ENTRY(RPCResult::Type::NUM, "PoSeRevivedHeight", "Height masternode recovered from Proof of Service violations"), - RESULT_MAP_ENTRY(RPCResult::Type::STR_HEX, "proTxHash", "Hash of the masternode's initial ProRegTx"), - RESULT_MAP_ENTRY(RPCResult::Type::STR, "pubKeyOperator", "BLS public key used for operator signing"), - RESULT_MAP_ENTRY(RPCResult::Type::STR_HEX, "quorumHash", "Hash of the quorum"), - RESULT_MAP_ENTRY(RPCResult::Type::STR_HEX, "quorumSig", "BLS recovered threshold signature of quorum"), - RESULT_MAP_ENTRY(RPCResult::Type::NUM, "registeredHeight", "Height masternode was registered"), - RESULT_MAP_ENTRY(RPCResult::Type::NUM, "revocationReason", "Reason for ProUpRegTx revocation"), - RESULT_MAP_ENTRY(RPCResult::Type::STR, "service", "(DEPRECATED) IP address and port of the 
masternode"), - RESULT_MAP_ENTRY(RPCResult::Type::NUM, "type", "Masternode type"), - RESULT_MAP_ENTRY(RPCResult::Type::NUM, "version", "Special transaction version"), - RESULT_MAP_ENTRY(RPCResult::Type::STR, "votingAddress", "Dash address used for voting"), - }}; -#undef RESULT_MAP_ENTRY - - if (const auto it = result_map.find(key); it != result_map.end()) { - return it->second; + if (const auto it = RPCRESULT_MAP.find(key); it != RPCRESULT_MAP.end()) { + const auto& ret{it->second}; + return RPCResult{ret.m_type, ret.m_key_name, optional, ret.m_description, ret.m_inner}; } - throw NonFatalCheckError(strprintf("Requested invalid RPCResult for nonexistent key \"%s\"", key).c_str(), __FILE__, __LINE__, __func__); } From ce7d354f2bf9ff50adee845162a4fc5291a03c6b Mon Sep 17 00:00:00 2001 From: laanwj <126646+laanwj@users.noreply.github.com> Date: Tue, 26 Apr 2022 15:10:22 +0200 Subject: [PATCH 082/656] Merge bitcoin/bitcoin#24959: Remove not needed clang-format off comments fa870e3d4ccd6dfd0a9a8f2c608721a7251114e2 Remove not needed clang-format off comments (MarcoFalke) Pull request description: It seems odd to disable clang-format and force manual formatting when there is no need for it. So remove the clang-format comments and other unneeded comments. Can be reviewed with `--word-diff-regex=. --ignore-all-space` Looks like this was initially added in commit d9d79576f423cd9c5cef4547c7e3648dbb339460 to accommodate a linter that has since been removed and replaced by a functional test. 
ACKs for top commit: laanwj: Code review ACK fa870e3d4ccd6dfd0a9a8f2c608721a7251114e2 fanquake: ACK fa870e3d4ccd6dfd0a9a8f2c608721a7251114e2 Tree-SHA512: 0f8f97c12f5dbe517dd96c10b10ce1b8772d8daed33e6b41f73ea1040e89888cf3b8c0ad7b20319e366fe30c71e8b181c89098ae7f6a3deb8647e1b4731db815 --- src/rpc/blockchain.cpp | 76 +++++++++---------- src/rpc/coinjoin.cpp | 38 ++++------ src/rpc/evo.cpp | 71 ++++++++---------- src/rpc/governance.cpp | 49 +++++-------- src/rpc/masternode.cpp | 34 ++++----- src/rpc/mempool.cpp | 2 - src/rpc/mining.cpp | 30 +++----- src/rpc/net.cpp | 45 +++++------- src/rpc/quorums.cpp | 48 ++++++------ src/rpc/rawtransaction.cpp | 46 ++++++------ src/rpc/server.cpp | 15 ++-- src/rpc/txoutproof.cpp | 2 - src/wallet/rpc/wallet.cpp | 146 ++++++++++++++++++------------------- src/zmq/zmqrpc.cpp | 6 +- 14 files changed, 267 insertions(+), 341 deletions(-) diff --git a/src/rpc/blockchain.cpp b/src/rpc/blockchain.cpp index 35e4d54c1f25..92c0f1fa31ed 100644 --- a/src/rpc/blockchain.cpp +++ b/src/rpc/blockchain.cpp @@ -2759,48 +2759,40 @@ UniValue CreateUTXOSnapshot( return result; } - -void RegisterBlockchainRPCCommands(CRPCTable &t) -{ -// clang-format off -static const CRPCCommand commands[] = -{ // category actor (function) - // --------------------- ------------------------ - { "blockchain", &getblockchaininfo, }, - { "blockchain", &getchaintxstats, }, - { "blockchain", &getblockstats, }, - { "blockchain", &getbestblockhash, }, - { "blockchain", &getbestchainlock, }, - { "blockchain", &getblockcount, }, - { "blockchain", &getblock, }, - { "blockchain", &getblockfrompeer, }, - { "blockchain", &getblockhashes, }, - { "blockchain", &getblockhash, }, - { "blockchain", &getblockheader, }, - { "blockchain", &getblockheaders, }, - { "blockchain", &getmerkleblocks, }, - { "blockchain", &getchaintips, }, - { "blockchain", &getdifficulty, }, - { "blockchain", &getspecialtxes, }, - { "blockchain", &gettxout, }, - { "blockchain", &gettxoutsetinfo, }, - { "blockchain", 
&pruneblockchain, }, - { "blockchain", &verifychain, }, - - { "blockchain", &preciousblock, }, - { "blockchain", &scantxoutset, }, - { "blockchain", &getblockfilter, }, - - /* Not shown in help */ - { "hidden", &invalidateblock, }, - { "hidden", &reconsiderblock, }, - { "hidden", &waitfornewblock, }, - { "hidden", &waitforblock, }, - { "hidden", &waitforblockheight, }, - { "hidden", &syncwithvalidationinterfacequeue, }, - { "hidden", &dumptxoutset, }, -}; -// clang-format on +void RegisterBlockchainRPCCommands(CRPCTable& t) +{ + static const CRPCCommand commands[]{ + {"blockchain", &getblockchaininfo}, + {"blockchain", &getchaintxstats}, + {"blockchain", &getblockstats}, + {"blockchain", &getbestblockhash}, + {"blockchain", &getbestchainlock}, + {"blockchain", &getblockcount}, + {"blockchain", &getblock}, + {"blockchain", &getblockfrompeer}, + {"blockchain", &getblockhashes}, + {"blockchain", &getblockhash}, + {"blockchain", &getblockheader}, + {"blockchain", &getblockheaders}, + {"blockchain", &getmerkleblocks}, + {"blockchain", &getchaintips}, + {"blockchain", &getdifficulty}, + {"blockchain", &getspecialtxes}, + {"blockchain", &gettxout}, + {"blockchain", &gettxoutsetinfo}, + {"blockchain", &pruneblockchain}, + {"blockchain", &verifychain}, + {"blockchain", &preciousblock}, + {"blockchain", &scantxoutset}, + {"blockchain", &getblockfilter}, + {"hidden", &invalidateblock}, + {"hidden", &reconsiderblock}, + {"hidden", &waitfornewblock}, + {"hidden", &waitforblock}, + {"hidden", &waitforblockheight}, + {"hidden", &syncwithvalidationinterfacequeue}, + {"hidden", &dumptxoutset}, + }; for (const auto& c : commands) { t.appendCommand(c.name, &c); } diff --git a/src/rpc/coinjoin.cpp b/src/rpc/coinjoin.cpp index e0367734467a..ef578d726101 100644 --- a/src/rpc/coinjoin.cpp +++ b/src/rpc/coinjoin.cpp @@ -503,35 +503,27 @@ static RPCHelpMan getcoinjoininfo() #ifdef ENABLE_WALLET Span GetWalletCoinJoinRPCCommands() { -// clang-format off -static const CRPCCommand commands[] 
= -{ // category actor (function) - // --------------------- ----------------------- - { "dash", &coinjoin, }, - { "dash", &coinjoin_reset, }, - { "dash", &coinjoin_start, }, - { "dash", &coinjoin_status, }, - { "dash", &coinjoin_stop, }, - { "dash", &coinjoinsalt, }, - { "dash", &coinjoinsalt_generate, }, - { "dash", &coinjoinsalt_get, }, - { "dash", &coinjoinsalt_set, }, - { "dash", &getcoinjoininfo, }, -}; -// clang-format on + static const CRPCCommand commands[]{ + {"dash", &coinjoin}, + {"dash", &coinjoin_reset}, + {"dash", &coinjoin_start}, + {"dash", &coinjoin_status}, + {"dash", &coinjoin_stop}, + {"dash", &coinjoinsalt}, + {"dash", &coinjoinsalt_generate}, + {"dash", &coinjoinsalt_get}, + {"dash", &coinjoinsalt_set}, + {"dash", &getcoinjoininfo}, + }; return commands; } #endif // ENABLE_WALLET void RegisterCoinJoinRPCCommands(CRPCTable& t) { -// clang-format off -static const CRPCCommand commands_wallet[] = -{ // category actor (function) - // --------------------- ----------------------- - { "dash", &getcoinjoininfo, }, -}; -// clang-format on + static const CRPCCommand commands_wallet[]{ + {"dash", &getcoinjoininfo}, + }; // If we aren't compiling with wallet support, we still need to register RPCs that are // capable of working without wallet support. 
We have to do this even if wallet support // is compiled in but is disabled at runtime because runtime disablement prohibits diff --git a/src/rpc/evo.cpp b/src/rpc/evo.cpp index 3e0dcc641636..81bf1ecd7814 100644 --- a/src/rpc/evo.cpp +++ b/src/rpc/evo.cpp @@ -1845,52 +1845,43 @@ static RPCHelpMan bls_help() #ifdef ENABLE_WALLET Span GetWalletEvoRPCCommands() { -// clang-format off -static const CRPCCommand commands[] = -{ // category actor (function) - // --------------------- ----------------------- - { "evo", &protx_list, }, - { "evo", &protx_info, }, - { "evo", &protx_register, }, - { "evo", &protx_register_evo, }, - { "evo", &protx_register_fund, }, - { "evo", &protx_register_fund_evo, }, - { "evo", &protx_register_prepare, }, - { "evo", &protx_register_prepare_evo, }, - { "evo", &protx_update_service, }, - { "evo", &protx_update_service_evo, }, - { "evo", &protx_register_submit, }, - { "evo", &protx_update_registrar, }, - { "evo", &protx_revoke, }, - { "hidden", &protx_register_legacy, }, - { "hidden", &protx_register_fund_legacy, }, - { "hidden", &protx_register_prepare_legacy, }, - { "hidden", &protx_update_registrar_legacy, }, -}; -// clang-format on + static const CRPCCommand commands[]{ + {"evo", &protx_list}, + {"evo", &protx_info}, + {"evo", &protx_register}, + {"evo", &protx_register_evo}, + {"evo", &protx_register_fund}, + {"evo", &protx_register_fund_evo}, + {"evo", &protx_register_prepare}, + {"evo", &protx_register_prepare_evo}, + {"evo", &protx_update_service}, + {"evo", &protx_update_service_evo}, + {"evo", &protx_register_submit}, + {"evo", &protx_update_registrar}, + {"evo", &protx_revoke}, + {"hidden", &protx_register_legacy}, + {"hidden", &protx_register_fund_legacy}, + {"hidden", &protx_register_prepare_legacy}, + {"hidden", &protx_update_registrar_legacy}, + }; return commands; } #endif // ENABLE_WALLET void RegisterEvoRPCCommands(CRPCTable& tableRPC) { -// clang-format off -static const CRPCCommand commands[] = -{ // category actor 
(function) - // --------------------- ----------------------- - { "evo", &bls_help, }, - { "evo", &bls_generate, }, - { "evo", &bls_fromsecret, }, - { "evo", &protx_help, }, - { "evo", &protx_diff, }, - { "evo", &protx_listdiff, }, -}; -static const CRPCCommand commands_wallet[] = -{ - { "evo", &protx_list, }, - { "evo", &protx_info, }, -}; -// clang-format on + static const CRPCCommand commands[]{ + {"evo", &bls_help}, + {"evo", &bls_generate}, + {"evo", &bls_fromsecret}, + {"evo", &protx_help}, + {"evo", &protx_diff}, + {"evo", &protx_listdiff}, + }; + static const CRPCCommand commands_wallet[]{ + {"evo", &protx_list}, + {"evo", &protx_info}, + }; for (const auto& command : commands) { tableRPC.appendCommand(command.name, &command); } diff --git a/src/rpc/governance.cpp b/src/rpc/governance.cpp index ea66057b457c..abbffc93804a 100644 --- a/src/rpc/governance.cpp +++ b/src/rpc/governance.cpp @@ -1052,41 +1052,32 @@ static RPCHelpMan getsuperblockbudget() #ifdef ENABLE_WALLET Span GetWalletGovernanceRPCCommands() { -// clang-format off -static const CRPCCommand commands[] = -{ // category actor (function) - // --------------------- ----------------------- - { "dash", &gobject_prepare, }, - { "dash", &gobject_list_prepared, }, - { "dash", &gobject_vote_many, }, - { "dash", &gobject_vote_alias, }, -}; -// clang-format on + static const CRPCCommand commands[]{ + {"dash", &gobject_prepare}, + {"dash", &gobject_list_prepared}, + {"dash", &gobject_vote_many}, + {"dash", &gobject_vote_alias}, + }; return commands; } #endif // ENABLE_WALLET void RegisterGovernanceRPCCommands(CRPCTable &t) { -// clang-format off -static const CRPCCommand commands[] = -{ // category actor (function) - // --------------------- ----------------------- - /* Dash features */ - { "dash", &getgovernanceinfo, }, - { "dash", &getsuperblockbudget, }, - { "dash", &gobject, }, - { "dash", &gobject_count, }, - { "dash", &gobject_deserialize, }, - { "dash", &gobject_check, }, - { "dash", &gobject_submit, 
}, - { "dash", &gobject_list, }, - { "dash", &gobject_diff, }, - { "dash", &gobject_get, }, - { "dash", &gobject_getcurrentvotes, }, - { "dash", &voteraw, }, -}; -// clang-format on + static const CRPCCommand commands[]{ + {"dash", &getgovernanceinfo}, + {"dash", &getsuperblockbudget}, + {"dash", &gobject}, + {"dash", &gobject_count}, + {"dash", &gobject_deserialize}, + {"dash", &gobject_check}, + {"dash", &gobject_submit}, + {"dash", &gobject_list}, + {"dash", &gobject_diff}, + {"dash", &gobject_get}, + {"dash", &gobject_getcurrentvotes}, + {"dash", &voteraw}, + }; for (const auto& command : commands) { t.appendCommand(command.name, &command); } diff --git a/src/rpc/masternode.cpp b/src/rpc/masternode.cpp index 727b20e9ebf5..d3fc88b11f97 100644 --- a/src/rpc/masternode.cpp +++ b/src/rpc/masternode.cpp @@ -679,33 +679,25 @@ static RPCHelpMan masternodelist_composite() #ifdef ENABLE_WALLET Span GetWalletMasternodeRPCCommands() { -// clang-format off -static const CRPCCommand commands[] = -{ // category actor (function) - // --------------------- ----------------------- - { "dash", &masternode_outputs, }, -}; -// clang-format on + static const CRPCCommand commands[]{ + {"dash", &masternode_outputs}, + }; return commands; } #endif // ENABLE_WALLET void RegisterMasternodeRPCCommands(CRPCTable &t) { -// clang-format off -static const CRPCCommand commands[] = -{ // category actor (function) - // --------------------- ----------------------- - { "dash", &masternode_help, }, - { "dash", &masternodelist_composite, }, - { "dash", &masternodelist, }, - { "dash", &masternode_connect, }, - { "dash", &masternode_count, }, - { "dash", &masternode_status, }, - { "dash", &masternode_payments, }, - { "dash", &masternode_winners, }, -}; -// clang-format on + static const CRPCCommand commands[]{ + {"dash", &masternode_help}, + {"dash", &masternodelist_composite}, + {"dash", &masternodelist}, + {"dash", &masternode_connect}, + {"dash", &masternode_count}, + {"dash", 
&masternode_status}, + {"dash", &masternode_payments}, + {"dash", &masternode_winners}, + }; for (const auto& command : commands) { t.appendCommand(command.name, &command); } diff --git a/src/rpc/mempool.cpp b/src/rpc/mempool.cpp index 165ea861df88..929ac1037099 100644 --- a/src/rpc/mempool.cpp +++ b/src/rpc/mempool.cpp @@ -683,8 +683,6 @@ static RPCHelpMan savemempool() void RegisterMempoolRPCCommands(CRPCTable& t) { static const CRPCCommand commands[]{ - // category actor (function) - // -------- ---------------- {"rawtransactions", &sendrawtransaction}, {"rawtransactions", &testmempoolaccept}, {"blockchain", &getmempoolancestors}, diff --git a/src/rpc/mining.cpp b/src/rpc/mining.cpp index d055d17d7bfd..8755ae72b33f 100644 --- a/src/rpc/mining.cpp +++ b/src/rpc/mining.cpp @@ -1094,24 +1094,18 @@ static RPCHelpMan submitheader() void RegisterMiningRPCCommands(CRPCTable& t) { -// clang-format off -static const CRPCCommand commands[] = -{ // category actor (function) - // --------------------- ----------------------- - { "mining", &getnetworkhashps, }, - { "mining", &getmininginfo, }, - { "mining", &prioritisetransaction, }, - { "mining", &getblocktemplate, }, - { "mining", &submitblock, }, - { "mining", &submitheader, }, - - { "hidden", &generatetoaddress, }, - { "hidden", &generatetodescriptor, }, - { "hidden", &generateblock, }, - - { "hidden", &generate, }, -}; -// clang-format on + static const CRPCCommand commands[]{ + {"mining", &getnetworkhashps}, + {"mining", &getmininginfo}, + {"mining", &prioritisetransaction}, + {"mining", &getblocktemplate}, + {"mining", &submitblock}, + {"mining", &submitheader}, + {"hidden", &generatetoaddress}, + {"hidden", &generatetodescriptor}, + {"hidden", &generateblock}, + {"hidden", &generate}, + }; for (const auto& c : commands) { t.appendCommand(c.name, &c); } diff --git a/src/rpc/net.cpp b/src/rpc/net.cpp index e77eeba6cb8d..fbd72f6c349d 100644 --- a/src/rpc/net.cpp +++ b/src/rpc/net.cpp @@ -1112,31 +1112,26 @@ static 
RPCHelpMan setmnthreadactive() void RegisterNetRPCCommands(CRPCTable &t) { -// clang-format off -static const CRPCCommand commands[] = -{ // category actor - // --------------------- ----------------------- - { "network", &getconnectioncount, }, - { "network", &ping, }, - { "network", &getpeerinfo, }, - { "network", &addnode, }, - { "network", &disconnectnode, }, - { "network", &getaddednodeinfo, }, - { "network", &getnettotals, }, - { "network", &getnetworkinfo, }, - { "network", &setban, }, - { "network", &listbanned, }, - { "network", &clearbanned, }, - { "network", &setnetworkactive, }, - { "network", &getnodeaddresses, }, - - { "hidden", &cleardiscouraged, }, - { "hidden", &addconnection, }, - { "hidden", &addpeeraddress, }, - { "hidden", &sendmsgtopeer }, - { "hidden", &setmnthreadactive }, -}; -// clang-format on + static const CRPCCommand commands[]{ + {"network", &getconnectioncount}, + {"network", &ping}, + {"network", &getpeerinfo}, + {"network", &addnode}, + {"network", &disconnectnode}, + {"network", &getaddednodeinfo}, + {"network", &getnettotals}, + {"network", &getnetworkinfo}, + {"network", &setban}, + {"network", &listbanned}, + {"network", &clearbanned}, + {"network", &setnetworkactive}, + {"network", &getnodeaddresses}, + {"hidden", &cleardiscouraged}, + {"hidden", &addconnection}, + {"hidden", &addpeeraddress}, + {"hidden", &sendmsgtopeer}, + {"hidden", &setmnthreadactive}, + }; for (const auto& c : commands) { t.appendCommand(c.name, &c); } diff --git a/src/rpc/quorums.cpp b/src/rpc/quorums.cpp index a9b3b88d3455..b12b738f9046 100644 --- a/src/rpc/quorums.cpp +++ b/src/rpc/quorums.cpp @@ -1155,32 +1155,28 @@ static RPCHelpMan submitchainlock() void RegisterQuorumsRPCCommands(CRPCTable &tableRPC) { -// clang-format off -static const CRPCCommand commands[] = -{ // category actor (function) - // --------------------- ----------------------- - { "evo", &quorum_help, }, - { "evo", &quorum_list, }, - { "evo", &quorum_list_extended, }, - { "evo", 
&quorum_info, }, - { "evo", &quorum_dkginfo, }, - { "evo", &quorum_dkgstatus, }, - { "evo", &quorum_memberof, }, - { "evo", &quorum_sign, }, - { "evo", &quorum_platformsign, }, - { "evo", &quorum_verify, }, - { "evo", &quorum_hasrecsig, }, - { "evo", &quorum_getrecsig, }, - { "evo", &quorum_isconflicting, }, - { "evo", &quorum_selectquorum, }, - { "evo", &quorum_dkgsimerror, }, - { "evo", &quorum_getdata, }, - { "evo", &quorum_rotationinfo, }, - { "evo", &submitchainlock, }, - { "evo", &verifychainlock, }, - { "evo", &verifyislock, }, -}; -// clang-format on + static const CRPCCommand commands[]{ + {"evo", &quorum_help}, + {"evo", &quorum_list}, + {"evo", &quorum_list_extended}, + {"evo", &quorum_info}, + {"evo", &quorum_dkginfo}, + {"evo", &quorum_dkgstatus}, + {"evo", &quorum_memberof}, + {"evo", &quorum_sign}, + {"evo", &quorum_platformsign}, + {"evo", &quorum_verify}, + {"evo", &quorum_hasrecsig}, + {"evo", &quorum_getrecsig}, + {"evo", &quorum_isconflicting}, + {"evo", &quorum_selectquorum}, + {"evo", &quorum_dkgsimerror}, + {"evo", &quorum_getdata}, + {"evo", &quorum_rotationinfo}, + {"evo", &submitchainlock}, + {"evo", &verifychainlock}, + {"evo", &verifyislock}, + }; for (const auto& command : commands) { tableRPC.appendCommand(command.name, &command); } diff --git a/src/rpc/rawtransaction.cpp b/src/rpc/rawtransaction.cpp index 77de92f39f67..379e56a39e50 100644 --- a/src/rpc/rawtransaction.cpp +++ b/src/rpc/rawtransaction.cpp @@ -1909,32 +1909,28 @@ static RPCHelpMan analyzepsbt() }; } -void RegisterRawTransactionRPCCommands(CRPCTable &t) +void RegisterRawTransactionRPCCommands(CRPCTable& t) { -// clang-format off -static const CRPCCommand commands[] = -{ // category actor (function) - // --------------------- ----------------------- - { "rawtransactions", &getassetunlockstatuses, }, - { "rawtransactions", &getrawtransaction, }, - { "rawtransactions", &getrawtransactionmulti, }, - { "rawtransactions", &getislocks, }, - { "rawtransactions", &gettxchainlocks, 
}, - { "rawtransactions", &createrawtransaction, }, - { "rawtransactions", &decoderawtransaction, }, - { "rawtransactions", &decodescript, }, - { "rawtransactions", &combinerawtransaction, }, - { "rawtransactions", &signrawtransactionwithkey, }, - { "rawtransactions", &decodepsbt, }, - { "rawtransactions", &combinepsbt, }, - { "rawtransactions", &finalizepsbt, }, - { "rawtransactions", &createpsbt, }, - { "rawtransactions", &converttopsbt, }, - { "rawtransactions", &utxoupdatepsbt, }, - { "rawtransactions", &joinpsbts, }, - { "rawtransactions", &analyzepsbt, }, -}; -// clang-format on + static const CRPCCommand commands[]{ + {"rawtransactions", &getassetunlockstatuses}, + {"rawtransactions", &getrawtransaction}, + {"rawtransactions", &getrawtransactionmulti}, + {"rawtransactions", &getislocks}, + {"rawtransactions", &gettxchainlocks}, + {"rawtransactions", &createrawtransaction}, + {"rawtransactions", &decoderawtransaction}, + {"rawtransactions", &decodescript}, + {"rawtransactions", &combinerawtransaction}, + {"rawtransactions", &signrawtransactionwithkey}, + {"rawtransactions", &decodepsbt}, + {"rawtransactions", &combinepsbt}, + {"rawtransactions", &finalizepsbt}, + {"rawtransactions", &createpsbt}, + {"rawtransactions", &converttopsbt}, + {"rawtransactions", &utxoupdatepsbt}, + {"rawtransactions", &joinpsbts}, + {"rawtransactions", &analyzepsbt}, + }; for (const auto& c : commands) { t.appendCommand(c.name, &c); } diff --git a/src/rpc/server.cpp b/src/rpc/server.cpp index 80e07ad9b1d5..55c79374acea 100644 --- a/src/rpc/server.cpp +++ b/src/rpc/server.cpp @@ -266,17 +266,14 @@ static RPCHelpMan getrpcinfo() } }; } -// clang-format off -static const CRPCCommand vRPCCommands[] = -{ // category actor (function) - // --------------------- ----------------------- + +static const CRPCCommand vRPCCommands[]{ /* Overall control/query calls */ - { "control", &getrpcinfo, }, - { "control", &help, }, - { "control", &stop, }, - { "control", &uptime, }, + {"control", 
&getrpcinfo}, + {"control", &help}, + {"control", &stop}, + {"control", &uptime}, }; -// clang-format on CRPCTable::CRPCTable() { diff --git a/src/rpc/txoutproof.cpp b/src/rpc/txoutproof.cpp index 45de1e39d2c0..168a47cb1a87 100644 --- a/src/rpc/txoutproof.cpp +++ b/src/rpc/txoutproof.cpp @@ -177,8 +177,6 @@ static RPCHelpMan verifytxoutproof() void RegisterTxoutProofRPCCommands(CRPCTable& t) { static const CRPCCommand commands[]{ - // category actor (function) - // -------- ---------------- {"blockchain", &gettxoutproof}, {"blockchain", &verifytxoutproof}, }; diff --git a/src/wallet/rpc/wallet.cpp b/src/wallet/rpc/wallet.cpp index d7fdae8cd609..b55a93fe5185 100644 --- a/src/wallet/rpc/wallet.cpp +++ b/src/wallet/rpc/wallet.cpp @@ -1153,81 +1153,77 @@ RPCHelpMan abortrescan(); Span GetWalletRPCCommands() { -// clang-format off -static const CRPCCommand commands[] = -{ // category actor (function) - // ------------------ ------------------------ - { "rawtransactions", &fundrawtransaction, }, - { "wallet", &abandontransaction, }, - { "wallet", &abortrescan, }, - { "wallet", &addmultisigaddress, }, - { "wallet", &backupwallet, }, - { "wallet", &createwallet, }, - { "wallet", &restorewallet, }, - { "wallet", &dumphdinfo, }, - { "wallet", &dumpprivkey, }, - { "wallet", &dumpwallet, }, - { "wallet", &encryptwallet, }, - { "wallet", &getaddressesbylabel, }, - { "wallet", &getaddressinfo, }, - { "wallet", &getbalance, }, - { "wallet", &getnewaddress, }, - { "wallet", &getrawchangeaddress, }, - { "wallet", &getreceivedbyaddress, }, - { "wallet", &getreceivedbylabel, }, - { "wallet", &gettransaction, }, - { "wallet", &getunconfirmedbalance, }, - { "wallet", &getbalances, }, - { "wallet", &getwalletinfo, }, - { "wallet", &importaddress, }, - { "wallet", &importelectrumwallet, }, - { "wallet", &importdescriptors, }, - { "wallet", &importmulti, }, - { "wallet", &importprivkey, }, - { "wallet", &importprunedfunds, }, - { "wallet", &importpubkey, }, - { "wallet", &importwallet, }, 
- { "wallet", &keypoolrefill, }, - { "wallet", &listaddressbalances, }, - { "wallet", &listaddressgroupings, }, - { "wallet", &listdescriptors, }, - { "wallet", &listlabels, }, - { "wallet", &listlockunspent, }, - { "wallet", &listreceivedbyaddress, }, - { "wallet", &listreceivedbylabel, }, - { "wallet", &listsinceblock, }, - { "wallet", &listtransactions, }, - { "wallet", &listunspent, }, - { "wallet", &listwalletdir, }, - { "wallet", &listwallets, }, - { "wallet", &loadwallet, }, - { "wallet", &lockunspent, }, - { "wallet", &newkeypool, }, - { "wallet", &removeprunedfunds, }, - { "wallet", &rescanblockchain, }, - { "wallet", &send, }, - { "wallet", &sendmany, }, - { "wallet", &sendtoaddress, }, - { "wallet", &sethdseed, }, - { "wallet", &setcoinjoinrounds, }, - { "wallet", &setcoinjoinamount, }, - { "wallet", &setlabel, }, - { "wallet", &settxfee, }, - { "wallet", &setwalletflag, }, - { "wallet", &signmessage, }, - { "wallet", &signrawtransactionwithwallet, }, - { "wallet", &simulaterawtransaction, }, - { "wallet", &unloadwallet, }, - { "wallet", &upgradewallet, }, - { "wallet", &upgradetohd, }, - { "wallet", &walletlock, }, - { "wallet", &walletpassphrasechange, }, - { "wallet", &walletpassphrase, }, - { "wallet", &walletprocesspsbt, }, - { "wallet", &walletcreatefundedpsbt, }, - { "wallet", &wipewallettxes, }, -}; -// clang-format on + static const CRPCCommand commands[]{ + {"rawtransactions", &fundrawtransaction}, + {"wallet", &abandontransaction}, + {"wallet", &abortrescan}, + {"wallet", &addmultisigaddress}, + {"wallet", &backupwallet}, + {"wallet", &createwallet}, + {"wallet", &restorewallet}, + {"wallet", &dumphdinfo}, + {"wallet", &dumpprivkey}, + {"wallet", &dumpwallet}, + {"wallet", &encryptwallet}, + {"wallet", &getaddressesbylabel}, + {"wallet", &getaddressinfo}, + {"wallet", &getbalance}, + {"wallet", &getnewaddress}, + {"wallet", &getrawchangeaddress}, + {"wallet", &getreceivedbyaddress}, + {"wallet", &getreceivedbylabel}, + {"wallet", 
&gettransaction}, + {"wallet", &getunconfirmedbalance}, + {"wallet", &getbalances}, + {"wallet", &getwalletinfo}, + {"wallet", &importaddress}, + {"wallet", &importelectrumwallet}, + {"wallet", &importdescriptors}, + {"wallet", &importmulti}, + {"wallet", &importprivkey}, + {"wallet", &importprunedfunds}, + {"wallet", &importpubkey}, + {"wallet", &importwallet}, + {"wallet", &keypoolrefill}, + {"wallet", &listaddressbalances}, + {"wallet", &listaddressgroupings}, + {"wallet", &listdescriptors}, + {"wallet", &listlabels}, + {"wallet", &listlockunspent}, + {"wallet", &listreceivedbyaddress}, + {"wallet", &listreceivedbylabel}, + {"wallet", &listsinceblock}, + {"wallet", &listtransactions}, + {"wallet", &listunspent}, + {"wallet", &listwalletdir}, + {"wallet", &listwallets}, + {"wallet", &loadwallet}, + {"wallet", &lockunspent}, + {"wallet", &newkeypool}, + {"wallet", &removeprunedfunds}, + {"wallet", &rescanblockchain}, + {"wallet", &send}, + {"wallet", &sendmany}, + {"wallet", &sendtoaddress}, + {"wallet", &sethdseed}, + {"wallet", &setcoinjoinrounds}, + {"wallet", &setcoinjoinamount}, + {"wallet", &setlabel}, + {"wallet", &settxfee}, + {"wallet", &setwalletflag}, + {"wallet", &signmessage}, + {"wallet", &signrawtransactionwithwallet}, + {"wallet", &simulaterawtransaction}, + {"wallet", &unloadwallet}, + {"wallet", &upgradewallet}, + {"wallet", &upgradetohd}, + {"wallet", &walletlock}, + {"wallet", &walletpassphrasechange}, + {"wallet", &walletpassphrase}, + {"wallet", &walletprocesspsbt}, + {"wallet", &walletcreatefundedpsbt}, + {"wallet", &wipewallettxes}, + }; return commands; } } // namespace wallet diff --git a/src/zmq/zmqrpc.cpp b/src/zmq/zmqrpc.cpp index 7aa933248a0b..7ae3cf0c1e65 100644 --- a/src/zmq/zmqrpc.cpp +++ b/src/zmq/zmqrpc.cpp @@ -51,10 +51,8 @@ static RPCHelpMan getzmqnotifications() }; } -const CRPCCommand commands[] = -{ // category actor (function) - // ----------------- ----------------------- - { "zmq", &getzmqnotifications, }, +const 
CRPCCommand commands[]{ + {"zmq", &getzmqnotifications}, }; } // anonymous namespace From 674557538e1f5db60b480a5ab6fabe604d79b2a3 Mon Sep 17 00:00:00 2001 From: MacroFake Date: Mon, 23 May 2022 09:40:49 +0200 Subject: [PATCH 083/656] Merge bitcoin/bitcoin#25184: refactor: Remove defunct attributes.h includes 71a8dbe5da0ec2c17c448eb3303eb30615869813 refactor: Remove defunct attributes.h includes (Ben Woosley) Pull request description: Since the removal of NODISCARD in 81d5af42f4dba5b68a597536cad7f61894dc22a3, the only attributes.h def is LIFETIMEBOUND, and it's included in many more places that it is used. This removes all includes which do not have an associated use of LIFETIMEBOUND, and adds it to the following files, due to their use of the same: * src/validationinterface.h * src/script/standard.h See also #20499. Top commit has no ACKs. Tree-SHA512: f3e10a5cda5ab78371b77b702f4a241ff69d490a16cc6059f1a4202b97c584accdbc951cc7b6120eae94bee3b9249e9117b45cf6ed1a5228ca23b5638fcf7b7b --- src/base58.h | 1 - src/core_io.h | 1 - src/init.cpp | 1 - src/netaddress.h | 1 - src/node/transaction.h | 1 - src/psbt.h | 1 - src/script/standard.h | 1 + src/test/fuzz/util.h | 1 - src/util/moneystr.h | 1 - src/util/strencodings.h | 1 - src/util/system.h | 1 - src/validationinterface.cpp | 1 + 12 files changed, 2 insertions(+), 10 deletions(-) diff --git a/src/base58.h b/src/base58.h index 9ba5af73e059..d2a8d5e3bc47 100644 --- a/src/base58.h +++ b/src/base58.h @@ -14,7 +14,6 @@ #ifndef BITCOIN_BASE58_H #define BITCOIN_BASE58_H -#include #include #include diff --git a/src/core_io.h b/src/core_io.h index d3f424d1363f..8dd365f95518 100644 --- a/src/core_io.h +++ b/src/core_io.h @@ -5,7 +5,6 @@ #ifndef BITCOIN_CORE_IO_H #define BITCOIN_CORE_IO_H -#include #include #include diff --git a/src/init.cpp b/src/init.cpp index 25b2ca72090c..805d208e2c55 100644 --- a/src/init.cpp +++ b/src/init.cpp @@ -121,7 +121,6 @@ #include #ifndef WIN32 -#include #include #include #include diff --git 
a/src/netaddress.h b/src/netaddress.h index 053b3369fc61..eb5a822a38a3 100644 --- a/src/netaddress.h +++ b/src/netaddress.h @@ -9,7 +9,6 @@ #include #endif -#include #include #include #include diff --git a/src/node/transaction.h b/src/node/transaction.h index 3156e841e594..9617c1f99b92 100644 --- a/src/node/transaction.h +++ b/src/node/transaction.h @@ -5,7 +5,6 @@ #ifndef BITCOIN_NODE_TRANSACTION_H #define BITCOIN_NODE_TRANSACTION_H -#include #include #include #include diff --git a/src/psbt.h b/src/psbt.h index d3d778d8952a..7069a58e61f0 100644 --- a/src/psbt.h +++ b/src/psbt.h @@ -5,7 +5,6 @@ #ifndef BITCOIN_PSBT_H #define BITCOIN_PSBT_H -#include #include #include #include diff --git a/src/script/standard.h b/src/script/standard.h index 650b95fac890..1b047f1925ce 100644 --- a/src/script/standard.h +++ b/src/script/standard.h @@ -6,6 +6,7 @@ #ifndef BITCOIN_SCRIPT_STANDARD_H #define BITCOIN_SCRIPT_STANDARD_H +#include #include + - + + @@ -29,20 +31,16 @@
- + - -
-
mi-malloc -  1.7/2.0 +
+
mi-malloc 1.8/2.1
+
- -   + @@ -56,10 +54,15 @@
- + + @@ -74,8 +77,8 @@
@@ -88,20 +91,26 @@
- +
+
+
+
+
Loading...
+
Searching...
+
No Matches
+
+
+
-
-
Data Structures
+
Data Structures
Here are the data structures with brief descriptions:
- +
 Cmi_heap_area_tAn area of heap space contains blocks of a single size
 Cmi_stl_allocatorstd::allocator implementation for mimalloc for use in STL containers
 Cmi_stl_allocatorstd::allocator implementation for mimalloc for use in STL containers
@@ -109,7 +118,7 @@ diff --git a/depends/mimalloc/docs/bench.html b/depends/mimalloc/docs/bench.html index 6c4728958088..c896e7fcb092 100644 --- a/depends/mimalloc/docs/bench.html +++ b/depends/mimalloc/docs/bench.html @@ -1,24 +1,26 @@ - + - - + + mi-malloc: Performance + - + + @@ -29,20 +31,16 @@
- + - -
-
mi-malloc -  1.7/2.0 +
+
mi-malloc 1.8/2.1
+
- -   + @@ -56,10 +54,15 @@
- + +
@@ -74,8 +77,8 @@
@@ -88,14 +91,20 @@
- +
+
+
+
+
Loading...
+
Searching...
+
No Matches
+
+
+
-
-
-
Performance
+
+
Performance

We tested mimalloc against many other top allocators over a wide range of benchmarks, ranging from various real world programs to synthetic benchmarks that see how the allocator behaves under more extreme circumstances.

@@ -107,7 +116,7 @@ diff --git a/depends/mimalloc/docs/build.html b/depends/mimalloc/docs/build.html index dbcc0d7521be..9849b055f2b3 100644 --- a/depends/mimalloc/docs/build.html +++ b/depends/mimalloc/docs/build.html @@ -1,24 +1,26 @@ - + - - + + mi-malloc: Building + - + + @@ -29,20 +31,16 @@
- + - -
-
mi-malloc -  1.7/2.0 +
+
mi-malloc 1.8/2.1
+
- -   + @@ -56,10 +54,15 @@
- + +
@@ -74,8 +77,8 @@
@@ -88,17 +91,23 @@
- +
+
+
+
+
Loading...
+
Searching...
+
No Matches
+
+
+
-
-
-
Building
+
+
Building
-

Checkout the sources from Github:

git clone https://github.com/microsoft/mimalloc
+

Checkout the sources from GitHub:

git clone https://github.com/microsoft/mimalloc

Windows

Open ide/vs2019/mimalloc.sln in Visual Studio 2019 and build (or ide/vs2017/mimalloc.sln). The mimalloc project builds a static library (in out/msvc-x64), while the mimalloc-override project builds a DLL for overriding malloc in the entire program.

macOS, Linux, BSD, etc.

@@ -130,7 +139,7 @@

macOS, Linux, BSD, etc.

diff --git a/depends/mimalloc/docs/classes.html b/depends/mimalloc/docs/classes.html index b744c4d90041..e86cf9f2f1ee 100644 --- a/depends/mimalloc/docs/classes.html +++ b/depends/mimalloc/docs/classes.html @@ -1,24 +1,26 @@ - + - - + + mi-malloc: Data Structure Index + - + + @@ -29,20 +31,16 @@
- + - -
-
mi-malloc -  1.7/2.0 +
+
mi-malloc 1.8/2.1
+
- -   + @@ -56,10 +54,15 @@
- + +
@@ -74,8 +77,8 @@
@@ -88,20 +91,26 @@
- +
+
+
+
+
Loading...
+
Searching...
+
No Matches
+
+
+
-
-
Data Structure Index
+
Data Structure Index
@@ -109,7 +118,7 @@ diff --git a/depends/mimalloc/docs/clipboard.js b/depends/mimalloc/docs/clipboard.js new file mode 100644 index 000000000000..42c1fb0e02dc --- /dev/null +++ b/depends/mimalloc/docs/clipboard.js @@ -0,0 +1,61 @@ +/** + +The code below is based on the Doxygen Awesome project, see +https://github.com/jothepro/doxygen-awesome-css + +MIT License + +Copyright (c) 2021 - 2022 jothepro + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. 
+ +*/ + +let clipboard_title = "Copy to clipboard" +let clipboard_icon = `` +let clipboard_successIcon = `` +let clipboard_successDuration = 1000 + +$(function() { + if(navigator.clipboard) { + const fragments = document.getElementsByClassName("fragment") + for(const fragment of fragments) { + const clipboard_div = document.createElement("div") + clipboard_div.classList.add("clipboard") + clipboard_div.innerHTML = clipboard_icon + clipboard_div.title = clipboard_title + $(clipboard_div).click(function() { + const content = this.parentNode.cloneNode(true) + // filter out line number and folded fragments from file listings + content.querySelectorAll(".lineno, .ttc, .foldclosed").forEach((node) => { node.remove() }) + let text = content.textContent + // remove trailing newlines and trailing spaces from empty lines + text = text.replace(/^\s*\n/gm,'\n').replace(/\n*$/,'') + navigator.clipboard.writeText(text); + this.classList.add("success") + this.innerHTML = clipboard_successIcon + window.setTimeout(() => { // switch back to normal icon after timeout + this.classList.remove("success") + this.innerHTML = clipboard_icon + }, clipboard_successDuration); + }) + fragment.insertBefore(clipboard_div, fragment.firstChild) + } + } +}) diff --git a/depends/mimalloc/docs/cookie.js b/depends/mimalloc/docs/cookie.js new file mode 100644 index 000000000000..53ad21d98119 --- /dev/null +++ b/depends/mimalloc/docs/cookie.js @@ -0,0 +1,58 @@ +/*! + Cookie helper functions + Copyright (c) 2023 Dimitri van Heesch + Released under MIT license. 
+*/ +let Cookie = { + cookie_namespace: 'doxygen_', + + readSetting(cookie,defVal) { + if (window.chrome) { + const val = localStorage.getItem(this.cookie_namespace+cookie) || + sessionStorage.getItem(this.cookie_namespace+cookie); + if (val) return val; + } else { + let myCookie = this.cookie_namespace+cookie+"="; + if (document.cookie) { + const index = document.cookie.indexOf(myCookie); + if (index != -1) { + const valStart = index + myCookie.length; + let valEnd = document.cookie.indexOf(";", valStart); + if (valEnd == -1) { + valEnd = document.cookie.length; + } + return document.cookie.substring(valStart, valEnd); + } + } + } + return defVal; + }, + + writeSetting(cookie,val,days=10*365) { // default days='forever', 0=session cookie, -1=delete + if (window.chrome) { + if (days==0) { + sessionStorage.setItem(this.cookie_namespace+cookie,val); + } else { + localStorage.setItem(this.cookie_namespace+cookie,val); + } + } else { + let date = new Date(); + date.setTime(date.getTime()+(days*24*60*60*1000)); + const expiration = days!=0 ? 
"expires="+date.toGMTString()+";" : ""; + document.cookie = this.cookie_namespace + cookie + "=" + + val + "; SameSite=Lax;" + expiration + "path=/"; + } + }, + + eraseSetting(cookie) { + if (window.chrome) { + if (localStorage.getItem(this.cookie_namespace+cookie)) { + localStorage.removeItem(this.cookie_namespace+cookie); + } else if (sessionStorage.getItem(this.cookie_namespace+cookie)) { + sessionStorage.removeItem(this.cookie_namespace+cookie); + } + } else { + this.writeSetting(cookie,'',-1); + } + }, +} diff --git a/depends/mimalloc/docs/doxygen.css b/depends/mimalloc/docs/doxygen.css index f090ef799cc0..0ee13f355c97 100644 --- a/depends/mimalloc/docs/doxygen.css +++ b/depends/mimalloc/docs/doxygen.css @@ -1,26 +1,31 @@ -/* The standard CSS for doxygen 1.9.1 */ +/* The standard CSS for doxygen 1.13.1*/ -body, table, div, p, dl { - font: 400 14px/22px Roboto,sans-serif; +body { + background-color: white; + color: black; } -p.reference, p.definition { - font: 400 14px/22px Roboto,sans-serif; +body, table, div, p, dl { + font-weight: 400; + font-size: 14px; + font-family: Roboto,sans-serif; + line-height: 22px; } /* @group Heading Levels */ -h1.groupheader { - font-size: 150%; -} - .title { - font: 400 14px/28px Roboto,sans-serif; + font-family: Roboto,sans-serif; + line-height: 28px; font-size: 150%; font-weight: bold; margin: 10px 2px; } +h1.groupheader { + font-size: 150%; +} + h2.groupheader { border-bottom: 1px solid #474D4E; color: #0A0B0B; @@ -53,15 +58,6 @@ dt { font-weight: bold; } -ul.multicol { - -moz-column-gap: 1em; - -webkit-column-gap: 1em; - column-gap: 1em; - -moz-column-count: 3; - -webkit-column-count: 3; - column-count: 3; -} - p.startli, p.startdd { margin-top: 2px; } @@ -113,7 +109,6 @@ h3.version { } div.navtab { - border-right: 1px solid #636C6D; padding-right: 15px; text-align: right; line-height: 110%; @@ -127,6 +122,7 @@ td.navtab { padding-right: 6px; padding-left: 6px; } + td.navtabHL { background-image: url('tab_a.png'); 
background-repeat:repeat-x; @@ -135,7 +131,7 @@ td.navtabHL { } td.navtabHL a, td.navtabHL a:visited { - color: #fff; + color: white; text-shadow: 0px 1px 1px rgba(0, 0, 0, 1.0); } @@ -151,6 +147,12 @@ div.qindex{ color: #A0A0A0; } +#main-menu a:focus { + outline: auto; + z-index: 10; + position: relative; +} + dt.alphachar{ font-size: 180%; font-weight: bold; @@ -176,6 +178,10 @@ dt.alphachar{ line-height: 1.15em; } +.classindex dl.even { + background-color: white; +} + .classindex dl.odd { background-color: #F0F1F1; } @@ -206,11 +212,13 @@ a { } a:hover { - text-decoration: underline; + text-decoration: none; + background: linear-gradient(to bottom, transparent 0,transparent calc(100% - 1px), currentColor 100%); } -.contents a.qindexHL:visited { - color: #FFFFFF; +a:hover > span.arrow { + text-decoration: none; + background : #F2F3F3; } a.el { @@ -221,21 +229,75 @@ a.elRef { } a.code, a.code:visited, a.line, a.line:visited { - color: #171919; + color: #171919; } a.codeRef, a.codeRef:visited, a.lineRef, a.lineRef:visited { - color: #171919; + color: #171919; } +a.code.hl_class { /* style for links to class names in code snippets */ } +a.code.hl_struct { /* style for links to struct names in code snippets */ } +a.code.hl_union { /* style for links to union names in code snippets */ } +a.code.hl_interface { /* style for links to interface names in code snippets */ } +a.code.hl_protocol { /* style for links to protocol names in code snippets */ } +a.code.hl_category { /* style for links to category names in code snippets */ } +a.code.hl_exception { /* style for links to exception names in code snippets */ } +a.code.hl_service { /* style for links to service names in code snippets */ } +a.code.hl_singleton { /* style for links to singleton names in code snippets */ } +a.code.hl_concept { /* style for links to concept names in code snippets */ } +a.code.hl_namespace { /* style for links to namespace names in code snippets */ } +a.code.hl_package { /* style for links to 
package names in code snippets */ } +a.code.hl_define { /* style for links to macro names in code snippets */ } +a.code.hl_function { /* style for links to function names in code snippets */ } +a.code.hl_variable { /* style for links to variable names in code snippets */ } +a.code.hl_typedef { /* style for links to typedef names in code snippets */ } +a.code.hl_enumvalue { /* style for links to enum value names in code snippets */ } +a.code.hl_enumeration { /* style for links to enumeration names in code snippets */ } +a.code.hl_signal { /* style for links to Qt signal names in code snippets */ } +a.code.hl_slot { /* style for links to Qt slot names in code snippets */ } +a.code.hl_friend { /* style for links to friend names in code snippets */ } +a.code.hl_dcop { /* style for links to KDE3 DCOP names in code snippets */ } +a.code.hl_property { /* style for links to property names in code snippets */ } +a.code.hl_event { /* style for links to event names in code snippets */ } +a.code.hl_sequence { /* style for links to sequence names in code snippets */ } +a.code.hl_dictionary { /* style for links to dictionary names in code snippets */ } + /* @end */ dl.el { margin-left: -1cm; } +ul.check { + list-style:none; + text-indent: -16px; + padding-left: 38px; +} +li.unchecked:before { + content: "\2610\A0"; +} +li.checked:before { + content: "\2611\A0"; +} + +ol { + text-indent: 0px; +} + ul { - overflow: hidden; /*Fixed: list item bullets overlap floating elements*/ + text-indent: 0px; + overflow: visible; +} + +ul.multicol { + -moz-column-gap: 1em; + -webkit-column-gap: 1em; + column-gap: 1em; + -moz-column-count: 3; + -webkit-column-count: 3; + column-count: 3; + list-style-type: none; } #side-nav ul { @@ -249,35 +311,70 @@ ul { .fragment { text-align: left; direction: ltr; - overflow-x: auto; /*Fixed: fragment lines overlap floating elements*/ + overflow-x: auto; overflow-y: hidden; + position: relative; + min-height: 12px; + margin: 10px 0px; + padding: 10px 10px; + 
border: 1px solid #90989A; + border-radius: 4px; + background-color: #F7F8F8; + color: black; } pre.fragment { - border: 1px solid #90989A; - background-color: #F7F8F8; - padding: 4px 6px; - margin: 4px 8px 4px 2px; + word-wrap: break-word; + font-size: 10pt; + line-height: 125%; + font-family: 'JetBrains Mono',Consolas,Monaco,'Andale Mono','Ubuntu Mono',monospace,fixed; +} + +.clipboard { + width: 24px; + height: 24px; + right: 5px; + top: 5px; + opacity: 0; + position: absolute; + display: inline; overflow: auto; - word-wrap: break-word; - font-size: 9pt; - line-height: 125%; - font-family: monospace, fixed; - font-size: 105%; + fill: black; + justify-content: center; + align-items: center; + cursor: pointer; +} + +.clipboard.success { + border: 1px solid black; + border-radius: 4px; +} + +.fragment:hover .clipboard, .clipboard.success { + opacity: .28; } -div.fragment { - padding: 0 0 1px 0; /*Fixed: last line underline overlap border*/ - margin: 4px 8px 4px 2px; - background-color: #F7F8F8; - border: 1px solid #90989A; +.clipboard:hover, .clipboard.success { + opacity: 1 !important; +} + +.clipboard:active:not([class~=success]) svg { + transform: scale(.91); +} + +.clipboard.success svg { + fill: #2EC82E; +} + +.clipboard.success { + border-color: #2EC82E; } div.line { - font-family: monospace, fixed; + font-family: 'JetBrains Mono',Consolas,Monaco,'Andale Mono','Ubuntu Mono',monospace,fixed; font-size: 13px; min-height: 13px; - line-height: 1.0; + line-height: 1.2; text-wrap: unrestricted; white-space: -moz-pre-wrap; /* Moz */ white-space: -pre-wrap; /* Opera 4-6 */ @@ -310,19 +407,35 @@ div.line.glow { box-shadow: 0 0 10px cyan; } +span.fold { + margin-left: 5px; + margin-right: 1px; + margin-top: 0px; + margin-bottom: 0px; + padding: 0px; + display: inline-block; + width: 12px; + height: 12px; + background-repeat:no-repeat; + background-position:center; +} span.lineno { padding-right: 4px; + margin-right: 9px; text-align: right; - border-right: 2px solid 
#0F0; + border-right: 2px solid #00FF00; + color: black; background-color: #E8E8E8; white-space: pre; } -span.lineno a { +span.lineno a, span.lineno a:visited { + color: #171919; background-color: #D8D8D8; } span.lineno a:hover { + color: #171919; background-color: #C8C8C8; } @@ -335,24 +448,6 @@ span.lineno a:hover { user-select: none; } -div.ah, span.ah { - background-color: black; - font-weight: bold; - color: #FFFFFF; - margin-bottom: 3px; - margin-top: 3px; - padding: 0.2em; - border: solid thin #333; - border-radius: 0.5em; - -webkit-border-radius: .5em; - -moz-border-radius: .5em; - box-shadow: 2px 2px 3px #999; - -webkit-box-shadow: 2px 2px 3px #999; - -moz-box-shadow: rgba(0, 0, 0, 0.15) 2px 2px 2px; - background-image: -webkit-gradient(linear, left top, left bottom, from(#eee), to(#000),color-stop(0.3, #444)); - background-image: -moz-linear-gradient(center top, #eee 0%, #444 40%, #000 110%); -} - div.classindex ul { list-style: none; padding-left: 0; @@ -374,7 +469,6 @@ div.groupText { } body { - background-color: white; color: black; margin: 0; } @@ -385,33 +479,15 @@ div.contents { margin-right: 8px; } -td.indexkey { - background-color: #D6D9D9; - font-weight: bold; - border: 1px solid #90989A; - margin: 2px 0px 2px 0; - padding: 2px 10px; - white-space: nowrap; - vertical-align: top; -} - -td.indexvalue { - background-color: #D6D9D9; - border: 1px solid #90989A; - padding: 2px 10px; - margin: 2px 0px; -} - -tr.memlist { - background-color: #DADDDE; -} - p.formulaDsp { text-align: center; } -img.formulaDsp { - +img.dark-mode-visible { + display: none; +} +img.light-mode-visible { + display: none; } img.formulaInl, img.inline { @@ -437,52 +513,63 @@ address.footer { img.footer { border: 0px; vertical-align: middle; + width: 104px; +} + +.compoundTemplParams { + color: #171919; + font-size: 80%; + line-height: 120%; } /* @group Code Colorization */ span.keyword { - color: #008000 + color: #008000; } span.keywordtype { - color: #604020 + color: #604020; } 
span.keywordflow { - color: #e08000 + color: #E08000; } span.comment { - color: #800000 + color: #800000; } span.preprocessor { - color: #806020 + color: #806020; } span.stringliteral { - color: #002080 + color: #002080; } span.charliteral { - color: #008080 + color: #008080; +} + +span.xmlcdata { + color: black; } span.vhdldigit { - color: #ff00ff + color: #FF00FF; } span.vhdlchar { - color: #000000 + color: #000000; } span.vhdlkeyword { - color: #700070 + color: #700070; } span.vhdllogic { - color: #ff0000 + color: #FF0000; } blockquote { @@ -492,34 +579,8 @@ blockquote { padding: 0 12px 0 16px; } -blockquote.DocNodeRTL { - border-left: 0; - border-right: 2px solid #5B6364; - margin: 0 4px 0 24px; - padding: 0 16px 0 12px; -} - /* @end */ -/* -.search { - color: #003399; - font-weight: bold; -} - -form.search { - margin-bottom: 0px; - margin-top: 0px; -} - -input.search { - font-size: 75%; - color: #000080; - font-weight: normal; - background-color: #e8eef2; -} -*/ - td.tiny { font-size: 75%; } @@ -527,11 +588,12 @@ td.tiny { .dirtab { padding: 4px; border-collapse: collapse; - border: 1px solid #636C6D; + border: 1px solid #060606; } th.dirtab { - background: #D6D9D9; + background-color: #0B0C0C; + color: #FFFFFF; font-weight: bold; } @@ -641,15 +703,6 @@ table.memberdecls { margin-left: 9px; } -.memnav { - background-color: #D6D9D9; - border: 1px solid #636C6D; - text-align: center; - margin: 2px; - margin-right: 15px; - padding: 2px; -} - .mempage { width: 100%; } @@ -689,33 +742,24 @@ table.memberdecls { font-weight: bold; text-shadow: 0px 1px 1px rgba(255, 255, 255, 0.9); background-color: #BDC2C3; - /* opera specific markup */ box-shadow: 5px 5px 5px rgba(0, 0, 0, 0.15); border-top-right-radius: 4px; - /* firefox specific markup */ - -moz-box-shadow: rgba(0, 0, 0, 0.15) 5px 5px 5px; - -moz-border-radius-topright: 4px; - /* webkit specific markup */ - -webkit-box-shadow: 5px 5px 5px rgba(0, 0, 0, 0.15); - -webkit-border-top-right-radius: 4px; - } .overload { 
- font-family: "courier new",courier,monospace; + font-family: 'JetBrains Mono',Consolas,Monaco,'Andale Mono','Ubuntu Mono',monospace,fixed; font-size: 65%; } .memdoc, dl.reflist dd { - border-bottom: 1px solid #697273; - border-left: 1px solid #697273; - border-right: 1px solid #697273; + border-bottom: 1px solid #697273; + border-left: 1px solid #697273; + border-right: 1px solid #697273; padding: 6px 10px 2px 10px; - background-color: #F7F8F8; border-top-width: 0; background-image:url('nav_g.png'); background-repeat:repeat-x; - background-color: #FFFFFF; + background-color: white; /* opera specific markup */ border-bottom-left-radius: 4px; border-bottom-right-radius: 4px; @@ -745,36 +789,44 @@ dl.reflist dd { .paramtype { white-space: nowrap; + padding: 0px; + padding-bottom: 1px; } .paramname { - color: #602020; white-space: nowrap; + padding: 0px; + padding-bottom: 1px; + margin-left: 2px; } + .paramname em { + color: #602020; font-style: normal; + margin-right: 1px; } -.paramname code { - line-height: 14px; + +.paramname .paramdefval { + font-family: 'JetBrains Mono',Consolas,Monaco,'Andale Mono','Ubuntu Mono',monospace,fixed; } .params, .retval, .exception, .tparams { margin-left: 0px; padding-left: 0px; -} +} .params .paramname, .retval .paramname, .tparams .paramname, .exception .paramname { font-weight: bold; vertical-align: top; } - + .params .paramtype, .tparams .paramtype { font-style: italic; vertical-align: top; -} - +} + .params .paramdir, .tparams .paramdir { - font-family: "courier new",courier,monospace; + font-family: 'JetBrains Mono',Consolas,Monaco,'Andale Mono','Ubuntu Mono',monospace,fixed; vertical-align: top; } @@ -858,9 +910,14 @@ div.directory { border-left: 1px solid rgba(0,0,0,0.05); } +.directory tr.odd { + padding-left: 6px; + background-color: #F0F1F1; +} + .directory tr.even { padding-left: 6px; - background-color: #EDEFEF; + background-color: white; } .directory img { @@ -896,7 +953,8 @@ div.directory { } .icon { - font-family: 
Arial, Helvetica; + font-family: Arial,Helvetica; + line-height: normal; font-weight: bold; font-size: 12px; height: 14px; @@ -920,8 +978,7 @@ div.directory { width: 24px; height: 18px; margin-bottom: 4px; - background-image:url('folderopen.png'); - background-position: 0px -4px; + background-image:url('folderopen.svg'); background-repeat: repeat-y; vertical-align:top; display: inline-block; @@ -931,8 +988,7 @@ div.directory { width: 24px; height: 18px; margin-bottom: 4px; - background-image:url('folderclosed.png'); - background-position: 0px -4px; + background-image:url('folderclosed.svg'); background-repeat: repeat-y; vertical-align:top; display: inline-block; @@ -942,17 +998,13 @@ div.directory { width: 24px; height: 18px; margin-bottom: 4px; - background-image:url('doc.png'); + background-image:url('doc.svg'); background-position: 0px -4px; background-repeat: repeat-y; vertical-align:top; display: inline-block; } -table.directory { - font: 400 14px Roboto,sans-serif; -} - /* @end */ div.dynheader { @@ -994,15 +1046,10 @@ table.doxtable th { } table.fieldtable { - /*width: 100%;*/ margin-bottom: 10px; border: 1px solid #697273; border-spacing: 0px; - -moz-border-radius: 4px; - -webkit-border-radius: 4px; border-radius: 4px; - -moz-box-shadow: rgba(0, 0, 0, 0.15) 2px 2px 2px; - -webkit-box-shadow: 2px 2px 2px rgba(0, 0, 0, 0.15); box-shadow: 2px 2px 2px rgba(0, 0, 0, 0.15); } @@ -1010,7 +1057,7 @@ table.fieldtable { padding: 3px 7px 2px; } -.fieldtable td.fieldtype, .fieldtable td.fieldname { +.fieldtable td.fieldtype, .fieldtable td.fieldname, .fieldtable td.fieldinit { white-space: nowrap; border-right: 1px solid #697273; border-bottom: 1px solid #697273; @@ -1021,15 +1068,20 @@ table.fieldtable { padding-top: 3px; } +.fieldtable td.fieldinit { + padding-top: 3px; + text-align: right; +} + + .fieldtable td.fielddoc { border-bottom: 1px solid #697273; - /*width: 100%;*/ } .fieldtable td.fielddoc p:first-child { margin-top: 0px; -} - +} + .fieldtable td.fielddoc 
p:last-child { margin-bottom: 2px; } @@ -1039,7 +1091,7 @@ table.fieldtable { } .fieldtable th { - background-image:url('nav_f.png'); + background-image: url('nav_f.png'); background-repeat:repeat-x; background-color: #C4C8C9; font-size: 90%; @@ -1048,10 +1100,6 @@ table.fieldtable { padding-top: 5px; text-align:left; font-weight: 400; - -moz-border-radius-topleft: 4px; - -moz-border-radius-topright: 4px; - -webkit-border-top-left-radius: 4px; - -webkit-border-top-right-radius: 4px; border-top-left-radius: 4px; border-top-right-radius: 4px; border-bottom: 1px solid #697273; @@ -1071,12 +1119,12 @@ table.fieldtable { .navpath ul { font-size: 11px; - background-image:url('tab_b.png'); + background-image: url('tab_b.png'); background-repeat:repeat-x; background-position: 0 -5px; height:30px; line-height:30px; - color:#494F50; + color:#040404; border:solid 1px #8C9596; overflow:hidden; margin:0px; @@ -1092,24 +1140,24 @@ table.fieldtable { background-image:url('bc_s.png'); background-repeat:no-repeat; background-position:right; - color:#0A0B0B; + color: #0A0B0B; } .navpath li.navelem a { height:32px; display:block; - text-decoration: none; outline: none; color: #040404; font-family: 'Lucida Grande',Geneva,Helvetica,Arial,sans-serif; text-shadow: 0px 1px 1px rgba(255, 255, 255, 0.9); - text-decoration: none; + text-decoration: none; } .navpath li.navelem a:hover { - color:#2E3233; + color: white; + text-shadow: 0px 1px 1px rgba(0, 0, 0, 1.0); } .navpath li.footer @@ -1121,7 +1169,7 @@ table.fieldtable { background-image:none; background-repeat:no-repeat; background-position:right; - color:#0A0B0B; + color: #050505; font-size: 8pt; } @@ -1133,7 +1181,7 @@ div.summary padding-right: 5px; width: 50%; text-align: right; -} +} div.summary a { @@ -1148,7 +1196,7 @@ table.classindex margin-right: 3%; width: 94%; border: 0; - border-spacing: 0; + border-spacing: 0; padding: 0; } @@ -1166,7 +1214,7 @@ div.ingroups a div.header { - background-image:url('nav_h.png'); + 
background-image: url('nav_h.png'); background-repeat:repeat-x; background-color: #F2F3F3; margin: 0px; @@ -1187,17 +1235,13 @@ dl { padding: 0 0 0 0; } -/* dl.note, dl.warning, dl.attention, dl.pre, dl.post, dl.invariant, dl.deprecated, dl.todo, dl.test, dl.bug, dl.examples */ +/* + dl.section { margin-left: 0px; padding-left: 0px; } -dl.section.DocNodeRTL { - margin-right: 0px; - padding-right: 0px; -} - dl.note { margin-left: -7px; padding-left: 3px; @@ -1205,33 +1249,13 @@ dl.note { border-color: #D0C000; } -dl.note.DocNodeRTL { - margin-left: 0; - padding-left: 0; - border-left: 0; - margin-right: -7px; - padding-right: 3px; - border-right: 4px solid; - border-color: #D0C000; -} - -dl.warning, dl.attention { +dl.warning, dl.attention, dl.important { margin-left: -7px; padding-left: 3px; border-left: 4px solid; border-color: #FF0000; } -dl.warning.DocNodeRTL, dl.attention.DocNodeRTL { - margin-left: 0; - padding-left: 0; - border-left: 0; - margin-right: -7px; - padding-right: 3px; - border-right: 4px solid; - border-color: #FF0000; -} - dl.pre, dl.post, dl.invariant { margin-left: -7px; padding-left: 3px; @@ -1239,16 +1263,6 @@ dl.pre, dl.post, dl.invariant { border-color: #00D000; } -dl.pre.DocNodeRTL, dl.post.DocNodeRTL, dl.invariant.DocNodeRTL { - margin-left: 0; - padding-left: 0; - border-left: 0; - margin-right: -7px; - padding-right: 3px; - border-right: 4px solid; - border-color: #00D000; -} - dl.deprecated { margin-left: -7px; padding-left: 3px; @@ -1256,16 +1270,6 @@ dl.deprecated { border-color: #505050; } -dl.deprecated.DocNodeRTL { - margin-left: 0; - padding-left: 0; - border-left: 0; - margin-right: -7px; - padding-right: 3px; - border-right: 4px solid; - border-color: #505050; -} - dl.todo { margin-left: -7px; padding-left: 3px; @@ -1273,16 +1277,6 @@ dl.todo { border-color: #00C0E0; } -dl.todo.DocNodeRTL { - margin-left: 0; - padding-left: 0; - border-left: 0; - margin-right: -7px; - padding-right: 3px; - border-right: 4px solid; - 
border-color: #00C0E0; -} - dl.test { margin-left: -7px; padding-left: 3px; @@ -1290,16 +1284,6 @@ dl.test { border-color: #3030E0; } -dl.test.DocNodeRTL { - margin-left: 0; - padding-left: 0; - border-left: 0; - margin-right: -7px; - padding-right: 3px; - border-right: 4px solid; - border-color: #3030E0; -} - dl.bug { margin-left: -7px; padding-left: 3px; @@ -1307,20 +1291,110 @@ dl.bug { border-color: #C08050; } -dl.bug.DocNodeRTL { - margin-left: 0; - padding-left: 0; - border-left: 0; - margin-right: -7px; - padding-right: 3px; - border-right: 4px solid; - border-color: #C08050; +*/ + +dl.bug dt a, dl.deprecated dt a, dl.todo dt a, dl.test a { + font-weight: bold !important; +} + +dl.warning, dl.attention, dl.important, dl.note, dl.deprecated, dl.bug, +dl.invariant, dl.pre, dl.post, dl.todo, dl.test, dl.remark { + padding: 10px; + margin: 10px 0px; + overflow: hidden; + margin-left: 0; + border-radius: 4px; } dl.section dd { - margin-bottom: 6px; + margin-bottom: 2px; +} + +dl.warning, dl.attention, dl.important { + background: #f8d1cc; + border-left: 8px solid #b61825; + color: #75070f; } +dl.warning dt, dl.attention dt, dl.important dt { + color: #b61825; +} + +dl.note, dl.remark { + background: #faf3d8; + border-left: 8px solid #f3a600; + color: #5f4204; +} + +dl.note dt, dl.remark dt { + color: #f3a600; +} + +dl.todo { + background: #e4f3ff; + border-left: 8px solid #1879C4; + color: #274a5c; +} + +dl.todo dt { + color: #1879C4; +} + +dl.test { + background: #e8e8ff; + border-left: 8px solid #3939C4; + color: #1a1a5c; +} + +dl.test dt { + color: #3939C4; +} + +dl.bug dt a { + color: #5b2bdd !important; +} + +dl.bug { + background: #e4dafd; + border-left: 8px solid #5b2bdd; + color: #2a0d72; +} + +dl.bug dt a { + color: #5b2bdd !important; +} + +dl.deprecated { + background: #ecf0f3; + border-left: 8px solid #5b6269; + color: #43454a; +} + +dl.deprecated dt a { + color: #5b6269 !important; +} + +dl.note dd, dl.warning dd, dl.pre dd, dl.post dd, +dl.remark 
dd, dl.attention dd, dl.important dd, dl.invariant dd, +dl.bug dd, dl.deprecated dd, dl.todo dd, dl.test dd { + margin-inline-start: 0px; +} + +dl.invariant, dl.pre, dl.post { + background: #d8f1e3; + border-left: 8px solid #44b86f; + color: #265532; +} + +dl.invariant dt, dl.pre dt, dl.post dt { + color: #44b86f; +} + + +#projectrow +{ + height: 56px; +} #projectlogo { @@ -1328,34 +1402,43 @@ dl.section dd { vertical-align: bottom; border-collapse: separate; } - + #projectlogo img -{ +{ border: 0px none; } - + #projectalign { vertical-align: middle; + padding-left: 0.5em; } #projectname { - font: 300% Tahoma, Arial,sans-serif; + font-size: 200%; + font-family: Tahoma,Arial,sans-serif; margin: 0px; padding: 2px 0px; } - + +#side-nav #projectname +{ + font-size: 130%; +} + #projectbrief { - font: 120% Tahoma, Arial,sans-serif; + font-size: 90%; + font-family: Tahoma,Arial,sans-serif; margin: 0px; padding: 0px; } #projectnumber { - font: 50% Tahoma, Arial,sans-serif; + font-size: 50%; + font-family: 50% Tahoma,Arial,sans-serif; margin: 0px; padding: 0px; } @@ -1366,6 +1449,7 @@ dl.section dd { margin: 0px; width: 100%; border-bottom: 1px solid #212425; + background-color: white; } .image @@ -1398,11 +1482,6 @@ dl.section dd { font-weight: bold; } -div.zoom -{ - border: 1px solid #4F5657; -} - dl.citelist { margin-bottom:50px; } @@ -1433,27 +1512,16 @@ div.toc { width: 200px; } -.PageDocRTL-title div.toc { - float: left !important; - text-align: right; -} - div.toc li { - background: url("bdwn.png") no-repeat scroll 0 5px transparent; - font: 10px/1.2 Verdana,DejaVu Sans,Geneva,sans-serif; + background: url("data:image/svg+xml;utf8,&%238595;") no-repeat scroll 0 5px transparent; + font: 10px/1.2 Verdana,'DejaVu Sans',Geneva,sans-serif; margin-top: 5px; padding-left: 10px; padding-top: 2px; } -.PageDocRTL-title div.toc li { - background-position-x: right !important; - padding-left: 0 !important; - padding-right: 10px; -} - div.toc h3 { - font: bold 12px/1.2 
Arial,FreeSans,sans-serif; + font: bold 12px/1.2 Verdana,'DejaVu Sans',Geneva,sans-serif; color: #171919; border-bottom: 0 none; margin: 0; @@ -1463,22 +1531,19 @@ div.toc ul { list-style: none outside none; border: medium none; padding: 0px; -} - -div.toc li.level1 { - margin-left: 0px; } -div.toc li.level2 { +div.toc li[class^='level'] { margin-left: 15px; } -div.toc li.level3 { - margin-left: 30px; +div.toc li.level1 { + margin-left: 0px; } -div.toc li.level4 { - margin-left: 45px; +div.toc li.empty { + background-image: none; + margin-top: 0px; } span.emoji { @@ -1487,24 +1552,8 @@ span.emoji { */ } -.PageDocRTL-title div.toc li.level1 { - margin-left: 0 !important; - margin-right: 0; -} - -.PageDocRTL-title div.toc li.level2 { - margin-left: 0 !important; - margin-right: 15px; -} - -.PageDocRTL-title div.toc li.level3 { - margin-left: 0 !important; - margin-right: 30px; -} - -.PageDocRTL-title div.toc li.level4 { - margin-left: 0 !important; - margin-right: 45px; +span.obfuscator { + display: none; } .inherit_header { @@ -1541,7 +1590,8 @@ tr.heading h2 { #powerTip { cursor: default; - white-space: nowrap; + /*white-space: nowrap;*/ + color: black; background-color: white; border: 1px solid gray; border-radius: 4px 4px 4px 4px; @@ -1564,6 +1614,10 @@ tr.heading h2 { font-weight: bold; } +#powerTip a { + color: #171919; +} + #powerTip div.ttname { font-weight: bold; } @@ -1575,7 +1629,9 @@ tr.heading h2 { #powerTip div { margin: 0px; padding: 0px; - font: 12px/16px Roboto,sans-serif; + font-size: 12px; + font-family: Roboto,sans-serif; + line-height: 16px; } #powerTip:before, #powerTip:after { @@ -1620,12 +1676,12 @@ tr.heading h2 { } #powerTip.n:after, #powerTip.ne:after, #powerTip.nw:after { - border-top-color: #FFFFFF; + border-top-color: white; border-width: 10px; margin: 0px -10px; } -#powerTip.n:before { - border-top-color: #808080; +#powerTip.n:before, #powerTip.ne:before, #powerTip.nw:before { + border-top-color: gray; border-width: 11px; margin: 0px 
-11px; } @@ -1648,13 +1704,13 @@ tr.heading h2 { } #powerTip.s:after, #powerTip.se:after, #powerTip.sw:after { - border-bottom-color: #FFFFFF; + border-bottom-color: white; border-width: 10px; margin: 0px -10px; } #powerTip.s:before, #powerTip.se:before, #powerTip.sw:before { - border-bottom-color: #808080; + border-bottom-color: gray; border-width: 11px; margin: 0px -11px; } @@ -1675,13 +1731,13 @@ tr.heading h2 { left: 100%; } #powerTip.e:after { - border-left-color: #FFFFFF; + border-left-color: gray; border-width: 10px; top: 50%; margin-top: -10px; } #powerTip.e:before { - border-left-color: #808080; + border-left-color: gray; border-width: 11px; top: 50%; margin-top: -11px; @@ -1691,13 +1747,13 @@ tr.heading h2 { right: 100%; } #powerTip.w:after { - border-right-color: #FFFFFF; + border-right-color: gray; border-width: 10px; top: 50%; margin-top: -10px; } #powerTip.w:before { - border-right-color: #808080; + border-right-color: gray; border-width: 11px; top: 50%; margin-top: -11px; @@ -1758,36 +1814,37 @@ th.markdownTableHeadCenter, td.markdownTableBodyCenter { text-align: center } -.DocNodeRTL { - text-align: right; - direction: rtl; +tt, code, kbd +{ + display: inline-block; +} +tt, code, kbd +{ + vertical-align: top; } +/* @end */ -.DocNodeLTR { - text-align: left; - direction: ltr; +u { + text-decoration: underline; } -table.DocNodeRTL { - width: auto; - margin-right: 0; - margin-left: auto; +details>summary { + list-style-type: none; } -table.DocNodeLTR { - width: auto; - margin-right: auto; - margin-left: 0; +details > summary::-webkit-details-marker { + display: none; } -tt, code, kbd, samp -{ - display: inline-block; - direction:ltr; +details>summary::before { + content: "\25ba"; + padding-right:4px; + font-size: 80%; } -/* @end */ -u { - text-decoration: underline; +details[open]>summary::before { + content: "\25bc"; + padding-right:4px; + font-size: 80%; } diff --git a/depends/mimalloc/docs/doxygen.svg b/depends/mimalloc/docs/doxygen.svg new file 
mode 100644 index 000000000000..aeb5facb8198 --- /dev/null +++ b/depends/mimalloc/docs/doxygen.svg @@ -0,0 +1,28 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/depends/mimalloc/docs/dynsections.js b/depends/mimalloc/docs/dynsections.js index 3174bd7bebbf..3cc426a65adc 100644 --- a/depends/mimalloc/docs/dynsections.js +++ b/depends/mimalloc/docs/dynsections.js @@ -22,100 +22,177 @@ @licend The above is the entire license notice for the JavaScript code in this file */ -function toggleVisibility(linkObj) -{ - var base = $(linkObj).attr('id'); - var summary = $('#'+base+'-summary'); - var content = $('#'+base+'-content'); - var trigger = $('#'+base+'-trigger'); - var src=$(trigger).attr('src'); - if (content.is(':visible')===true) { - content.hide(); - summary.show(); - $(linkObj).addClass('closed').removeClass('opened'); - $(trigger).attr('src',src.substring(0,src.length-8)+'closed.png'); - } else { - content.show(); - summary.hide(); - $(linkObj).removeClass('closed').addClass('opened'); - $(trigger).attr('src',src.substring(0,src.length-10)+'open.png'); - } - return false; -} -function updateStripes() -{ - $('table.directory tr'). 
- removeClass('even').filter(':visible:even').addClass('even'); +function toggleVisibility(linkObj) { + return dynsection.toggleVisibility(linkObj); } -function toggleLevel(level) -{ - $('table.directory tr').each(function() { - var l = this.id.split('_').length-1; - var i = $('#img'+this.id.substring(3)); - var a = $('#arr'+this.id.substring(3)); - if (l'); + // add vertical lines to other rows + $('span[class=lineno]').not(':eq(0)').append(''); + // add toggle controls to lines with fold divs + $('div[class=foldopen]').each(function() { + // extract specific id to use + const id = $(this).attr('id').replace('foldopen',''); + // extract start and end foldable fragment attributes + const start = $(this).attr('data-start'); + const end = $(this).attr('data-end'); + // replace normal fold span with controls for the first line of a foldable fragment + $(this).find('span[class=fold]:first').replaceWith(''); + // append div for folded (closed) representation + $(this).after(''); + // extract the first line from the "open" section to represent closed content + const line = $(this).children().first().clone(); + // remove any glow that might still be active on the original line + $(line).removeClass('glow'); + if (start) { + // if line already ends with a start marker (e.g. trailing {), remove it + $(line).html($(line).html().replace(new RegExp('\\s*'+start+'\\s*$','g'),'')); + } + // replace minus with plus symbol + $(line).find('span[class=fold]').css('background-image',codefold.plusImg[relPath]); + // append ellipsis + $(line).append(' '+start+''+end); + // insert constructed line into closed div + $('#foldclosed'+id).html(line); + }); + }, +}; /* @license-end */ diff --git a/depends/mimalloc/docs/environment.html b/depends/mimalloc/docs/environment.html index f571f95f8052..7da7c3053199 100644 --- a/depends/mimalloc/docs/environment.html +++ b/depends/mimalloc/docs/environment.html @@ -1,24 +1,26 @@ - + - - + + mi-malloc: Environment Options + - + + @@ -29,20 +31,16 @@
- + - -
-
mi-malloc -  1.7/2.0 +
+
mi-malloc 1.8/2.1
+
- -   + @@ -56,10 +54,15 @@
- + +
@@ -74,8 +77,8 @@
@@ -88,34 +91,48 @@
- +
+
+
+
+
Loading...
+
Searching...
+
No Matches
+
+
+
-
-
-
Environment Options
+
+
Environment Options
-

You can set further options either programmatically (using mi_option_set), or via environment variables.

+

You can set further options either programmatically (using mi_option_set), or via environment variables:

  • MIMALLOC_SHOW_STATS=1: show statistics when the program terminates.
  • MIMALLOC_VERBOSE=1: show verbose messages.
  • MIMALLOC_SHOW_ERRORS=1: show error and warning messages.
  • -
  • MIMALLOC_PAGE_RESET=0: by default, mimalloc will reset (or purge) OS pages when not in use to signal to the OS that the underlying physical memory can be reused. This can reduce memory fragmentation in long running (server) programs. By setting it to 0 no such page resets will be done which can improve performance for programs that are not long running. As an alternative, the MIMALLOC_RESET_DELAY=<msecs> can be set higher (100ms by default) to make the page reset occur less frequently instead of turning it off completely.
  • -
  • MIMALLOC_LARGE_OS_PAGES=1: use large OS pages (2MiB) when available; for some workloads this can significantly improve performance. Use MIMALLOC_VERBOSE to check if the large OS pages are enabled – usually one needs to explicitly allow large OS pages (as on Windows and Linux). However, sometimes the OS is very slow to reserve contiguous physical memory for large OS pages so use with care on systems that can have fragmented memory (for that reason, we generally recommend to use MIMALLOC_RESERVE_HUGE_OS_PAGES instead when possible).
  • -
  • MIMALLOC_RESERVE_HUGE_OS_PAGES=N: where N is the number of 1GiB huge OS pages. This reserves the huge pages at startup and sometimes this can give a large (latency) performance improvement on big workloads. Usually it is better to not use MIMALLOC_LARGE_OS_PAGES in combination with this setting. Just like large OS pages, use with care as reserving contiguous physical memory can take a long time when memory is fragmented (but reserving the huge pages is done at startup only once). Note that we usually need to explicitly enable huge OS pages (as on Windows and Linux)). With huge OS pages, it may be beneficial to set the setting MIMALLOC_EAGER_COMMIT_DELAY=N (N is 1 by default) to delay the initial N segments (of 4MiB) of a thread to not allocate in the huge OS pages; this prevents threads that are short lived and allocate just a little to take up space in the huge OS page area (which cannot be reset).
  • -
  • MIMALLOC_RESERVE_HUGE_OS_PAGES_AT=N: where N is the numa node. This reserves the huge pages at a specific numa node. (N is -1 by default to reserve huge pages evenly among the given number of numa nodes (or use the available ones as detected))
-

Use caution when using fork in combination with either large or huge OS pages: on a fork, the OS uses copy-on-write for all pages in the original process including the huge OS pages. When any memory is now written in that area, the OS will copy the entire 1GiB huge page (or 2MiB large page) which can cause the memory usage to grow in big increments.

+

Advanced options:

+
    +
  • MIMALLOC_ARENA_EAGER_COMMIT=2: turns on eager commit for the large arenas (usually 1GiB) from which mimalloc allocates segments and pages. Set this to 2 (default) to only enable this on overcommit systems (e.g. Linux). Set this to 1 to enable explicitly on other systems as well (like Windows or macOS) which may improve performance (as the whole arena is committed at once). Note that eager commit only increases the commit but not the actual the peak resident set (rss) so it is generally ok to enable this.
  • +
  • MIMALLOC_PURGE_DELAY=N: the delay in N milli-seconds (by default 10) after which mimalloc will purge OS pages that are not in use. This signals to the OS that the underlying physical memory can be reused which can reduce memory fragmentation especially in long running (server) programs. Setting N to 0 purges immediately when a page becomes unused which can improve memory usage but also decreases performance. Setting N to a higher value like 100 can improve performance (sometimes by a lot) at the cost of potentially using more memory at times. Setting it to -1 disables purging completely.
  • +
  • MIMALLOC_PURGE_DECOMMITS=1: By default "purging" memory means unused memory is decommitted (MEM_DECOMMIT on Windows, MADV_DONTNEED (which decresease rss immediately) on mmap systems). Set this to 0 to instead "reset" unused memory on a purge (MEM_RESET on Windows, generally MADV_FREE (which does not decrease rss immediately) on mmap systems). Mimalloc generally does not "free" OS memory but only "purges" OS memory, in other words, it tries to keep virtual address ranges and decommits within those ranges (to make the underlying physical memory available to other processes).
  • +
+

Further options for large workloads and services:

+
    +
  • MIMALLOC_USE_NUMA_NODES=N: pretend there are at most N NUMA nodes. If not set, the actual NUMA nodes are detected at runtime. Setting N to 1 may avoid problems in some virtual environments. Also, setting it to a lower number than the actual NUMA nodes is fine and will only cause threads to potentially allocate more memory across actual NUMA nodes (but this can happen in any case as NUMA local allocation is always a best effort but not guaranteed).
  • +
  • MIMALLOC_ALLOW_LARGE_OS_PAGES=1: use large OS pages (2 or 4MiB) when available; for some workloads this can significantly improve performance. When this option is disabled, it also disables transparent huge pages (THP) for the process (on Linux and Android). Use MIMALLOC_VERBOSE to check if the large OS pages are enabled – usually one needs to explicitly give permissions for large OS pages (as on Windows and Linux). However, sometimes the OS is very slow to reserve contiguous physical memory for large OS pages so use with care on systems that can have fragmented memory (for that reason, we generally recommend to use MIMALLOC_RESERVE_HUGE_OS_PAGES instead whenever possible).
  • +
  • MIMALLOC_RESERVE_HUGE_OS_PAGES=N: where N is the number of 1GiB huge OS pages. This reserves the huge pages at startup and sometimes this can give a large (latency) performance improvement on big workloads. Usually it is better to not use MIMALLOC_ALLOW_LARGE_OS_PAGES=1 in combination with this setting. Just like large OS pages, use with care as reserving contiguous physical memory can take a long time when memory is fragmented (but reserving the huge pages is done at startup only once). Note that we usually need to explicitly give permission for huge OS pages (as on Windows and Linux)). With huge OS pages, it may be beneficial to set the setting MIMALLOC_EAGER_COMMIT_DELAY=N (N is 1 by default) to delay the initial N segments (of 4MiB) of a thread to not allocate in the huge OS pages; this prevents threads that are short lived and allocate just a little to take up space in the huge OS page area (which cannot be purged as huge OS pages are pinned to physical memory). The huge pages are usually allocated evenly among NUMA nodes. We can use MIMALLOC_RESERVE_HUGE_OS_PAGES_AT=N where N is the numa node (starting at 0) to allocate all the huge pages at a specific numa node instead.
  • +
+

Use caution when using fork in combination with either large or huge OS pages: on a fork, the OS uses copy-on-write for all pages in the original process including the huge OS pages. When any memory is now written in that area, the OS will copy the entire 1GiB huge page (or 2MiB large page) which can cause the memory usage to grow in large increments.

diff --git a/depends/mimalloc/docs/functions.html b/depends/mimalloc/docs/functions.html index 373fafe2b8b4..043b8621bef0 100644 --- a/depends/mimalloc/docs/functions.html +++ b/depends/mimalloc/docs/functions.html @@ -1,24 +1,26 @@ - + - - + + mi-malloc: Data Fields + - + + @@ -29,20 +31,16 @@
- + - -
-
mi-malloc -  1.7/2.0 +
+
mi-malloc 1.8/2.1
+
- -   + @@ -56,10 +54,15 @@
- + +
@@ -74,8 +77,8 @@
@@ -88,35 +91,34 @@
- +
+
+
+
+
Loading...
+
Searching...
+
No Matches
+
+
+
Here is a list of all struct and union fields with links to the structures/unions they belong to:
diff --git a/depends/mimalloc/docs/functions_vars.html b/depends/mimalloc/docs/functions_vars.html index a12ef6226574..ec9a4de13482 100644 --- a/depends/mimalloc/docs/functions_vars.html +++ b/depends/mimalloc/docs/functions_vars.html @@ -1,24 +1,26 @@ - + - - + + mi-malloc: Data Fields - Variables + - + + @@ -29,20 +31,16 @@
- + - -
-
mi-malloc -  1.7/2.0 +
+
mi-malloc 1.8/2.1
+
- -   + @@ -56,10 +54,15 @@
- + +
@@ -74,8 +77,8 @@
@@ -88,35 +91,34 @@
- +
+
+
+
+
Loading...
+
Searching...
+
No Matches
+
+
+
diff --git a/depends/mimalloc/docs/group__aligned.html b/depends/mimalloc/docs/group__aligned.html index bd11f30f2732..f107758e8395 100644 --- a/depends/mimalloc/docs/group__aligned.html +++ b/depends/mimalloc/docs/group__aligned.html @@ -1,24 +1,26 @@ - + - - + + mi-malloc: Aligned Allocation + - + + @@ -29,20 +31,16 @@
- + - -
-
mi-malloc -  1.7/2.0 +
+
mi-malloc 1.8/2.1
+
- -   + @@ -56,10 +54,15 @@
- + +
@@ -74,8 +77,8 @@
@@ -88,180 +91,142 @@
- +
+
+
+
+
Loading...
+
Searching...
+
No Matches
+
+
+
-
-
Aligned Allocation
+
Aligned Allocation

Allocating aligned memory blocks. More...

- - - - -

-Macros

#define MI_ALIGNMENT_MAX
 The maximum supported alignment size (currently 1MiB). More...
 
- - - - - - - - - - - - - - - - - - - + + + + + + + + + + + + + + + + + +

+

Functions

void * mi_malloc_aligned (size_t size, size_t alignment)
 Allocate size bytes aligned by alignment. More...
 
void * mi_zalloc_aligned (size_t size, size_t alignment)
 
void * mi_calloc_aligned (size_t count, size_t size, size_t alignment)
 
void * mi_realloc_aligned (void *p, size_t newsize, size_t alignment)
 
void * mi_malloc_aligned_at (size_t size, size_t alignment, size_t offset)
 Allocate size bytes aligned by alignment at a specified offset. More...
 
void * mi_zalloc_aligned_at (size_t size, size_t alignment, size_t offset)
 
void * mi_calloc_aligned_at (size_t count, size_t size, size_t alignment, size_t offset)
 
void * mi_realloc_aligned_at (void *p, size_t newsize, size_t alignment, size_t offset)
 
void * mi_malloc_aligned (size_t size, size_t alignment)
 Allocate size bytes aligned by alignment.
 
void * mi_zalloc_aligned (size_t size, size_t alignment)
 
void * mi_calloc_aligned (size_t count, size_t size, size_t alignment)
 
void * mi_realloc_aligned (void *p, size_t newsize, size_t alignment)
 
void * mi_malloc_aligned_at (size_t size, size_t alignment, size_t offset)
 Allocate size bytes aligned by alignment at a specified offset.
 
void * mi_zalloc_aligned_at (size_t size, size_t alignment, size_t offset)
 
void * mi_calloc_aligned_at (size_t count, size_t size, size_t alignment, size_t offset)
 
void * mi_realloc_aligned_at (void *p, size_t newsize, size_t alignment, size_t offset)
 

Detailed Description

Allocating aligned memory blocks.

-

Macro Definition Documentation

- -

◆ MI_ALIGNMENT_MAX

- -
-
- - - - -
#define MI_ALIGNMENT_MAX
-
- -

The maximum supported alignment size (currently 1MiB).

- -
-
+

Note that alignment always follows size for consistency with the unaligned allocation API, but unfortunately this differs from posix_memalign and aligned_alloc in the C library.

Function Documentation

- -

◆ mi_calloc_aligned()

+ +

◆ mi_calloc_aligned()

- + - - + - - + - - - - - - - +
void* mi_calloc_aligned void * mi_calloc_aligned (size_t count, size_t count,
size_t size, size_t size,
size_t alignment 
)size_t alignment )
- -

◆ mi_calloc_aligned_at()

+ +

◆ mi_calloc_aligned_at()

- + - - + - - + - - + - - - - - - - +
void* mi_calloc_aligned_at void * mi_calloc_aligned_at (size_t count, size_t count,
size_t size, size_t size,
size_t alignment, size_t alignment,
size_t offset 
)size_t offset )
- -

◆ mi_malloc_aligned()

+ +

◆ mi_malloc_aligned()

- + - - + - - - - - - - +
void* mi_malloc_aligned void * mi_malloc_aligned (size_t size, size_t size,
size_t alignment 
)size_t alignment )
-

Allocate size bytes aligned by alignment.

+

Allocate size bytes aligned by alignment.

Parameters
- +
sizenumber of bytes to allocate.
alignmentthe minimal alignment of the allocated memory. Must be less than MI_ALIGNMENT_MAX.
alignmentthe minimal alignment of the allocated memory.
-
Returns
pointer to the allocated memory or NULL if out of memory. The returned pointer is aligned by alignment, i.e. (uintptr_t)p % alignment == 0.
-

Returns a unique pointer if called with size 0.

See also
_aligned_malloc (on Windows)
+
Returns
pointer to the allocated memory or NULL if out of memory, or if the alignment is not a power of 2 (including 0). The size is unrestricted (and does not have to be an integral multiple of the alignment). The returned pointer is aligned by alignment, i.e. (uintptr_t)p % alignment == 0. Returns a unique pointer if called with size 0.
+

Note that alignment always follows size for consistency with the unaligned allocation API, but unfortunately this differs from posix_memalign and aligned_alloc in the C library.

+
See also
aligned_alloc (in the standard C11 library, with switched arguments!)
+
+_aligned_malloc (on Windows)
aligned_alloc (on BSD, with switched arguments!)
@@ -271,182 +236,142 @@

-

◆ mi_malloc_aligned_at()

+ +

◆ mi_malloc_aligned_at()

- + - - + - - + - - - - - - - +
void* mi_malloc_aligned_at void * mi_malloc_aligned_at (size_t size, size_t size,
size_t alignment, size_t alignment,
size_t offset 
)size_t offset )
-

Allocate size bytes aligned by alignment at a specified offset.

+

Allocate size bytes aligned by alignment at a specified offset.

Parameters
- +
sizenumber of bytes to allocate.
alignmentthe minimal alignment of the allocated memory at offset.
alignmentthe minimal alignment of the allocated memory at offset.
offsetthe offset that should be aligned.
-
Returns
pointer to the allocated memory or NULL if out of memory. The returned pointer is aligned by alignment at offset, i.e. ((uintptr_t)p + offset) % alignment == 0.
-

Returns a unique pointer if called with size 0.

See also
_aligned_offset_malloc (on Windows)
+
Returns
pointer to the allocated memory or NULL if out of memory, or if the alignment is not a power of 2 (including 0). The size is unrestricted (and does not have to be an integral multiple of the alignment). The returned pointer is aligned by alignment, i.e. (uintptr_t)p % alignment == 0. Returns a unique pointer if called with size 0.
+
See also
_aligned_offset_malloc (on Windows)
- -

◆ mi_realloc_aligned()

+ +

◆ mi_realloc_aligned()

- + - - + - - + - - - - - - - +
void* mi_realloc_aligned void * mi_realloc_aligned (void * p, void * p,
size_t newsize, size_t newsize,
size_t alignment 
)size_t alignment )
- -

◆ mi_realloc_aligned_at()

+ +

◆ mi_realloc_aligned_at()

- + - - + - - + - - + - - - - - - - +
void* mi_realloc_aligned_at void * mi_realloc_aligned_at (void * p, void * p,
size_t newsize, size_t newsize,
size_t alignment, size_t alignment,
size_t offset 
)size_t offset )
- -

◆ mi_zalloc_aligned()

+ +

◆ mi_zalloc_aligned()

- + - - + - - - - - - - +
void* mi_zalloc_aligned void * mi_zalloc_aligned (size_t size, size_t size,
size_t alignment 
)size_t alignment )
- -

◆ mi_zalloc_aligned_at()

+ +

◆ mi_zalloc_aligned_at()

- + - - + - - + - - - - - - - +
void* mi_zalloc_aligned_at void * mi_zalloc_aligned_at (size_t size, size_t size,
size_t alignment, size_t alignment,
size_t offset 
)size_t offset )
@@ -458,7 +383,7 @@

diff --git a/depends/mimalloc/docs/group__aligned.js b/depends/mimalloc/docs/group__aligned.js index 06ccb0c370bb..d77e5036c311 100644 --- a/depends/mimalloc/docs/group__aligned.js +++ b/depends/mimalloc/docs/group__aligned.js @@ -1,12 +1,11 @@ var group__aligned = [ - [ "MI_ALIGNMENT_MAX", "group__aligned.html#ga83c03016066b438f51a8095e9140be06", null ], - [ "mi_calloc_aligned", "group__aligned.html#ga53dddb4724042a90315b94bc268fb4c9", null ], - [ "mi_calloc_aligned_at", "group__aligned.html#ga08647c4593f3b2eef24a919a73eba3a3", null ], - [ "mi_malloc_aligned", "group__aligned.html#ga68930196751fa2cca9e1fd0d71bade56", null ], - [ "mi_malloc_aligned_at", "group__aligned.html#ga5850da130c936bd77db039dcfbc8295d", null ], - [ "mi_realloc_aligned", "group__aligned.html#ga4028d1cf4aa4c87c880747044a8322ae", null ], - [ "mi_realloc_aligned_at", "group__aligned.html#gaf66a9ae6c6f08bd6be6fb6ea771faffb", null ], - [ "mi_zalloc_aligned", "group__aligned.html#ga0cadbcf5b89a7b6fb171bc8df8734819", null ], - [ "mi_zalloc_aligned_at", "group__aligned.html#ga5f8c2353766db522565e642fafd8a3f8", null ] + [ "mi_calloc_aligned", "group__aligned.html#ga424ef386fb1f9f8e0a86ab53f16eaaf1", null ], + [ "mi_calloc_aligned_at", "group__aligned.html#ga977f96bd2c5c141bcd70e6685c90d6c3", null ], + [ "mi_malloc_aligned", "group__aligned.html#ga69578ff1a98ca16e1dcd02c0995cd65c", null ], + [ "mi_malloc_aligned_at", "group__aligned.html#ga2022f71b95a7cd6cce1b6e07752ae8ca", null ], + [ "mi_realloc_aligned", "group__aligned.html#ga5d7a46d054b4d7abe9d8d2474add2edf", null ], + [ "mi_realloc_aligned_at", "group__aligned.html#gad06dcf2bb8faadb2c8ea61ee5d24bbf6", null ], + [ "mi_zalloc_aligned", "group__aligned.html#gaac7d0beb782f9b9ac31f47492b130f82", null ], + [ "mi_zalloc_aligned_at", "group__aligned.html#ga7c1778805ce50ebbf02ccbd5e39d5dba", null ] ]; \ No newline at end of file diff --git a/depends/mimalloc/docs/group__analysis.html b/depends/mimalloc/docs/group__analysis.html index 
883099fe50a7..882830ebb5b4 100644 --- a/depends/mimalloc/docs/group__analysis.html +++ b/depends/mimalloc/docs/group__analysis.html @@ -1,24 +1,26 @@ - + - - + + mi-malloc: Heap Introspection + - + + @@ -29,20 +31,16 @@
- + - -
-
mi-malloc -  1.7/2.0 +
+
mi-malloc 1.8/2.1
+
- -   + @@ -56,10 +54,15 @@
- + +
@@ -74,8 +77,8 @@

@@ -88,9 +91,16 @@
- +
+
+
+
+
Loading...
+
Searching...
+
No Matches
+
+
+
@@ -98,46 +108,48 @@ Data Structures | Typedefs | Functions
-
-
Heap Introspection
+
Heap Introspection

Inspect the heap at runtime. More...

- - - + +

+

Data Structures

struct  mi_heap_area_t
 An area of heap space contains blocks of a single size. More...
struct  mi_heap_area_t
 An area of heap space contains blocks of a single size. More...
 
- - - - + + +

+

Typedefs

typedef bool() mi_block_visit_fun(const mi_heap_t *heap, const mi_heap_area_t *area, void *block, size_t block_size, void *arg)
 Visitor function passed to mi_heap_visit_blocks() More...
 
typedef bool mi_block_visit_fun(const mi_heap_t *heap, const mi_heap_area_t *area, void *block, size_t block_size, void *arg)
 Visitor function passed to mi_heap_visit_blocks()
 
- - - + + - - + + - - + + - - + + + + +

+

Functions

bool mi_heap_contains_block (mi_heap_t *heap, const void *p)
 Does a heap contain a pointer to a previously allocated block? More...
bool mi_heap_contains_block (mi_heap_t *heap, const void *p)
 Does a heap contain a pointer to a previously allocated block?
 
bool mi_heap_check_owned (mi_heap_t *heap, const void *p)
 Check safely if any pointer is part of a heap. More...
bool mi_heap_check_owned (mi_heap_t *heap, const void *p)
 Check safely if any pointer is part of a heap.
 
bool mi_check_owned (const void *p)
 Check safely if any pointer is part of the default heap of this thread. More...
bool mi_check_owned (const void *p)
 Check safely if any pointer is part of the default heap of this thread.
 
bool mi_heap_visit_blocks (const mi_heap_t *heap, bool visit_all_blocks, mi_block_visit_fun *visitor, void *arg)
 Visit all areas and blocks in a heap. More...
bool mi_heap_visit_blocks (const mi_heap_t *heap, bool visit_all_blocks, mi_block_visit_fun *visitor, void *arg)
 Visit all areas and blocks in a heap.
 
bool mi_abandoned_visit_blocks (mi_subproc_id_t subproc_id, int heap_tag, bool visit_blocks, mi_block_visit_fun *visitor, void *arg)
 Visit all areas and blocks in abandoned heaps.
 

Detailed Description

Inspect the heap at runtime.


Data Structure Documentation

-

◆ mi_heap_area_t

+

◆ mi_heap_area_t

@@ -152,31 +164,43 @@

+size_t +void * +size_t +size_t + + + + + + +size_t - - +
Data Fields
-size_t block_size size in bytes of one block
-void * blocks start of the area containing heap blocks
-size_t committed current committed bytes of this area
-size_t +full_block_size +size in bytes of a full block including padding and metadata.
+int +heap_tag +heap tag associated with this area (see mi_heap_new_ex)
+size_t reserved bytes reserved for this area
-size_t used @@ -186,27 +210,77 @@

Typedef Documentation

-
-

◆ mi_block_visit_fun

+ +

◆ mi_block_visit_fun

- +
typedef bool() mi_block_visit_fun(const mi_heap_t *heap, const mi_heap_area_t *area, void *block, size_t block_size, void *arg)typedef bool mi_block_visit_fun(const mi_heap_t *heap, const mi_heap_area_t *area, void *block, size_t block_size, void *arg)
-

Visitor function passed to mi_heap_visit_blocks()

-
Returns
true if ok, false to stop visiting (i.e. break)
-

This function is always first called for every area with block as a NULL pointer. If visit_all_blocks was true, the function is then called for every allocated block in that area.

+

Visitor function passed to mi_heap_visit_blocks()

+
Returns
true if ok, false to stop visiting (i.e. break)
+

This function is always first called for every area with block as a NULL pointer. If visit_all_blocks was true, the function is then called for every allocated block in that area.

Function Documentation

- -

◆ mi_check_owned()

+ +

◆ mi_abandoned_visit_blocks()

+ +
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + +
bool mi_abandoned_visit_blocks (mi_subproc_id_t subproc_id,
int heap_tag,
bool visit_blocks,
mi_block_visit_fun * visitor,
void * arg )
+
+ +

Visit all areas and blocks in abandoned heaps.

+
Parameters
+ + + + + + +
subproc_idThe sub-process id associated with the abandoned heaps.
heap_tagVisit only abandoned memory with the specified heap tag, use -1 to visit all abandoned memory.
visit_blocksIf true visits all allocated blocks, otherwise visitor is only called for every heap area.
visitorThis function is called for every area in the heap (with block as NULL). If visit_all_blocks is true, visitor is also called for every allocated block in every area (with block!=NULL). return false from this function to stop visiting early.
argextra argument passed to the visitor.
+
+
+
Returns
true if all areas and blocks were visited.
+

Note: requires the option mi_option_visit_abandoned to be set at the start of the program.

+ +
+
+ +

◆ mi_check_owned()

@@ -214,8 +288,7 @@

bool mi_check_owned

(const void * p)const void * p)
@@ -228,15 +301,15 @@

Returns
true if p points to a block in default heap of this thread.
-

Note: expensive function, linear in the pages in the heap.

See also
mi_heap_contains_block()
+
Returns
true if p points to a block in default heap of this thread.
+

Note: expensive function, linear in the pages in the heap.

See also
mi_heap_contains_block()
-mi_heap_get_default()
+mi_heap_get_default()

- -

◆ mi_heap_check_owned()

+ +

◆ mi_heap_check_owned()

@@ -244,19 +317,12 @@

bool mi_heap_check_owned ( - mi_heap_t *  - heap, + mi_heap_t * heap, - const void *  - p  - - - - ) - + const void * p )

@@ -269,15 +335,15 @@

Returns
true if p points to a block in heap.
-

Note: expensive function, linear in the pages in the heap.

See also
mi_heap_contains_block()
+
Returns
true if p points to a block in heap.
+

Note: expensive function, linear in the pages in the heap.

See also
mi_heap_contains_block()
-mi_heap_get_default()
+mi_heap_get_default()

- -

◆ mi_heap_contains_block()

+ +

◆ mi_heap_contains_block()

@@ -285,19 +351,12 @@

bool mi_heap_contains_block ( - mi_heap_t *  - heap, + mi_heap_t * heap, - const void *  - p  - - - - ) - + const void * p )

@@ -310,13 +369,13 @@

Returns
true if the block pointed to by p is in the heap.
-
See also
mi_heap_check_owned()
+
Returns
true if the block pointed to by p is in the heap.
+
See also
mi_heap_check_owned()

- -

◆ mi_heap_visit_blocks()

+ +

◆ mi_heap_visit_blocks()

@@ -372,7 +422,7 @@

diff --git a/depends/mimalloc/docs/group__analysis.js b/depends/mimalloc/docs/group__analysis.js index 351783628ddf..5e72b94a5e4f 100644 --- a/depends/mimalloc/docs/group__analysis.js +++ b/depends/mimalloc/docs/group__analysis.js @@ -4,10 +4,13 @@ var group__analysis = [ "block_size", "group__analysis.html#a332a6c14d736a99699d5453a1cb04b41", null ], [ "blocks", "group__analysis.html#ae0085e6e1cf059a4eb7767e30e9991b8", null ], [ "committed", "group__analysis.html#ab47526df656d8837ec3e97f11b83f835", null ], + [ "full_block_size", "group__analysis.html#ab53664e31d7fe2564f8d42041ef75cb3", null ], + [ "heap_tag", "group__analysis.html#a2b7a0c92ece8daf46b558efc990ebdc1", null ], [ "reserved", "group__analysis.html#ae848a3e6840414891035423948ca0383", null ], [ "used", "group__analysis.html#ab820302c5cd0df133eb8e51650a008b4", null ] ] ], - [ "mi_block_visit_fun", "group__analysis.html#gadfa01e2900f0e5d515ad5506b26f6d65", null ], + [ "mi_block_visit_fun", "group__analysis.html#ga8255dc9371e6b299d9802a610c4e34ec", null ], + [ "mi_abandoned_visit_blocks", "group__analysis.html#ga6a4865a887b2ec5247854af61562503c", null ], [ "mi_check_owned", "group__analysis.html#ga628c237489c2679af84a4d0d143b3dd5", null ], [ "mi_heap_check_owned", "group__analysis.html#ga0d67c1789faaa15ff366c024fcaf6377", null ], [ "mi_heap_contains_block", "group__analysis.html#gaa862aa8ed8d57d84cae41fc1022d71af", null ], diff --git a/depends/mimalloc/docs/group__analysis_structmi__heap__area__t.js b/depends/mimalloc/docs/group__analysis_structmi__heap__area__t.js index 2dbabc5cde17..41a8e77abecc 100644 --- a/depends/mimalloc/docs/group__analysis_structmi__heap__area__t.js +++ b/depends/mimalloc/docs/group__analysis_structmi__heap__area__t.js @@ -3,6 +3,8 @@ var group__analysis_structmi__heap__area__t = [ "block_size", "group__analysis.html#a332a6c14d736a99699d5453a1cb04b41", null ], [ "blocks", "group__analysis.html#ae0085e6e1cf059a4eb7767e30e9991b8", null ], [ "committed", 
"group__analysis.html#ab47526df656d8837ec3e97f11b83f835", null ], + [ "full_block_size", "group__analysis.html#ab53664e31d7fe2564f8d42041ef75cb3", null ], + [ "heap_tag", "group__analysis.html#a2b7a0c92ece8daf46b558efc990ebdc1", null ], [ "reserved", "group__analysis.html#ae848a3e6840414891035423948ca0383", null ], [ "used", "group__analysis.html#ab820302c5cd0df133eb8e51650a008b4", null ] ]; \ No newline at end of file diff --git a/depends/mimalloc/docs/group__cpp.html b/depends/mimalloc/docs/group__cpp.html index e81179feb643..d998433469de 100644 --- a/depends/mimalloc/docs/group__cpp.html +++ b/depends/mimalloc/docs/group__cpp.html @@ -1,24 +1,26 @@ - + - - + + mi-malloc: C++ wrappers + - + + @@ -29,20 +31,16 @@
- + - -
-
mi-malloc -  1.7/2.0 +
+
mi-malloc 1.8/2.1
+
- -   + @@ -56,10 +54,15 @@
- + +
@@ -74,8 +77,8 @@
@@ -88,59 +91,65 @@
- +
+
+
+
+
Loading...
+
Searching...
+
No Matches
+
+
+
-
-
C++ wrappers
+
C++ wrappers

mi_ prefixed implementations of various allocation functions that use C++ semantics on out-of-memory, generally calling std::get_new_handler and raising a std::bad_alloc exception on failure. More...

- - - + +

+

Data Structures

struct  mi_stl_allocator< T >
 std::allocator implementation for mimalloc for use in STL containers. More...
struct  mi_stl_allocator< T >
 std::allocator implementation for mimalloc for use in STL containers. More...
 
- - - - - - - - - - - - - - - - - - - - - - + + + + + + + + + + + + + + + + + + + + +

+

Functions

void * mi_new (std::size_t n) noexcept(false)
 like mi_malloc(), but when out of memory, use std::get_new_handler and raise std::bad_alloc exception on failure. More...
 
void * mi_new_n (size_t count, size_t size) noexcept(false)
 like mi_mallocn(), but when out of memory, use std::get_new_handler and raise std::bad_alloc exception on failure. More...
 
void * mi_new_aligned (std::size_t n, std::align_val_t alignment) noexcept(false)
 like mi_malloc_aligned(), but when out of memory, use std::get_new_handler and raise std::bad_alloc exception on failure. More...
 
void * mi_new_nothrow (size_t n)
 like mi_malloc, but when out of memory, use std::get_new_handler but return NULL on failure. More...
 
void * mi_new_aligned_nothrow (size_t n, size_t alignment)
 like mi_malloc_aligned, but when out of memory, use std::get_new_handler but return NULL on failure. More...
 
void * mi_new_realloc (void *p, size_t newsize)
 like mi_realloc(), but when out of memory, use std::get_new_handler and raise std::bad_alloc exception on failure. More...
 
void * mi_new_reallocn (void *p, size_t newcount, size_t size)
 like mi_reallocn(), but when out of memory, use std::get_new_handler and raise std::bad_alloc exception on failure. More...
 
void * mi_new (std::size_t n) noexcept(false)
 like mi_malloc(), but when out of memory, use std::get_new_handler and raise std::bad_alloc exception on failure.
 
void * mi_new_n (size_t count, size_t size) noexcept(false)
 like mi_mallocn(), but when out of memory, use std::get_new_handler and raise std::bad_alloc exception on failure.
 
void * mi_new_aligned (std::size_t n, std::align_val_t alignment) noexcept(false)
 like mi_malloc_aligned(), but when out of memory, use std::get_new_handler and raise std::bad_alloc exception on failure.
 
void * mi_new_nothrow (size_t n)
 like mi_malloc, but when out of memory, use std::get_new_handler but return NULL on failure.
 
void * mi_new_aligned_nothrow (size_t n, size_t alignment)
 like mi_malloc_aligned, but when out of memory, use std::get_new_handler but return NULL on failure.
 
void * mi_new_realloc (void *p, size_t newsize)
 like mi_realloc(), but when out of memory, use std::get_new_handler and raise std::bad_alloc exception on failure.
 
void * mi_new_reallocn (void *p, size_t newcount, size_t size)
 like mi_reallocn(), but when out of memory, use std::get_new_handler and raise std::bad_alloc exception on failure.
 

Detailed Description

mi_ prefixed implementations of various allocation functions that use C++ semantics on out-of-memory, generally calling std::get_new_handler and raising a std::bad_alloc exception on failure.

-

Note: use the mimalloc-new-delete.h header to override the new and delete operators globally. The wrappers here are mostly for convience for library writers that need to interface with mimalloc from C++.

+

Note: use the mimalloc-new-delete.h header to override the new and delete operators globally. The wrappers here are mostly for convenience for library writers that need to interface with mimalloc from C++.


Data Structure Documentation

-

◆ mi_stl_allocator

+

◆ mi_stl_allocator

@@ -150,10 +159,8 @@

-

template<class T>
-struct mi_stl_allocator< T >

- -

std::allocator implementation for mimalloc for use in STL containers.

+
template<class T>
+struct mi_stl_allocator< T >

std::allocator implementation for mimalloc for use in STL containers.

For example:

Function Documentation

-
-

◆ mi_new()

+ +

◆ mi_new()

- - - - - -
- + - - +
void* mi_new void * mi_new (std::size_t n)std::size_t n)
-
-noexcept
-

like mi_malloc(), but when out of memory, use std::get_new_handler and raise std::bad_alloc exception on failure.

+

like mi_malloc(), but when out of memory, use std::get_new_handler and raise std::bad_alloc exception on failure.

- -

◆ mi_new_aligned()

+ +

◆ mi_new_aligned()

- - - - - -
- + - - + - - - - - - - +
void* mi_new_aligned void * mi_new_aligned (std::size_t n, std::size_t n,
std::align_val_t alignment 
)std::align_val_t alignment )
-
-noexcept
-

like mi_malloc_aligned(), but when out of memory, use std::get_new_handler and raise std::bad_alloc exception on failure.

+

like mi_malloc_aligned(), but when out of memory, use std::get_new_handler and raise std::bad_alloc exception on failure.

- -

◆ mi_new_aligned_nothrow()

+ +

◆ mi_new_aligned_nothrow()

- + - - + - - - - - - - +
void* mi_new_aligned_nothrow void * mi_new_aligned_nothrow (size_t n, size_t n,
size_t alignment 
)size_t alignment )
-

like mi_malloc_aligned, but when out of memory, use std::get_new_handler but return NULL on failure.

+

like mi_malloc_aligned, but when out of memory, use std::get_new_handler but return NULL on failure.

- -

◆ mi_new_n()

+ +

◆ mi_new_n()

- - - - - -
- + - - + - - - - - - - +
void* mi_new_n void * mi_new_n (size_t count, size_t count,
size_t size 
)size_t size )
-
-noexcept
-

like mi_mallocn(), but when out of memory, use std::get_new_handler and raise std::bad_alloc exception on failure.

+

like mi_mallocn(), but when out of memory, use std::get_new_handler and raise std::bad_alloc exception on failure.

- -

◆ mi_new_nothrow()

+ +

◆ mi_new_nothrow()

- + - - +
void* mi_new_nothrow void * mi_new_nothrow (size_t n)size_t n)
-

like mi_malloc, but when out of memory, use std::get_new_handler but return NULL on failure.

+

like mi_malloc, but when out of memory, use std::get_new_handler but return NULL on failure.

- -

◆ mi_new_realloc()

+ +

◆ mi_new_realloc()

- + - - + - - - - - - - +
void* mi_new_realloc void * mi_new_realloc (void * p, void * p,
size_t newsize 
)size_t newsize )
-

like mi_realloc(), but when out of memory, use std::get_new_handler and raise std::bad_alloc exception on failure.

+

like mi_realloc(), but when out of memory, use std::get_new_handler and raise std::bad_alloc exception on failure.

- -

◆ mi_new_reallocn()

+ +

◆ mi_new_reallocn()

- + - - + - - + - - - - - - - +
void* mi_new_reallocn void * mi_new_reallocn (void * p, void * p,
size_t newcount, size_t newcount,
size_t size 
)size_t size )
-

like mi_reallocn(), but when out of memory, use std::get_new_handler and raise std::bad_alloc exception on failure.

+

like mi_reallocn(), but when out of memory, use std::get_new_handler and raise std::bad_alloc exception on failure.

@@ -386,7 +331,7 @@

diff --git a/depends/mimalloc/docs/group__cpp.js b/depends/mimalloc/docs/group__cpp.js index 207066468529..355f1ac6d053 100644 --- a/depends/mimalloc/docs/group__cpp.js +++ b/depends/mimalloc/docs/group__cpp.js @@ -1,11 +1,11 @@ var group__cpp = [ - [ "mi_stl_allocator", "group__cpp.html#structmi__stl__allocator", null ], - [ "mi_new", "group__cpp.html#gaad048a9fce3d02c5909cd05c6ec24545", null ], - [ "mi_new_aligned", "group__cpp.html#gaef2c2bdb4f70857902d3c8903ac095f3", null ], - [ "mi_new_aligned_nothrow", "group__cpp.html#gab5e29558926d934c3f1cae8c815f942c", null ], - [ "mi_new_n", "group__cpp.html#gae7bc4f56cd57ed3359060ff4f38bda81", null ], - [ "mi_new_nothrow", "group__cpp.html#gaeaded64eda71ed6b1d569d3e723abc4a", null ], - [ "mi_new_realloc", "group__cpp.html#gaab78a32f55149e9fbf432d5288e38e1e", null ], - [ "mi_new_reallocn", "group__cpp.html#ga756f4b2bc6a7ecd0a90baea8e90c7907", null ] + [ "mi_stl_allocator< T >", "group__cpp.html#structmi__stl__allocator", null ], + [ "mi_new", "group__cpp.html#ga633d96e3bc7011f960df9f3b2731fc6a", null ], + [ "mi_new_aligned", "group__cpp.html#ga79c54da0b4b4ce9fcc11d2f6ef6675f8", null ], + [ "mi_new_aligned_nothrow", "group__cpp.html#ga92ae00b6dd64406c7e64557711ec04b7", null ], + [ "mi_new_n", "group__cpp.html#gadd11b85c15d21d308386844b5233856c", null ], + [ "mi_new_nothrow", "group__cpp.html#ga5cb4f120d1f7296074256215aa9a9e54", null ], + [ "mi_new_realloc", "group__cpp.html#ga6867d89baf992728e0cc20a1f47db4d0", null ], + [ "mi_new_reallocn", "group__cpp.html#gaace912ce086682d56f3ce9f7638d9d67", null ] ]; \ No newline at end of file diff --git a/depends/mimalloc/docs/group__extended.html b/depends/mimalloc/docs/group__extended.html index e54991180272..4eea7c69bc5e 100644 --- a/depends/mimalloc/docs/group__extended.html +++ b/depends/mimalloc/docs/group__extended.html @@ -1,24 +1,26 @@ - + - - + + mi-malloc: Extended Functions + - + + @@ -29,20 +31,16 @@
- + - -
-
mi-malloc -  1.7/2.0 +
+
mi-malloc 1.8/2.1
+
- -   + @@ -56,10 +54,15 @@
- + +
@@ -74,8 +77,8 @@
@@ -88,9 +91,16 @@
- +
+
+
+
+
Loading...
+
Searching...
+
No Matches
+
+
+
@@ -98,106 +108,144 @@ Macros | Typedefs | Functions
-
-
Extended Functions
+
Extended Functions

Extended functionality. More...

- - - + +

+

Macros

#define MI_SMALL_SIZE_MAX
 Maximum size allowed for small allocations in mi_malloc_small and mi_zalloc_small (usually 128*sizeof(void*) (= 1KB on 64-bit systems)) More...
#define MI_SMALL_SIZE_MAX
 Maximum size allowed for small allocations in mi_malloc_small and mi_zalloc_small (usually 128*sizeof(void*) (= 1KB on 64-bit systems))
 
- - - - - - - - - - + + + + + + + + + + + + + + +

+

Typedefs

typedef void() mi_deferred_free_fun(bool force, unsigned long long heartbeat, void *arg)
 Type of deferred free functions. More...
 
typedef void() mi_output_fun(const char *msg, void *arg)
 Type of output functions. More...
 
typedef void() mi_error_fun(int err, void *arg)
 Type of error callback functions. More...
 
typedef void mi_deferred_free_fun(bool force, unsigned long long heartbeat, void *arg)
 Type of deferred free functions.
 
typedef void mi_output_fun(const char *msg, void *arg)
 Type of output functions.
 
typedef void mi_error_fun(int err, void *arg)
 Type of error callback functions.
 
typedef int mi_arena_id_t
 Mimalloc uses large (virtual) memory areas, called "arena"s, from the OS to manage its memory.
 
typedef void * mi_subproc_id_t
 A process can associate threads with sub-processes.
 
- - - - - - - - - + + + + + + + + - - + + - - + + - - + + - - + + - - + + - - + + - - + + - - + + - - + + - - + + - - + + - - + + - - + + - - + + - - + + - - + + - - + + - - + + - - + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +

+

Functions

void * mi_malloc_small (size_t size)
 Allocate a small object. More...
 
void * mi_zalloc_small (size_t size)
 Allocate a zero initialized small object. More...
 
size_t mi_usable_size (void *p)
 Return the available bytes in a memory block. More...
void * mi_malloc_small (size_t size)
 Allocate a small object.
 
void * mi_zalloc_small (size_t size)
 Allocate a zero initialized small object.
 
size_t mi_usable_size (void *p)
 Return the available bytes in a memory block.
 
size_t mi_good_size (size_t size)
 Return the used allocation size. More...
size_t mi_good_size (size_t size)
 Return the used allocation size.
 
void mi_collect (bool force)
 Eagerly free memory. More...
void mi_collect (bool force)
 Eagerly free memory.
 
void mi_stats_print (void *out)
 Deprecated. More...
void mi_stats_print (void *out)
 Deprecated.
 
void mi_stats_print_out (mi_output_fun *out, void *arg)
 Print the main statistics. More...
void mi_stats_print_out (mi_output_fun *out, void *arg)
 Print the main statistics.
 
void mi_stats_reset (void)
 Reset statistics. More...
void mi_stats_reset (void)
 Reset statistics.
 
void mi_stats_merge (void)
 Merge thread local statistics with the main statistics and reset. More...
void mi_stats_merge (void)
 Merge thread local statistics with the main statistics and reset.
 
void mi_thread_init (void)
 Initialize mimalloc on a thread. More...
void mi_thread_init (void)
 Initialize mimalloc on a thread.
 
void mi_thread_done (void)
 Uninitialize mimalloc on a thread. More...
void mi_thread_done (void)
 Uninitialize mimalloc on a thread.
 
void mi_thread_stats_print_out (mi_output_fun *out, void *arg)
 Print out heap statistics for this thread. More...
void mi_thread_stats_print_out (mi_output_fun *out, void *arg)
 Print out heap statistics for this thread.
 
void mi_register_deferred_free (mi_deferred_free_fun *deferred_free, void *arg)
 Register a deferred free function. More...
void mi_register_deferred_free (mi_deferred_free_fun *deferred_free, void *arg)
 Register a deferred free function.
 
void mi_register_output (mi_output_fun *out, void *arg)
 Register an output function. More...
void mi_register_output (mi_output_fun *out, void *arg)
 Register an output function.
 
void mi_register_error (mi_error_fun *errfun, void *arg)
 Register an error callback function. More...
void mi_register_error (mi_error_fun *errfun, void *arg)
 Register an error callback function.
 
bool mi_is_in_heap_region (const void *p)
 Is a pointer part of our heap? More...
bool mi_is_in_heap_region (const void *p)
 Is a pointer part of our heap?
 
int mi_reserve_os_memory (size_t size, bool commit, bool allow_large)
 Reserve OS memory for use by mimalloc. More...
int mi_reserve_os_memory (size_t size, bool commit, bool allow_large)
 Reserve OS memory for use by mimalloc.
 
bool mi_manage_os_memory (void *start, size_t size, bool is_committed, bool is_large, bool is_zero, int numa_node)
 Manage a particular memory area for use by mimalloc. More...
bool mi_manage_os_memory (void *start, size_t size, bool is_committed, bool is_large, bool is_zero, int numa_node)
 Manage a particular memory area for use by mimalloc.
 
int mi_reserve_huge_os_pages_interleave (size_t pages, size_t numa_nodes, size_t timeout_msecs)
 Reserve pages of huge OS pages (1GiB) evenly divided over numa_nodes nodes, but stops after at most timeout_msecs seconds. More...
int mi_reserve_huge_os_pages_interleave (size_t pages, size_t numa_nodes, size_t timeout_msecs)
 Reserve pages of huge OS pages (1GiB) evenly divided over numa_nodes nodes, but stops after at most timeout_msecs seconds.
 
int mi_reserve_huge_os_pages_at (size_t pages, int numa_node, size_t timeout_msecs)
 Reserve pages of huge OS pages (1GiB) at a specific numa_node, but stops after at most timeout_msecs seconds. More...
int mi_reserve_huge_os_pages_at (size_t pages, int numa_node, size_t timeout_msecs)
 Reserve pages of huge OS pages (1GiB) at a specific numa_node, but stops after at most timeout_msecs seconds.
 
bool mi_is_redirected ()
 Is the C runtime malloc API redirected? More...
bool mi_is_redirected ()
 Is the C runtime malloc API redirected?
 
void mi_process_info (size_t *elapsed_msecs, size_t *user_msecs, size_t *system_msecs, size_t *current_rss, size_t *peak_rss, size_t *current_commit, size_t *peak_commit, size_t *page_faults)
 Return process information (time and memory usage). More...
void mi_process_info (size_t *elapsed_msecs, size_t *user_msecs, size_t *system_msecs, size_t *current_rss, size_t *peak_rss, size_t *current_commit, size_t *peak_commit, size_t *page_faults)
 Return process information (time and memory usage).
 
void mi_debug_show_arenas (bool show_inuse, bool show_abandoned, bool show_purge)
 Show all current arena's.
 
void * mi_arena_area (mi_arena_id_t arena_id, size_t *size)
 Return the size of an arena.
 
int mi_reserve_huge_os_pages_at_ex (size_t pages, int numa_node, size_t timeout_msecs, bool exclusive, mi_arena_id_t *arena_id)
 Reserve huge OS pages (1GiB) into a single arena.
 
int mi_reserve_os_memory_ex (size_t size, bool commit, bool allow_large, bool exclusive, mi_arena_id_t *arena_id)
 Reserve OS memory to be managed in an arena.
 
bool mi_manage_os_memory_ex (void *start, size_t size, bool is_committed, bool is_large, bool is_zero, int numa_node, bool exclusive, mi_arena_id_t *arena_id)
 Manage externally allocated memory as a mimalloc arena.
 
mi_heap_tmi_heap_new_in_arena (mi_arena_id_t arena_id)
 Create a new heap that only allocates in the specified arena.
 
mi_heap_tmi_heap_new_ex (int heap_tag, bool allow_destroy, mi_arena_id_t arena_id)
 Create a new heap.
 
mi_subproc_id_t mi_subproc_main (void)
 Get the main sub-process identifier.
 
mi_subproc_id_t mi_subproc_new (void)
 Create a fresh sub-process (with no associated threads yet).
 
void mi_subproc_delete (mi_subproc_id_t subproc)
 Delete a previously created sub-process.
 
void mi_subproc_add_current_thread (mi_subproc_id_t subproc)
 Add the current thread to the given sub-process.
 

Detailed Description

Extended functionality.

Macro Definition Documentation

- -

◆ MI_SMALL_SIZE_MAX

+ +

◆ MI_SMALL_SIZE_MAX

@@ -208,19 +256,36 @@

-

Maximum size allowed for small allocations in mi_malloc_small and mi_zalloc_small (usually 128*sizeof(void*) (= 1KB on 64-bit systems))

+

Maximum size allowed for small allocations in mi_malloc_small and mi_zalloc_small (usually 128*sizeof(void*) (= 1KB on 64-bit systems))

Typedef Documentation

- -

◆ mi_deferred_free_fun

+ +

◆ mi_arena_id_t

- + + +
typedef void() mi_deferred_free_fun(bool force, unsigned long long heartbeat, void *arg)typedef int mi_arena_id_t
+
+ +

Mimalloc uses large (virtual) memory areas, called "arena"s, from the OS to manage its memory.

+

Each arena has an associated identifier.

+ +
+
+ +

◆ mi_deferred_free_fun

+ + - -

◆ mi_error_fun

+ +

◆ mi_error_fun

- -

◆ mi_output_fun

+ +

◆ mi_output_fun

- +
typedef void() mi_output_fun(const char *msg, void *arg)typedef void mi_output_fun(const char *msg, void *arg)
@@ -282,13 +347,61 @@

See also
mi_register_output()
+
See also
mi_register_output()
+ +

+
+ +

◆ mi_subproc_id_t

+ +
+
+ + + + +
typedef void* mi_subproc_id_t
+
+ +

A process can associate threads with sub-processes.

+

A sub-process will not reclaim memory from (abandoned heaps/threads) other subprocesses.

Function Documentation

- -

◆ mi_collect()

+ +

◆ mi_arena_area()

+ +
+
+ + + + + + + + + + + +
void * mi_arena_area (mi_arena_id_t arena_id,
size_t * size )
+
+ +

Return the size of an arena.

+
Parameters
+ + + +
arena_idThe arena identifier.
sizeReturned size in bytes of the (virtual) arena area.
+
+
+
Returns
base address of the arena.
+ +
+
+ +

◆ mi_collect()

@@ -296,8 +409,7 @@

void mi_collect ( - bool  - force) + bool force) @@ -306,7 +418,7 @@

Parameters
- +
forceIf true, aggressively return memory to the OS (can be expensive!)
forceIf true, aggressively return memory to the OS (can be expensive!)
@@ -314,8 +426,44 @@

-

◆ mi_good_size()

+ +

◆ mi_debug_show_arenas()

+ +
+
+ + + + + + + + + + + + + + + + +
void mi_debug_show_arenas (bool show_inuse,
bool show_abandoned,
bool show_purge )
+
+ +

Show all current arena's.

+
Parameters
+ + + + +
show_inuseShow the arena blocks that are in use.
show_abandonedShow the abandoned arena blocks.
show_purgeShow arena blocks scheduled for purging.
+
+
+ +
+
+ +

◆ mi_good_size()

- -

◆ mi_is_in_heap_region()

+ +

◆ mi_heap_new_ex()

+ +
+
+ + + + + + + + + + + + + + + + +
mi_heap_t * mi_heap_new_ex (int heap_tag,
bool allow_destroy,
mi_arena_id_t arena_id )
+
+ +

Create a new heap.

+
Parameters
+ + + + +
heap_tagThe heap tag associated with this heap; heaps only reclaim memory between heaps with the same tag.
allow_destroyIs mi_heap_destroy allowed? Not allowing this allows the heap to reclaim memory from terminated threads.
arena_idIf not 0, the heap will only allocate from the specified arena.
+
+
+
Returns
A new heap or NULL on failure.
+

The arena_id can be used by runtimes to allocate only in a specified pre-reserved arena. This is used for example for a compressed pointer heap in Koka. The heap_tag enables heaps to keep objects of a certain type isolated to heaps with that tag. This is used for example in the CPython integration.

+ +
+
+ +

◆ mi_heap_new_in_arena()

+ +
+
+ + + + + + + +
mi_heap_t * mi_heap_new_in_arena (mi_arena_id_t arena_id)
+
+ +

Create a new heap that only allocates in the specified arena.

+
Parameters
+ + +
arena_idThe arena identifier.
+
+
+
Returns
The new heap or NULL.
+ +
+
+ +

◆ mi_is_in_heap_region()

- -

◆ mi_is_redirected()

+ +

◆ mi_is_redirected()

- -

◆ mi_malloc_small()

+ +

◆ mi_malloc_small()

- -

◆ mi_manage_os_memory()

+ +

◆ mi_manage_os_memory()

- -

◆ mi_process_info()

+ +

◆ mi_manage_os_memory_ex()

- + - - + + + + + + + + + + + + + + + + - - + - - + - - + - - + + +
void mi_process_info bool mi_manage_os_memory_ex (size_t * elapsed_msecs, void * start,
size_t size,
bool is_committed,
bool is_large,
size_t * user_msecs, bool is_zero,
size_t * system_msecs, int numa_node,
size_t * current_rss, bool exclusive,
size_t * peak_rss, mi_arena_id_t * arena_id )
+
+ +

Manage externally allocated memory as a mimalloc arena.

+

This memory will not be freed by mimalloc.

Parameters
+ + + + + + + + + +
startStart address of the area.
sizeSize in bytes of the area.
is_committedIs the memory already committed?
is_largeDoes it consist of (pinned) large OS pages?
is_zeroIs the memory zero-initialized?
numa_nodeAssociated NUMA node, or -1 to have no NUMA preference.
exclusiveIs the arena exclusive (where only heaps associated with the arena can allocate in it)
arena_idThe new arena identifier.
+
+
+
Returns
true if successful.
+ +
+
+ +

◆ mi_process_info()

+ +
+
+ + + + + - - + - - + - - + + - - + + + + + + + + + + + + + + + +
void mi_process_info (size_t * elapsed_msecs,
size_t * current_commit, size_t * user_msecs,
size_t * peak_commit, size_t * system_msecs,
size_t * page_faults size_t * current_rss,
)size_t * peak_rss,
size_t * current_commit,
size_t * peak_commit,
size_t * page_faults )
- -

◆ mi_register_deferred_free()

+ +

◆ mi_register_deferred_free()

@@ -573,19 +825,12 @@

void mi_register_deferred_free ( - mi_deferred_free_fun *  - deferred_free, + mi_deferred_free_fun * deferred_free, - void *  - arg  - - - - ) - + void * arg )

@@ -593,17 +838,17 @@

Parameters
- +
deferred_freeAddress of a deferred free-ing function or NULL to unregister.
deferred_freeAddress of a deferred free-ing function or NULL to unregister.
argArgument that will be passed on to the deferred free function.
-

Some runtime systems use deferred free-ing, for example when using reference counting to limit the worst case free time. Such systems can register (re-entrant) deferred free function to free more memory on demand. When the force parameter is true all possible memory should be freed. The per-thread heartbeat parameter is monotonically increasing and guaranteed to be deterministic if the program allocates deterministically. The deferred_free function is guaranteed to be called deterministically after some number of allocations (regardless of freeing or available free memory). At most one deferred_free function can be active.

+

Some runtime systems use deferred free-ing, for example when using reference counting to limit the worst case free time. Such systems can register (re-entrant) deferred free function to free more memory on demand. When the force parameter is true all possible memory should be freed. The per-thread heartbeat parameter is monotonically increasing and guaranteed to be deterministic if the program allocates deterministically. The deferred_free function is guaranteed to be called deterministically after some number of allocations (regardless of freeing or available free memory). At most one deferred_free function can be active.

- -

◆ mi_register_error()

+ +

◆ mi_register_error()

@@ -611,19 +856,12 @@

void mi_register_error ( - mi_error_fun *  - errfun, + mi_error_fun * errfun, - void *  - arg  - - - - ) - + void * arg )

@@ -631,23 +869,23 @@

Parameters
- +
errfunThe error function that is called on an error (use NULL for default)
errfunThe error function that is called on an error (use NULL for default)
argExtra argument that will be passed on to the error function.
-

The errfun function is called on an error in mimalloc after emitting an error message (through the output function). It as always legal to just return from the errfun function in which case allocation functions generally return NULL or ignore the condition. The default function only calls abort() when compiled in secure mode with an EFAULT error. The possible error codes are:

- -

◆ mi_register_output()

+ +

◆ mi_register_output()

@@ -655,19 +893,12 @@

void mi_register_output ( - mi_output_fun *  - out, + mi_output_fun * out, - void *  - arg  - - - - ) - + void * arg )

@@ -684,8 +915,8 @@

-

◆ mi_reserve_huge_os_pages_at()

+ +

◆ mi_reserve_huge_os_pages_at()

- -

◆ mi_reserve_huge_os_pages_interleave()

+ +

◆ mi_reserve_huge_os_pages_at_ex()

- + - - + + + + + + + + + + + + + + + + - - + + +
int mi_reserve_huge_os_pages_interleave int mi_reserve_huge_os_pages_at_ex (size_t pages, size_t pages,
int numa_node,
size_t timeout_msecs,
bool exclusive,
size_t numa_nodes, mi_arena_id_t * arena_id )
+
+ +

Reserve huge OS pages (1GiB) into a single arena.

+
Parameters
+ + + + + + +
pagesNumber of 1GiB pages to reserve.
numa_nodeThe associated NUMA node, or -1 for no NUMA preference.
timeout_msecsMax amount of milli-seconds this operation is allowed to take. (0 is infinite)
exclusiveIf exclusive, only a heap associated with this arena can allocate in it.
arena_idThe arena identifier.
+
+
+
Returns
0 if successful, ENOMEM if running out of memory, or ETIMEDOUT if timed out.
+ +
+
+ +

◆ mi_reserve_huge_os_pages_interleave()

+ +
+
+ + + + + - - + + - - +
int mi_reserve_huge_os_pages_interleave (size_t pages,
size_t timeout_msecs size_t numa_nodes,
)size_t timeout_msecs )
+
+ +

◆ mi_reserve_os_memory_ex()

+ +
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + +
int mi_reserve_os_memory_ex (size_t size,
bool commit,
bool allow_large,
bool exclusive,
mi_arena_id_t * arena_id )
+
+ +

Reserve OS memory to be managed in an arena.

+
Parameters
+ + + + + + +
sizeSize the reserve.
commitShould the memory be initially committed?
allow_largeAllow the use of large OS pages?
exclusiveIs the returned arena exclusive?
arena_idThe new arena identifier.
+
+
+
Returns
Zero on success, an error code otherwise.
- -

◆ mi_stats_merge()

+ +

◆ mi_stats_merge()

@@ -830,8 +1135,7 @@

void mi_stats_merge ( - void  - ) + void ) @@ -841,8 +1145,8 @@

-

◆ mi_stats_print()

+ +

◆ mi_stats_print()

@@ -850,8 +1154,7 @@

void mi_stats_print ( - void *  - out) + void * out) @@ -868,8 +1171,8 @@

-

◆ mi_stats_print_out()

+ +

◆ mi_stats_print_out()

@@ -877,19 +1180,12 @@

void mi_stats_print_out ( - mi_output_fun *  - out, + mi_output_fun * out, - void *  - arg  - - - - ) - + void * arg )

@@ -897,8 +1193,8 @@

Parameters
- - + +
outAn output function or NULL for the default.
argOptional argument passed to out (if not NULL)
outAn output function or NULL for the default.
argOptional argument passed to out (if not NULL)
@@ -906,8 +1202,8 @@

-

◆ mi_stats_reset()

+ +

◆ mi_stats_reset()

@@ -915,8 +1211,7 @@

void mi_stats_reset ( - void  - ) + void ) @@ -926,8 +1221,92 @@

-

◆ mi_thread_done()

+ +

◆ mi_subproc_add_current_thread()

+ +
+
+ + + + + + + +
void mi_subproc_add_current_thread (mi_subproc_id_t subproc)
+
+ +

Add the current thread to the given sub-process.

+

This should be called right after a thread is created (and no allocation has taken place yet)

+ +
+
+ +

◆ mi_subproc_delete()

+ +
+
+ + + + + + + +
void mi_subproc_delete (mi_subproc_id_t subproc)
+
+ +

Delete a previously created sub-process.

+
Parameters
+ + +
subprocThe sub-process identifier. Only delete sub-processes if all associated threads have terminated.
+
+
+ +
+
+ +

◆ mi_subproc_main()

+ +
+
+ + + + + + + +
mi_subproc_id_t mi_subproc_main (void )
+
+ +

Get the main sub-process identifier.

+ +
+
+ +

◆ mi_subproc_new()

+ +
+
+ + + + + + + +
mi_subproc_id_t mi_subproc_new (void )
+
+ +

Create a fresh sub-process (with no associated threads yet).

+
Returns
The new sub-process identifier.
+ +
+
+ +

◆ mi_thread_done()

@@ -935,8 +1314,7 @@

void mi_thread_done ( - void  - ) + void ) @@ -947,8 +1325,8 @@

-

◆ mi_thread_init()

+ +

◆ mi_thread_init()

@@ -956,8 +1334,7 @@

void mi_thread_init ( - void  - ) + void ) @@ -968,8 +1345,8 @@

-

◆ mi_thread_stats_print_out()

+ +

◆ mi_thread_stats_print_out()

@@ -977,19 +1354,12 @@

void mi_thread_stats_print_out ( - mi_output_fun *  - out, + mi_output_fun * out, - void *  - arg  - - - - ) - + void * arg )

@@ -997,8 +1367,8 @@

Parameters
- - + +
outAn output function or NULL for the default.
argOptional argument passed to out (if not NULL)
outAn output function or NULL for the default.
argOptional argument passed to out (if not NULL)
@@ -1006,8 +1376,8 @@

-

◆ mi_usable_size()

+ +

◆ mi_usable_size()

- -

◆ mi_zalloc_small()

+ +

◆ mi_zalloc_small()

@@ -1071,7 +1439,7 @@

diff --git a/depends/mimalloc/docs/group__extended.js b/depends/mimalloc/docs/group__extended.js index c217aaca76b6..43d6378fb097 100644 --- a/depends/mimalloc/docs/group__extended.js +++ b/depends/mimalloc/docs/group__extended.js @@ -1,29 +1,42 @@ var group__extended = [ [ "MI_SMALL_SIZE_MAX", "group__extended.html#ga1ea64283508718d9d645c38efc2f4305", null ], - [ "mi_deferred_free_fun", "group__extended.html#ga299dae78d25ce112e384a98b7309c5be", null ], - [ "mi_error_fun", "group__extended.html#ga251d369cda3f1c2a955c555486ed90e5", null ], - [ "mi_output_fun", "group__extended.html#gad823d23444a4b77a40f66bf075a98a0c", null ], + [ "mi_arena_id_t", "group__extended.html#ga99fe38650d0b02e0e0f89ee024db91d3", null ], + [ "mi_deferred_free_fun", "group__extended.html#ga292a45f7dbc7cd23c5352ce1f0002816", null ], + [ "mi_error_fun", "group__extended.html#ga83fc6a688b322261e1c2deab000b0591", null ], + [ "mi_output_fun", "group__extended.html#gadf31cea7d0332a81c8b882cbbdbadb8d", null ], + [ "mi_subproc_id_t", "group__extended.html#ga8c0bcd1fee27c7641e9c3c0d991b3b7d", null ], + [ "mi_arena_area", "group__extended.html#ga9a25a00a22151619a0be91a10af7787f", null ], [ "mi_collect", "group__extended.html#ga421430e2226d7d468529cec457396756", null ], + [ "mi_debug_show_arenas", "group__extended.html#gad7439207f8f71fb6c382a9ea20b997e7", null ], [ "mi_good_size", "group__extended.html#gac057927cd06c854b45fe7847e921bd47", null ], + [ "mi_heap_new_ex", "group__extended.html#ga3ae360583f4351aa5267ee7e43008faf", null ], + [ "mi_heap_new_in_arena", "group__extended.html#gaaf2d9976576d5efd5544be12848af949", null ], [ "mi_is_in_heap_region", "group__extended.html#ga5f071b10d4df1c3658e04e7fd67a94e6", null ], [ "mi_is_redirected", "group__extended.html#gaad25050b19f30cd79397b227e0157a3f", null ], - [ "mi_malloc_small", "group__extended.html#ga7136c2e55cb22c98ecf95d08d6debb99", null ], + [ "mi_malloc_small", "group__extended.html#ga7f050bc6b897da82692174f5fce59cde", null ], [ 
"mi_manage_os_memory", "group__extended.html#ga4c6486a1fdcd7a423b5f25fe4be8e0cf", null ], + [ "mi_manage_os_memory_ex", "group__extended.html#ga41ce8525d77bbb60f618fa1029994f6e", null ], [ "mi_process_info", "group__extended.html#ga7d862c2affd5790381da14eb102a364d", null ], [ "mi_register_deferred_free", "group__extended.html#ga3460a6ca91af97be4058f523d3cb8ece", null ], [ "mi_register_error", "group__extended.html#gaa1d55e0e894be240827e5d87ec3a1f45", null ], [ "mi_register_output", "group__extended.html#gae5b17ff027cd2150b43a33040250cf3f", null ], [ "mi_reserve_huge_os_pages_at", "group__extended.html#ga7795a13d20087447281858d2c771cca1", null ], + [ "mi_reserve_huge_os_pages_at_ex", "group__extended.html#ga591aab1c2bc2ca920e33f0f9f9cb5c52", null ], [ "mi_reserve_huge_os_pages_interleave", "group__extended.html#ga3132f521fb756fc0e8ec0b74fb58df50", null ], [ "mi_reserve_os_memory", "group__extended.html#ga00ec3324b6b2591c7fe3677baa30a767", null ], + [ "mi_reserve_os_memory_ex", "group__extended.html#ga32f519797fd9a81acb4f52d36e6d751b", null ], [ "mi_stats_merge", "group__extended.html#ga854b1de8cb067c7316286c28b2fcd3d1", null ], [ "mi_stats_print", "group__extended.html#ga2d126e5c62d3badc35445e5d84166df2", null ], [ "mi_stats_print_out", "group__extended.html#ga537f13b299ddf801e49a5a94fde02c79", null ], [ "mi_stats_reset", "group__extended.html#ga3bb8468b8cfcc6e2a61d98aee85c5f99", null ], + [ "mi_subproc_add_current_thread", "group__extended.html#gadbc53414eb68b275588ec001ce1ddc7c", null ], + [ "mi_subproc_delete", "group__extended.html#gaa7d263e9429bac9ac8345c9d25de610e", null ], + [ "mi_subproc_main", "group__extended.html#ga2ecba0d7ebdc99e71bb985c4a1609806", null ], + [ "mi_subproc_new", "group__extended.html#ga8068cac328e41fa2170faef707315243", null ], [ "mi_thread_done", "group__extended.html#ga0ae4581e85453456a0d658b2b98bf7bf", null ], [ "mi_thread_init", "group__extended.html#gaf8e73efc2cbca9ebfdfb166983a04c17", null ], [ "mi_thread_stats_print_out", 
"group__extended.html#gab1dac8476c46cb9eecab767eb40c1525", null ], [ "mi_usable_size", "group__extended.html#ga089c859d9eddc5f9b4bd946cd53cebee", null ], - [ "mi_zalloc_small", "group__extended.html#ga220f29f40a44404b0061c15bc1c31152", null ] + [ "mi_zalloc_small", "group__extended.html#ga51c47637e81df0e2f13a2d7a2dec123e", null ] ]; \ No newline at end of file diff --git a/depends/mimalloc/docs/group__heap.html b/depends/mimalloc/docs/group__heap.html index 0f21ea42c122..c01a9e54c1e2 100644 --- a/depends/mimalloc/docs/group__heap.html +++ b/depends/mimalloc/docs/group__heap.html @@ -1,24 +1,26 @@ - + - - + + mi-malloc: Heap Allocation + - + + @@ -29,20 +31,16 @@
- + - -
-
mi-malloc -  1.7/2.0 +
+
mi-malloc 1.8/2.1
+
- -   + @@ -56,10 +54,15 @@
- + +
@@ -74,8 +77,8 @@

@@ -88,110 +91,116 @@
- +
+
+
+
+
Loading...
+
Searching...
+
No Matches
+
+
+
-
-
Heap Allocation
+
Heap Allocation

First-class heaps that can be destroyed in one go. More...

- - - + +

+

Typedefs

typedef struct mi_heap_s mi_heap_t
 Type of first-class heaps. More...
typedef struct mi_heap_s mi_heap_t
 Type of first-class heaps.
 
- - - - - - + + + + + - - + + - - - - - - - - - - - + + + + + + + + + + + - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +

+

Functions

mi_heap_tmi_heap_new ()
 Create a new heap that can be used for allocation. More...
 
void mi_heap_delete (mi_heap_t *heap)
 Delete a previously allocated heap. More...
mi_heap_tmi_heap_new ()
 Create a new heap that can be used for allocation.
 
void mi_heap_delete (mi_heap_t *heap)
 Delete a previously allocated heap.
 
void mi_heap_destroy (mi_heap_t *heap)
 Destroy a heap, freeing all its still allocated blocks. More...
void mi_heap_destroy (mi_heap_t *heap)
 Destroy a heap, freeing all its still allocated blocks.
 
mi_heap_tmi_heap_set_default (mi_heap_t *heap)
 Set the default heap to use for mi_malloc() et al. More...
 
mi_heap_tmi_heap_get_default ()
 Get the default heap that is used for mi_malloc() et al. More...
 
mi_heap_tmi_heap_get_backing ()
 Get the backing heap. More...
 
void mi_heap_collect (mi_heap_t *heap, bool force)
 Release outstanding resources in a specific heap. More...
mi_heap_tmi_heap_set_default (mi_heap_t *heap)
 Set the default heap to use in the current thread for mi_malloc() et al.
 
mi_heap_tmi_heap_get_default ()
 Get the default heap that is used for mi_malloc() et al.
 
mi_heap_tmi_heap_get_backing ()
 Get the backing heap.
 
void mi_heap_collect (mi_heap_t *heap, bool force)
 Release outstanding resources in a specific heap.
 
void * mi_heap_malloc (mi_heap_t *heap, size_t size)
 Allocate in a specific heap. More...
 
void * mi_heap_malloc_small (mi_heap_t *heap, size_t size)
 Allocate a small object in a specific heap. More...
 
void * mi_heap_zalloc (mi_heap_t *heap, size_t size)
 Allocate zero-initialized in a specific heap. More...
 
void * mi_heap_calloc (mi_heap_t *heap, size_t count, size_t size)
 Allocate count zero-initialized elements in a specific heap. More...
 
void * mi_heap_mallocn (mi_heap_t *heap, size_t count, size_t size)
 Allocate count elements in a specific heap. More...
 
char * mi_heap_strdup (mi_heap_t *heap, const char *s)
 Duplicate a string in a specific heap. More...
 
char * mi_heap_strndup (mi_heap_t *heap, const char *s, size_t n)
 Duplicate a string of at most length n in a specific heap. More...
 
char * mi_heap_realpath (mi_heap_t *heap, const char *fname, char *resolved_name)
 Resolve a file path name using a specific heap to allocate the result. More...
 
void * mi_heap_realloc (mi_heap_t *heap, void *p, size_t newsize)
 
void * mi_heap_reallocn (mi_heap_t *heap, void *p, size_t count, size_t size)
 
void * mi_heap_reallocf (mi_heap_t *heap, void *p, size_t newsize)
 
void * mi_heap_malloc_aligned (mi_heap_t *heap, size_t size, size_t alignment)
 
void * mi_heap_malloc_aligned_at (mi_heap_t *heap, size_t size, size_t alignment, size_t offset)
 
void * mi_heap_zalloc_aligned (mi_heap_t *heap, size_t size, size_t alignment)
 
void * mi_heap_zalloc_aligned_at (mi_heap_t *heap, size_t size, size_t alignment, size_t offset)
 
void * mi_heap_calloc_aligned (mi_heap_t *heap, size_t count, size_t size, size_t alignment)
 
void * mi_heap_calloc_aligned_at (mi_heap_t *heap, size_t count, size_t size, size_t alignment, size_t offset)
 
void * mi_heap_realloc_aligned (mi_heap_t *heap, void *p, size_t newsize, size_t alignment)
 
void * mi_heap_realloc_aligned_at (mi_heap_t *heap, void *p, size_t newsize, size_t alignment, size_t offset)
 
void * mi_heap_malloc (mi_heap_t *heap, size_t size)
 Allocate in a specific heap.
 
void * mi_heap_malloc_small (mi_heap_t *heap, size_t size)
 Allocate a small object in a specific heap.
 
void * mi_heap_zalloc (mi_heap_t *heap, size_t size)
 Allocate zero-initialized in a specific heap.
 
void * mi_heap_calloc (mi_heap_t *heap, size_t count, size_t size)
 Allocate count zero-initialized elements in a specific heap.
 
void * mi_heap_mallocn (mi_heap_t *heap, size_t count, size_t size)
 Allocate count elements in a specific heap.
 
char * mi_heap_strdup (mi_heap_t *heap, const char *s)
 Duplicate a string in a specific heap.
 
char * mi_heap_strndup (mi_heap_t *heap, const char *s, size_t n)
 Duplicate a string of at most length n in a specific heap.
 
char * mi_heap_realpath (mi_heap_t *heap, const char *fname, char *resolved_name)
 Resolve a file path name using a specific heap to allocate the result.
 
void * mi_heap_realloc (mi_heap_t *heap, void *p, size_t newsize)
 
void * mi_heap_reallocn (mi_heap_t *heap, void *p, size_t count, size_t size)
 
void * mi_heap_reallocf (mi_heap_t *heap, void *p, size_t newsize)
 
void * mi_heap_malloc_aligned (mi_heap_t *heap, size_t size, size_t alignment)
 
void * mi_heap_malloc_aligned_at (mi_heap_t *heap, size_t size, size_t alignment, size_t offset)
 
void * mi_heap_zalloc_aligned (mi_heap_t *heap, size_t size, size_t alignment)
 
void * mi_heap_zalloc_aligned_at (mi_heap_t *heap, size_t size, size_t alignment, size_t offset)
 
void * mi_heap_calloc_aligned (mi_heap_t *heap, size_t count, size_t size, size_t alignment)
 
void * mi_heap_calloc_aligned_at (mi_heap_t *heap, size_t count, size_t size, size_t alignment, size_t offset)
 
void * mi_heap_realloc_aligned (mi_heap_t *heap, void *p, size_t newsize, size_t alignment)
 
void * mi_heap_realloc_aligned_at (mi_heap_t *heap, void *p, size_t newsize, size_t alignment, size_t offset)
 

Detailed Description

First-class heaps that can be destroyed in one go.

Typedef Documentation

- -

◆ mi_heap_t

+ +

◆ mi_heap_t

- +
typedef struct mi_heap_s mi_heap_ttypedef struct mi_heap_s mi_heap_t
@@ -202,131 +211,104 @@

Function Documentation

- -

◆ mi_heap_calloc()

+ +

◆ mi_heap_calloc()

- + - - + - - + - - - - - - - +
void* mi_heap_calloc void * mi_heap_calloc (mi_heap_theap, mi_heap_t * heap,
size_t count, size_t count,
size_t size 
)size_t size )
-

Allocate count zero-initialized elements in a specific heap.

-
See also
mi_calloc()
+

Allocate count zero-initialized elements in a specific heap.

+
See also
mi_calloc()
- -

◆ mi_heap_calloc_aligned()

+ +

◆ mi_heap_calloc_aligned()

- + - - + - - + - - + - - - - - - - +
void* mi_heap_calloc_aligned void * mi_heap_calloc_aligned (mi_heap_theap, mi_heap_t * heap,
size_t count, size_t count,
size_t size, size_t size,
size_t alignment 
)size_t alignment )
- -

◆ mi_heap_calloc_aligned_at()

+ +

◆ mi_heap_calloc_aligned_at()

- + - - + - - + - - + - - + - - - - - - - +
void* mi_heap_calloc_aligned_at void * mi_heap_calloc_aligned_at (mi_heap_theap, mi_heap_t * heap,
size_t count, size_t count,
size_t size, size_t size,
size_t alignment, size_t alignment,
size_t offset 
)size_t offset )
- -

◆ mi_heap_collect()

+ +

◆ mi_heap_collect()

@@ -334,19 +316,12 @@

void mi_heap_collect ( - mi_heap_t *  - heap, + mi_heap_t * heap, - bool  - force  - - - - ) - + bool force )

@@ -355,8 +330,8 @@

-

◆ mi_heap_delete()

+ +

◆ mi_heap_delete()

@@ -364,21 +339,20 @@

void mi_heap_delete ( - mi_heap_t *  - heap) + mi_heap_t * heap)

Delete a previously allocated heap.

-

This will release resources and migrate any still allocated blocks in this heap (efficienty) to the default heap.

-

If heap is the default heap, the default heap is set to the backing heap.

+

This will release resources and migrate any still allocated blocks in this heap (efficiently) to the default heap.

+

If heap is the default heap, the default heap is set to the backing heap.

- -

◆ mi_heap_destroy()

+ +

◆ mi_heap_destroy()

- -

◆ mi_heap_get_backing()

+ +

◆ mi_heap_get_backing()

- + - +
mi_heap_t* mi_heap_get_backing mi_heap_t * mi_heap_get_backing ())
@@ -419,209 +392,170 @@

-

◆ mi_heap_get_default()

+ +

◆ mi_heap_get_default()

- + - +
mi_heap_t* mi_heap_get_default mi_heap_t * mi_heap_get_default ())
-

Get the default heap that is used for mi_malloc() et al.

-
Returns
The current default heap.
+

Get the default heap that is used for mi_malloc() et al.

+

(for the current thread).

Returns
The current default heap.
- -

◆ mi_heap_malloc()

+ +

◆ mi_heap_malloc()

- + - - + - - - - - - - +
void* mi_heap_malloc void * mi_heap_malloc (mi_heap_theap, mi_heap_t * heap,
size_t size 
)size_t size )

Allocate in a specific heap.

-
See also
mi_malloc()
+
See also
mi_malloc()
- -

◆ mi_heap_malloc_aligned()

+ +

◆ mi_heap_malloc_aligned()

- + - - + - - + - - - - - - - +
void* mi_heap_malloc_aligned void * mi_heap_malloc_aligned (mi_heap_theap, mi_heap_t * heap,
size_t size, size_t size,
size_t alignment 
)size_t alignment )
- -

◆ mi_heap_malloc_aligned_at()

+ +

◆ mi_heap_malloc_aligned_at()

- + - - + - - + - - + - - - - - - - +
void* mi_heap_malloc_aligned_at void * mi_heap_malloc_aligned_at (mi_heap_theap, mi_heap_t * heap,
size_t size, size_t size,
size_t alignment, size_t alignment,
size_t offset 
)size_t offset )
- -

◆ mi_heap_malloc_small()

+ +

◆ mi_heap_malloc_small()

- + - - + - - - - - - - +
void* mi_heap_malloc_small void * mi_heap_malloc_small (mi_heap_theap, mi_heap_t * heap,
size_t size 
)size_t size )

Allocate a small object in a specific heap.

-

size must be smaller or equal to MI_SMALL_SIZE_MAX().

See also
mi_malloc()
+

size must be smaller or equal to MI_SMALL_SIZE_MAX().

See also
mi_malloc()
- -

◆ mi_heap_mallocn()

+ +

◆ mi_heap_mallocn()

- + - - + - - + - - - - - - - +
void* mi_heap_mallocn void * mi_heap_mallocn (mi_heap_theap, mi_heap_t * heap,
size_t count, size_t count,
size_t size 
)size_t size )
-

Allocate count elements in a specific heap.

-
See also
mi_mallocn()
+

Allocate count elements in a specific heap.

+
See also
mi_mallocn()
- -

◆ mi_heap_new()

+ +

◆ mi_heap_new()

- + - +
mi_heap_t* mi_heap_new mi_heap_t * mi_heap_new ())
@@ -631,254 +565,201 @@

-

◆ mi_heap_realloc()

+ +

◆ mi_heap_realloc()

- + - - + - - + - - - - - - - +
void* mi_heap_realloc void * mi_heap_realloc (mi_heap_theap, mi_heap_t * heap,
void * p, void * p,
size_t newsize 
)size_t newsize )
- -

◆ mi_heap_realloc_aligned()

+ +

◆ mi_heap_realloc_aligned()

- + - - + - - + - - + - - - - - - - +
void* mi_heap_realloc_aligned void * mi_heap_realloc_aligned (mi_heap_theap, mi_heap_t * heap,
void * p, void * p,
size_t newsize, size_t newsize,
size_t alignment 
)size_t alignment )
- -

◆ mi_heap_realloc_aligned_at()

+ +

◆ mi_heap_realloc_aligned_at()

- + - - + - - + - - + - - + - - - - - - - +
void* mi_heap_realloc_aligned_at void * mi_heap_realloc_aligned_at (mi_heap_theap, mi_heap_t * heap,
void * p, void * p,
size_t newsize, size_t newsize,
size_t alignment, size_t alignment,
size_t offset 
)size_t offset )
- -

◆ mi_heap_reallocf()

+ +

◆ mi_heap_reallocf()

- + - - + - - + - - - - - - - +
void* mi_heap_reallocf void * mi_heap_reallocf (mi_heap_theap, mi_heap_t * heap,
void * p, void * p,
size_t newsize 
)size_t newsize )
- -

◆ mi_heap_reallocn()

+ +

◆ mi_heap_reallocn()

- + - - + - - + - - + - - - - - - - +
void* mi_heap_reallocn void * mi_heap_reallocn (mi_heap_theap, mi_heap_t * heap,
void * p, void * p,
size_t count, size_t count,
size_t size 
)size_t size )
- -

◆ mi_heap_realpath()

+ +

◆ mi_heap_realpath()

- + - - + - - + - - - - - - - +
char* mi_heap_realpath char * mi_heap_realpath (mi_heap_theap, mi_heap_t * heap,
const char * fname, const char * fname,
char * resolved_name 
)char * resolved_name )
-

Resolve a file path name using a specific heap to allocate the result.

-
See also
mi_realpath()
+

Resolve a file path name using a specific heap to allocate the result.

+
See also
mi_realpath()
- -

◆ mi_heap_set_default()

+ +

◆ mi_heap_set_default()

- + - - +
mi_heap_t* mi_heap_set_default mi_heap_t * mi_heap_set_default (mi_heap_theap)mi_heap_t * heap)
-

Set the default heap to use for mi_malloc() et al.

+

Set the default heap to use in the current thread for mi_malloc() et al.

Parameters
@@ -889,173 +770,134 @@

-

◆ mi_heap_strdup()

+ +

◆ mi_heap_strdup()

heapThe new default heap.
- + - - + - - - - - - - +
char* mi_heap_strdup char * mi_heap_strdup (mi_heap_theap, mi_heap_t * heap,
const char * s 
)const char * s )

Duplicate a string in a specific heap.

-
See also
mi_strdup()
+
See also
mi_strdup()
- -

◆ mi_heap_strndup()

+ +

◆ mi_heap_strndup()

- + - - + - - + - - - - - - - +
char* mi_heap_strndup char * mi_heap_strndup (mi_heap_theap, mi_heap_t * heap,
const char * s, const char * s,
size_t n 
)size_t n )
-

Duplicate a string of at most length n in a specific heap.

-
See also
mi_strndup()
+

Duplicate a string of at most length n in a specific heap.

+
See also
mi_strndup()
- -

◆ mi_heap_zalloc()

+ +

◆ mi_heap_zalloc()

- + - - + - - - - - - - +
void* mi_heap_zalloc void * mi_heap_zalloc (mi_heap_theap, mi_heap_t * heap,
size_t size 
)size_t size )

Allocate zero-initialized in a specific heap.

-
See also
mi_zalloc()
+
See also
mi_zalloc()
- -

◆ mi_heap_zalloc_aligned()

+ +

◆ mi_heap_zalloc_aligned()

- + - - + - - + - - - - - - - +
void* mi_heap_zalloc_aligned void * mi_heap_zalloc_aligned (mi_heap_theap, mi_heap_t * heap,
size_t size, size_t size,
size_t alignment 
)size_t alignment )
- -

◆ mi_heap_zalloc_aligned_at()

+ +

◆ mi_heap_zalloc_aligned_at()

- + - - + - - + - - + - - - - - - - +
void* mi_heap_zalloc_aligned_at void * mi_heap_zalloc_aligned_at (mi_heap_theap, mi_heap_t * heap,
size_t size, size_t size,
size_t alignment, size_t alignment,
size_t offset 
)size_t offset )
@@ -1067,7 +909,7 @@

diff --git a/depends/mimalloc/docs/group__heap.js b/depends/mimalloc/docs/group__heap.js index 13d13778865b..8b6118d56b34 100644 --- a/depends/mimalloc/docs/group__heap.js +++ b/depends/mimalloc/docs/group__heap.js @@ -1,30 +1,30 @@ var group__heap = [ [ "mi_heap_t", "group__heap.html#ga34a47cde5a5b38c29f1aa3c5e76943c2", null ], - [ "mi_heap_calloc", "group__heap.html#gaa6702b3c48e9e53e50e81b36f5011d55", null ], - [ "mi_heap_calloc_aligned", "group__heap.html#ga4af03a6e2b93fae77424d93f889705c3", null ], - [ "mi_heap_calloc_aligned_at", "group__heap.html#ga08ca6419a5c057a4d965868998eef487", null ], + [ "mi_heap_calloc", "group__heap.html#gac0098aaf231d3e9586c73136d5df95da", null ], + [ "mi_heap_calloc_aligned", "group__heap.html#gacafcc26df827c7a7de5e850217566108", null ], + [ "mi_heap_calloc_aligned_at", "group__heap.html#gaa42ec2079989c4374f2c331d9b35f4e4", null ], [ "mi_heap_collect", "group__heap.html#ga7922f7495cde30b1984d0e6072419298", null ], [ "mi_heap_delete", "group__heap.html#ga2ab1af8d438819b55319c7ef51d1e409", null ], [ "mi_heap_destroy", "group__heap.html#ga9f9c0844edb9717f4feacd79116b8e0d", null ], - [ "mi_heap_get_backing", "group__heap.html#ga5d03fbe062ffcf38f0f417fd968357fc", null ], - [ "mi_heap_get_default", "group__heap.html#ga8db4cbb87314a989a9a187464d6b5e05", null ], - [ "mi_heap_malloc", "group__heap.html#ga9cbed01e42c0647907295de92c3fa296", null ], - [ "mi_heap_malloc_aligned", "group__heap.html#gab5b87e1805306f70df38789fcfcf6653", null ], - [ "mi_heap_malloc_aligned_at", "group__heap.html#ga23acd7680fb0976dde3783254c6c874b", null ], - [ "mi_heap_malloc_small", "group__heap.html#gaa1a1c7a1f4da6826b5a25b70ef878368", null ], - [ "mi_heap_mallocn", "group__heap.html#ga851da6c43fe0b71c1376cee8aef90db0", null ], - [ "mi_heap_new", "group__heap.html#ga766f672ba56f2fbfeb9d9dbb0b7f6b11", null ], - [ "mi_heap_realloc", "group__heap.html#gaaef3395f66be48f37bdc8322509c5d81", null ], - [ "mi_heap_realloc_aligned", 
"group__heap.html#gafc603b696bd14cae6da28658f950d98c", null ], - [ "mi_heap_realloc_aligned_at", "group__heap.html#gaf96c788a1bf553fe2d371de9365e047c", null ], - [ "mi_heap_reallocf", "group__heap.html#ga4a21070eb4e7cce018133c8d5f4b0527", null ], - [ "mi_heap_reallocn", "group__heap.html#gac74e94ad9b0c9b57c1c4d88b8825b7a8", null ], - [ "mi_heap_realpath", "group__heap.html#ga00e95ba1e01acac3cfd95bb7a357a6f0", null ], - [ "mi_heap_set_default", "group__heap.html#gab8631ec88c8d26641b68b5d25dcd4422", null ], - [ "mi_heap_strdup", "group__heap.html#ga139d6b09dbf50c3c2523d0f4d1cfdeb5", null ], - [ "mi_heap_strndup", "group__heap.html#ga8e3dbd46650dd26573cf307a2c8f1f5a", null ], - [ "mi_heap_zalloc", "group__heap.html#ga903104592c8ed53417a3762da6241133", null ], - [ "mi_heap_zalloc_aligned", "group__heap.html#gaa450a59c6c7ae5fdbd1c2b80a8329ef0", null ], - [ "mi_heap_zalloc_aligned_at", "group__heap.html#ga45fb43a62776fbebbdf1edd99b527954", null ] + [ "mi_heap_get_backing", "group__heap.html#gac6ac9f0e7be9ab4ff70acfc8dad1235a", null ], + [ "mi_heap_get_default", "group__heap.html#ga14c667a6e2c5d28762d8cb7d4e057909", null ], + [ "mi_heap_malloc", "group__heap.html#gab374e206c7034e0d899fb934e4f4a863", null ], + [ "mi_heap_malloc_aligned", "group__heap.html#ga33f4f05b7fea7af2113c62a4bf882cc5", null ], + [ "mi_heap_malloc_aligned_at", "group__heap.html#gae7ffc045c3996497a7f3a5f6fe7b8aaa", null ], + [ "mi_heap_malloc_small", "group__heap.html#ga012c5c8abe22b10043de39ff95909541", null ], + [ "mi_heap_mallocn", "group__heap.html#gab0f755c0b21c387fe8e9024200faa372", null ], + [ "mi_heap_new", "group__heap.html#gaa718bb226ec0546ba6d1b6cb32179f3a", null ], + [ "mi_heap_realloc", "group__heap.html#gac5252d6a2e510bd349e4fcb452e6a93a", null ], + [ "mi_heap_realloc_aligned", "group__heap.html#gaccf8c249872f30bf1c2493a09197d734", null ], + [ "mi_heap_realloc_aligned_at", "group__heap.html#ga6df988a7219d5707f010d5f3eb0dc3f5", null ], + [ "mi_heap_reallocf", 
"group__heap.html#gae7cd171425bee04c683c65a3701f0b4a", null ], + [ "mi_heap_reallocn", "group__heap.html#gaccf7bfe10ce510a000d3547d9cf7fa29", null ], + [ "mi_heap_realpath", "group__heap.html#ga55545a3ec6da29c5b4f62e540ecac1e2", null ], + [ "mi_heap_set_default", "group__heap.html#ga349b677dec7da5eacdbc7a385bd62a4a", null ], + [ "mi_heap_strdup", "group__heap.html#ga5754e09ccc51dd6bc73885bb6ea21b7a", null ], + [ "mi_heap_strndup", "group__heap.html#gad224df78f1fbee942df8adf023e12cf3", null ], + [ "mi_heap_zalloc", "group__heap.html#gabebc796399619d964d8db77aa835e8c1", null ], + [ "mi_heap_zalloc_aligned", "group__heap.html#ga6466bde8b5712aa34e081a8317f9f471", null ], + [ "mi_heap_zalloc_aligned_at", "group__heap.html#ga484e3d01cd174f78c7e53370e5a7c819", null ] ]; \ No newline at end of file diff --git a/depends/mimalloc/docs/group__malloc.html b/depends/mimalloc/docs/group__malloc.html index 2dc16656728f..3694d2b2a824 100644 --- a/depends/mimalloc/docs/group__malloc.html +++ b/depends/mimalloc/docs/group__malloc.html @@ -1,24 +1,26 @@ - + - - + + mi-malloc: Basic Allocation + - + + @@ -29,20 +31,16 @@
- + - -
-
mi-malloc -  1.7/2.0 +
+
mi-malloc 1.8/2.1
+
- -   + @@ -56,10 +54,15 @@
- + +
@@ -74,8 +77,8 @@

@@ -88,94 +91,93 @@
- +
+
+
+
+
Loading...
+
Searching...
+
No Matches
+
+
+
-
-
Basic Allocation
+
Basic Allocation

The basic allocation interface. More...

- - - + + - - - - - - - - - - - - - - + + + + + + + + + + + + + + - - - - - - - - - - - - - - - - - - - - - + + + + + + + + + + + + + + + + + + + + +

+

Functions

void mi_free (void *p)
 Free previously allocated memory. More...
void mi_free (void *p)
 Free previously allocated memory.
 
void * mi_malloc (size_t size)
 Allocate size bytes. More...
 
void * mi_zalloc (size_t size)
 Allocate zero-initialized size bytes. More...
 
void * mi_calloc (size_t count, size_t size)
 Allocate zero-initialized count elements of size bytes. More...
 
void * mi_realloc (void *p, size_t newsize)
 Re-allocate memory to newsize bytes. More...
 
void * mi_recalloc (void *p, size_t count, size_t size)
 Re-allocate memory to count elements of size bytes, with extra memory initialized to zero. More...
void * mi_malloc (size_t size)
 Allocate size bytes.
 
void * mi_zalloc (size_t size)
 Allocate zero-initialized size bytes.
 
void * mi_calloc (size_t count, size_t size)
 Allocate zero-initialized count elements of size bytes.
 
void * mi_realloc (void *p, size_t newsize)
 Re-allocate memory to newsize bytes.
 
void * mi_recalloc (void *p, size_t count, size_t size)
 Re-allocate memory to count elements of size bytes, with extra memory initialized to zero.
 
void * mi_expand (void *p, size_t newsize)
 Try to re-allocate memory to newsize bytes in place. More...
 
void * mi_mallocn (size_t count, size_t size)
 Allocate count elements of size bytes. More...
 
void * mi_reallocn (void *p, size_t count, size_t size)
 Re-allocate memory to count elements of size bytes. More...
 
void * mi_reallocf (void *p, size_t newsize)
 Re-allocate memory to newsize bytes,. More...
 
char * mi_strdup (const char *s)
 Allocate and duplicate a string. More...
 
char * mi_strndup (const char *s, size_t n)
 Allocate and duplicate a string up to n bytes. More...
 
char * mi_realpath (const char *fname, char *resolved_name)
 Resolve a file path name. More...
 
void * mi_expand (void *p, size_t newsize)
 Try to re-allocate memory to newsize bytes in place.
 
void * mi_mallocn (size_t count, size_t size)
 Allocate count elements of size bytes.
 
void * mi_reallocn (void *p, size_t count, size_t size)
 Re-allocate memory to count elements of size bytes.
 
void * mi_reallocf (void *p, size_t newsize)
 Re-allocate memory to newsize bytes,.
 
char * mi_strdup (const char *s)
 Allocate and duplicate a string.
 
char * mi_strndup (const char *s, size_t n)
 Allocate and duplicate a string up to n bytes.
 
char * mi_realpath (const char *fname, char *resolved_name)
 Resolve a file path name.
 

Detailed Description

The basic allocation interface.

Function Documentation

- -

◆ mi_calloc()

+ +

◆ mi_calloc()

- + - - + - - - - - - - +
void* mi_calloc void * mi_calloc (size_t count, size_t count,
size_t size 
)size_t size )
-

Allocate zero-initialized count elements of size bytes.

+

Allocate zero-initialized count elements of size bytes.

Parameters
@@ -183,51 +185,44 @@

Returns
pointer to the allocated memory of size*count bytes, or NULL if either out of memory or when count*size overflows.
-

Returns a unique pointer if called with either size or count of 0.

See also
mi_zalloc()
+
Returns
pointer to the allocated memory of size*count bytes, or NULL if either out of memory or when count*size overflows.
+

Returns a unique pointer if called with either size or count of 0.

See also
mi_zalloc()
- -

◆ mi_expand()

+ +

◆ mi_expand()

countnumber of elements.
- + - - + - - - - - - - +
void* mi_expand void * mi_expand (void * p, void * p,
size_t newsize 
)size_t newsize )
-

Try to re-allocate memory to newsize bytes in place.

+

Try to re-allocate memory to newsize bytes in place.

Parameters
- +
ppointer to previously allocated memory (or NULL).
ppointer to previously allocated memory (or NULL).
newsizethe new required size in bytes.
-
Returns
pointer to the re-allocated memory of newsize bytes (always equal to p), or NULL if either out of memory or if the memory could not be expanded in place. If NULL is returned, the pointer p is not freed. Otherwise the original pointer is returned as the reallocated result since it fits in-place with the new size. If newsize is larger than the original size allocated for p, the bytes after size are uninitialized.
+
Returns
pointer to the re-allocated memory of newsize bytes (always equal to p), or NULL if either out of memory or if the memory could not be expanded in place. If NULL is returned, the pointer p is not freed. Otherwise the original pointer is returned as the reallocated result since it fits in-place with the new size. If newsize is larger than the original size allocated for p, the bytes after size are uninitialized.
- -

◆ mi_free()

+ +

◆ mi_free()

- -

◆ mi_malloc()

+ +

◆ mi_malloc()

- + - - +
void* mi_malloc void * mi_malloc (size_t size)size_t size)
-

Allocate size bytes.

+

Allocate size bytes.

Parameters
sizenumber of bytes to allocate.
-
Returns
pointer to the allocated memory or NULL if out of memory. Returns a unique pointer if called with size 0.
+
Returns
pointer to the allocated memory or NULL if out of memory. Returns a unique pointer if called with size 0.
- -

◆ mi_mallocn()

+ +

◆ mi_mallocn()

- + - - + - - - - - - - +
void* mi_mallocn void * mi_mallocn (size_t count, size_t count,
size_t size 
)size_t size )
-

Allocate count elements of size bytes.

+

Allocate count elements of size bytes.

Parameters
@@ -313,159 +299,130 @@

Returns
A pointer to a block of count * size bytes, or NULL if out of memory or if count * size overflows.
-

If there is no overflow, it behaves exactly like mi_malloc(p,count*size).

See also
mi_calloc()
+
Returns
A pointer to a block of count * size bytes, or NULL if out of memory or if count * size overflows.
+

If there is no overflow, it behaves exactly like mi_malloc(count*size).

See also
mi_calloc()
mi_zallocn()
- -

◆ mi_realloc()

+ +

◆ mi_realloc()

countThe number of elements.
- + - - + - - - - - - - +
void* mi_realloc void * mi_realloc (void * p, void * p,
size_t newsize 
)size_t newsize )
-

Re-allocate memory to newsize bytes.

+

Re-allocate memory to newsize bytes.

Parameters
- +
ppointer to previously allocated memory (or NULL).
ppointer to previously allocated memory (or NULL).
newsizethe new required size in bytes.
-
Returns
pointer to the re-allocated memory of newsize bytes, or NULL if out of memory. If NULL is returned, the pointer p is not freed. Otherwise the original pointer is either freed or returned as the reallocated result (in case it fits in-place with the new size). If the pointer p is NULL, it behaves as mi_malloc(newsize). If newsize is larger than the original size allocated for p, the bytes after size are uninitialized.
+
Returns
pointer to the re-allocated memory of newsize bytes, or NULL if out of memory. If NULL is returned, the pointer p is not freed. Otherwise the original pointer is either freed or returned as the reallocated result (in case it fits in-place with the new size). If the pointer p is NULL, it behaves as mi_malloc(newsize). If newsize is larger than the original size allocated for p, the bytes after size are uninitialized.
- -

◆ mi_reallocf()

+ +

◆ mi_reallocf()

- + - - + - - - - - - - +
void* mi_reallocf void * mi_reallocf (void * p, void * p,
size_t newsize 
)size_t newsize )
-

Re-allocate memory to newsize bytes,.

+

Re-allocate memory to newsize bytes,.

Parameters
- +
ppointer to previously allocated memory (or NULL).
ppointer to previously allocated memory (or NULL).
newsizethe new required size in bytes.
-
Returns
pointer to the re-allocated memory of newsize bytes, or NULL if out of memory.
-

In contrast to mi_realloc(), if NULL is returned, the original pointer p is freed (if it was not NULL itself). Otherwise the original pointer is either freed or returned as the reallocated result (in case it fits in-place with the new size). If the pointer p is NULL, it behaves as mi_malloc(newsize). If newsize is larger than the original size allocated for p, the bytes after size are uninitialized.

+
Returns
pointer to the re-allocated memory of newsize bytes, or NULL if out of memory.
+

In contrast to mi_realloc(), if NULL is returned, the original pointer p is freed (if it was not NULL itself). Otherwise the original pointer is either freed or returned as the reallocated result (in case it fits in-place with the new size). If the pointer p is NULL, it behaves as mi_malloc(newsize). If newsize is larger than the original size allocated for p, the bytes after size are uninitialized.

See also
reallocf (on BSD)
- -

◆ mi_reallocn()

+ +

◆ mi_reallocn()

- + - - + - - + - - - - - - - +
void* mi_reallocn void * mi_reallocn (void * p, void * p,
size_t count, size_t count,
size_t size 
)size_t size )
-

Re-allocate memory to count elements of size bytes.

+

Re-allocate memory to count elements of size bytes.

Parameters
- +
pPointer to a previously allocated block (or NULL).
pPointer to a previously allocated block (or NULL).
countThe number of elements.
sizeThe size of each element.
-
Returns
A pointer to a re-allocated block of count * size bytes, or NULL if out of memory or if count * size overflows.
+
Returns
A pointer to a re-allocated block of count * size bytes, or NULL if out of memory or if count * size overflows.

If there is no overflow, it behaves exactly like mi_realloc(p,count*size).

See also
reallocarray() (on BSD)
- -

◆ mi_realpath()

+ +

◆ mi_realpath()

- + - - + - - - - - - - +
char* mi_realpath char * mi_realpath (const char * fname, const char * fname,
char * resolved_name 
)char * resolved_name )
@@ -474,18 +431,18 @@

Parameters
- +
fnameFile name.
resolved_nameShould be NULL (but can also point to a buffer of at least PATH_MAX bytes).
resolved_nameShould be NULL (but can also point to a buffer of at least PATH_MAX bytes).
-
Returns
If successful a pointer to the resolved absolute file name, or NULL on failure (with errno set to the error code).
-

If resolved_name was NULL, the returned result should be freed with mi_free().

-

Replacement for the standard realpath() such that mi_free() can be used on the returned result (if resolved_name was NULL).

+
Returns
If successful a pointer to the resolved absolute file name, or NULL on failure (with errno set to the error code).
+

If resolved_name was NULL, the returned result should be freed with mi_free().

+

Replacement for the standard realpath() such that mi_free() can be used on the returned result (if resolved_name was NULL).

- -

◆ mi_recalloc()

+ +

◆ mi_recalloc()

- -

◆ mi_strdup()

+ +

◆ mi_strdup()

- + - - +
char* mi_strdup char * mi_strdup (const char * s)const char * s)
@@ -551,65 +499,57 @@

Parameters
- +
sstring to duplicate (or NULL).
sstring to duplicate (or NULL).
-
Returns
a pointer to newly allocated memory initialized to string s, or NULL if either out of memory or if s is NULL.
-

Replacement for the standard strdup() such that mi_free() can be used on the returned result.

+
Returns
a pointer to newly allocated memory initialized to string s, or NULL if either out of memory or if s is NULL.
+

Replacement for the standard strdup() such that mi_free() can be used on the returned result.

- -

◆ mi_strndup()

+ +

◆ mi_strndup()

- + - - + - - - - - - - +
char* mi_strndup char * mi_strndup (const char * s, const char * s,
size_t n 
)size_t n )
-

Allocate and duplicate a string up to n bytes.

+

Allocate and duplicate a string up to n bytes.

Parameters
- +
sstring to duplicate (or NULL).
sstring to duplicate (or NULL).
nmaximum number of bytes to copy (excluding the terminating zero).
-
Returns
a pointer to newly allocated memory initialized to string s up to the first n bytes (and always zero terminated), or NULL if either out of memory or if s is NULL.
-

Replacement for the standard strndup() such that mi_free() can be used on the returned result.

+
Returns
a pointer to newly allocated memory initialized to string s up to the first n bytes (and always zero terminated), or NULL if either out of memory or if s is NULL.
+

Replacement for the standard strndup() such that mi_free() can be used on the returned result.

- -

◆ mi_zalloc()

+ +

◆ mi_zalloc()

@@ -631,7 +571,7 @@

diff --git a/depends/mimalloc/docs/group__malloc.js b/depends/mimalloc/docs/group__malloc.js index 7293ffafd19a..5b0765205350 100644 --- a/depends/mimalloc/docs/group__malloc.js +++ b/depends/mimalloc/docs/group__malloc.js @@ -1,16 +1,16 @@ var group__malloc = [ - [ "mi_calloc", "group__malloc.html#ga97fedb4f7107c592fd7f0f0a8949a57d", null ], - [ "mi_expand", "group__malloc.html#gaaee66a1d483c3e28f585525fb96707e4", null ], + [ "mi_calloc", "group__malloc.html#ga6686568014b54d1e6c7ac64a076e4f56", null ], + [ "mi_expand", "group__malloc.html#ga19299856216cfbb08e2628593654dfb0", null ], [ "mi_free", "group__malloc.html#gaf2c7b89c327d1f60f59e68b9ea644d95", null ], - [ "mi_malloc", "group__malloc.html#ga3406e8b168bc74c8637b11571a6da83a", null ], - [ "mi_mallocn", "group__malloc.html#ga0b05e2bf0f73e7401ae08597ff782ac6", null ], - [ "mi_realloc", "group__malloc.html#gaf11eb497da57bdfb2de65eb191c69db6", null ], - [ "mi_reallocf", "group__malloc.html#gafe68ac7c5e24a65cd55c9d6b152211a0", null ], - [ "mi_reallocn", "group__malloc.html#ga61d57b4144ba24fba5c1e9b956d13853", null ], - [ "mi_realpath", "group__malloc.html#ga08cec32dd5bbe7da91c78d19f1b5bebe", null ], + [ "mi_malloc", "group__malloc.html#gae1dd97b542420c87ae085e822b1229e8", null ], + [ "mi_mallocn", "group__malloc.html#ga61f46bade3db76ca24aaafedc40de7b6", null ], + [ "mi_realloc", "group__malloc.html#ga0621af6a5e3aa384e6a1b548958bf583", null ], + [ "mi_reallocf", "group__malloc.html#ga4dc3a4067037b151a64629fe8a332641", null ], + [ "mi_reallocn", "group__malloc.html#ga8bddfb4a1270a0854bbcf44cb3980467", null ], + [ "mi_realpath", "group__malloc.html#ga94c3afcc086e85d75a57e9f76b9b71dd", null ], [ "mi_recalloc", "group__malloc.html#ga23a0fbb452b5dce8e31fab1a1958cacc", null ], - [ "mi_strdup", "group__malloc.html#gac7cffe13f1f458ed16789488bf92b9b2", null ], - [ "mi_strndup", "group__malloc.html#gaaabf971c2571891433477e2d21a35266", null ], - [ "mi_zalloc", "group__malloc.html#gafdd9d8bb2986e668ba9884f28af38000", null ] + 
[ "mi_strdup", "group__malloc.html#ga245ac90ebc2cfdd17de599e5fea59889", null ], + [ "mi_strndup", "group__malloc.html#ga486d0d26b3b3794f6d1cdb41a9aed92d", null ], + [ "mi_zalloc", "group__malloc.html#gae6e38c4403247a7b40d80419e093bfb8", null ] ]; \ No newline at end of file diff --git a/depends/mimalloc/docs/group__options.html b/depends/mimalloc/docs/group__options.html index f92905d42f51..1092afcb7002 100644 --- a/depends/mimalloc/docs/group__options.html +++ b/depends/mimalloc/docs/group__options.html @@ -1,24 +1,26 @@ - + - - + + mi-malloc: Runtime Options + - + + @@ -29,20 +31,16 @@
- + - -
-
mi-malloc -  1.7/2.0 +
+
mi-malloc 1.8/2.1
+
- -   + @@ -56,10 +54,15 @@
- + +
@@ -74,8 +77,8 @@

@@ -88,128 +91,177 @@
- +
+
+
+
+
Loading...
+
Searching...
+
No Matches
+
+
+
-
-
Runtime Options
+
Runtime Options

Set runtime behavior. More...

- - - +

+

Enumerations

enum  mi_option_t {
-  mi_option_show_errors -, mi_option_show_stats -, mi_option_verbose -, mi_option_eager_commit +
enum  mi_option_t {
+  mi_option_show_errors +, mi_option_show_stats +, mi_option_verbose +, mi_option_max_errors +,
+  mi_option_max_warnings +, mi_option_reserve_huge_os_pages +, mi_option_reserve_huge_os_pages_at +, mi_option_reserve_os_memory ,
-  mi_option_eager_region_commit -, mi_option_large_os_pages -, mi_option_reserve_huge_os_pages -, mi_option_reserve_huge_os_pages_at +  mi_option_allow_large_os_pages +, mi_option_purge_decommits +, mi_option_arena_reserve +, mi_option_os_tag ,
-  mi_option_segment_cache -, mi_option_page_reset -, mi_option_segment_reset -, mi_option_reset_delay +  mi_option_retry_on_oom +, mi_option_eager_commit +, mi_option_eager_commit_delay +, mi_option_arena_eager_commit ,
-  mi_option_use_numa_nodes -, mi_option_reset_decommits -, mi_option_eager_commit_delay -, mi_option_os_tag +  mi_option_abandoned_page_purge +, mi_option_purge_delay +, mi_option_use_numa_nodes +, mi_option_disallow_os_alloc ,
-  _mi_option_last +  mi_option_limit_os_alloc +, mi_option_max_segment_reclaim +, mi_option_destroy_on_exit +, mi_option_arena_purge_mult +,
+  mi_option_abandoned_reclaim_on_free +, mi_option_purge_extend_delay +, mi_option_disallow_arena_alloc +, mi_option_visit_abandoned +,
+  _mi_option_last
}
 Runtime options. More...
 Runtime options. More...
 
- - + - + - + - + - + - + - + + + + + - +

+

Functions

bool mi_option_is_enabled (mi_option_t option)
bool mi_option_is_enabled (mi_option_t option)
 
void mi_option_enable (mi_option_t option)
void mi_option_enable (mi_option_t option)
 
void mi_option_disable (mi_option_t option)
void mi_option_disable (mi_option_t option)
 
void mi_option_set_enabled (mi_option_t option, bool enable)
void mi_option_set_enabled (mi_option_t option, bool enable)
 
void mi_option_set_enabled_default (mi_option_t option, bool enable)
void mi_option_set_enabled_default (mi_option_t option, bool enable)
 
long mi_option_get (mi_option_t option)
long mi_option_get (mi_option_t option)
 
void mi_option_set (mi_option_t option, long value)
long mi_option_get_clamp (mi_option_t option, long min, long max)
 
size_t mi_option_get_size (mi_option_t option)
 
void mi_option_set (mi_option_t option, long value)
 
void mi_option_set_default (mi_option_t option, long value)
void mi_option_set_default (mi_option_t option, long value)
 

Detailed Description

Set runtime behavior.

Enumeration Type Documentation

- -

◆ mi_option_t

+ +

◆ mi_option_t

- +
enum mi_option_tenum mi_option_t

Runtime options.

- + + + + + + - - - - - - - - - - - - - - - - + + + + + + +
Enumerator
mi_option_show_errors 

Print error messages to stderr.

+
Enumerator
mi_option_show_errors 

Print error messages.

+
mi_option_show_stats 

Print statistics on termination.

+
mi_option_verbose 

Print verbose messages.

+
mi_option_max_errors 

issue at most N error messages

+
mi_option_max_warnings 

issue at most N warning messages

+
mi_option_reserve_huge_os_pages 

reserve N huge OS pages (1GiB pages) at startup

+
mi_option_reserve_huge_os_pages_at 

Reserve N huge OS pages at a specific NUMA node N.

mi_option_show_stats 

Print statistics to stderr when the program is done.

+
mi_option_reserve_os_memory 

reserve specified amount of OS memory in an arena at startup (internally, this value is in KiB; use mi_option_get_size)

mi_option_verbose 

Print verbose messages to stderr.

+
mi_option_allow_large_os_pages 

allow large (2 or 4 MiB) OS pages, implies eager commit. If false, also disables THP for the process.

mi_option_eager_commit 

Eagerly commit segments (4MiB) (enabled by default).

+
mi_option_purge_decommits 

should a memory purge decommit? (=1). Set to 0 to use memory reset on a purge (instead of decommit)

mi_option_eager_region_commit 

Eagerly commit large (256MiB) memory regions (enabled by default, except on Windows)

+
mi_option_arena_reserve 

initial memory size for arena reservation (= 1 GiB on 64-bit) (internally, this value is in KiB; use mi_option_get_size)

mi_option_large_os_pages 

Use large OS pages (2MiB in size) if possible.

+
mi_option_os_tag 

tag used for OS logging (macOS only for now) (=100)

mi_option_reserve_huge_os_pages 

The number of huge OS pages (1GiB in size) to reserve at the start of the program.

+
mi_option_retry_on_oom 

retry on out-of-memory for N milli seconds (=400), set to 0 to disable retries. (only on windows)

mi_option_reserve_huge_os_pages_at 

Reserve huge OS pages at node N.

+
mi_option_eager_commit 

eager commit segments? (after eager_commit_delay segments) (enabled by default).

mi_option_segment_cache 

The number of segments per thread to keep cached.

+
mi_option_eager_commit_delay 

the first N segments per thread are not eagerly committed (but per page in the segment on demand)

mi_option_page_reset 

Reset page memory after mi_option_reset_delay milliseconds when it becomes free.

+
mi_option_arena_eager_commit 

eager commit arenas? Use 2 to enable just on overcommit systems (=2)

mi_option_segment_reset 

Experimental.

+
mi_option_abandoned_page_purge 

immediately purge delayed purges on thread termination

mi_option_reset_delay 

Delay in milli-seconds before resetting a page (100ms by default)

+
mi_option_purge_delay 

memory purging is delayed by N milli seconds; use 0 for immediate purging or -1 for no purging at all. (=10)

mi_option_use_numa_nodes 

Pretend there are at most N NUMA nodes.

+
mi_option_use_numa_nodes 

0 = use all available numa nodes, otherwise use at most N nodes.

mi_option_reset_decommits 

Experimental.

+
mi_option_disallow_os_alloc 

1 = do not use OS memory for allocation (but only programmatically reserved arenas)

mi_option_eager_commit_delay 

Experimental.

+
mi_option_limit_os_alloc 

If set to 1, do not use OS memory for allocation (but only pre-reserved arenas)

mi_option_os_tag 

OS tag to assign to mimalloc'd memory.

+
mi_option_max_segment_reclaim 

max. percentage of the abandoned segments can be reclaimed per try (=10%)

_mi_option_last 
mi_option_destroy_on_exit 

if set, release all memory on exit; sometimes used for dynamic unloading but can be unsafe

+
mi_option_arena_purge_mult 

multiplier for purge_delay for the purging delay for arenas (=10)

+
mi_option_abandoned_reclaim_on_free 

allow to reclaim an abandoned segment on a free (=1)

+
mi_option_purge_extend_delay 

extend purge delay on each subsequent delay (=1)

+
mi_option_disallow_arena_alloc 

1 = do not use arena's for allocation (except if using specific arena id's)

+
mi_option_visit_abandoned 

allow visiting heap blocks from abandoned threads (=0)

+
_mi_option_last 

Function Documentation

- -

◆ mi_option_disable()

+ +

◆ mi_option_disable()

@@ -217,8 +269,7 @@

void mi_option_disable ( - mi_option_t  - option) + mi_option_t option) @@ -226,8 +277,8 @@

-

◆ mi_option_enable()

+ +

◆ mi_option_enable()

@@ -235,8 +286,7 @@

void mi_option_enable ( - mi_option_t  - option) + mi_option_t option) @@ -244,8 +294,8 @@

-

◆ mi_option_get()

+ +

◆ mi_option_get()

@@ -253,8 +303,7 @@

long mi_option_get ( - mi_option_t  - option) + mi_option_t option) @@ -262,82 +311,110 @@

-

◆ mi_option_is_enabled()

+ +

◆ mi_option_get_clamp()

- + - - + + + + + + + + + +
bool mi_option_is_enabled long mi_option_get_clamp (mi_option_t option)mi_option_t option,
long min,
long max )
- -

◆ mi_option_set()

+ +

◆ mi_option_get_size()

- + - - - - - + - - +
void mi_option_set size_t mi_option_get_size (mi_option_t option,
mi_option_t option) long value 
+
+ +
+
+ +

◆ mi_option_is_enabled()

+ +
+
+ + + + - -
bool mi_option_is_enabled (mi_option_t option) )
- -

◆ mi_option_set_default()

+ +

◆ mi_option_set()

- + - - + - - + + +
void mi_option_set_default void mi_option_set (mi_option_t option, mi_option_t option,
long value long value )
+
+ +
+
+ +

◆ mi_option_set_default()

+ +
+
+ + + + + + - - +
void mi_option_set_default (mi_option_t option,
)long value )
- -

◆ mi_option_set_enabled()

+ +

◆ mi_option_set_enabled()

@@ -345,27 +422,20 @@

void mi_option_set_enabled ( - mi_option_t  - option, + mi_option_t option, - bool  - enable  - - - - ) - + bool enable )

- -

◆ mi_option_set_enabled_default()

+ +

◆ mi_option_set_enabled_default()

@@ -373,19 +443,12 @@

void mi_option_set_enabled_default ( - mi_option_t  - option, + mi_option_t option, - bool  - enable  - - - - ) - + bool enable )

@@ -397,7 +460,7 @@

diff --git a/depends/mimalloc/docs/group__options.js b/depends/mimalloc/docs/group__options.js index c8836cdc1c0d..b152f7bc33ce 100644 --- a/depends/mimalloc/docs/group__options.js +++ b/depends/mimalloc/docs/group__options.js @@ -4,24 +4,38 @@ var group__options = [ "mi_option_show_errors", "group__options.html#ggafebf7ed116adb38ae5218bc3ce06884cafbf4822e5c00732c5984b32a032837f0", null ], [ "mi_option_show_stats", "group__options.html#ggafebf7ed116adb38ae5218bc3ce06884ca0957ef73b2550764b4840edf48422fda", null ], [ "mi_option_verbose", "group__options.html#ggafebf7ed116adb38ae5218bc3ce06884ca7c8b7bf5281c581bad64f5daa6442777", null ], - [ "mi_option_eager_commit", "group__options.html#ggafebf7ed116adb38ae5218bc3ce06884ca1e8de72c93da7ff22d91e1e27b52ac2b", null ], - [ "mi_option_eager_region_commit", "group__options.html#ggafebf7ed116adb38ae5218bc3ce06884ca32ce97ece29f69e82579679cf8a307ad", null ], - [ "mi_option_large_os_pages", "group__options.html#ggafebf7ed116adb38ae5218bc3ce06884ca4192d491200d0055df0554d4cf65054e", null ], + [ "mi_option_max_errors", "group__options.html#ggafebf7ed116adb38ae5218bc3ce06884caec6ecbe29d46a48205ed8823a8a52a6a", null ], + [ "mi_option_max_warnings", "group__options.html#ggafebf7ed116adb38ae5218bc3ce06884caf9595921087e942602ee079158762665", null ], [ "mi_option_reserve_huge_os_pages", "group__options.html#ggafebf7ed116adb38ae5218bc3ce06884caca7ed041be3b0b9d0b82432c7bf41af2", null ], [ "mi_option_reserve_huge_os_pages_at", "group__options.html#ggafebf7ed116adb38ae5218bc3ce06884caa13e7926d4339d2aa6fbf61d4473fd5c", null ], - [ "mi_option_segment_cache", "group__options.html#ggafebf7ed116adb38ae5218bc3ce06884ca2ecbe7ef32f5c84de3739aa4f0b805a1", null ], - [ "mi_option_page_reset", "group__options.html#ggafebf7ed116adb38ae5218bc3ce06884cada854dd272c66342f18a93ee254a2968", null ], - [ "mi_option_segment_reset", "group__options.html#ggafebf7ed116adb38ae5218bc3ce06884cafb121d30d87591850d5410ccc3a95c6d", null ], - [ "mi_option_reset_delay", 
"group__options.html#ggafebf7ed116adb38ae5218bc3ce06884ca154fe170131d5212cff57e22b99523c5", null ], - [ "mi_option_use_numa_nodes", "group__options.html#ggafebf7ed116adb38ae5218bc3ce06884ca0ac33a18f6b659fcfaf44efb0bab1b74", null ], - [ "mi_option_reset_decommits", "group__options.html#ggafebf7ed116adb38ae5218bc3ce06884cac81ee965b130fa81238913a3c239d536", null ], - [ "mi_option_eager_commit_delay", "group__options.html#ggafebf7ed116adb38ae5218bc3ce06884ca17a190c25be381142d87e0468c4c068c", null ], + [ "mi_option_reserve_os_memory", "group__options.html#ggafebf7ed116adb38ae5218bc3ce06884cafbf4999c828cf79a0fb2de65d23f7333", null ], + [ "mi_option_allow_large_os_pages", "group__options.html#ggafebf7ed116adb38ae5218bc3ce06884ca7cc4804ced69004fa42a9a136a9ba556", null ], + [ "mi_option_purge_decommits", "group__options.html#ggafebf7ed116adb38ae5218bc3ce06884ca9d15c5e3d2115eef681c17e4dd5ab9a4", null ], + [ "mi_option_arena_reserve", "group__options.html#ggafebf7ed116adb38ae5218bc3ce06884cab1c88e23ae290bbeec824038a97959de", null ], [ "mi_option_os_tag", "group__options.html#ggafebf7ed116adb38ae5218bc3ce06884ca4b74ae2a69e445de6c2361b73c1d14bf", null ], + [ "mi_option_retry_on_oom", "group__options.html#ggafebf7ed116adb38ae5218bc3ce06884ca8f51df355bf6651db899e6085b54865e", null ], + [ "mi_option_eager_commit", "group__options.html#ggafebf7ed116adb38ae5218bc3ce06884ca1e8de72c93da7ff22d91e1e27b52ac2b", null ], + [ "mi_option_eager_commit_delay", "group__options.html#ggafebf7ed116adb38ae5218bc3ce06884ca17a190c25be381142d87e0468c4c068c", null ], + [ "mi_option_arena_eager_commit", "group__options.html#ggafebf7ed116adb38ae5218bc3ce06884cafd0c5ddbc4b59fd8b5216871728167a5", null ], + [ "mi_option_abandoned_page_purge", "group__options.html#ggafebf7ed116adb38ae5218bc3ce06884ca11e62ed69200a489a5be955582078c0c", null ], + [ "mi_option_purge_delay", "group__options.html#ggafebf7ed116adb38ae5218bc3ce06884cadd351e615acd8563529c20a347be7290", null ], + [ "mi_option_use_numa_nodes", 
"group__options.html#ggafebf7ed116adb38ae5218bc3ce06884ca0ac33a18f6b659fcfaf44efb0bab1b74", null ], + [ "mi_option_disallow_os_alloc", "group__options.html#ggafebf7ed116adb38ae5218bc3ce06884cadcfb5a09580361b1be65901d2d812de6", null ], + [ "mi_option_limit_os_alloc", "group__options.html#ggafebf7ed116adb38ae5218bc3ce06884ca9fa61bd9668479f8452d2195759444cc", null ], + [ "mi_option_max_segment_reclaim", "group__options.html#ggafebf7ed116adb38ae5218bc3ce06884caa9ad9005d7017c8c30ad2d6ba31db909", null ], + [ "mi_option_destroy_on_exit", "group__options.html#ggafebf7ed116adb38ae5218bc3ce06884ca6364331e305e7d3c0218b058ff3afc88", null ], + [ "mi_option_arena_purge_mult", "group__options.html#ggafebf7ed116adb38ae5218bc3ce06884ca8236501f1ab45d26e6fd885d191a2b5e", null ], + [ "mi_option_abandoned_reclaim_on_free", "group__options.html#ggafebf7ed116adb38ae5218bc3ce06884ca009e4b5684922ce664d73d2a8e1698d9", null ], + [ "mi_option_purge_extend_delay", "group__options.html#ggafebf7ed116adb38ae5218bc3ce06884ca02005f164bdf03f5f00c5be726adf487", null ], + [ "mi_option_disallow_arena_alloc", "group__options.html#ggafebf7ed116adb38ae5218bc3ce06884caeae1696100e4057ffc4182730cc04e40", null ], + [ "mi_option_visit_abandoned", "group__options.html#ggafebf7ed116adb38ae5218bc3ce06884ca38c67733a3956a1f4eeaca89fab9e78e", null ], [ "_mi_option_last", "group__options.html#ggafebf7ed116adb38ae5218bc3ce06884ca5b4357b74be0d87568036c32eb1a2e4a", null ] ] ], [ "mi_option_disable", "group__options.html#gaebf6ff707a2e688ebb1a2296ca564054", null ], [ "mi_option_enable", "group__options.html#ga04180ae41b0d601421dd62ced40ca050", null ], [ "mi_option_get", "group__options.html#ga7e8af195cc81d3fa64ccf2662caa565a", null ], + [ "mi_option_get_clamp", "group__options.html#ga96ad9c406338bd314cfe878cfc9bf723", null ], + [ "mi_option_get_size", "group__options.html#ga274db5a6ac87cc24ef0b23e7006ed02c", null ], [ "mi_option_is_enabled", "group__options.html#ga459ad98f18b3fc9275474807fe0ca188", null ], [ 
"mi_option_set", "group__options.html#gaf84921c32375e25754dc2ee6a911fa60", null ], [ "mi_option_set_default", "group__options.html#ga7ef623e440e6e5545cb08c94e71e4b90", null ], diff --git a/depends/mimalloc/docs/group__posix.html b/depends/mimalloc/docs/group__posix.html index 539f7ec68300..27351449ffe0 100644 --- a/depends/mimalloc/docs/group__posix.html +++ b/depends/mimalloc/docs/group__posix.html @@ -1,24 +1,26 @@ - + - - + + mi-malloc: Posix + - + + @@ -29,20 +31,16 @@
- + - -
-
mi-malloc -  1.7/2.0 +
+
mi-malloc 1.8/2.1
+
- -   + @@ -56,10 +54,15 @@
- + +
@@ -74,8 +77,8 @@

@@ -88,62 +91,105 @@
- +
+
+
+
+
Loading...
+
Searching...
+
No Matches
+
+
+
-
-
Posix
+
Posix

mi_ prefixed implementations of various Posix, Unix, and C++ allocation functions. More...

- - + + + + + + - + + + - - - - + - + - - - - - - - - - - - - - + + + + + + + + + + + + + + + + + + + + + - + + + + + - + - +

+

Functions

size_t mi_malloc_size (const void *p)
void mi_cfree (void *p)
 Just as free but also checks if the pointer p belongs to our heap.
 
void * mi__expand (void *p, size_t newsize)
 
size_t mi_malloc_size (const void *p)
 
size_t mi_malloc_usable_size (const void *p)
size_t mi_malloc_good_size (size_t size)
 
size_t mi_malloc_usable_size (const void *p)
 
void mi_cfree (void *p)
 Just as free but also checks if the pointer p belongs to our heap. More...
 
int mi_posix_memalign (void **p, size_t alignment, size_t size)
int mi_posix_memalign (void **p, size_t alignment, size_t size)
 
int mi__posix_memalign (void **p, size_t alignment, size_t size)
int mi__posix_memalign (void **p, size_t alignment, size_t size)
 
void * mi_memalign (size_t alignment, size_t size)
 
void * mi_valloc (size_t size)
 
void * mi_pvalloc (size_t size)
 
void * mi_aligned_alloc (size_t alignment, size_t size)
 
void * mi_reallocarray (void *p, size_t count, size_t size)
 Correspond s to reallocarray in FreeBSD. More...
 
int mi_reallocarr (void *p, size_t count, size_t size)
 Corresponds to reallocarr in NetBSD. More...
void * mi_memalign (size_t alignment, size_t size)
 
void * mi_valloc (size_t size)
 
void * mi_pvalloc (size_t size)
 
void * mi_aligned_alloc (size_t alignment, size_t size)
 
unsigned short * mi_wcsdup (const unsigned short *s)
 
unsigned char * mi_mbsdup (const unsigned char *s)
 
int mi_dupenv_s (char **buf, size_t *size, const char *name)
 
int mi_wdupenv_s (unsigned short **buf, size_t *size, const unsigned short *name)
 
void * mi_reallocarray (void *p, size_t count, size_t size)
 Correspond s to reallocarray in FreeBSD.
 
int mi_reallocarr (void *p, size_t count, size_t size)
 Corresponds to reallocarr in NetBSD.
 
void mi_free_size (void *p, size_t size)
void * mi_aligned_recalloc (void *p, size_t newcount, size_t size, size_t alignment)
 
void * mi_aligned_offset_recalloc (void *p, size_t newcount, size_t size, size_t alignment, size_t offset)
 
void mi_free_size (void *p, size_t size)
 
void mi_free_size_aligned (void *p, size_t size, size_t alignment)
void mi_free_size_aligned (void *p, size_t size, size_t alignment)
 
void mi_free_aligned (void *p, size_t alignment)
void mi_free_aligned (void *p, size_t alignment)
 

Detailed Description

mi_ prefixed implementations of various Posix, Unix, and C++ allocation functions.

Defined for convenience as all redirect to the regular mimalloc API.

Function Documentation

- -

◆ mi__posix_memalign()

+ +

◆ mi__expand()

+ +
+
+ + + + + + + + + + + +
void * mi__expand (void * p,
size_t newsize )
+
+ +
+
+ +

◆ mi__posix_memalign()

+ +

◆ mi_aligned_alloc()

+ +
+
+ + + + + + + + + + + +
void * mi_aligned_alloc (size_t alignment,
size_t size )
+
+ +
+
+ +

◆ mi_aligned_offset_recalloc()

+ +
+
+ + + + + - - + - - + + + + + + + - - +
void * mi_aligned_offset_recalloc (void * p,
size_t alignment, size_t newcount,
size_t size size_t size,
size_t alignment,
)size_t offset )
- -

◆ mi_aligned_alloc()

+ +

◆ mi_aligned_recalloc()

- + - - + - - + + + + + + + - - +
void* mi_aligned_alloc void * mi_aligned_recalloc (size_t alignment, void * p,
size_t size size_t newcount,
size_t size,
)size_t alignment )
- -

◆ mi_cfree()

+ +

◆ mi_cfree()

@@ -213,8 +311,7 @@

void mi_cfree ( - void *  - p) + void * p) @@ -224,64 +321,76 @@

-

◆ mi_free_aligned()

+ +

◆ mi_dupenv_s()

- + - - + - - + + - - +
void mi_free_aligned int mi_dupenv_s (void * p, char ** buf,
size_t alignment size_t * size,
)const char * name )
- -

◆ mi_free_size()

+ +

◆ mi_free_aligned()

- + - - + - - + +
void mi_free_size void mi_free_aligned (void * p, void * p,
size_t size size_t alignment )
+
+ +
+
+ +

◆ mi_free_size()

+ +
+
+ + + + + + + - - +
void mi_free_size (void * p,
)size_t size )
- -

◆ mi_free_size_aligned()

+ +

◆ mi_free_size_aligned()

+ +

◆ mi_malloc_good_size()

+ +
+
+ + + + - -
size_t mi_malloc_good_size (size_t size) )
- -

◆ mi_malloc_size()

+ +

◆ mi_malloc_size()

@@ -323,8 +441,7 @@

size_t mi_malloc_size ( - const void *  - p) + const void * p) @@ -332,8 +449,8 @@

-

◆ mi_malloc_usable_size()

+ +

◆ mi_malloc_usable_size()

@@ -341,8 +458,7 @@

size_t mi_malloc_usable_size ( - const void *  - p) + const void * p) @@ -350,36 +466,46 @@

-

◆ mi_memalign()

+ +

◆ mi_mbsdup()

- + - - + + +
void* mi_memalign unsigned char * mi_mbsdup (size_t alignment, const unsigned char * s)
+
+ +
+
+ +

◆ mi_memalign()

+ +
+
+ - - - - + + + + - - +
size_t size void * mi_memalign (size_t alignment,
)size_t size )
- -

◆ mi_posix_memalign()

+ +

◆ mi_posix_memalign()

- -

◆ mi_pvalloc()

+ +

◆ mi_pvalloc()

- + - - +
void* mi_pvalloc void * mi_pvalloc (size_t size)size_t size)
@@ -430,8 +547,8 @@

-

◆ mi_reallocarr()

+ +

◆ mi_reallocarr()

@@ -466,54 +575,88 @@

-

◆ mi_reallocarray()

+ +

◆ mi_reallocarray()

- + - - + - - + - - + +
void* mi_reallocarray void * mi_reallocarray (void * p, void * p,
size_t count, size_t count,
size_t size size_t size )
+
+ +

Correspond s to reallocarray in FreeBSD.

+ +
+
+ +

◆ mi_valloc()

+ +
+
+ + + + - -
void * mi_valloc (size_t size) )
-

Correspond s to reallocarray in FreeBSD.

+
+
+ +

◆ mi_wcsdup()

+ +
+
+ + + + + + + +
unsigned short * mi_wcsdup (const unsigned short * s)
+
- -

◆ mi_valloc()

+ +

◆ mi_wdupenv_s()

- + - - + + + + + + + + + +
void* mi_valloc int mi_wdupenv_s (size_t size)unsigned short ** buf,
size_t * size,
const unsigned short * name )
@@ -525,7 +668,7 @@

diff --git a/depends/mimalloc/docs/group__posix.js b/depends/mimalloc/docs/group__posix.js index 50c248c80dac..ea4ed269ed5d 100644 --- a/depends/mimalloc/docs/group__posix.js +++ b/depends/mimalloc/docs/group__posix.js @@ -1,17 +1,25 @@ var group__posix = [ + [ "mi__expand", "group__posix.html#ga66bcfeb4faedbb42b796bc680821ef84", null ], [ "mi__posix_memalign", "group__posix.html#gad5a69c8fea96aa2b7a7c818c2130090a", null ], - [ "mi_aligned_alloc", "group__posix.html#ga1326d2e4388630b5f81ca7206318b8e5", null ], + [ "mi_aligned_alloc", "group__posix.html#ga430ed1513f0571ff83be00ec58a98ee0", null ], + [ "mi_aligned_offset_recalloc", "group__posix.html#ga16570deddd559001b44953eedbad0084", null ], + [ "mi_aligned_recalloc", "group__posix.html#gaf82cbb4b4f24acf723348628451798d3", null ], [ "mi_cfree", "group__posix.html#ga705dc7a64bffacfeeb0141501a5c35d7", null ], + [ "mi_dupenv_s", "group__posix.html#gab41369c1a1da7504013a7a0b1d4dd958", null ], [ "mi_free_aligned", "group__posix.html#ga0d28d5cf61e6bfbb18c63092939fe5c9", null ], [ "mi_free_size", "group__posix.html#gae01389eedab8d67341ff52e2aad80ebb", null ], [ "mi_free_size_aligned", "group__posix.html#ga72e9d7ffb5fe94d69bc722c8506e27bc", null ], + [ "mi_malloc_good_size", "group__posix.html#ga9d23ac7885fed7413c11d8e0ffa31071", null ], [ "mi_malloc_size", "group__posix.html#ga4531c9e775bb3ae12db57c1ba8a5d7de", null ], [ "mi_malloc_usable_size", "group__posix.html#ga06d07cf357bbac5c73ba5d0c0c421e17", null ], - [ "mi_memalign", "group__posix.html#gaab7fa71ea93b96873f5d9883db57d40e", null ], + [ "mi_mbsdup", "group__posix.html#ga7b82a44094fdec4d2084eb4288a979b0", null ], + [ "mi_memalign", "group__posix.html#ga726867f13fd29ca36064954c0285b1d8", null ], [ "mi_posix_memalign", "group__posix.html#gacff84f226ba9feb2031b8992e5579447", null ], - [ "mi_pvalloc", "group__posix.html#gaeb325c39b887d3b90d85d1eb1712fb1e", null ], + [ "mi_pvalloc", "group__posix.html#ga644bebccdbb2821542dd8c7e7641f476", null ], [ "mi_reallocarr", 
"group__posix.html#ga7e1934d60a3e697950eeb48e042bfad5", null ], - [ "mi_reallocarray", "group__posix.html#ga48fad8648a2f1dab9c87ea9448a52088", null ], - [ "mi_valloc", "group__posix.html#ga73baaf5951f5165ba0763d0c06b6a93b", null ] + [ "mi_reallocarray", "group__posix.html#gadfeccb72748a2f6305474a37d9d57bce", null ], + [ "mi_valloc", "group__posix.html#ga50cafb9722020402f065de93799f64ca", null ], + [ "mi_wcsdup", "group__posix.html#gaa9fd7f25c9ac3a20e89b33bd6e383fcf", null ], + [ "mi_wdupenv_s", "group__posix.html#ga6ac6a6a8f3c96f1af24bb8d0439cbbd1", null ] ]; \ No newline at end of file diff --git a/depends/mimalloc/docs/group__typed.html b/depends/mimalloc/docs/group__typed.html index c19c7f4adbb7..0d3203e9279f 100644 --- a/depends/mimalloc/docs/group__typed.html +++ b/depends/mimalloc/docs/group__typed.html @@ -1,24 +1,26 @@ - + - - + + mi-malloc: Typed Macros + - + + @@ -29,20 +31,16 @@
- + - -
-
mi-malloc -  1.7/2.0 +
+
mi-malloc 1.8/2.1
+
- -   + @@ -56,10 +54,15 @@
- + +
@@ -74,8 +77,8 @@
@@ -88,65 +91,71 @@
- +
+
+
+
+
Loading...
+
Searching...
+
No Matches
+
+
+
-
-
Typed Macros
+
Typed Macros

Typed allocation macros. More...

- - - + + - - + + - - + + - - + + - - + + - - + + - - + + - - + + - - + + - - + + - - + +

+

Macros

#define mi_malloc_tp(tp)
 Allocate a block of type tp. More...
#define mi_malloc_tp(tp)
 Allocate a block of type tp.
 
#define mi_zalloc_tp(tp)
 Allocate a zero-initialized block of type tp. More...
#define mi_zalloc_tp(tp)
 Allocate a zero-initialized block of type tp.
 
#define mi_calloc_tp(tp, count)
 Allocate count zero-initialized blocks of type tp. More...
#define mi_calloc_tp(tp, count)
 Allocate count zero-initialized blocks of type tp.
 
#define mi_mallocn_tp(tp, count)
 Allocate count blocks of type tp. More...
#define mi_mallocn_tp(tp, count)
 Allocate count blocks of type tp.
 
#define mi_reallocn_tp(p, tp, count)
 Re-allocate to count blocks of type tp. More...
#define mi_reallocn_tp(p, tp, count)
 Re-allocate to count blocks of type tp.
 
#define mi_heap_malloc_tp(hp, tp)
 Allocate a block of type tp in a heap hp. More...
#define mi_heap_malloc_tp(hp, tp)
 Allocate a block of type tp in a heap hp.
 
#define mi_heap_zalloc_tp(hp, tp)
 Allocate a zero-initialized block of type tp in a heap hp. More...
#define mi_heap_zalloc_tp(hp, tp)
 Allocate a zero-initialized block of type tp in a heap hp.
 
#define mi_heap_calloc_tp(hp, tp, count)
 Allocate count zero-initialized blocks of type tp in a heap hp. More...
#define mi_heap_calloc_tp(hp, tp, count)
 Allocate count zero-initialized blocks of type tp in a heap hp.
 
#define mi_heap_mallocn_tp(hp, tp, count)
 Allocate count blocks of type tp in a heap hp. More...
#define mi_heap_mallocn_tp(hp, tp, count)
 Allocate count blocks of type tp in a heap hp.
 
#define mi_heap_reallocn_tp(hp, p, tp, count)
 Re-allocate to count blocks of type tp in a heap hp. More...
#define mi_heap_reallocn_tp(hp, p, tp, count)
 Re-allocate to count blocks of type tp in a heap hp.
 
#define mi_heap_recalloc_tp(hp, p, tp, count)
 Re-allocate to count zero initialized blocks of type tp in a heap hp. More...
#define mi_heap_recalloc_tp(hp, p, tp, count)
 Re-allocate to count zero initialized blocks of type tp in a heap hp.
 

Detailed Description

Typed allocation macros.

-

For example:

int* p = mi_malloc_tp(int)
-
#define mi_malloc_tp(tp)
Allocate a block of type tp.
Definition: mimalloc-doc.h:692
+

For example:

int* p = mi_malloc_tp(int)
+
#define mi_malloc_tp(tp)
Allocate a block of type tp.
Definition mimalloc-doc.h:784

Macro Definition Documentation

- -

◆ mi_calloc_tp

+ +

◆ mi_calloc_tp

- -

◆ mi_heap_calloc_tp

+ +

◆ mi_heap_calloc_tp

- -

◆ mi_heap_malloc_tp

+ +

◆ mi_heap_malloc_tp

- -

◆ mi_heap_mallocn_tp

+ +

◆ mi_heap_mallocn_tp

- -

◆ mi_heap_reallocn_tp

+ +

◆ mi_heap_reallocn_tp

- -

◆ mi_heap_recalloc_tp

+ +

◆ mi_heap_recalloc_tp

- -

◆ mi_heap_zalloc_tp

+ +

◆ mi_heap_zalloc_tp

- -

◆ mi_malloc_tp

+ +

◆ mi_malloc_tp

- -

◆ mi_mallocn_tp

+ +

◆ mi_mallocn_tp

- -

◆ mi_reallocn_tp

+ +

◆ mi_reallocn_tp

- -

◆ mi_zalloc_tp

+ +

◆ mi_zalloc_tp

@@ -511,7 +448,7 @@

diff --git a/depends/mimalloc/docs/group__zeroinit.html b/depends/mimalloc/docs/group__zeroinit.html index 329a7739d207..6ae8ff87ea9b 100644 --- a/depends/mimalloc/docs/group__zeroinit.html +++ b/depends/mimalloc/docs/group__zeroinit.html @@ -1,24 +1,26 @@ - + - - + + mi-malloc: Zero initialized re-allocation + - + + @@ -29,20 +31,16 @@
- + - -
-
mi-malloc -  1.7/2.0 +
+
mi-malloc 1.8/2.1
+
- -   + @@ -56,10 +54,15 @@
- + +
@@ -74,8 +77,8 @@

@@ -88,491 +91,397 @@
- +
+
+
+
+
Loading...
+
Searching...
+
No Matches
+
+
+
-
-
Zero initialized re-allocation
+
Zero initialized re-allocation

The zero-initialized re-allocations are only valid on memory that was originally allocated with zero initialization too. More...

- - - - - - - - - - - - - - - - - - - - - - - + + + + + + + + + + + + + + + + + + + + + +

+

Functions

void * mi_rezalloc (void *p, size_t newsize)
 
void * mi_rezalloc_aligned (void *p, size_t newsize, size_t alignment)
 
void * mi_rezalloc_aligned_at (void *p, size_t newsize, size_t alignment, size_t offset)
 
void * mi_recalloc_aligned (void *p, size_t newcount, size_t size, size_t alignment)
 
void * mi_recalloc_aligned_at (void *p, size_t newcount, size_t size, size_t alignment, size_t offset)
 
void * mi_heap_rezalloc (mi_heap_t *heap, void *p, size_t newsize)
 
void * mi_heap_recalloc (mi_heap_t *heap, void *p, size_t newcount, size_t size)
 
void * mi_heap_rezalloc_aligned (mi_heap_t *heap, void *p, size_t newsize, size_t alignment)
 
void * mi_heap_rezalloc_aligned_at (mi_heap_t *heap, void *p, size_t newsize, size_t alignment, size_t offset)
 
void * mi_heap_recalloc_aligned (mi_heap_t *heap, void *p, size_t newcount, size_t size, size_t alignment)
 
void * mi_heap_recalloc_aligned_at (mi_heap_t *heap, void *p, size_t newcount, size_t size, size_t alignment, size_t offset)
 
void * mi_rezalloc (void *p, size_t newsize)
 
void * mi_rezalloc_aligned (void *p, size_t newsize, size_t alignment)
 
void * mi_rezalloc_aligned_at (void *p, size_t newsize, size_t alignment, size_t offset)
 
void * mi_recalloc_aligned (void *p, size_t newcount, size_t size, size_t alignment)
 
void * mi_recalloc_aligned_at (void *p, size_t newcount, size_t size, size_t alignment, size_t offset)
 
void * mi_heap_rezalloc (mi_heap_t *heap, void *p, size_t newsize)
 
void * mi_heap_recalloc (mi_heap_t *heap, void *p, size_t newcount, size_t size)
 
void * mi_heap_rezalloc_aligned (mi_heap_t *heap, void *p, size_t newsize, size_t alignment)
 
void * mi_heap_rezalloc_aligned_at (mi_heap_t *heap, void *p, size_t newsize, size_t alignment, size_t offset)
 
void * mi_heap_recalloc_aligned (mi_heap_t *heap, void *p, size_t newcount, size_t size, size_t alignment)
 
void * mi_heap_recalloc_aligned_at (mi_heap_t *heap, void *p, size_t newcount, size_t size, size_t alignment, size_t offset)
 

Detailed Description

The zero-initialized re-allocations are only valid on memory that was originally allocated with zero initialization too.

e.g. mi_calloc, mi_zalloc, mi_zalloc_aligned etc. see https://github.com/microsoft/mimalloc/issues/63#issuecomment-508272992

Function Documentation

- -

◆ mi_heap_recalloc()

+ +

◆ mi_heap_recalloc()

- + - - + - - + - - + - - - - - - - +
void* mi_heap_recalloc void * mi_heap_recalloc (mi_heap_theap, mi_heap_t * heap,
void * p, void * p,
size_t newcount, size_t newcount,
size_t size 
)size_t size )
- -

◆ mi_heap_recalloc_aligned()

+ +

◆ mi_heap_recalloc_aligned()

- + - - + - - + - - + - - + - - - - - - - +
void* mi_heap_recalloc_aligned void * mi_heap_recalloc_aligned (mi_heap_theap, mi_heap_t * heap,
void * p, void * p,
size_t newcount, size_t newcount,
size_t size, size_t size,
size_t alignment 
)size_t alignment )
- -

◆ mi_heap_recalloc_aligned_at()

+ +

◆ mi_heap_recalloc_aligned_at()

- + - - + - - + - - + - - + - - + - - - - - - - +
void* mi_heap_recalloc_aligned_at void * mi_heap_recalloc_aligned_at (mi_heap_theap, mi_heap_t * heap,
void * p, void * p,
size_t newcount, size_t newcount,
size_t size, size_t size,
size_t alignment, size_t alignment,
size_t offset 
)size_t offset )
- -

◆ mi_heap_rezalloc()

+ +

◆ mi_heap_rezalloc()

- + - - + - - + - - - - - - - +
void* mi_heap_rezalloc void * mi_heap_rezalloc (mi_heap_theap, mi_heap_t * heap,
void * p, void * p,
size_t newsize 
)size_t newsize )
- -

◆ mi_heap_rezalloc_aligned()

+ +

◆ mi_heap_rezalloc_aligned()

- + - - + - - + - - + - - - - - - - +
void* mi_heap_rezalloc_aligned void * mi_heap_rezalloc_aligned (mi_heap_theap, mi_heap_t * heap,
void * p, void * p,
size_t newsize, size_t newsize,
size_t alignment 
)size_t alignment )
- -

◆ mi_heap_rezalloc_aligned_at()

+ +

◆ mi_heap_rezalloc_aligned_at()

- + - - + - - + - - + - - + - - - - - - - +
void* mi_heap_rezalloc_aligned_at void * mi_heap_rezalloc_aligned_at (mi_heap_theap, mi_heap_t * heap,
void * p, void * p,
size_t newsize, size_t newsize,
size_t alignment, size_t alignment,
size_t offset 
)size_t offset )
- -

◆ mi_recalloc_aligned()

+ +

◆ mi_recalloc_aligned()

- + - - + - - + - - + - - - - - - - +
void* mi_recalloc_aligned void * mi_recalloc_aligned (void * p, void * p,
size_t newcount, size_t newcount,
size_t size, size_t size,
size_t alignment 
)size_t alignment )
- -

◆ mi_recalloc_aligned_at()

+ +

◆ mi_recalloc_aligned_at()

- + - - + - - + - - + - - + - - - - - - - +
void* mi_recalloc_aligned_at void * mi_recalloc_aligned_at (void * p, void * p,
size_t newcount, size_t newcount,
size_t size, size_t size,
size_t alignment, size_t alignment,
size_t offset 
)size_t offset )
- -

◆ mi_rezalloc()

+ +

◆ mi_rezalloc()

- + - - + - - - - - - - +
void* mi_rezalloc void * mi_rezalloc (void * p, void * p,
size_t newsize 
)size_t newsize )
- -

◆ mi_rezalloc_aligned()

+ +

◆ mi_rezalloc_aligned()

- + - - + - - + - - - - - - - +
void* mi_rezalloc_aligned void * mi_rezalloc_aligned (void * p, void * p,
size_t newsize, size_t newsize,
size_t alignment 
)size_t alignment )
- -

◆ mi_rezalloc_aligned_at()

+ +

◆ mi_rezalloc_aligned_at()

- + - - + - - + - - + - - - - - - - +
void* mi_rezalloc_aligned_at void * mi_rezalloc_aligned_at (void * p, void * p,
size_t newsize, size_t newsize,
size_t alignment, size_t alignment,
size_t offset 
)size_t offset )
@@ -584,7 +493,7 @@

diff --git a/depends/mimalloc/docs/group__zeroinit.js b/depends/mimalloc/docs/group__zeroinit.js index b9297d2123f8..505df60222d2 100644 --- a/depends/mimalloc/docs/group__zeroinit.js +++ b/depends/mimalloc/docs/group__zeroinit.js @@ -1,14 +1,14 @@ var group__zeroinit = [ - [ "mi_heap_recalloc", "group__zeroinit.html#ga8648c5fbb22a80f0262859099f06dfbd", null ], - [ "mi_heap_recalloc_aligned", "group__zeroinit.html#ga9f3f999396c8f77ca5e80e7b40ac29e3", null ], - [ "mi_heap_recalloc_aligned_at", "group__zeroinit.html#ga496452c96f1de8c500be9fddf52edaf7", null ], - [ "mi_heap_rezalloc", "group__zeroinit.html#gacfad83f14eb5d6a42a497a898e19fc76", null ], - [ "mi_heap_rezalloc_aligned", "group__zeroinit.html#ga375fa8a611c51905e592d5d467c49664", null ], - [ "mi_heap_rezalloc_aligned_at", "group__zeroinit.html#gac90da54fa7e5d10bdc97ce0b51dce2eb", null ], - [ "mi_recalloc_aligned", "group__zeroinit.html#ga3e7e5c291acf1c7fd7ffd9914a9f945f", null ], - [ "mi_recalloc_aligned_at", "group__zeroinit.html#ga4ff5e92ad73585418a072c9d059e5cf9", null ], - [ "mi_rezalloc", "group__zeroinit.html#ga8c292e142110229a2980b37ab036dbc6", null ], - [ "mi_rezalloc_aligned", "group__zeroinit.html#gacd71a7bce96aab38ae6de17af2eb2cf0", null ], - [ "mi_rezalloc_aligned_at", "group__zeroinit.html#gae8b358c417e61d5307da002702b0a8e1", null ] + [ "mi_heap_recalloc", "group__zeroinit.html#gad1a0d325d930eeb80f25e3fea37aacde", null ], + [ "mi_heap_recalloc_aligned", "group__zeroinit.html#ga87ddd674bf1c67237d780d0b9e0f0f32", null ], + [ "mi_heap_recalloc_aligned_at", "group__zeroinit.html#ga07b5bcbaf00d0d2e598c232982588496", null ], + [ "mi_heap_rezalloc", "group__zeroinit.html#ga8d8b7ebb24b513cd84d1a696048da60d", null ], + [ "mi_heap_rezalloc_aligned", "group__zeroinit.html#ga5129f6dc46ee1613d918820a8a0533a7", null ], + [ "mi_heap_rezalloc_aligned_at", "group__zeroinit.html#ga2bafa79c3f98ea74882349d44cffa5d9", null ], + [ "mi_recalloc_aligned", "group__zeroinit.html#ga3e2169b48683aa0ab64f813fd68d839e", null 
], + [ "mi_recalloc_aligned_at", "group__zeroinit.html#gaae25e4ddedd4e0fb61b1a8bd5d452750", null ], + [ "mi_rezalloc", "group__zeroinit.html#gadfd34cd7b4f2bbda7ae06367a6360756", null ], + [ "mi_rezalloc_aligned", "group__zeroinit.html#ga4d02404fe1e7db00beb65f185e012caa", null ], + [ "mi_rezalloc_aligned_at", "group__zeroinit.html#ga6843a88285bbfcc3bdfccc60aafd1270", null ] ]; \ No newline at end of file diff --git a/depends/mimalloc/docs/index.html b/depends/mimalloc/docs/index.html index 2ea91215ea9e..b3c8a630ad72 100644 --- a/depends/mimalloc/docs/index.html +++ b/depends/mimalloc/docs/index.html @@ -1,24 +1,26 @@ - + - - + + -mi-malloc: Main Page +mi-malloc: mi-malloc + - + + @@ -29,20 +31,16 @@
- + - -
-
mi-malloc -  1.7/2.0 +
+
mi-malloc 1.8/2.1
+
- -   + @@ -56,10 +54,15 @@
- + +
@@ -74,8 +77,8 @@
@@ -88,28 +91,33 @@
- +
+
+
+
+
Loading...
+
Searching...
+
No Matches
+
+
+
-
-
-
mi-malloc Documentation
+
+
mi-malloc

This is the API documentation of the mimalloc allocator (pronounced "me-malloc") – a general purpose allocator with excellent performance characteristics. Initially developed by Daan Leijen for the run-time systems of the Koka and Lean languages.

It is a drop-in replacement for malloc and can be used in other programs without code changes, for example, on Unix you can use it as:

> LD_PRELOAD=/usr/bin/libmimalloc.so myprogram
-

Notable aspects of the design include:

-
    -
  • small and consistent: the library is about 8k LOC using simple and consistent data structures. This makes it very suitable to integrate and adapt in other projects. For runtime systems it provides hooks for a monotonic heartbeat and deferred freeing (for bounded worst-case times with reference counting).
  • +

Notable aspects of the design include:

    +
  • small and consistent: the library is about 8k LOC using simple and consistent data structures. This makes it very suitable to integrate and adapt in other projects. For runtime systems it provides hooks for a monotonic heartbeat and deferred freeing (for bounded worst-case times with reference counting). Partly due to its simplicity, mimalloc has been ported to many systems (Windows, macOS, Linux, WASM, various BSD's, Haiku, MUSL, etc) and has excellent support for dynamic overriding. At the same time, it is an industrial strength allocator that runs (very) large scale distributed services on thousands of machines with excellent worst case latencies.
  • free list sharding: instead of one big free list (per size class) we have many smaller lists per "mimalloc page" which reduces fragmentation and increases locality – things that are allocated close in time get allocated close in memory. (A mimalloc page contains blocks of one size class and is usually 64KiB on a 64-bit system).
  • free list multi-sharding: the big idea! Not only do we shard the free list per mimalloc page, but for each page we have multiple free lists. In particular, there is one list for thread-local free operations, and another one for concurrent free operations. Free-ing from another thread can now be a single CAS without needing sophisticated coordination between threads. Since there will be thousands of separate free lists, contention is naturally distributed over the heap, and the chance of contending on a single location will be low – this is quite similar to randomized algorithms like skip lists where adding a random oracle removes the need for a more complex algorithm.
  • -
  • eager page reset: when a "page" becomes empty (with increased chance due to free list sharding) the memory is marked to the OS as unused ("reset" or "purged") reducing (real) memory pressure and fragmentation, especially in long running programs.
  • -
  • secure: mimalloc can be build in secure mode, adding guard pages, randomized allocation, encrypted free lists, etc. to protect against various heap vulnerabilities. The performance penalty is only around 5% on average over our benchmarks.
  • +
  • eager page purging: when a "page" becomes empty (with increased chance due to free list sharding) the memory is marked to the OS as unused (reset or decommitted) reducing (real) memory pressure and fragmentation, especially in long running programs.
  • +
  • secure: mimalloc can be built in secure mode, adding guard pages, randomized allocation, encrypted free lists, etc. to protect against various heap vulnerabilities. The performance penalty is usually around 10% on average over our benchmarks.
  • first-class heaps: efficiently create and use multiple heaps to allocate across different regions. A heap can be destroyed at once instead of deallocating each object separately.
  • -
  • bounded: it does not suffer from blowup [1], has bounded worst-case allocation times (wcat), bounded space overhead (~0.2% meta-data, with at most 12.5% waste in allocation sizes), and has no internal points of contention using only atomic operations.
  • -
  • fast: In our benchmarks (see below), mimalloc outperforms all other leading allocators (jemalloc, tcmalloc, Hoard, etc), and usually uses less memory (up to 25% more in the worst case). A nice property is that it does consistently well over a wide range of benchmarks.
  • +
  • bounded: it does not suffer from blowup [1], has bounded worst-case allocation times (wcat) (upto OS primitives), bounded space overhead (~0.2% meta-data, with low internal fragmentation), and has no internal points of contention using only atomic operations.
  • +
  • fast: In our benchmarks (see below), mimalloc outperforms other leading allocators (jemalloc, tcmalloc, Hoard, etc), and often uses less memory. A nice property is that it does consistently well over a wide range of benchmarks. There is also good huge OS page support for larger server programs.

You can read more on the design of mimalloc in the technical report which also has detailed benchmark results.

Further information:

@@ -130,12 +138,13 @@
  • C++ wrappers
  • +
    diff --git a/depends/mimalloc/docs/jquery.js b/depends/mimalloc/docs/jquery.js index 103c32d79b74..875ada738f09 100644 --- a/depends/mimalloc/docs/jquery.js +++ b/depends/mimalloc/docs/jquery.js @@ -1,18 +1,143 @@ -/*! jQuery v3.4.1 | (c) JS Foundation and other contributors | jquery.org/license */ -!function(e,t){"use strict";"object"==typeof module&&"object"==typeof module.exports?module.exports=e.document?t(e,!0):function(e){if(!e.document)throw new Error("jQuery requires a window with a document");return t(e)}:t(e)}("undefined"!=typeof window?window:this,function(C,e){"use strict";var t=[],E=C.document,r=Object.getPrototypeOf,s=t.slice,g=t.concat,u=t.push,i=t.indexOf,n={},o=n.toString,v=n.hasOwnProperty,a=v.toString,l=a.call(Object),y={},m=function(e){return"function"==typeof e&&"number"!=typeof e.nodeType},x=function(e){return null!=e&&e===e.window},c={type:!0,src:!0,nonce:!0,noModule:!0};function b(e,t,n){var r,i,o=(n=n||E).createElement("script");if(o.text=e,t)for(r in c)(i=t[r]||t.getAttribute&&t.getAttribute(r))&&o.setAttribute(r,i);n.head.appendChild(o).parentNode.removeChild(o)}function w(e){return null==e?e+"":"object"==typeof e||"function"==typeof e?n[o.call(e)]||"object":typeof e}var f="3.4.1",k=function(e,t){return new k.fn.init(e,t)},p=/^[\s\uFEFF\xA0]+|[\s\uFEFF\xA0]+$/g;function d(e){var t=!!e&&"length"in e&&e.length,n=w(e);return!m(e)&&!x(e)&&("array"===n||0===t||"number"==typeof t&&0+~]|"+M+")"+M+"*"),U=new RegExp(M+"|>"),X=new RegExp($),V=new RegExp("^"+I+"$"),G={ID:new RegExp("^#("+I+")"),CLASS:new RegExp("^\\.("+I+")"),TAG:new RegExp("^("+I+"|[*])"),ATTR:new RegExp("^"+W),PSEUDO:new RegExp("^"+$),CHILD:new RegExp("^:(only|first|last|nth|nth-last)-(child|of-type)(?:\\("+M+"*(even|odd|(([+-]|)(\\d*)n|)"+M+"*(?:([+-]|)"+M+"*(\\d+)|))"+M+"*\\)|)","i"),bool:new RegExp("^(?:"+R+")$","i"),needsContext:new 
RegExp("^"+M+"*[>+~]|:(even|odd|eq|gt|lt|nth|first|last)(?:\\("+M+"*((?:-\\d)?\\d*)"+M+"*\\)|)(?=[^-]|$)","i")},Y=/HTML$/i,Q=/^(?:input|select|textarea|button)$/i,J=/^h\d$/i,K=/^[^{]+\{\s*\[native \w/,Z=/^(?:#([\w-]+)|(\w+)|\.([\w-]+))$/,ee=/[+~]/,te=new RegExp("\\\\([\\da-f]{1,6}"+M+"?|("+M+")|.)","ig"),ne=function(e,t,n){var r="0x"+t-65536;return r!=r||n?t:r<0?String.fromCharCode(r+65536):String.fromCharCode(r>>10|55296,1023&r|56320)},re=/([\0-\x1f\x7f]|^-?\d)|^-$|[^\0-\x1f\x7f-\uFFFF\w-]/g,ie=function(e,t){return t?"\0"===e?"\ufffd":e.slice(0,-1)+"\\"+e.charCodeAt(e.length-1).toString(16)+" ":"\\"+e},oe=function(){T()},ae=be(function(e){return!0===e.disabled&&"fieldset"===e.nodeName.toLowerCase()},{dir:"parentNode",next:"legend"});try{H.apply(t=O.call(m.childNodes),m.childNodes),t[m.childNodes.length].nodeType}catch(e){H={apply:t.length?function(e,t){L.apply(e,O.call(t))}:function(e,t){var n=e.length,r=0;while(e[n++]=t[r++]);e.length=n-1}}}function se(t,e,n,r){var i,o,a,s,u,l,c,f=e&&e.ownerDocument,p=e?e.nodeType:9;if(n=n||[],"string"!=typeof t||!t||1!==p&&9!==p&&11!==p)return n;if(!r&&((e?e.ownerDocument||e:m)!==C&&T(e),e=e||C,E)){if(11!==p&&(u=Z.exec(t)))if(i=u[1]){if(9===p){if(!(a=e.getElementById(i)))return n;if(a.id===i)return n.push(a),n}else if(f&&(a=f.getElementById(i))&&y(e,a)&&a.id===i)return n.push(a),n}else{if(u[2])return H.apply(n,e.getElementsByTagName(t)),n;if((i=u[3])&&d.getElementsByClassName&&e.getElementsByClassName)return H.apply(n,e.getElementsByClassName(i)),n}if(d.qsa&&!A[t+" "]&&(!v||!v.test(t))&&(1!==p||"object"!==e.nodeName.toLowerCase())){if(c=t,f=e,1===p&&U.test(t)){(s=e.getAttribute("id"))?s=s.replace(re,ie):e.setAttribute("id",s=k),o=(l=h(t)).length;while(o--)l[o]="#"+s+" "+xe(l[o]);c=l.join(","),f=ee.test(t)&&ye(e.parentNode)||e}try{return H.apply(n,f.querySelectorAll(c)),n}catch(e){A(t,!0)}finally{s===k&&e.removeAttribute("id")}}}return g(t.replace(B,"$1"),e,n,r)}function ue(){var r=[];return function e(t,n){return r.push(t+" 
")>b.cacheLength&&delete e[r.shift()],e[t+" "]=n}}function le(e){return e[k]=!0,e}function ce(e){var t=C.createElement("fieldset");try{return!!e(t)}catch(e){return!1}finally{t.parentNode&&t.parentNode.removeChild(t),t=null}}function fe(e,t){var n=e.split("|"),r=n.length;while(r--)b.attrHandle[n[r]]=t}function pe(e,t){var n=t&&e,r=n&&1===e.nodeType&&1===t.nodeType&&e.sourceIndex-t.sourceIndex;if(r)return r;if(n)while(n=n.nextSibling)if(n===t)return-1;return e?1:-1}function de(t){return function(e){return"input"===e.nodeName.toLowerCase()&&e.type===t}}function he(n){return function(e){var t=e.nodeName.toLowerCase();return("input"===t||"button"===t)&&e.type===n}}function ge(t){return function(e){return"form"in e?e.parentNode&&!1===e.disabled?"label"in e?"label"in e.parentNode?e.parentNode.disabled===t:e.disabled===t:e.isDisabled===t||e.isDisabled!==!t&&ae(e)===t:e.disabled===t:"label"in e&&e.disabled===t}}function ve(a){return le(function(o){return o=+o,le(function(e,t){var n,r=a([],e.length,o),i=r.length;while(i--)e[n=r[i]]&&(e[n]=!(t[n]=e[n]))})})}function ye(e){return e&&"undefined"!=typeof e.getElementsByTagName&&e}for(e in d=se.support={},i=se.isXML=function(e){var t=e.namespaceURI,n=(e.ownerDocument||e).documentElement;return!Y.test(t||n&&n.nodeName||"HTML")},T=se.setDocument=function(e){var t,n,r=e?e.ownerDocument||e:m;return r!==C&&9===r.nodeType&&r.documentElement&&(a=(C=r).documentElement,E=!i(C),m!==C&&(n=C.defaultView)&&n.top!==n&&(n.addEventListener?n.addEventListener("unload",oe,!1):n.attachEvent&&n.attachEvent("onunload",oe)),d.attributes=ce(function(e){return e.className="i",!e.getAttribute("className")}),d.getElementsByTagName=ce(function(e){return e.appendChild(C.createComment("")),!e.getElementsByTagName("*").length}),d.getElementsByClassName=K.test(C.getElementsByClassName),d.getById=ce(function(e){return a.appendChild(e).id=k,!C.getElementsByName||!C.getElementsByName(k).length}),d.getById?(b.filter.ID=function(e){var t=e.replace(te,ne);return 
function(e){return e.getAttribute("id")===t}},b.find.ID=function(e,t){if("undefined"!=typeof t.getElementById&&E){var n=t.getElementById(e);return n?[n]:[]}}):(b.filter.ID=function(e){var n=e.replace(te,ne);return function(e){var t="undefined"!=typeof e.getAttributeNode&&e.getAttributeNode("id");return t&&t.value===n}},b.find.ID=function(e,t){if("undefined"!=typeof t.getElementById&&E){var n,r,i,o=t.getElementById(e);if(o){if((n=o.getAttributeNode("id"))&&n.value===e)return[o];i=t.getElementsByName(e),r=0;while(o=i[r++])if((n=o.getAttributeNode("id"))&&n.value===e)return[o]}return[]}}),b.find.TAG=d.getElementsByTagName?function(e,t){return"undefined"!=typeof t.getElementsByTagName?t.getElementsByTagName(e):d.qsa?t.querySelectorAll(e):void 0}:function(e,t){var n,r=[],i=0,o=t.getElementsByTagName(e);if("*"===e){while(n=o[i++])1===n.nodeType&&r.push(n);return r}return o},b.find.CLASS=d.getElementsByClassName&&function(e,t){if("undefined"!=typeof t.getElementsByClassName&&E)return t.getElementsByClassName(e)},s=[],v=[],(d.qsa=K.test(C.querySelectorAll))&&(ce(function(e){a.appendChild(e).innerHTML="",e.querySelectorAll("[msallowcapture^='']").length&&v.push("[*^$]="+M+"*(?:''|\"\")"),e.querySelectorAll("[selected]").length||v.push("\\["+M+"*(?:value|"+R+")"),e.querySelectorAll("[id~="+k+"-]").length||v.push("~="),e.querySelectorAll(":checked").length||v.push(":checked"),e.querySelectorAll("a#"+k+"+*").length||v.push(".#.+[+~]")}),ce(function(e){e.innerHTML="";var 
t=C.createElement("input");t.setAttribute("type","hidden"),e.appendChild(t).setAttribute("name","D"),e.querySelectorAll("[name=d]").length&&v.push("name"+M+"*[*^$|!~]?="),2!==e.querySelectorAll(":enabled").length&&v.push(":enabled",":disabled"),a.appendChild(e).disabled=!0,2!==e.querySelectorAll(":disabled").length&&v.push(":enabled",":disabled"),e.querySelectorAll("*,:x"),v.push(",.*:")})),(d.matchesSelector=K.test(c=a.matches||a.webkitMatchesSelector||a.mozMatchesSelector||a.oMatchesSelector||a.msMatchesSelector))&&ce(function(e){d.disconnectedMatch=c.call(e,"*"),c.call(e,"[s!='']:x"),s.push("!=",$)}),v=v.length&&new RegExp(v.join("|")),s=s.length&&new RegExp(s.join("|")),t=K.test(a.compareDocumentPosition),y=t||K.test(a.contains)?function(e,t){var n=9===e.nodeType?e.documentElement:e,r=t&&t.parentNode;return e===r||!(!r||1!==r.nodeType||!(n.contains?n.contains(r):e.compareDocumentPosition&&16&e.compareDocumentPosition(r)))}:function(e,t){if(t)while(t=t.parentNode)if(t===e)return!0;return!1},D=t?function(e,t){if(e===t)return l=!0,0;var n=!e.compareDocumentPosition-!t.compareDocumentPosition;return n||(1&(n=(e.ownerDocument||e)===(t.ownerDocument||t)?e.compareDocumentPosition(t):1)||!d.sortDetached&&t.compareDocumentPosition(e)===n?e===C||e.ownerDocument===m&&y(m,e)?-1:t===C||t.ownerDocument===m&&y(m,t)?1:u?P(u,e)-P(u,t):0:4&n?-1:1)}:function(e,t){if(e===t)return l=!0,0;var n,r=0,i=e.parentNode,o=t.parentNode,a=[e],s=[t];if(!i||!o)return e===C?-1:t===C?1:i?-1:o?1:u?P(u,e)-P(u,t):0;if(i===o)return pe(e,t);n=e;while(n=n.parentNode)a.unshift(n);n=t;while(n=n.parentNode)s.unshift(n);while(a[r]===s[r])r++;return r?pe(a[r],s[r]):a[r]===m?-1:s[r]===m?1:0}),C},se.matches=function(e,t){return se(e,null,null,t)},se.matchesSelector=function(e,t){if((e.ownerDocument||e)!==C&&T(e),d.matchesSelector&&E&&!A[t+" "]&&(!s||!s.test(t))&&(!v||!v.test(t)))try{var n=c.call(e,t);if(n||d.disconnectedMatch||e.document&&11!==e.document.nodeType)return n}catch(e){A(t,!0)}return 
0":{dir:"parentNode",first:!0}," ":{dir:"parentNode"},"+":{dir:"previousSibling",first:!0},"~":{dir:"previousSibling"}},preFilter:{ATTR:function(e){return e[1]=e[1].replace(te,ne),e[3]=(e[3]||e[4]||e[5]||"").replace(te,ne),"~="===e[2]&&(e[3]=" "+e[3]+" "),e.slice(0,4)},CHILD:function(e){return e[1]=e[1].toLowerCase(),"nth"===e[1].slice(0,3)?(e[3]||se.error(e[0]),e[4]=+(e[4]?e[5]+(e[6]||1):2*("even"===e[3]||"odd"===e[3])),e[5]=+(e[7]+e[8]||"odd"===e[3])):e[3]&&se.error(e[0]),e},PSEUDO:function(e){var t,n=!e[6]&&e[2];return G.CHILD.test(e[0])?null:(e[3]?e[2]=e[4]||e[5]||"":n&&X.test(n)&&(t=h(n,!0))&&(t=n.indexOf(")",n.length-t)-n.length)&&(e[0]=e[0].slice(0,t),e[2]=n.slice(0,t)),e.slice(0,3))}},filter:{TAG:function(e){var t=e.replace(te,ne).toLowerCase();return"*"===e?function(){return!0}:function(e){return e.nodeName&&e.nodeName.toLowerCase()===t}},CLASS:function(e){var t=p[e+" "];return t||(t=new RegExp("(^|"+M+")"+e+"("+M+"|$)"))&&p(e,function(e){return t.test("string"==typeof e.className&&e.className||"undefined"!=typeof e.getAttribute&&e.getAttribute("class")||"")})},ATTR:function(n,r,i){return function(e){var t=se.attr(e,n);return null==t?"!="===r:!r||(t+="","="===r?t===i:"!="===r?t!==i:"^="===r?i&&0===t.indexOf(i):"*="===r?i&&-1:\x20\t\r\n\f]*)[\x20\t\r\n\f]*\/?>(?:<\/\1>|)$/i;function j(e,n,r){return m(n)?k.grep(e,function(e,t){return!!n.call(e,t,e)!==r}):n.nodeType?k.grep(e,function(e){return e===n!==r}):"string"!=typeof n?k.grep(e,function(e){return-1)[^>]*|#([\w-]+))$/;(k.fn.init=function(e,t,n){var r,i;if(!e)return this;if(n=n||q,"string"==typeof e){if(!(r="<"===e[0]&&">"===e[e.length-1]&&3<=e.length?[null,e,null]:L.exec(e))||!r[1]&&t)return!t||t.jquery?(t||n).find(e):this.constructor(t).find(e);if(r[1]){if(t=t instanceof k?t[0]:t,k.merge(this,k.parseHTML(r[1],t&&t.nodeType?t.ownerDocument||t:E,!0)),D.test(r[1])&&k.isPlainObject(t))for(r in t)m(this[r])?this[r](t[r]):this.attr(r,t[r]);return 
this}return(i=E.getElementById(r[2]))&&(this[0]=i,this.length=1),this}return e.nodeType?(this[0]=e,this.length=1,this):m(e)?void 0!==n.ready?n.ready(e):e(k):k.makeArray(e,this)}).prototype=k.fn,q=k(E);var H=/^(?:parents|prev(?:Until|All))/,O={children:!0,contents:!0,next:!0,prev:!0};function P(e,t){while((e=e[t])&&1!==e.nodeType);return e}k.fn.extend({has:function(e){var t=k(e,this),n=t.length;return this.filter(function(){for(var e=0;e\x20\t\r\n\f]*)/i,he=/^$|^module$|\/(?:java|ecma)script/i,ge={option:[1,""],thead:[1,"","
    "],col:[2,"","
    "],tr:[2,"","
    "],td:[3,"","
    "],_default:[0,"",""]};function ve(e,t){var n;return n="undefined"!=typeof e.getElementsByTagName?e.getElementsByTagName(t||"*"):"undefined"!=typeof e.querySelectorAll?e.querySelectorAll(t||"*"):[],void 0===t||t&&A(e,t)?k.merge([e],n):n}function ye(e,t){for(var n=0,r=e.length;nx",y.noCloneChecked=!!me.cloneNode(!0).lastChild.defaultValue;var Te=/^key/,Ce=/^(?:mouse|pointer|contextmenu|drag|drop)|click/,Ee=/^([^.]*)(?:\.(.+)|)/;function ke(){return!0}function Se(){return!1}function Ne(e,t){return e===function(){try{return E.activeElement}catch(e){}}()==("focus"===t)}function Ae(e,t,n,r,i,o){var a,s;if("object"==typeof t){for(s in"string"!=typeof n&&(r=r||n,n=void 0),t)Ae(e,s,n,r,t[s],o);return e}if(null==r&&null==i?(i=n,r=n=void 0):null==i&&("string"==typeof n?(i=r,r=void 0):(i=r,r=n,n=void 0)),!1===i)i=Se;else if(!i)return e;return 1===o&&(a=i,(i=function(e){return k().off(e),a.apply(this,arguments)}).guid=a.guid||(a.guid=k.guid++)),e.each(function(){k.event.add(this,t,i,r,n)})}function De(e,i,o){o?(Q.set(e,i,!1),k.event.add(e,i,{namespace:!1,handler:function(e){var t,n,r=Q.get(this,i);if(1&e.isTrigger&&this[i]){if(r.length)(k.event.special[i]||{}).delegateType&&e.stopPropagation();else if(r=s.call(arguments),Q.set(this,i,r),t=o(this,i),this[i](),r!==(n=Q.get(this,i))||t?Q.set(this,i,!1):n={},r!==n)return e.stopImmediatePropagation(),e.preventDefault(),n.value}else r.length&&(Q.set(this,i,{value:k.event.trigger(k.extend(r[0],k.Event.prototype),r.slice(1),this)}),e.stopImmediatePropagation())}})):void 0===Q.get(e,i)&&k.event.add(e,i,ke)}k.event={global:{},add:function(t,e,n,r,i){var o,a,s,u,l,c,f,p,d,h,g,v=Q.get(t);if(v){n.handler&&(n=(o=n).handler,i=o.selector),i&&k.find.matchesSelector(ie,i),n.guid||(n.guid=k.guid++),(u=v.events)||(u=v.events={}),(a=v.handle)||(a=v.handle=function(e){return"undefined"!=typeof k&&k.event.triggered!==e.type?k.event.dispatch.apply(t,arguments):void 
0}),l=(e=(e||"").match(R)||[""]).length;while(l--)d=g=(s=Ee.exec(e[l])||[])[1],h=(s[2]||"").split(".").sort(),d&&(f=k.event.special[d]||{},d=(i?f.delegateType:f.bindType)||d,f=k.event.special[d]||{},c=k.extend({type:d,origType:g,data:r,handler:n,guid:n.guid,selector:i,needsContext:i&&k.expr.match.needsContext.test(i),namespace:h.join(".")},o),(p=u[d])||((p=u[d]=[]).delegateCount=0,f.setup&&!1!==f.setup.call(t,r,h,a)||t.addEventListener&&t.addEventListener(d,a)),f.add&&(f.add.call(t,c),c.handler.guid||(c.handler.guid=n.guid)),i?p.splice(p.delegateCount++,0,c):p.push(c),k.event.global[d]=!0)}},remove:function(e,t,n,r,i){var o,a,s,u,l,c,f,p,d,h,g,v=Q.hasData(e)&&Q.get(e);if(v&&(u=v.events)){l=(t=(t||"").match(R)||[""]).length;while(l--)if(d=g=(s=Ee.exec(t[l])||[])[1],h=(s[2]||"").split(".").sort(),d){f=k.event.special[d]||{},p=u[d=(r?f.delegateType:f.bindType)||d]||[],s=s[2]&&new RegExp("(^|\\.)"+h.join("\\.(?:.*\\.|)")+"(\\.|$)"),a=o=p.length;while(o--)c=p[o],!i&&g!==c.origType||n&&n.guid!==c.guid||s&&!s.test(c.namespace)||r&&r!==c.selector&&("**"!==r||!c.selector)||(p.splice(o,1),c.selector&&p.delegateCount--,f.remove&&f.remove.call(e,c));a&&!p.length&&(f.teardown&&!1!==f.teardown.call(e,h,v.handle)||k.removeEvent(e,d,v.handle),delete u[d])}else for(d in u)k.event.remove(e,d+t[l],n,r,!0);k.isEmptyObject(u)&&Q.remove(e,"handle events")}},dispatch:function(e){var t,n,r,i,o,a,s=k.event.fix(e),u=new Array(arguments.length),l=(Q.get(this,"events")||{})[s.type]||[],c=k.event.special[s.type]||{};for(u[0]=s,t=1;t\x20\t\r\n\f]*)[^>]*)\/>/gi,qe=/\s*$/g;function Oe(e,t){return A(e,"table")&&A(11!==t.nodeType?t:t.firstChild,"tr")&&k(e).children("tbody")[0]||e}function Pe(e){return e.type=(null!==e.getAttribute("type"))+"/"+e.type,e}function Re(e){return"true/"===(e.type||"").slice(0,5)?e.type=e.type.slice(5):e.removeAttribute("type"),e}function Me(e,t){var n,r,i,o,a,s,u,l;if(1===t.nodeType){if(Q.hasData(e)&&(o=Q.access(e),a=Q.set(t,o),l=o.events))for(i in delete 
a.handle,a.events={},l)for(n=0,r=l[i].length;n")},clone:function(e,t,n){var r,i,o,a,s,u,l,c=e.cloneNode(!0),f=oe(e);if(!(y.noCloneChecked||1!==e.nodeType&&11!==e.nodeType||k.isXMLDoc(e)))for(a=ve(c),r=0,i=(o=ve(e)).length;r").attr(n.scriptAttrs||{}).prop({charset:n.scriptCharset,src:n.url}).on("load error",i=function(e){r.remove(),i=null,e&&t("error"===e.type?404:200,e.type)}),E.head.appendChild(r[0])},abort:function(){i&&i()}}});var Vt,Gt=[],Yt=/(=)\?(?=&|$)|\?\?/;k.ajaxSetup({jsonp:"callback",jsonpCallback:function(){var e=Gt.pop()||k.expando+"_"+kt++;return this[e]=!0,e}}),k.ajaxPrefilter("json jsonp",function(e,t,n){var r,i,o,a=!1!==e.jsonp&&(Yt.test(e.url)?"url":"string"==typeof e.data&&0===(e.contentType||"").indexOf("application/x-www-form-urlencoded")&&Yt.test(e.data)&&"data");if(a||"jsonp"===e.dataTypes[0])return r=e.jsonpCallback=m(e.jsonpCallback)?e.jsonpCallback():e.jsonpCallback,a?e[a]=e[a].replace(Yt,"$1"+r):!1!==e.jsonp&&(e.url+=(St.test(e.url)?"&":"?")+e.jsonp+"="+r),e.converters["script json"]=function(){return o||k.error(r+" was not called"),o[0]},e.dataTypes[0]="json",i=C[r],C[r]=function(){o=arguments},n.always(function(){void 0===i?k(C).removeProp(r):C[r]=i,e[r]&&(e.jsonpCallback=t.jsonpCallback,Gt.push(r)),o&&m(i)&&i(o[0]),o=i=void 0}),"script"}),y.createHTMLDocument=((Vt=E.implementation.createHTMLDocument("").body).innerHTML="
    ",2===Vt.childNodes.length),k.parseHTML=function(e,t,n){return"string"!=typeof e?[]:("boolean"==typeof t&&(n=t,t=!1),t||(y.createHTMLDocument?((r=(t=E.implementation.createHTMLDocument("")).createElement("base")).href=E.location.href,t.head.appendChild(r)):t=E),o=!n&&[],(i=D.exec(e))?[t.createElement(i[1])]:(i=we([e],t,o),o&&o.length&&k(o).remove(),k.merge([],i.childNodes)));var r,i,o},k.fn.load=function(e,t,n){var r,i,o,a=this,s=e.indexOf(" ");return-1").append(k.parseHTML(e)).find(r):e)}).always(n&&function(e,t){a.each(function(){n.apply(this,o||[e.responseText,t,e])})}),this},k.each(["ajaxStart","ajaxStop","ajaxComplete","ajaxError","ajaxSuccess","ajaxSend"],function(e,t){k.fn[t]=function(e){return this.on(t,e)}}),k.expr.pseudos.animated=function(t){return k.grep(k.timers,function(e){return t===e.elem}).length},k.offset={setOffset:function(e,t,n){var r,i,o,a,s,u,l=k.css(e,"position"),c=k(e),f={};"static"===l&&(e.style.position="relative"),s=c.offset(),o=k.css(e,"top"),u=k.css(e,"left"),("absolute"===l||"fixed"===l)&&-1<(o+u).indexOf("auto")?(a=(r=c.position()).top,i=r.left):(a=parseFloat(o)||0,i=parseFloat(u)||0),m(t)&&(t=t.call(e,n,k.extend({},s))),null!=t.top&&(f.top=t.top-s.top+a),null!=t.left&&(f.left=t.left-s.left+i),"using"in t?t.using.call(e,f):c.css(f)}},k.fn.extend({offset:function(t){if(arguments.length)return void 0===t?this:this.each(function(e){k.offset.setOffset(this,t,e)});var e,n,r=this[0];return r?r.getClientRects().length?(e=r.getBoundingClientRect(),n=r.ownerDocument.defaultView,{top:e.top+n.pageYOffset,left:e.left+n.pageXOffset}):{top:0,left:0}:void 0},position:function(){if(this[0]){var 
e,t,n,r=this[0],i={top:0,left:0};if("fixed"===k.css(r,"position"))t=r.getBoundingClientRect();else{t=this.offset(),n=r.ownerDocument,e=r.offsetParent||n.documentElement;while(e&&(e===n.body||e===n.documentElement)&&"static"===k.css(e,"position"))e=e.parentNode;e&&e!==r&&1===e.nodeType&&((i=k(e).offset()).top+=k.css(e,"borderTopWidth",!0),i.left+=k.css(e,"borderLeftWidth",!0))}return{top:t.top-i.top-k.css(r,"marginTop",!0),left:t.left-i.left-k.css(r,"marginLeft",!0)}}},offsetParent:function(){return this.map(function(){var e=this.offsetParent;while(e&&"static"===k.css(e,"position"))e=e.offsetParent;return e||ie})}}),k.each({scrollLeft:"pageXOffset",scrollTop:"pageYOffset"},function(t,i){var o="pageYOffset"===i;k.fn[t]=function(e){return _(this,function(e,t,n){var r;if(x(e)?r=e:9===e.nodeType&&(r=e.defaultView),void 0===n)return r?r[i]:e[t];r?r.scrollTo(o?r.pageXOffset:n,o?n:r.pageYOffset):e[t]=n},t,e,arguments.length)}}),k.each(["top","left"],function(e,n){k.cssHooks[n]=ze(y.pixelPosition,function(e,t){if(t)return t=_e(e,n),$e.test(t)?k(e).position()[n]+"px":t})}),k.each({Height:"height",Width:"width"},function(a,s){k.each({padding:"inner"+a,content:s,"":"outer"+a},function(r,o){k.fn[o]=function(e,t){var n=arguments.length&&(r||"boolean"!=typeof e),i=r||(!0===e||!0===t?"margin":"border");return _(this,function(e,t,n){var r;return x(e)?0===o.indexOf("outer")?e["inner"+a]:e.document.documentElement["client"+a]:9===e.nodeType?(r=e.documentElement,Math.max(e.body["scroll"+a],r["scroll"+a],e.body["offset"+a],r["offset"+a],r["client"+a])):void 0===n?k.css(e,t,i):k.style(e,t,n,i)},s,n?e:void 0,n)}})}),k.each("blur focus focusin focusout resize scroll click dblclick mousedown mouseup mousemove mouseover mouseout mouseenter mouseleave change select submit keydown keypress keyup contextmenu".split(" "),function(e,n){k.fn[n]=function(e,t){return 0+~]|"+M+")"+M+"*"),U=new RegExp(M+"|>"),X=new RegExp(F),V=new RegExp("^"+I+"$"),G={ID:new RegExp("^#("+I+")"),CLASS:new 
RegExp("^\\.("+I+")"),TAG:new RegExp("^("+I+"|[*])"),ATTR:new RegExp("^"+W),PSEUDO:new RegExp("^"+F),CHILD:new RegExp( +"^:(only|first|last|nth|nth-last)-(child|of-type)(?:\\("+M+"*(even|odd|(([+-]|)(\\d*)n|)"+M+"*(?:([+-]|)"+M+"*(\\d+)|))"+M+"*\\)|)","i"),bool:new RegExp("^(?:"+R+")$","i"),needsContext:new RegExp("^"+M+"*[>+~]|:(even|odd|eq|gt|lt|nth|first|last)(?:\\("+M+"*((?:-\\d)?\\d*)"+M+"*\\)|)(?=[^-]|$)","i")},Y=/HTML$/i,Q=/^(?:input|select|textarea|button)$/i,J=/^h\d$/i,K=/^[^{]+\{\s*\[native \w/,Z=/^(?:#([\w-]+)|(\w+)|\.([\w-]+))$/,ee=/[+~]/,te=new RegExp("\\\\[\\da-fA-F]{1,6}"+M+"?|\\\\([^\\r\\n\\f])","g"),ne=function(e,t){var n="0x"+e.slice(1)-65536;return t||(n<0?String.fromCharCode(n+65536):String.fromCharCode(n>>10|55296,1023&n|56320))},re=/([\0-\x1f\x7f]|^-?\d)|^-$|[^\0-\x1f\x7f-\uFFFF\w-]/g,ie=function(e,t){return t?"\0"===e?"�":e.slice(0,-1)+"\\"+e.charCodeAt(e.length-1).toString(16)+" ":"\\"+e},oe=function(){T()},ae=be(function(e){return!0===e.disabled&&"fieldset"===e.nodeName.toLowerCase()},{dir:"parentNode",next:"legend"});try{H.apply(t=O.call(p.childNodes),p.childNodes),t[p.childNodes.length].nodeType +}catch(e){H={apply:t.length?function(e,t){L.apply(e,O.call(t))}:function(e,t){var n=e.length,r=0;while(e[n++]=t[r++]);e.length=n-1}}}function se(t,e,n,r){var i,o,a,s,u,l,c,f=e&&e.ownerDocument,p=e?e.nodeType:9;if(n=n||[],"string"!=typeof t||!t||1!==p&&9!==p&&11!==p)return n;if(!r&&(T(e),e=e||C,E)){if(11!==p&&(u=Z.exec(t)))if(i=u[1]){if(9===p){if(!(a=e.getElementById(i)))return n;if(a.id===i)return n.push(a),n}else if(f&&(a=f.getElementById(i))&&y(e,a)&&a.id===i)return n.push(a),n}else{if(u[2])return H.apply(n,e.getElementsByTagName(t)),n;if((i=u[3])&&d.getElementsByClassName&&e.getElementsByClassName)return H.apply(n,e.getElementsByClassName(i)),n}if(d.qsa&&!N[t+" 
"]&&(!v||!v.test(t))&&(1!==p||"object"!==e.nodeName.toLowerCase())){if(c=t,f=e,1===p&&(U.test(t)||z.test(t))){(f=ee.test(t)&&ye(e.parentNode)||e)===e&&d.scope||((s=e.getAttribute("id"))?s=s.replace(re,ie):e.setAttribute("id",s=S)),o=(l=h(t)).length;while(o--)l[o]=(s?"#"+s:":scope")+" "+xe(l[o]);c=l.join(",")}try{return H.apply(n,f.querySelectorAll(c +)),n}catch(e){N(t,!0)}finally{s===S&&e.removeAttribute("id")}}}return g(t.replace($,"$1"),e,n,r)}function ue(){var r=[];return function e(t,n){return r.push(t+" ")>b.cacheLength&&delete e[r.shift()],e[t+" "]=n}}function le(e){return e[S]=!0,e}function ce(e){var t=C.createElement("fieldset");try{return!!e(t)}catch(e){return!1}finally{t.parentNode&&t.parentNode.removeChild(t),t=null}}function fe(e,t){var n=e.split("|"),r=n.length;while(r--)b.attrHandle[n[r]]=t}function pe(e,t){var n=t&&e,r=n&&1===e.nodeType&&1===t.nodeType&&e.sourceIndex-t.sourceIndex;if(r)return r;if(n)while(n=n.nextSibling)if(n===t)return-1;return e?1:-1}function de(t){return function(e){return"input"===e.nodeName.toLowerCase()&&e.type===t}}function he(n){return function(e){var t=e.nodeName.toLowerCase();return("input"===t||"button"===t)&&e.type===n}}function ge(t){return function(e){ +return"form"in e?e.parentNode&&!1===e.disabled?"label"in e?"label"in e.parentNode?e.parentNode.disabled===t:e.disabled===t:e.isDisabled===t||e.isDisabled!==!t&&ae(e)===t:e.disabled===t:"label"in e&&e.disabled===t}}function ve(a){return le(function(o){return o=+o,le(function(e,t){var n,r=a([],e.length,o),i=r.length;while(i--)e[n=r[i]]&&(e[n]=!(t[n]=e[n]))})})}function ye(e){return e&&"undefined"!=typeof e.getElementsByTagName&&e}for(e in d=se.support={},i=se.isXML=function(e){var t=e&&e.namespaceURI,n=e&&(e.ownerDocument||e).documentElement;return!Y.test(t||n&&n.nodeName||"HTML")},T=se.setDocument=function(e){var t,n,r=e?e.ownerDocument||e:p;return 
r!=C&&9===r.nodeType&&r.documentElement&&(a=(C=r).documentElement,E=!i(C),p!=C&&(n=C.defaultView)&&n.top!==n&&(n.addEventListener?n.addEventListener("unload",oe,!1):n.attachEvent&&n.attachEvent("onunload",oe)),d.scope=ce(function(e){return a.appendChild(e).appendChild(C.createElement("div")),"undefined"!=typeof e.querySelectorAll&&!e.querySelectorAll( +":scope fieldset div").length}),d.attributes=ce(function(e){return e.className="i",!e.getAttribute("className")}),d.getElementsByTagName=ce(function(e){return e.appendChild(C.createComment("")),!e.getElementsByTagName("*").length}),d.getElementsByClassName=K.test(C.getElementsByClassName),d.getById=ce(function(e){return a.appendChild(e).id=S,!C.getElementsByName||!C.getElementsByName(S).length}),d.getById?(b.filter.ID=function(e){var t=e.replace(te,ne);return function(e){return e.getAttribute("id")===t}},b.find.ID=function(e,t){if("undefined"!=typeof t.getElementById&&E){var n=t.getElementById(e);return n?[n]:[]}}):(b.filter.ID=function(e){var n=e.replace(te,ne);return function(e){var t="undefined"!=typeof e.getAttributeNode&&e.getAttributeNode("id");return t&&t.value===n}},b.find.ID=function(e,t){if("undefined"!=typeof t.getElementById&&E){var n,r,i,o=t.getElementById(e);if(o){if((n=o.getAttributeNode("id"))&&n.value===e)return[o];i=t.getElementsByName(e),r=0;while(o=i[r++])if((n=o.getAttributeNode("id") +)&&n.value===e)return[o]}return[]}}),b.find.TAG=d.getElementsByTagName?function(e,t){return"undefined"!=typeof t.getElementsByTagName?t.getElementsByTagName(e):d.qsa?t.querySelectorAll(e):void 0}:function(e,t){var n,r=[],i=0,o=t.getElementsByTagName(e);if("*"===e){while(n=o[i++])1===n.nodeType&&r.push(n);return r}return o},b.find.CLASS=d.getElementsByClassName&&function(e,t){if("undefined"!=typeof t.getElementsByClassName&&E)return t.getElementsByClassName(e)},s=[],v=[],(d.qsa=K.test(C.querySelectorAll))&&(ce(function(e){var 
t;a.appendChild(e).innerHTML="",e.querySelectorAll("[msallowcapture^='']").length&&v.push("[*^$]="+M+"*(?:''|\"\")"),e.querySelectorAll("[selected]").length||v.push("\\["+M+"*(?:value|"+R+")"),e.querySelectorAll("[id~="+S+"-]").length||v.push("~="),(t=C.createElement("input")).setAttribute("name",""),e.appendChild(t),e.querySelectorAll("[name='']").length||v.push( +"\\["+M+"*name"+M+"*="+M+"*(?:''|\"\")"),e.querySelectorAll(":checked").length||v.push(":checked"),e.querySelectorAll("a#"+S+"+*").length||v.push(".#.+[+~]"),e.querySelectorAll("\\\f"),v.push("[\\r\\n\\f]")}),ce(function(e){e.innerHTML="";var t=C.createElement("input");t.setAttribute("type","hidden"),e.appendChild(t).setAttribute("name","D"),e.querySelectorAll("[name=d]").length&&v.push("name"+M+"*[*^$|!~]?="),2!==e.querySelectorAll(":enabled").length&&v.push(":enabled",":disabled"),a.appendChild(e).disabled=!0,2!==e.querySelectorAll(":disabled").length&&v.push(":enabled",":disabled"),e.querySelectorAll("*,:x"),v.push(",.*:")})),(d.matchesSelector=K.test(c=a.matches||a.webkitMatchesSelector||a.mozMatchesSelector||a.oMatchesSelector||a.msMatchesSelector))&&ce(function(e){d.disconnectedMatch=c.call(e,"*"),c.call(e,"[s!='']:x"),s.push("!=",F)}),v=v.length&&new RegExp(v.join("|")),s=s.length&&new RegExp(s.join("|")),t=K.test( +a.compareDocumentPosition),y=t||K.test(a.contains)?function(e,t){var n=9===e.nodeType?e.documentElement:e,r=t&&t.parentNode;return e===r||!(!r||1!==r.nodeType||!(n.contains?n.contains(r):e.compareDocumentPosition&&16&e.compareDocumentPosition(r)))}:function(e,t){if(t)while(t=t.parentNode)if(t===e)return!0;return!1},j=t?function(e,t){if(e===t)return l=!0,0;var n=!e.compareDocumentPosition-!t.compareDocumentPosition;return n||(1&(n=(e.ownerDocument||e)==(t.ownerDocument||t)?e.compareDocumentPosition(t):1)||!d.sortDetached&&t.compareDocumentPosition(e)===n?e==C||e.ownerDocument==p&&y(p,e)?-1:t==C||t.ownerDocument==p&&y(p,t)?1:u?P(u,e)-P(u,t):0:4&n?-1:1)}:function(e,t){if(e===t)return 
l=!0,0;var n,r=0,i=e.parentNode,o=t.parentNode,a=[e],s=[t];if(!i||!o)return e==C?-1:t==C?1:i?-1:o?1:u?P(u,e)-P(u,t):0;if(i===o)return pe(e,t);n=e;while(n=n.parentNode)a.unshift(n);n=t;while(n=n.parentNode)s.unshift(n);while(a[r]===s[r])r++;return r?pe(a[r],s[r]):a[r]==p?-1:s[r]==p?1:0}),C},se.matches=function(e,t){return se(e,null, +null,t)},se.matchesSelector=function(e,t){if(T(e),d.matchesSelector&&E&&!N[t+" "]&&(!s||!s.test(t))&&(!v||!v.test(t)))try{var n=c.call(e,t);if(n||d.disconnectedMatch||e.document&&11!==e.document.nodeType)return n}catch(e){N(t,!0)}return 0":{dir:"parentNode",first:!0}," ":{dir:"parentNode"},"+":{dir:"previousSibling",first:!0},"~":{dir:"previousSibling"}},preFilter:{ATTR:function(e){return e[1]=e[1].replace(te,ne),e[3]=(e[3]||e[4]||e[5]||"").replace(te,ne),"~="===e[2]&&(e[3]=" "+e[3]+" "),e.slice(0,4)},CHILD:function(e){return e[1]=e[1].toLowerCase(),"nth"===e[1].slice(0,3)?(e[3]||se.error(e[0]),e[4]=+(e[4]?e[5]+(e[6]||1):2*("even"===e[3]||"odd"===e[3])),e[5]=+(e[7]+e[8]||"odd"===e[3])):e[3]&&se.error(e[0]),e},PSEUDO:function(e){var t,n=!e[6]&&e[2];return G.CHILD.test(e[0])?null:(e[3]?e[2]=e[4]||e[5]||"":n&&X.test(n)&&(t=h(n,!0))&&(t=n.indexOf(")",n.length-t)-n.length)&&(e[0]=e[0].slice(0,t),e[2]=n.slice(0,t)),e.slice(0,3))}},filter:{TAG:function(e){var t=e.replace(te,ne +).toLowerCase();return"*"===e?function(){return!0}:function(e){return e.nodeName&&e.nodeName.toLowerCase()===t}},CLASS:function(e){var t=m[e+" "];return t||(t=new RegExp("(^|"+M+")"+e+"("+M+"|$)"))&&m(e,function(e){return t.test("string"==typeof e.className&&e.className||"undefined"!=typeof e.getAttribute&&e.getAttribute("class")||"")})},ATTR:function(n,r,i){return function(e){var t=se.attr(e,n);return null==t?"!="===r:!r||(t+="","="===r?t===i:"!="===r?t!==i:"^="===r?i&&0===t.indexOf(i):"*="===r?i&&-1:\x20\t\r\n\f]*)[\x20\t\r\n\f]*\/?>(?:<\/\1>|)$/i;function j(e,n,r){return m(n)?S.grep(e,function(e,t){return!!n.call(e,t,e)!==r}):n.nodeType?S.grep(e,function(e){return 
e===n!==r}):"string"!=typeof n?S.grep(e,function(e){return-1)[^>]*|#([\w-]+))$/;(S.fn.init=function(e,t,n){var r,i;if(!e)return this;if(n=n||D,"string"==typeof e){if(!(r="<"===e[0]&&">"===e[e.length-1]&&3<=e.length?[null,e,null]:q.exec(e))||!r[1]&&t)return!t||t.jquery?(t||n).find(e):this.constructor(t).find(e);if(r[1]){if(t=t instanceof S?t[0]:t,S.merge(this,S.parseHTML(r[1],t&&t.nodeType?t.ownerDocument||t:E,!0)),N.test(r[1])&&S.isPlainObject(t))for(r in t)m(this[r])?this[r](t[r]):this.attr(r,t[r]);return this}return(i=E.getElementById(r[2]))&&(this[0]=i,this.length=1),this}return e.nodeType?(this[0]=e,this.length=1,this):m(e)?void 0!==n.ready?n.ready(e):e(S):S.makeArray(e,this)}).prototype=S.fn,D=S(E);var L=/^(?:parents|prev(?:Until|All))/,H={children:!0,contents:!0,next:!0,prev:!0};function O(e,t){while((e=e[t])&&1!==e.nodeType);return e}S.fn.extend({has:function(e){var t=S(e,this),n=t.length;return this.filter(function(){for( +var e=0;e\x20\t\r\n\f]*)/i,he=/^$|^module$|\/(?:java|ecma)script/i;ce=E.createDocumentFragment().appendChild(E.createElement("div")),(fe=E.createElement("input")).setAttribute("type","radio"),fe.setAttribute("checked","checked"),fe.setAttribute("name","t"),ce.appendChild(fe),y.checkClone=ce.cloneNode(!0).cloneNode(!0).lastChild.checked,ce.innerHTML="",y.noCloneChecked=!!ce.cloneNode(!0).lastChild.defaultValue,ce.innerHTML="",y.option=!!ce.lastChild;var ge={thead:[1,"","
    "],col:[2,"","
    "],tr:[2,"","
    "],td:[3,"","
    "],_default:[0,"",""]};function ve(e,t){var n; +return n="undefined"!=typeof e.getElementsByTagName?e.getElementsByTagName(t||"*"):"undefined"!=typeof e.querySelectorAll?e.querySelectorAll(t||"*"):[],void 0===t||t&&A(e,t)?S.merge([e],n):n}function ye(e,t){for(var n=0,r=e.length;n",""]);var me=/<|&#?\w+;/;function xe(e,t,n,r,i){for(var o,a,s,u,l,c,f=t.createDocumentFragment(),p=[],d=0,h=e.length;d\s*$/g;function je(e,t){return A(e,"table")&&A(11!==t.nodeType?t:t.firstChild,"tr")&&S(e).children("tbody")[0]||e}function De(e){return e.type=(null!==e.getAttribute("type"))+"/"+e.type,e}function qe(e){return"true/"===(e.type||"").slice(0,5)?e.type=e.type.slice(5):e.removeAttribute("type"),e}function Le(e,t){var n,r,i,o,a,s;if(1===t.nodeType){if(Y.hasData(e)&&(s=Y.get(e).events))for(i in Y.remove(t,"handle events"),s)for(n=0, +r=s[i].length;n").attr(n.scriptAttrs||{}).prop({charset:n.scriptCharset,src:n.url}).on("load error",i=function(e){r.remove(),i=null,e&&t("error"===e.type?404:200,e.type)}),E.head.appendChild(r[0])},abort:function(){i&&i()}}});var _t,zt=[],Ut=/(=)\?(?=&|$)|\?\?/;S.ajaxSetup({jsonp:"callback",jsonpCallback:function(){var e=zt.pop()||S.expando+"_"+wt.guid++;return this[e]=!0,e}}),S.ajaxPrefilter("json jsonp",function(e,t,n){var r,i,o,a=!1!==e.jsonp&&(Ut.test(e.url)?"url":"string"==typeof e.data&&0===(e.contentType||"").indexOf("application/x-www-form-urlencoded")&&Ut.test(e.data)&&"data");if(a||"jsonp"===e.dataTypes[0])return r=e.jsonpCallback=m(e.jsonpCallback)?e.jsonpCallback():e.jsonpCallback,a?e[a]=e[a].replace(Ut,"$1"+r):!1!==e.jsonp&&(e.url+=(Tt.test(e.url)?"&":"?")+e.jsonp+"="+r),e.converters["script json"]=function(){return o||S.error(r+" was not called"),o[0]},e.dataTypes[0]="json",i=C[r] +,C[r]=function(){o=arguments},n.always(function(){void 0===i?S(C).removeProp(r):C[r]=i,e[r]&&(e.jsonpCallback=t.jsonpCallback,zt.push(r)),o&&m(i)&&i(o[0]),o=i=void 
0}),"script"}),y.createHTMLDocument=((_t=E.implementation.createHTMLDocument("").body).innerHTML="
    ",2===_t.childNodes.length),S.parseHTML=function(e,t,n){return"string"!=typeof e?[]:("boolean"==typeof t&&(n=t,t=!1),t||(y.createHTMLDocument?((r=(t=E.implementation.createHTMLDocument("")).createElement("base")).href=E.location.href,t.head.appendChild(r)):t=E),o=!n&&[],(i=N.exec(e))?[t.createElement(i[1])]:(i=xe([e],t,o),o&&o.length&&S(o).remove(),S.merge([],i.childNodes)));var r,i,o},S.fn.load=function(e,t,n){var r,i,o,a=this,s=e.indexOf(" ");return-1").append(S.parseHTML(e)).find(r):e)}).always(n&&function(e,t){a.each( +function(){n.apply(this,o||[e.responseText,t,e])})}),this},S.expr.pseudos.animated=function(t){return S.grep(S.timers,function(e){return t===e.elem}).length},S.offset={setOffset:function(e,t,n){var r,i,o,a,s,u,l=S.css(e,"position"),c=S(e),f={};"static"===l&&(e.style.position="relative"),s=c.offset(),o=S.css(e,"top"),u=S.css(e,"left"),("absolute"===l||"fixed"===l)&&-1<(o+u).indexOf("auto")?(a=(r=c.position()).top,i=r.left):(a=parseFloat(o)||0,i=parseFloat(u)||0),m(t)&&(t=t.call(e,n,S.extend({},s))),null!=t.top&&(f.top=t.top-s.top+a),null!=t.left&&(f.left=t.left-s.left+i),"using"in t?t.using.call(e,f):c.css(f)}},S.fn.extend({offset:function(t){if(arguments.length)return void 0===t?this:this.each(function(e){S.offset.setOffset(this,t,e)});var e,n,r=this[0];return r?r.getClientRects().length?(e=r.getBoundingClientRect(),n=r.ownerDocument.defaultView,{top:e.top+n.pageYOffset,left:e.left+n.pageXOffset}):{top:0,left:0}:void 0},position:function(){if(this[0]){var e,t,n,r=this[0],i={top:0,left:0};if("fixed"===S.css(r, 
+"position"))t=r.getBoundingClientRect();else{t=this.offset(),n=r.ownerDocument,e=r.offsetParent||n.documentElement;while(e&&(e===n.body||e===n.documentElement)&&"static"===S.css(e,"position"))e=e.parentNode;e&&e!==r&&1===e.nodeType&&((i=S(e).offset()).top+=S.css(e,"borderTopWidth",!0),i.left+=S.css(e,"borderLeftWidth",!0))}return{top:t.top-i.top-S.css(r,"marginTop",!0),left:t.left-i.left-S.css(r,"marginLeft",!0)}}},offsetParent:function(){return this.map(function(){var e=this.offsetParent;while(e&&"static"===S.css(e,"position"))e=e.offsetParent;return e||re})}}),S.each({scrollLeft:"pageXOffset",scrollTop:"pageYOffset"},function(t,i){var o="pageYOffset"===i;S.fn[t]=function(e){return $(this,function(e,t,n){var r;if(x(e)?r=e:9===e.nodeType&&(r=e.defaultView),void 0===n)return r?r[i]:e[t];r?r.scrollTo(o?r.pageXOffset:n,o?n:r.pageYOffset):e[t]=n},t,e,arguments.length)}}),S.each(["top","left"],function(e,n){S.cssHooks[n]=Fe(y.pixelPosition,function(e,t){if(t)return t=We(e,n),Pe.test(t)?S(e).position()[n]+"px":t})} +),S.each({Height:"height",Width:"width"},function(a,s){S.each({padding:"inner"+a,content:s,"":"outer"+a},function(r,o){S.fn[o]=function(e,t){var n=arguments.length&&(r||"boolean"!=typeof e),i=r||(!0===e||!0===t?"margin":"border");return $(this,function(e,t,n){var r;return x(e)?0===o.indexOf("outer")?e["inner"+a]:e.document.documentElement["client"+a]:9===e.nodeType?(r=e.documentElement,Math.max(e.body["scroll"+a],r["scroll"+a],e.body["offset"+a],r["offset"+a],r["client"+a])):void 0===n?S.css(e,t,i):S.style(e,t,n,i)},s,n?e:void 0,n)}})}),S.each(["ajaxStart","ajaxStop","ajaxComplete","ajaxError","ajaxSuccess","ajaxSend"],function(e,t){S.fn[t]=function(e){return this.on(t,e)}}),S.fn.extend({bind:function(e,t,n){return this.on(e,null,t,n)},unbind:function(e,t){return this.off(e,null,t)},delegate:function(e,t,n,r){return this.on(t,e,n,r)},undelegate:function(e,t,n){return 1===arguments.length?this.off(e,"**"):this.off(t,e||"**",n)},hover:function(e,t){return 
this.mouseenter(e).mouseleave(t||e)}}),S.each( +"blur focus focusin focusout resize scroll click dblclick mousedown mouseup mousemove mouseover mouseout mouseenter mouseleave change select submit keydown keypress keyup contextmenu".split(" "),function(e,n){S.fn[n]=function(e,t){return 0a;a++)for(i in o[a])n=o[a][i],o[a].hasOwnProperty(i)&&void 0!==n&&(e[i]=t.isPlainObject(n)?t.isPlainObject(e[i])?t.widget.extend({},e[i],n):t.widget.extend({},n):n);return e},t.widget.bridge=function(e,i){var n=i.prototype.widgetFullName||e;t.fn[e]=function(o){var a="string"==typeof o,r=s.call(arguments,1),h=this;return a?this.length||"instance"!==o?this.each(function(){var i,s=t.data(this,n);return"instance"===o?(h=s,!1):s?t.isFunction(s[o])&&"_"!==o.charAt(0)?(i=s[o].apply(s,r),i!==s&&void 0!==i?(h=i&&i.jquery?h.pushStack(i.get()):i,!1):void 0):t.error("no such method '"+o+"' for "+e+" widget instance"):t.error("cannot call methods on "+e+" prior to initialization; "+"attempted to call method '"+o+"'")}):h=void 0:(r.length&&(o=t.widget.extend.apply(null,[o].concat(r))),this.each(function(){var e=t.data(this,n);e?(e.option(o||{}),e._init&&e._init()):t.data(this,n,new i(o,this))})),h}},t.Widget=function(){},t.Widget._childConstructors=[],t.Widget.prototype={widgetName:"widget",widgetEventPrefix:"",defaultElement:"
    ",options:{classes:{},disabled:!1,create:null},_createWidget:function(e,s){s=t(s||this.defaultElement||this)[0],this.element=t(s),this.uuid=i++,this.eventNamespace="."+this.widgetName+this.uuid,this.bindings=t(),this.hoverable=t(),this.focusable=t(),this.classesElementLookup={},s!==this&&(t.data(s,this.widgetFullName,this),this._on(!0,this.element,{remove:function(t){t.target===s&&this.destroy()}}),this.document=t(s.style?s.ownerDocument:s.document||s),this.window=t(this.document[0].defaultView||this.document[0].parentWindow)),this.options=t.widget.extend({},this.options,this._getCreateOptions(),e),this._create(),this.options.disabled&&this._setOptionDisabled(this.options.disabled),this._trigger("create",null,this._getCreateEventData()),this._init()},_getCreateOptions:function(){return{}},_getCreateEventData:t.noop,_create:t.noop,_init:t.noop,destroy:function(){var e=this;this._destroy(),t.each(this.classesElementLookup,function(t,i){e._removeClass(i,t)}),this.element.off(this.eventNamespace).removeData(this.widgetFullName),this.widget().off(this.eventNamespace).removeAttr("aria-disabled"),this.bindings.off(this.eventNamespace)},_destroy:t.noop,widget:function(){return this.element},option:function(e,i){var s,n,o,a=e;if(0===arguments.length)return t.widget.extend({},this.options);if("string"==typeof e)if(a={},s=e.split("."),e=s.shift(),s.length){for(n=a[e]=t.widget.extend({},this.options[e]),o=0;s.length-1>o;o++)n[s[o]]=n[s[o]]||{},n=n[s[o]];if(e=s.pop(),1===arguments.length)return void 0===n[e]?null:n[e];n[e]=i}else{if(1===arguments.length)return void 0===this.options[e]?null:this.options[e];a[e]=i}return this._setOptions(a),this},_setOptions:function(t){var e;for(e in t)this._setOption(e,t[e]);return this},_setOption:function(t,e){return"classes"===t&&this._setOptionClasses(e),this.options[t]=e,"disabled"===t&&this._setOptionDisabled(e),this},_setOptionClasses:function(e){var i,s,n;for(i in 
e)n=this.classesElementLookup[i],e[i]!==this.options.classes[i]&&n&&n.length&&(s=t(n.get()),this._removeClass(n,i),s.addClass(this._classes({element:s,keys:i,classes:e,add:!0})))},_setOptionDisabled:function(t){this._toggleClass(this.widget(),this.widgetFullName+"-disabled",null,!!t),t&&(this._removeClass(this.hoverable,null,"ui-state-hover"),this._removeClass(this.focusable,null,"ui-state-focus"))},enable:function(){return this._setOptions({disabled:!1})},disable:function(){return this._setOptions({disabled:!0})},_classes:function(e){function i(i,o){var a,r;for(r=0;i.length>r;r++)a=n.classesElementLookup[i[r]]||t(),a=e.add?t(t.unique(a.get().concat(e.element.get()))):t(a.not(e.element).get()),n.classesElementLookup[i[r]]=a,s.push(i[r]),o&&e.classes[i[r]]&&s.push(e.classes[i[r]])}var s=[],n=this;return e=t.extend({element:this.element,classes:this.options.classes||{}},e),this._on(e.element,{remove:"_untrackClassesElement"}),e.keys&&i(e.keys.match(/\S+/g)||[],!0),e.extra&&i(e.extra.match(/\S+/g)||[]),s.join(" ")},_untrackClassesElement:function(e){var i=this;t.each(i.classesElementLookup,function(s,n){-1!==t.inArray(e.target,n)&&(i.classesElementLookup[s]=t(n.not(e.target).get()))})},_removeClass:function(t,e,i){return this._toggleClass(t,e,i,!1)},_addClass:function(t,e,i){return this._toggleClass(t,e,i,!0)},_toggleClass:function(t,e,i,s){s="boolean"==typeof s?s:i;var n="string"==typeof t||null===t,o={extra:n?e:i,keys:n?t:e,element:n?this.element:t,add:s};return o.element.toggleClass(this._classes(o),s),this},_on:function(e,i,s){var n,o=this;"boolean"!=typeof e&&(s=i,i=e,e=!1),s?(i=n=t(i),this.bindings=this.bindings.add(i)):(s=i,i=this.element,n=this.widget()),t.each(s,function(s,a){function r(){return e||o.options.disabled!==!0&&!t(this).hasClass("ui-state-disabled")?("string"==typeof a?o[a]:a).apply(o,arguments):void 0}"string"!=typeof a&&(r.guid=a.guid=a.guid||r.guid||t.guid++);var 
h=s.match(/^([\w:-]*)\s*(.*)$/),l=h[1]+o.eventNamespace,c=h[2];c?n.on(l,c,r):i.on(l,r)})},_off:function(e,i){i=(i||"").split(" ").join(this.eventNamespace+" ")+this.eventNamespace,e.off(i).off(i),this.bindings=t(this.bindings.not(e).get()),this.focusable=t(this.focusable.not(e).get()),this.hoverable=t(this.hoverable.not(e).get())},_delay:function(t,e){function i(){return("string"==typeof t?s[t]:t).apply(s,arguments)}var s=this;return setTimeout(i,e||0)},_hoverable:function(e){this.hoverable=this.hoverable.add(e),this._on(e,{mouseenter:function(e){this._addClass(t(e.currentTarget),null,"ui-state-hover")},mouseleave:function(e){this._removeClass(t(e.currentTarget),null,"ui-state-hover")}})},_focusable:function(e){this.focusable=this.focusable.add(e),this._on(e,{focusin:function(e){this._addClass(t(e.currentTarget),null,"ui-state-focus")},focusout:function(e){this._removeClass(t(e.currentTarget),null,"ui-state-focus")}})},_trigger:function(e,i,s){var n,o,a=this.options[e];if(s=s||{},i=t.Event(i),i.type=(e===this.widgetEventPrefix?e:this.widgetEventPrefix+e).toLowerCase(),i.target=this.element[0],o=i.originalEvent)for(n in o)n in i||(i[n]=o[n]);return this.element.trigger(i,s),!(t.isFunction(a)&&a.apply(this.element[0],[i].concat(s))===!1||i.isDefaultPrevented())}},t.each({show:"fadeIn",hide:"fadeOut"},function(e,i){t.Widget.prototype["_"+e]=function(s,n,o){"string"==typeof n&&(n={effect:n});var a,r=n?n===!0||"number"==typeof n?i:n.effect||i:e;n=n||{},"number"==typeof n&&(n={duration:n}),a=!t.isEmptyObject(n),n.complete=o,n.delay&&s.delay(n.delay),a&&t.effects&&t.effects.effect[r]?s[e](n):r!==e&&s[r]?s[r](n.duration,n.easing,o):s.queue(function(i){t(this)[e](),o&&o.call(s[0]),i()})}}),t.widget,function(){function e(t,e,i){return[parseFloat(t[0])*(u.test(t[0])?e/100:1),parseFloat(t[1])*(u.test(t[1])?i/100:1)]}function i(e,i){return parseInt(t.css(e,i),10)||0}function s(e){var i=e[0];return 
9===i.nodeType?{width:e.width(),height:e.height(),offset:{top:0,left:0}}:t.isWindow(i)?{width:e.width(),height:e.height(),offset:{top:e.scrollTop(),left:e.scrollLeft()}}:i.preventDefault?{width:0,height:0,offset:{top:i.pageY,left:i.pageX}}:{width:e.outerWidth(),height:e.outerHeight(),offset:e.offset()}}var n,o=Math.max,a=Math.abs,r=/left|center|right/,h=/top|center|bottom/,l=/[\+\-]\d+(\.[\d]+)?%?/,c=/^\w+/,u=/%$/,d=t.fn.position;t.position={scrollbarWidth:function(){if(void 0!==n)return n;var e,i,s=t("
    "),o=s.children()[0];return t("body").append(s),e=o.offsetWidth,s.css("overflow","scroll"),i=o.offsetWidth,e===i&&(i=s[0].clientWidth),s.remove(),n=e-i},getScrollInfo:function(e){var i=e.isWindow||e.isDocument?"":e.element.css("overflow-x"),s=e.isWindow||e.isDocument?"":e.element.css("overflow-y"),n="scroll"===i||"auto"===i&&e.widthi?"left":e>0?"right":"center",vertical:0>r?"top":s>0?"bottom":"middle"};l>p&&p>a(e+i)&&(u.horizontal="center"),c>f&&f>a(s+r)&&(u.vertical="middle"),u.important=o(a(e),a(i))>o(a(s),a(r))?"horizontal":"vertical",n.using.call(this,t,u)}),h.offset(t.extend(D,{using:r}))})},t.ui.position={fit:{left:function(t,e){var i,s=e.within,n=s.isWindow?s.scrollLeft:s.offset.left,a=s.width,r=t.left-e.collisionPosition.marginLeft,h=n-r,l=r+e.collisionWidth-a-n;e.collisionWidth>a?h>0&&0>=l?(i=t.left+h+e.collisionWidth-a-n,t.left+=h-i):t.left=l>0&&0>=h?n:h>l?n+a-e.collisionWidth:n:h>0?t.left+=h:l>0?t.left-=l:t.left=o(t.left-r,t.left)},top:function(t,e){var i,s=e.within,n=s.isWindow?s.scrollTop:s.offset.top,a=e.within.height,r=t.top-e.collisionPosition.marginTop,h=n-r,l=r+e.collisionHeight-a-n;e.collisionHeight>a?h>0&&0>=l?(i=t.top+h+e.collisionHeight-a-n,t.top+=h-i):t.top=l>0&&0>=h?n:h>l?n+a-e.collisionHeight:n:h>0?t.top+=h:l>0?t.top-=l:t.top=o(t.top-r,t.top)}},flip:{left:function(t,e){var i,s,n=e.within,o=n.offset.left+n.scrollLeft,r=n.width,h=n.isWindow?n.scrollLeft:n.offset.left,l=t.left-e.collisionPosition.marginLeft,c=l-h,u=l+e.collisionWidth-r-h,d="left"===e.my[0]?-e.elemWidth:"right"===e.my[0]?e.elemWidth:0,p="left"===e.at[0]?e.targetWidth:"right"===e.at[0]?-e.targetWidth:0,f=-2*e.offset[0];0>c?(i=t.left+d+p+f+e.collisionWidth-r-o,(0>i||a(c)>i)&&(t.left+=d+p+f)):u>0&&(s=t.left-e.collisionPosition.marginLeft+d+p+f-h,(s>0||u>a(s))&&(t.left+=d+p+f))},top:function(t,e){var 
i,s,n=e.within,o=n.offset.top+n.scrollTop,r=n.height,h=n.isWindow?n.scrollTop:n.offset.top,l=t.top-e.collisionPosition.marginTop,c=l-h,u=l+e.collisionHeight-r-h,d="top"===e.my[1],p=d?-e.elemHeight:"bottom"===e.my[1]?e.elemHeight:0,f="top"===e.at[1]?e.targetHeight:"bottom"===e.at[1]?-e.targetHeight:0,m=-2*e.offset[1];0>c?(s=t.top+p+f+m+e.collisionHeight-r-o,(0>s||a(c)>s)&&(t.top+=p+f+m)):u>0&&(i=t.top-e.collisionPosition.marginTop+p+f+m-h,(i>0||u>a(i))&&(t.top+=p+f+m))}},flipfit:{left:function(){t.ui.position.flip.left.apply(this,arguments),t.ui.position.fit.left.apply(this,arguments)},top:function(){t.ui.position.flip.top.apply(this,arguments),t.ui.position.fit.top.apply(this,arguments)}}}}(),t.ui.position,t.extend(t.expr[":"],{data:t.expr.createPseudo?t.expr.createPseudo(function(e){return function(i){return!!t.data(i,e)}}):function(e,i,s){return!!t.data(e,s[3])}}),t.fn.extend({disableSelection:function(){var t="onselectstart"in document.createElement("div")?"selectstart":"mousedown";return function(){return this.on(t+".ui-disableSelection",function(t){t.preventDefault()})}}(),enableSelection:function(){return this.off(".ui-disableSelection")}}),t.ui.focusable=function(i,s){var n,o,a,r,h,l=i.nodeName.toLowerCase();return"area"===l?(n=i.parentNode,o=n.name,i.href&&o&&"map"===n.nodeName.toLowerCase()?(a=t("img[usemap='#"+o+"']"),a.length>0&&a.is(":visible")):!1):(/^(input|select|textarea|button|object)$/.test(l)?(r=!i.disabled,r&&(h=t(i).closest("fieldset")[0],h&&(r=!h.disabled))):r="a"===l?i.href||s:s,r&&t(i).is(":visible")&&e(t(i)))},t.extend(t.expr[":"],{focusable:function(e){return t.ui.focusable(e,null!=t.attr(e,"tabindex"))}}),t.ui.focusable,t.fn.form=function(){return"string"==typeof this[0].form?this.closest("form"):t(this[0].form)},t.ui.formResetMixin={_formResetHandler:function(){var e=t(this);setTimeout(function(){var 
i=e.data("ui-form-reset-instances");t.each(i,function(){this.refresh()})})},_bindFormResetHandler:function(){if(this.form=this.element.form(),this.form.length){var t=this.form.data("ui-form-reset-instances")||[];t.length||this.form.on("reset.ui-form-reset",this._formResetHandler),t.push(this),this.form.data("ui-form-reset-instances",t)}},_unbindFormResetHandler:function(){if(this.form.length){var e=this.form.data("ui-form-reset-instances");e.splice(t.inArray(this,e),1),e.length?this.form.data("ui-form-reset-instances",e):this.form.removeData("ui-form-reset-instances").off("reset.ui-form-reset")}}},"1.7"===t.fn.jquery.substring(0,3)&&(t.each(["Width","Height"],function(e,i){function s(e,i,s,o){return t.each(n,function(){i-=parseFloat(t.css(e,"padding"+this))||0,s&&(i-=parseFloat(t.css(e,"border"+this+"Width"))||0),o&&(i-=parseFloat(t.css(e,"margin"+this))||0)}),i}var n="Width"===i?["Left","Right"]:["Top","Bottom"],o=i.toLowerCase(),a={innerWidth:t.fn.innerWidth,innerHeight:t.fn.innerHeight,outerWidth:t.fn.outerWidth,outerHeight:t.fn.outerHeight};t.fn["inner"+i]=function(e){return void 0===e?a["inner"+i].call(this):this.each(function(){t(this).css(o,s(this,e)+"px")})},t.fn["outer"+i]=function(e,n){return"number"!=typeof e?a["outer"+i].call(this,e):this.each(function(){t(this).css(o,s(this,e,!0,n)+"px")})}}),t.fn.addBack=function(t){return this.add(null==t?this.prevObject:this.prevObject.filter(t))}),t.ui.keyCode={BACKSPACE:8,COMMA:188,DELETE:46,DOWN:40,END:35,ENTER:13,ESCAPE:27,HOME:36,LEFT:37,PAGE_DOWN:34,PAGE_UP:33,PERIOD:190,RIGHT:39,SPACE:32,TAB:9,UP:38},t.ui.escapeSelector=function(){var t=/([!"#$%&'()*+,./:;<=>?@[\]^`{|}~])/g;return function(e){return e.replace(t,"\\$1")}}(),t.fn.labels=function(){var e,i,s,n,o;return 
this[0].labels&&this[0].labels.length?this.pushStack(this[0].labels):(n=this.eq(0).parents("label"),s=this.attr("id"),s&&(e=this.eq(0).parents().last(),o=e.add(e.length?e.siblings():this.siblings()),i="label[for='"+t.ui.escapeSelector(s)+"']",n=n.add(o.find(i).addBack(i))),this.pushStack(n))},t.fn.scrollParent=function(e){var i=this.css("position"),s="absolute"===i,n=e?/(auto|scroll|hidden)/:/(auto|scroll)/,o=this.parents().filter(function(){var e=t(this);return s&&"static"===e.css("position")?!1:n.test(e.css("overflow")+e.css("overflow-y")+e.css("overflow-x"))}).eq(0);return"fixed"!==i&&o.length?o:t(this[0].ownerDocument||document)},t.extend(t.expr[":"],{tabbable:function(e){var i=t.attr(e,"tabindex"),s=null!=i;return(!s||i>=0)&&t.ui.focusable(e,s)}}),t.fn.extend({uniqueId:function(){var t=0;return function(){return this.each(function(){this.id||(this.id="ui-id-"+ ++t)})}}(),removeUniqueId:function(){return this.each(function(){/^ui-id-\d+$/.test(this.id)&&t(this).removeAttr("id")})}}),t.ui.ie=!!/msie [\w.]+/.exec(navigator.userAgent.toLowerCase());var n=!1;t(document).on("mouseup",function(){n=!1}),t.widget("ui.mouse",{version:"1.12.1",options:{cancel:"input, textarea, button, select, option",distance:1,delay:0},_mouseInit:function(){var e=this;this.element.on("mousedown."+this.widgetName,function(t){return e._mouseDown(t)}).on("click."+this.widgetName,function(i){return!0===t.data(i.target,e.widgetName+".preventClickEvent")?(t.removeData(i.target,e.widgetName+".preventClickEvent"),i.stopImmediatePropagation(),!1):void 0}),this.started=!1},_mouseDestroy:function(){this.element.off("."+this.widgetName),this._mouseMoveDelegate&&this.document.off("mousemove."+this.widgetName,this._mouseMoveDelegate).off("mouseup."+this.widgetName,this._mouseUpDelegate)},_mouseDown:function(e){if(!n){this._mouseMoved=!1,this._mouseStarted&&this._mouseUp(e),this._mouseDownEvent=e;var i=this,s=1===e.which,o="string"==typeof 
this.options.cancel&&e.target.nodeName?t(e.target).closest(this.options.cancel).length:!1;return s&&!o&&this._mouseCapture(e)?(this.mouseDelayMet=!this.options.delay,this.mouseDelayMet||(this._mouseDelayTimer=setTimeout(function(){i.mouseDelayMet=!0},this.options.delay)),this._mouseDistanceMet(e)&&this._mouseDelayMet(e)&&(this._mouseStarted=this._mouseStart(e)!==!1,!this._mouseStarted)?(e.preventDefault(),!0):(!0===t.data(e.target,this.widgetName+".preventClickEvent")&&t.removeData(e.target,this.widgetName+".preventClickEvent"),this._mouseMoveDelegate=function(t){return i._mouseMove(t)},this._mouseUpDelegate=function(t){return i._mouseUp(t)},this.document.on("mousemove."+this.widgetName,this._mouseMoveDelegate).on("mouseup."+this.widgetName,this._mouseUpDelegate),e.preventDefault(),n=!0,!0)):!0}},_mouseMove:function(e){if(this._mouseMoved){if(t.ui.ie&&(!document.documentMode||9>document.documentMode)&&!e.button)return this._mouseUp(e);if(!e.which)if(e.originalEvent.altKey||e.originalEvent.ctrlKey||e.originalEvent.metaKey||e.originalEvent.shiftKey)this.ignoreMissingWhich=!0;else if(!this.ignoreMissingWhich)return this._mouseUp(e)}return(e.which||e.button)&&(this._mouseMoved=!0),this._mouseStarted?(this._mouseDrag(e),e.preventDefault()):(this._mouseDistanceMet(e)&&this._mouseDelayMet(e)&&(this._mouseStarted=this._mouseStart(this._mouseDownEvent,e)!==!1,this._mouseStarted?this._mouseDrag(e):this._mouseUp(e)),!this._mouseStarted)},_mouseUp:function(e){this.document.off("mousemove."+this.widgetName,this._mouseMoveDelegate).off("mouseup."+this.widgetName,this._mouseUpDelegate),this._mouseStarted&&(this._mouseStarted=!1,e.target===this._mouseDownEvent.target&&t.data(e.target,this.widgetName+".preventClickEvent",!0),this._mouseStop(e)),this._mouseDelayTimer&&(clearTimeout(this._mouseDelayTimer),delete this._mouseDelayTimer),this.ignoreMissingWhich=!1,n=!1,e.preventDefault()},_mouseDistanceMet:function(t){return 
Math.max(Math.abs(this._mouseDownEvent.pageX-t.pageX),Math.abs(this._mouseDownEvent.pageY-t.pageY))>=this.options.distance},_mouseDelayMet:function(){return this.mouseDelayMet},_mouseStart:function(){},_mouseDrag:function(){},_mouseStop:function(){},_mouseCapture:function(){return!0}}),t.ui.plugin={add:function(e,i,s){var n,o=t.ui[e].prototype;for(n in s)o.plugins[n]=o.plugins[n]||[],o.plugins[n].push([i,s[n]])},call:function(t,e,i,s){var n,o=t.plugins[e];if(o&&(s||t.element[0].parentNode&&11!==t.element[0].parentNode.nodeType))for(n=0;o.length>n;n++)t.options[o[n][0]]&&o[n][1].apply(t.element,i)}},t.widget("ui.resizable",t.ui.mouse,{version:"1.12.1",widgetEventPrefix:"resize",options:{alsoResize:!1,animate:!1,animateDuration:"slow",animateEasing:"swing",aspectRatio:!1,autoHide:!1,classes:{"ui-resizable-se":"ui-icon ui-icon-gripsmall-diagonal-se"},containment:!1,ghost:!1,grid:!1,handles:"e,s,se",helper:!1,maxHeight:null,maxWidth:null,minHeight:10,minWidth:10,zIndex:90,resize:null,start:null,stop:null},_num:function(t){return parseFloat(t)||0},_isNumber:function(t){return!isNaN(parseFloat(t))},_hasScroll:function(e,i){if("hidden"===t(e).css("overflow"))return!1;var s=i&&"left"===i?"scrollLeft":"scrollTop",n=!1;return e[s]>0?!0:(e[s]=1,n=e[s]>0,e[s]=0,n)},_create:function(){var e,i=this.options,s=this;this._addClass("ui-resizable"),t.extend(this,{_aspectRatio:!!i.aspectRatio,aspectRatio:i.aspectRatio,originalElement:this.element,_proportionallyResizeElements:[],_helper:i.helper||i.ghost||i.animate?i.helper||"ui-resizable-helper":null}),this.element[0].nodeName.match(/^(canvas|textarea|input|select|button|img)$/i)&&(this.element.wrap(t("
    ").css({position:this.element.css("position"),width:this.element.outerWidth(),height:this.element.outerHeight(),top:this.element.css("top"),left:this.element.css("left")})),this.element=this.element.parent().data("ui-resizable",this.element.resizable("instance")),this.elementIsWrapper=!0,e={marginTop:this.originalElement.css("marginTop"),marginRight:this.originalElement.css("marginRight"),marginBottom:this.originalElement.css("marginBottom"),marginLeft:this.originalElement.css("marginLeft")},this.element.css(e),this.originalElement.css("margin",0),this.originalResizeStyle=this.originalElement.css("resize"),this.originalElement.css("resize","none"),this._proportionallyResizeElements.push(this.originalElement.css({position:"static",zoom:1,display:"block"})),this.originalElement.css(e),this._proportionallyResize()),this._setupHandles(),i.autoHide&&t(this.element).on("mouseenter",function(){i.disabled||(s._removeClass("ui-resizable-autohide"),s._handles.show())}).on("mouseleave",function(){i.disabled||s.resizing||(s._addClass("ui-resizable-autohide"),s._handles.hide())}),this._mouseInit()},_destroy:function(){this._mouseDestroy();var e,i=function(e){t(e).removeData("resizable").removeData("ui-resizable").off(".resizable").find(".ui-resizable-handle").remove()};return this.elementIsWrapper&&(i(this.element),e=this.element,this.originalElement.css({position:e.css("position"),width:e.outerWidth(),height:e.outerHeight(),top:e.css("top"),left:e.css("left")}).insertAfter(e),e.remove()),this.originalElement.css("resize",this.originalResizeStyle),i(this.originalElement),this},_setOption:function(t,e){switch(this._super(t,e),t){case"handles":this._removeHandles(),this._setupHandles();break;default:}},_setupHandles:function(){var 
e,i,s,n,o,a=this.options,r=this;if(this.handles=a.handles||(t(".ui-resizable-handle",this.element).length?{n:".ui-resizable-n",e:".ui-resizable-e",s:".ui-resizable-s",w:".ui-resizable-w",se:".ui-resizable-se",sw:".ui-resizable-sw",ne:".ui-resizable-ne",nw:".ui-resizable-nw"}:"e,s,se"),this._handles=t(),this.handles.constructor===String)for("all"===this.handles&&(this.handles="n,e,s,w,se,sw,ne,nw"),s=this.handles.split(","),this.handles={},i=0;s.length>i;i++)e=t.trim(s[i]),n="ui-resizable-"+e,o=t("
    "),this._addClass(o,"ui-resizable-handle "+n),o.css({zIndex:a.zIndex}),this.handles[e]=".ui-resizable-"+e,this.element.append(o);this._renderAxis=function(e){var i,s,n,o;e=e||this.element;for(i in this.handles)this.handles[i].constructor===String?this.handles[i]=this.element.children(this.handles[i]).first().show():(this.handles[i].jquery||this.handles[i].nodeType)&&(this.handles[i]=t(this.handles[i]),this._on(this.handles[i],{mousedown:r._mouseDown})),this.elementIsWrapper&&this.originalElement[0].nodeName.match(/^(textarea|input|select|button)$/i)&&(s=t(this.handles[i],this.element),o=/sw|ne|nw|se|n|s/.test(i)?s.outerHeight():s.outerWidth(),n=["padding",/ne|nw|n/.test(i)?"Top":/se|sw|s/.test(i)?"Bottom":/^e$/.test(i)?"Right":"Left"].join(""),e.css(n,o),this._proportionallyResize()),this._handles=this._handles.add(this.handles[i])},this._renderAxis(this.element),this._handles=this._handles.add(this.element.find(".ui-resizable-handle")),this._handles.disableSelection(),this._handles.on("mouseover",function(){r.resizing||(this.className&&(o=this.className.match(/ui-resizable-(se|sw|ne|nw|n|e|s|w)/i)),r.axis=o&&o[1]?o[1]:"se")}),a.autoHide&&(this._handles.hide(),this._addClass("ui-resizable-autohide"))},_removeHandles:function(){this._handles.remove()},_mouseCapture:function(e){var i,s,n=!1;for(i in this.handles)s=t(this.handles[i])[0],(s===e.target||t.contains(s,e.target))&&(n=!0);return!this.options.disabled&&n},_mouseStart:function(e){var i,s,n,o=this.options,a=this.element;return 
this.resizing=!0,this._renderProxy(),i=this._num(this.helper.css("left")),s=this._num(this.helper.css("top")),o.containment&&(i+=t(o.containment).scrollLeft()||0,s+=t(o.containment).scrollTop()||0),this.offset=this.helper.offset(),this.position={left:i,top:s},this.size=this._helper?{width:this.helper.width(),height:this.helper.height()}:{width:a.width(),height:a.height()},this.originalSize=this._helper?{width:a.outerWidth(),height:a.outerHeight()}:{width:a.width(),height:a.height()},this.sizeDiff={width:a.outerWidth()-a.width(),height:a.outerHeight()-a.height()},this.originalPosition={left:i,top:s},this.originalMousePosition={left:e.pageX,top:e.pageY},this.aspectRatio="number"==typeof o.aspectRatio?o.aspectRatio:this.originalSize.width/this.originalSize.height||1,n=t(".ui-resizable-"+this.axis).css("cursor"),t("body").css("cursor","auto"===n?this.axis+"-resize":n),this._addClass("ui-resizable-resizing"),this._propagate("start",e),!0},_mouseDrag:function(e){var i,s,n=this.originalMousePosition,o=this.axis,a=e.pageX-n.left||0,r=e.pageY-n.top||0,h=this._change[o];return this._updatePrevProperties(),h?(i=h.apply(this,[e,a,r]),this._updateVirtualBoundaries(e.shiftKey),(this._aspectRatio||e.shiftKey)&&(i=this._updateRatio(i,e)),i=this._respectSize(i,e),this._updateCache(i),this._propagate("resize",e),s=this._applyChanges(),!this._helper&&this._proportionallyResizeElements.length&&this._proportionallyResize(),t.isEmptyObject(s)||(this._updatePrevProperties(),this._trigger("resize",e,this.ui()),this._applyChanges()),!1):!1},_mouseStop:function(e){this.resizing=!1;var i,s,n,o,a,r,h,l=this.options,c=this;return 
this._helper&&(i=this._proportionallyResizeElements,s=i.length&&/textarea/i.test(i[0].nodeName),n=s&&this._hasScroll(i[0],"left")?0:c.sizeDiff.height,o=s?0:c.sizeDiff.width,a={width:c.helper.width()-o,height:c.helper.height()-n},r=parseFloat(c.element.css("left"))+(c.position.left-c.originalPosition.left)||null,h=parseFloat(c.element.css("top"))+(c.position.top-c.originalPosition.top)||null,l.animate||this.element.css(t.extend(a,{top:h,left:r})),c.helper.height(c.size.height),c.helper.width(c.size.width),this._helper&&!l.animate&&this._proportionallyResize()),t("body").css("cursor","auto"),this._removeClass("ui-resizable-resizing"),this._propagate("stop",e),this._helper&&this.helper.remove(),!1},_updatePrevProperties:function(){this.prevPosition={top:this.position.top,left:this.position.left},this.prevSize={width:this.size.width,height:this.size.height}},_applyChanges:function(){var t={};return this.position.top!==this.prevPosition.top&&(t.top=this.position.top+"px"),this.position.left!==this.prevPosition.left&&(t.left=this.position.left+"px"),this.size.width!==this.prevSize.width&&(t.width=this.size.width+"px"),this.size.height!==this.prevSize.height&&(t.height=this.size.height+"px"),this.helper.css(t),t},_updateVirtualBoundaries:function(t){var 
e,i,s,n,o,a=this.options;o={minWidth:this._isNumber(a.minWidth)?a.minWidth:0,maxWidth:this._isNumber(a.maxWidth)?a.maxWidth:1/0,minHeight:this._isNumber(a.minHeight)?a.minHeight:0,maxHeight:this._isNumber(a.maxHeight)?a.maxHeight:1/0},(this._aspectRatio||t)&&(e=o.minHeight*this.aspectRatio,s=o.minWidth/this.aspectRatio,i=o.maxHeight*this.aspectRatio,n=o.maxWidth/this.aspectRatio,e>o.minWidth&&(o.minWidth=e),s>o.minHeight&&(o.minHeight=s),o.maxWidth>i&&(o.maxWidth=i),o.maxHeight>n&&(o.maxHeight=n)),this._vBoundaries=o},_updateCache:function(t){this.offset=this.helper.offset(),this._isNumber(t.left)&&(this.position.left=t.left),this._isNumber(t.top)&&(this.position.top=t.top),this._isNumber(t.height)&&(this.size.height=t.height),this._isNumber(t.width)&&(this.size.width=t.width)},_updateRatio:function(t){var e=this.position,i=this.size,s=this.axis;return this._isNumber(t.height)?t.width=t.height*this.aspectRatio:this._isNumber(t.width)&&(t.height=t.width/this.aspectRatio),"sw"===s&&(t.left=e.left+(i.width-t.width),t.top=null),"nw"===s&&(t.top=e.top+(i.height-t.height),t.left=e.left+(i.width-t.width)),t},_respectSize:function(t){var e=this._vBoundaries,i=this.axis,s=this._isNumber(t.width)&&e.maxWidth&&e.maxWidtht.width,a=this._isNumber(t.height)&&e.minHeight&&e.minHeight>t.height,r=this.originalPosition.left+this.originalSize.width,h=this.originalPosition.top+this.originalSize.height,l=/sw|nw|w/.test(i),c=/nw|ne|n/.test(i);return o&&(t.width=e.minWidth),a&&(t.height=e.minHeight),s&&(t.width=e.maxWidth),n&&(t.height=e.maxHeight),o&&l&&(t.left=r-e.minWidth),s&&l&&(t.left=r-e.maxWidth),a&&c&&(t.top=h-e.minHeight),n&&c&&(t.top=h-e.maxHeight),t.width||t.height||t.left||!t.top?t.width||t.height||t.top||!t.left||(t.left=null):t.top=null,t},_getPaddingPlusBorderDimensions:function(t){for(var 
e=0,i=[],s=[t.css("borderTopWidth"),t.css("borderRightWidth"),t.css("borderBottomWidth"),t.css("borderLeftWidth")],n=[t.css("paddingTop"),t.css("paddingRight"),t.css("paddingBottom"),t.css("paddingLeft")];4>e;e++)i[e]=parseFloat(s[e])||0,i[e]+=parseFloat(n[e])||0;return{height:i[0]+i[2],width:i[1]+i[3]}},_proportionallyResize:function(){if(this._proportionallyResizeElements.length)for(var t,e=0,i=this.helper||this.element;this._proportionallyResizeElements.length>e;e++)t=this._proportionallyResizeElements[e],this.outerDimensions||(this.outerDimensions=this._getPaddingPlusBorderDimensions(t)),t.css({height:i.height()-this.outerDimensions.height||0,width:i.width()-this.outerDimensions.width||0})},_renderProxy:function(){var e=this.element,i=this.options;this.elementOffset=e.offset(),this._helper?(this.helper=this.helper||t("
    "),this._addClass(this.helper,this._helper),this.helper.css({width:this.element.outerWidth(),height:this.element.outerHeight(),position:"absolute",left:this.elementOffset.left+"px",top:this.elementOffset.top+"px",zIndex:++i.zIndex}),this.helper.appendTo("body").disableSelection()):this.helper=this.element -},_change:{e:function(t,e){return{width:this.originalSize.width+e}},w:function(t,e){var i=this.originalSize,s=this.originalPosition;return{left:s.left+e,width:i.width-e}},n:function(t,e,i){var s=this.originalSize,n=this.originalPosition;return{top:n.top+i,height:s.height-i}},s:function(t,e,i){return{height:this.originalSize.height+i}},se:function(e,i,s){return t.extend(this._change.s.apply(this,arguments),this._change.e.apply(this,[e,i,s]))},sw:function(e,i,s){return t.extend(this._change.s.apply(this,arguments),this._change.w.apply(this,[e,i,s]))},ne:function(e,i,s){return t.extend(this._change.n.apply(this,arguments),this._change.e.apply(this,[e,i,s]))},nw:function(e,i,s){return t.extend(this._change.n.apply(this,arguments),this._change.w.apply(this,[e,i,s]))}},_propagate:function(e,i){t.ui.plugin.call(this,e,[i,this.ui()]),"resize"!==e&&this._trigger(e,i,this.ui())},plugins:{},ui:function(){return{originalElement:this.originalElement,element:this.element,helper:this.helper,position:this.position,size:this.size,originalSize:this.originalSize,originalPosition:this.originalPosition}}}),t.ui.plugin.add("resizable","animate",{stop:function(e){var 
i=t(this).resizable("instance"),s=i.options,n=i._proportionallyResizeElements,o=n.length&&/textarea/i.test(n[0].nodeName),a=o&&i._hasScroll(n[0],"left")?0:i.sizeDiff.height,r=o?0:i.sizeDiff.width,h={width:i.size.width-r,height:i.size.height-a},l=parseFloat(i.element.css("left"))+(i.position.left-i.originalPosition.left)||null,c=parseFloat(i.element.css("top"))+(i.position.top-i.originalPosition.top)||null;i.element.animate(t.extend(h,c&&l?{top:c,left:l}:{}),{duration:s.animateDuration,easing:s.animateEasing,step:function(){var s={width:parseFloat(i.element.css("width")),height:parseFloat(i.element.css("height")),top:parseFloat(i.element.css("top")),left:parseFloat(i.element.css("left"))};n&&n.length&&t(n[0]).css({width:s.width,height:s.height}),i._updateCache(s),i._propagate("resize",e)}})}}),t.ui.plugin.add("resizable","containment",{start:function(){var e,i,s,n,o,a,r,h=t(this).resizable("instance"),l=h.options,c=h.element,u=l.containment,d=u instanceof t?u.get(0):/parent/.test(u)?c.parent().get(0):u;d&&(h.containerElement=t(d),/document/.test(u)||u===document?(h.containerOffset={left:0,top:0},h.containerPosition={left:0,top:0},h.parentData={element:t(document),left:0,top:0,width:t(document).width(),height:t(document).height()||document.body.parentNode.scrollHeight}):(e=t(d),i=[],t(["Top","Right","Left","Bottom"]).each(function(t,s){i[t]=h._num(e.css("padding"+s))}),h.containerOffset=e.offset(),h.containerPosition=e.position(),h.containerSize={height:e.innerHeight()-i[3],width:e.innerWidth()-i[1]},s=h.containerOffset,n=h.containerSize.height,o=h.containerSize.width,a=h._hasScroll(d,"left")?d.scrollWidth:o,r=h._hasScroll(d)?d.scrollHeight:n,h.parentData={element:d,left:s.left,top:s.top,width:a,height:r}))},resize:function(e){var 
i,s,n,o,a=t(this).resizable("instance"),r=a.options,h=a.containerOffset,l=a.position,c=a._aspectRatio||e.shiftKey,u={top:0,left:0},d=a.containerElement,p=!0;d[0]!==document&&/static/.test(d.css("position"))&&(u=h),l.left<(a._helper?h.left:0)&&(a.size.width=a.size.width+(a._helper?a.position.left-h.left:a.position.left-u.left),c&&(a.size.height=a.size.width/a.aspectRatio,p=!1),a.position.left=r.helper?h.left:0),l.top<(a._helper?h.top:0)&&(a.size.height=a.size.height+(a._helper?a.position.top-h.top:a.position.top),c&&(a.size.width=a.size.height*a.aspectRatio,p=!1),a.position.top=a._helper?h.top:0),n=a.containerElement.get(0)===a.element.parent().get(0),o=/relative|absolute/.test(a.containerElement.css("position")),n&&o?(a.offset.left=a.parentData.left+a.position.left,a.offset.top=a.parentData.top+a.position.top):(a.offset.left=a.element.offset().left,a.offset.top=a.element.offset().top),i=Math.abs(a.sizeDiff.width+(a._helper?a.offset.left-u.left:a.offset.left-h.left)),s=Math.abs(a.sizeDiff.height+(a._helper?a.offset.top-u.top:a.offset.top-h.top)),i+a.size.width>=a.parentData.width&&(a.size.width=a.parentData.width-i,c&&(a.size.height=a.size.width/a.aspectRatio,p=!1)),s+a.size.height>=a.parentData.height&&(a.size.height=a.parentData.height-s,c&&(a.size.width=a.size.height*a.aspectRatio,p=!1)),p||(a.position.left=a.prevPosition.left,a.position.top=a.prevPosition.top,a.size.width=a.prevSize.width,a.size.height=a.prevSize.height)},stop:function(){var e=t(this).resizable("instance"),i=e.options,s=e.containerOffset,n=e.containerPosition,o=e.containerElement,a=t(e.helper),r=a.offset(),h=a.outerWidth()-e.sizeDiff.width,l=a.outerHeight()-e.sizeDiff.height;e._helper&&!i.animate&&/relative/.test(o.css("position"))&&t(this).css({left:r.left-n.left-s.left,width:h,height:l}),e._helper&&!i.animate&&/static/.test(o.css("position"))&&t(this).css({left:r.left-n.left-s.left,width:h,height:l})}}),t.ui.plugin.add("resizable","alsoResize",{start:function(){var 
e=t(this).resizable("instance"),i=e.options;t(i.alsoResize).each(function(){var e=t(this);e.data("ui-resizable-alsoresize",{width:parseFloat(e.width()),height:parseFloat(e.height()),left:parseFloat(e.css("left")),top:parseFloat(e.css("top"))})})},resize:function(e,i){var s=t(this).resizable("instance"),n=s.options,o=s.originalSize,a=s.originalPosition,r={height:s.size.height-o.height||0,width:s.size.width-o.width||0,top:s.position.top-a.top||0,left:s.position.left-a.left||0};t(n.alsoResize).each(function(){var e=t(this),s=t(this).data("ui-resizable-alsoresize"),n={},o=e.parents(i.originalElement[0]).length?["width","height"]:["width","height","top","left"];t.each(o,function(t,e){var i=(s[e]||0)+(r[e]||0);i&&i>=0&&(n[e]=i||null)}),e.css(n)})},stop:function(){t(this).removeData("ui-resizable-alsoresize")}}),t.ui.plugin.add("resizable","ghost",{start:function(){var e=t(this).resizable("instance"),i=e.size;e.ghost=e.originalElement.clone(),e.ghost.css({opacity:.25,display:"block",position:"relative",height:i.height,width:i.width,margin:0,left:0,top:0}),e._addClass(e.ghost,"ui-resizable-ghost"),t.uiBackCompat!==!1&&"string"==typeof e.options.ghost&&e.ghost.addClass(this.options.ghost),e.ghost.appendTo(e.helper)},resize:function(){var e=t(this).resizable("instance");e.ghost&&e.ghost.css({position:"relative",height:e.size.height,width:e.size.width})},stop:function(){var e=t(this).resizable("instance");e.ghost&&e.helper&&e.helper.get(0).removeChild(e.ghost.get(0))}}),t.ui.plugin.add("resizable","grid",{resize:function(){var e,i=t(this).resizable("instance"),s=i.options,n=i.size,o=i.originalSize,a=i.originalPosition,r=i.axis,h="number"==typeof 
s.grid?[s.grid,s.grid]:s.grid,l=h[0]||1,c=h[1]||1,u=Math.round((n.width-o.width)/l)*l,d=Math.round((n.height-o.height)/c)*c,p=o.width+u,f=o.height+d,m=s.maxWidth&&p>s.maxWidth,g=s.maxHeight&&f>s.maxHeight,_=s.minWidth&&s.minWidth>p,v=s.minHeight&&s.minHeight>f;s.grid=h,_&&(p+=l),v&&(f+=c),m&&(p-=l),g&&(f-=c),/^(se|s|e)$/.test(r)?(i.size.width=p,i.size.height=f):/^(ne)$/.test(r)?(i.size.width=p,i.size.height=f,i.position.top=a.top-d):/^(sw)$/.test(r)?(i.size.width=p,i.size.height=f,i.position.left=a.left-u):((0>=f-c||0>=p-l)&&(e=i._getPaddingPlusBorderDimensions(this)),f-c>0?(i.size.height=f,i.position.top=a.top-d):(f=c-e.height,i.size.height=f,i.position.top=a.top+o.height-f),p-l>0?(i.size.width=p,i.position.left=a.left-u):(p=l-e.width,i.size.width=p,i.position.left=a.left+o.width-p))}}),t.ui.resizable});/** +* Includes: widget.js, position.js, data.js, disable-selection.js, focusable.js, form-reset-mixin.js, jquery-patch.js, keycode.js, labels.js, scroll-parent.js, tabbable.js, unique-id.js, widgets/resizable.js, widgets/mouse.js +* Copyright jQuery Foundation and other contributors; Licensed MIT */!function(t){"use strict";"function"==typeof define&&define.amd?define(["jquery"],t):t(jQuery)}(function(y){"use strict";y.ui=y.ui||{};y.ui.version="1.13.2";var n,i=0,h=Array.prototype.hasOwnProperty,a=Array.prototype.slice;y.cleanData=(n=y.cleanData,function(t){for(var e,i,s=0;null!=(i=t[s]);s++)(e=y._data(i,"events"))&&e.remove&&y(i).triggerHandler("remove");n(t)}),y.widget=function(t,i,e){var s,n,o,h={},a=t.split(".")[0],r=a+"-"+(t=t.split(".")[1]);return e||(e=i,i=y.Widget),Array.isArray(e)&&(e=y.extend.apply(null,[{}].concat(e))),y.expr.pseudos[r.toLowerCase()]=function(t){return!!y.data(t,r)},y[a]=y[a]||{},s=y[a][t],n=y[a][t]=function(t,e){if(!this||!this._createWidget)return new n(t,e);arguments.length&&this._createWidget(t,e)},y.extend(n,s,{version:e.version,_proto:y.extend({},e),_childConstructors:[]}),(o=new 
i).options=y.widget.extend({},o.options),y.each(e,function(e,s){function n(){return i.prototype[e].apply(this,arguments)} +function o(t){return i.prototype[e].apply(this,t)}h[e]="function"==typeof s?function(){var t,e=this._super,i=this._superApply;return this._super=n,this._superApply=o,t=s.apply(this,arguments),this._super=e,this._superApply=i,t}:s}),n.prototype=y.widget.extend(o,{widgetEventPrefix:s&&o.widgetEventPrefix||t},h,{constructor:n,namespace:a,widgetName:t,widgetFullName:r}),s?(y.each(s._childConstructors,function(t,e){var i=e.prototype;y.widget(i.namespace+"."+i.widgetName,n,e._proto)}),delete s._childConstructors):i._childConstructors.push(n),y.widget.bridge(t,n),n},y.widget.extend=function(t){for(var e,i,s=a.call(arguments,1),n=0,o=s.length;n",options:{classes:{},disabled:!1,create:null},_createWidget:function(t,e){e=y(e||this.defaultElement||this)[0],this.element=y(e),this.uuid=i++,this.eventNamespace="."+this.widgetName+this.uuid,this.bindings=y(),this.hoverable=y(),this.focusable=y(),this.classesElementLookup={},e!==this&&(y.data(e,this.widgetFullName,this),this._on(!0,this.element,{remove:function(t +){t.target===e&&this.destroy()}}),this.document=y(e.style?e.ownerDocument:e.document||e),this.window=y(this.document[0].defaultView||this.document[0].parentWindow)),this.options=y.widget.extend({},this.options,this._getCreateOptions(),t),this._create(),this.options.disabled&&this._setOptionDisabled(this.options.disabled),this._trigger("create",null,this._getCreateEventData()),this._init()},_getCreateOptions:function(){return{}},_getCreateEventData:y.noop,_create:y.noop,_init:y.noop,destroy:function(){var i=this;this._destroy(),y.each(this.classesElementLookup,function(t,e){i._removeClass(e,t)}),this.element.off(this.eventNamespace).removeData(this.widgetFullName),this.widget().off(this.eventNamespace).removeAttr("aria-disabled"),this.bindings.off(this.eventNamespace)},_destroy:y.noop,widget:function(){return this.element},option:function(t,e){var 
i,s,n,o=t;if(0===arguments.length)return y.widget.extend({},this.options);if("string"==typeof t)if(o={},t=(i=t.split(".")).shift(),i.length){for(s=o[t +]=y.widget.extend({},this.options[t]),n=0;n
    "),i=e.children()[0];return y("body").append(e),t=i.offsetWidth,e.css("overflow","scroll"),t===(i=i.offsetWidth)&&(i=e[0].clientWidth),e.remove(),s=t-i}, +getScrollInfo:function(t){var e=t.isWindow||t.isDocument?"":t.element.css("overflow-x"),i=t.isWindow||t.isDocument?"":t.element.css("overflow-y"),e="scroll"===e||"auto"===e&&t.widthx(D(s),D(n))?o.important="horizontal":o.important="vertical",p.using.call(this,t,o)}),h.offset(y.extend(l,{using:t}))})},y.ui.position={fit:{left:function(t,e){var i=e.within, +s=i.isWindow?i.scrollLeft:i.offset.left,n=i.width,o=t.left-e.collisionPosition.marginLeft,h=s-o,a=o+e.collisionWidth-n-s;e.collisionWidth>n?0n?0=this.options.distance},_mouseDelayMet:function(){return this.mouseDelayMet},_mouseStart:function(){},_mouseDrag:function(){},_mouseStop:function(){},_mouseCapture:function(){return!0}}),y.ui.plugin={add:function(t,e,i){var s,n=y.ui[t].prototype;for(s in i)n.plugins[s]=n.plugins[s]||[],n.plugins[s].push([e,i[s]])},call:function(t,e,i,s){var n,o=t.plugins[e];if(o&&(s||t.element[0].parentNode&&11!==t.element[0].parentNode.nodeType))for(n=0;n
    ").css({overflow:"hidden",position:this.element.css("position"),width:this.element.outerWidth(),height:this.element.outerHeight(),top:this.element.css("top"),left:this.element.css("left")})), +this.element=this.element.parent().data("ui-resizable",this.element.resizable("instance")),this.elementIsWrapper=!0,t={marginTop:this.originalElement.css("marginTop"),marginRight:this.originalElement.css("marginRight"),marginBottom:this.originalElement.css("marginBottom"),marginLeft:this.originalElement.css("marginLeft")},this.element.css(t),this.originalElement.css("margin",0),this.originalResizeStyle=this.originalElement.css("resize"),this.originalElement.css("resize","none"),this._proportionallyResizeElements.push(this.originalElement.css({position:"static",zoom:1,display:"block"})),this.originalElement.css(t),this._proportionallyResize()),this._setupHandles(),e.autoHide&&y(this.element).on("mouseenter",function(){e.disabled||(i._removeClass("ui-resizable-autohide"),i._handles.show())}).on("mouseleave",function(){e.disabled||i.resizing||(i._addClass("ui-resizable-autohide"),i._handles.hide())}),this._mouseInit()},_destroy:function(){this._mouseDestroy(),this._addedHandles.remove();function t(t){y(t +).removeData("resizable").removeData("ui-resizable").off(".resizable")}var e;return this.elementIsWrapper&&(t(this.element),e=this.element,this.originalElement.css({position:e.css("position"),width:e.outerWidth(),height:e.outerHeight(),top:e.css("top"),left:e.css("left")}).insertAfter(e),e.remove()),this.originalElement.css("resize",this.originalResizeStyle),t(this.originalElement),this},_setOption:function(t,e){switch(this._super(t,e),t){case"handles":this._removeHandles(),this._setupHandles();break;case"aspectRatio":this._aspectRatio=!!e}},_setupHandles:function(){var 
t,e,i,s,n,o=this.options,h=this;if(this.handles=o.handles||(y(".ui-resizable-handle",this.element).length?{n:".ui-resizable-n",e:".ui-resizable-e",s:".ui-resizable-s",w:".ui-resizable-w",se:".ui-resizable-se",sw:".ui-resizable-sw",ne:".ui-resizable-ne",nw:".ui-resizable-nw"}:"e,s,se"),this._handles=y(),this._addedHandles=y(),this.handles.constructor===String)for("all"===this.handles&&(this.handles="n,e,s,w,se,sw,ne,nw"),i=this.handles.split( +","),this.handles={},e=0;e"),this._addClass(n,"ui-resizable-handle "+s),n.css({zIndex:o.zIndex}),this.handles[t]=".ui-resizable-"+t,this.element.children(this.handles[t]).length||(this.element.append(n),this._addedHandles=this._addedHandles.add(n));this._renderAxis=function(t){var e,i,s;for(e in t=t||this.element,this.handles)this.handles[e].constructor===String?this.handles[e]=this.element.children(this.handles[e]).first().show():(this.handles[e].jquery||this.handles[e].nodeType)&&(this.handles[e]=y(this.handles[e]),this._on(this.handles[e],{mousedown:h._mouseDown})),this.elementIsWrapper&&this.originalElement[0].nodeName.match(/^(textarea|input|select|button)$/i)&&(i=y(this.handles[e],this.element),s=/sw|ne|nw|se|n|s/.test(e)?i.outerHeight():i.outerWidth(),i=["padding",/ne|nw|n/.test(e)?"Top":/se|sw|s/.test(e)?"Bottom":/^e$/.test(e)?"Right":"Left"].join(""),t.css(i,s),this._proportionallyResize()),this._handles=this._handles.add( +this.handles[e])},this._renderAxis(this.element),this._handles=this._handles.add(this.element.find(".ui-resizable-handle")),this._handles.disableSelection(),this._handles.on("mouseover",function(){h.resizing||(this.className&&(n=this.className.match(/ui-resizable-(se|sw|ne|nw|n|e|s|w)/i)),h.axis=n&&n[1]?n[1]:"se")}),o.autoHide&&(this._handles.hide(),this._addClass("ui-resizable-autohide"))},_removeHandles:function(){this._addedHandles.remove()},_mouseCapture:function(t){var e,i,s=!1;for(e in 
this.handles)(i=y(this.handles[e])[0])!==t.target&&!y.contains(i,t.target)||(s=!0);return!this.options.disabled&&s},_mouseStart:function(t){var e,i,s=this.options,n=this.element;return this.resizing=!0,this._renderProxy(),e=this._num(this.helper.css("left")),i=this._num(this.helper.css("top")),s.containment&&(e+=y(s.containment).scrollLeft()||0,i+=y(s.containment).scrollTop()||0),this.offset=this.helper.offset(),this.position={left:e,top:i},this.size=this._helper?{width:this.helper.width(),height:this.helper.height()}:{ +width:n.width(),height:n.height()},this.originalSize=this._helper?{width:n.outerWidth(),height:n.outerHeight()}:{width:n.width(),height:n.height()},this.sizeDiff={width:n.outerWidth()-n.width(),height:n.outerHeight()-n.height()},this.originalPosition={left:e,top:i},this.originalMousePosition={left:t.pageX,top:t.pageY},this.aspectRatio="number"==typeof s.aspectRatio?s.aspectRatio:this.originalSize.width/this.originalSize.height||1,s=y(".ui-resizable-"+this.axis).css("cursor"),y("body").css("cursor","auto"===s?this.axis+"-resize":s),this._addClass("ui-resizable-resizing"),this._propagate("start",t),!0},_mouseDrag:function(t){var e=this.originalMousePosition,i=this.axis,s=t.pageX-e.left||0,e=t.pageY-e.top||0,i=this._change[i];return this._updatePrevProperties(),i&&(e=i.apply(this,[t,s,e]),this._updateVirtualBoundaries(t.shiftKey),(this._aspectRatio||t.shiftKey)&&(e=this._updateRatio(e,t)),e=this._respectSize(e,t),this._updateCache(e),this._propagate("resize",t),e=this._applyChanges(), +!this._helper&&this._proportionallyResizeElements.length&&this._proportionallyResize(),y.isEmptyObject(e)||(this._updatePrevProperties(),this._trigger("resize",t,this.ui()),this._applyChanges())),!1},_mouseStop:function(t){this.resizing=!1;var e,i,s,n=this.options,o=this;return 
this._helper&&(s=(e=(i=this._proportionallyResizeElements).length&&/textarea/i.test(i[0].nodeName))&&this._hasScroll(i[0],"left")?0:o.sizeDiff.height,i=e?0:o.sizeDiff.width,e={width:o.helper.width()-i,height:o.helper.height()-s},i=parseFloat(o.element.css("left"))+(o.position.left-o.originalPosition.left)||null,s=parseFloat(o.element.css("top"))+(o.position.top-o.originalPosition.top)||null,n.animate||this.element.css(y.extend(e,{top:s,left:i})),o.helper.height(o.size.height),o.helper.width(o.size.width),this._helper&&!n.animate&&this._proportionallyResize()),y("body").css("cursor","auto"),this._removeClass("ui-resizable-resizing"),this._propagate("stop",t),this._helper&&this.helper.remove(),!1},_updatePrevProperties:function(){ +this.prevPosition={top:this.position.top,left:this.position.left},this.prevSize={width:this.size.width,height:this.size.height}},_applyChanges:function(){var t={};return this.position.top!==this.prevPosition.top&&(t.top=this.position.top+"px"),this.position.left!==this.prevPosition.left&&(t.left=this.position.left+"px"),this.size.width!==this.prevSize.width&&(t.width=this.size.width+"px"),this.size.height!==this.prevSize.height&&(t.height=this.size.height+"px"),this.helper.css(t),t},_updateVirtualBoundaries:function(t){var e,i,s=this.options,n={minWidth:this._isNumber(s.minWidth)?s.minWidth:0,maxWidth:this._isNumber(s.maxWidth)?s.maxWidth:1/0,minHeight:this._isNumber(s.minHeight)?s.minHeight:0,maxHeight:this._isNumber(s.maxHeight)?s.maxHeight:1/0};(this._aspectRatio||t)&&(e=n.minHeight*this.aspectRatio,i=n.minWidth/this.aspectRatio,s=n.maxHeight*this.aspectRatio,t=n.maxWidth/this.aspectRatio,e>n.minWidth&&(n.minWidth=e),i>n.minHeight&&(n.minHeight=i),st.width,h=this._isNumber(t.height)&&e.minHeight&&e.minHeight>t.height,a=this.originalPosition.left+this.originalSize.width,r=this.originalPosition.top+this.originalSize.height +,l=/sw|nw|w/.test(i),i=/nw|ne|n/.test(i);return 
o&&(t.width=e.minWidth),h&&(t.height=e.minHeight),s&&(t.width=e.maxWidth),n&&(t.height=e.maxHeight),o&&l&&(t.left=a-e.minWidth),s&&l&&(t.left=a-e.maxWidth),h&&i&&(t.top=r-e.minHeight),n&&i&&(t.top=r-e.maxHeight),t.width||t.height||t.left||!t.top?t.width||t.height||t.top||!t.left||(t.left=null):t.top=null,t},_getPaddingPlusBorderDimensions:function(t){for(var e=0,i=[],s=[t.css("borderTopWidth"),t.css("borderRightWidth"),t.css("borderBottomWidth"),t.css("borderLeftWidth")],n=[t.css("paddingTop"),t.css("paddingRight"),t.css("paddingBottom"),t.css("paddingLeft")];e<4;e++)i[e]=parseFloat(s[e])||0,i[e]+=parseFloat(n[e])||0;return{height:i[0]+i[2],width:i[1]+i[3]}},_proportionallyResize:function(){if(this._proportionallyResizeElements.length)for(var t,e=0,i=this.helper||this.element;e
    ").css({overflow:"hidden"}),this._addClass(this.helper,this._helper),this.helper.css({width:this.element.outerWidth(),height:this.element.outerHeight(),position:"absolute",left:this.elementOffset.left+"px",top:this.elementOffset.top+"px",zIndex:++e.zIndex}),this.helper.appendTo("body").disableSelection()):this.helper=this.element},_change:{e:function(t,e){return{width:this.originalSize.width+e}},w:function(t,e){var i=this.originalSize;return{left:this.originalPosition.left+e,width:i.width-e}},n:function(t,e,i){var s=this.originalSize;return{top:this.originalPosition.top+i,height:s.height-i}},s:function(t,e,i){return{height:this.originalSize.height+i}},se:function(t,e,i){return y.extend(this._change.s.apply(this,arguments),this._change.e.apply(this,[t,e,i]))},sw:function(t,e, +i){return y.extend(this._change.s.apply(this,arguments),this._change.w.apply(this,[t,e,i]))},ne:function(t,e,i){return y.extend(this._change.n.apply(this,arguments),this._change.e.apply(this,[t,e,i]))},nw:function(t,e,i){return y.extend(this._change.n.apply(this,arguments),this._change.w.apply(this,[t,e,i]))}},_propagate:function(t,e){y.ui.plugin.call(this,t,[e,this.ui()]),"resize"!==t&&this._trigger(t,e,this.ui())},plugins:{},ui:function(){return{originalElement:this.originalElement,element:this.element,helper:this.helper,position:this.position,size:this.size,originalSize:this.originalSize,originalPosition:this.originalPosition}}}),y.ui.plugin.add("resizable","animate",{stop:function(e){var i=y(this).resizable("instance"),t=i.options,s=i._proportionallyResizeElements,n=s.length&&/textarea/i.test(s[0].nodeName),o=n&&i._hasScroll(s[0],"left")?0:i.sizeDiff.height,h=n?0:i.sizeDiff.width,n={width:i.size.width-h,height:i.size.height-o},h=parseFloat(i.element.css("left"))+(i.position.left-i.originalPosition.left 
+)||null,o=parseFloat(i.element.css("top"))+(i.position.top-i.originalPosition.top)||null;i.element.animate(y.extend(n,o&&h?{top:o,left:h}:{}),{duration:t.animateDuration,easing:t.animateEasing,step:function(){var t={width:parseFloat(i.element.css("width")),height:parseFloat(i.element.css("height")),top:parseFloat(i.element.css("top")),left:parseFloat(i.element.css("left"))};s&&s.length&&y(s[0]).css({width:t.width,height:t.height}),i._updateCache(t),i._propagate("resize",e)}})}}),y.ui.plugin.add("resizable","containment",{start:function(){var i,s,n=y(this).resizable("instance"),t=n.options,e=n.element,o=t.containment,h=o instanceof y?o.get(0):/parent/.test(o)?e.parent().get(0):o;h&&(n.containerElement=y(h),/document/.test(o)||o===document?(n.containerOffset={left:0,top:0},n.containerPosition={left:0,top:0},n.parentData={element:y(document),left:0,top:0,width:y(document).width(),height:y(document).height()||document.body.parentNode.scrollHeight}):(i=y(h),s=[],y(["Top","Right","Left","Bottom"]).each(function(t,e +){s[t]=n._num(i.css("padding"+e))}),n.containerOffset=i.offset(),n.containerPosition=i.position(),n.containerSize={height:i.innerHeight()-s[3],width:i.innerWidth()-s[1]},t=n.containerOffset,e=n.containerSize.height,o=n.containerSize.width,o=n._hasScroll(h,"left")?h.scrollWidth:o,e=n._hasScroll(h)?h.scrollHeight:e,n.parentData={element:h,left:t.left,top:t.top,width:o,height:e}))},resize:function(t){var 
e=y(this).resizable("instance"),i=e.options,s=e.containerOffset,n=e.position,o=e._aspectRatio||t.shiftKey,h={top:0,left:0},a=e.containerElement,t=!0;a[0]!==document&&/static/.test(a.css("position"))&&(h=s),n.left<(e._helper?s.left:0)&&(e.size.width=e.size.width+(e._helper?e.position.left-s.left:e.position.left-h.left),o&&(e.size.height=e.size.width/e.aspectRatio,t=!1),e.position.left=i.helper?s.left:0),n.top<(e._helper?s.top:0)&&(e.size.height=e.size.height+(e._helper?e.position.top-s.top:e.position.top),o&&(e.size.width=e.size.height*e.aspectRatio,t=!1),e.position.top=e._helper?s.top:0), +i=e.containerElement.get(0)===e.element.parent().get(0),n=/relative|absolute/.test(e.containerElement.css("position")),i&&n?(e.offset.left=e.parentData.left+e.position.left,e.offset.top=e.parentData.top+e.position.top):(e.offset.left=e.element.offset().left,e.offset.top=e.element.offset().top),n=Math.abs(e.sizeDiff.width+(e._helper?e.offset.left-h.left:e.offset.left-s.left)),s=Math.abs(e.sizeDiff.height+(e._helper?e.offset.top-h.top:e.offset.top-s.top)),n+e.size.width>=e.parentData.width&&(e.size.width=e.parentData.width-n,o&&(e.size.height=e.size.width/e.aspectRatio,t=!1)),s+e.size.height>=e.parentData.height&&(e.size.height=e.parentData.height-s,o&&(e.size.width=e.size.height*e.aspectRatio,t=!1)),t||(e.position.left=e.prevPosition.left,e.position.top=e.prevPosition.top,e.size.width=e.prevSize.width,e.size.height=e.prevSize.height)},stop:function(){var t=y(this).resizable("instance"),e=t.options,i=t.containerOffset,s=t.containerPosition,n=t.containerElement,o=y(t.helper),h=o.offset(),a=o.outerWidth( +)-t.sizeDiff.width,o=o.outerHeight()-t.sizeDiff.height;t._helper&&!e.animate&&/relative/.test(n.css("position"))&&y(this).css({left:h.left-s.left-i.left,width:a,height:o}),t._helper&&!e.animate&&/static/.test(n.css("position"))&&y(this).css({left:h.left-s.left-i.left,width:a,height:o})}}),y.ui.plugin.add("resizable","alsoResize",{start:function(){var 
t=y(this).resizable("instance").options;y(t.alsoResize).each(function(){var t=y(this);t.data("ui-resizable-alsoresize",{width:parseFloat(t.width()),height:parseFloat(t.height()),left:parseFloat(t.css("left")),top:parseFloat(t.css("top"))})})},resize:function(t,i){var e=y(this).resizable("instance"),s=e.options,n=e.originalSize,o=e.originalPosition,h={height:e.size.height-n.height||0,width:e.size.width-n.width||0,top:e.position.top-o.top||0,left:e.position.left-o.left||0};y(s.alsoResize).each(function(){var t=y(this),s=y(this).data("ui-resizable-alsoresize"),n={},e=t.parents(i.originalElement[0]).length?["width","height"]:["width","height","top","left"];y.each(e, +function(t,e){var i=(s[e]||0)+(h[e]||0);i&&0<=i&&(n[e]=i||null)}),t.css(n)})},stop:function(){y(this).removeData("ui-resizable-alsoresize")}}),y.ui.plugin.add("resizable","ghost",{start:function(){var t=y(this).resizable("instance"),e=t.size;t.ghost=t.originalElement.clone(),t.ghost.css({opacity:.25,display:"block",position:"relative",height:e.height,width:e.width,margin:0,left:0,top:0}),t._addClass(t.ghost,"ui-resizable-ghost"),!1!==y.uiBackCompat&&"string"==typeof t.options.ghost&&t.ghost.addClass(this.options.ghost),t.ghost.appendTo(t.helper)},resize:function(){var t=y(this).resizable("instance");t.ghost&&t.ghost.css({position:"relative",height:t.size.height,width:t.size.width})},stop:function(){var t=y(this).resizable("instance");t.ghost&&t.helper&&t.helper.get(0).removeChild(t.ghost.get(0))}}),y.ui.plugin.add("resizable","grid",{resize:function(){var t,e=y(this).resizable("instance"),i=e.options,s=e.size,n=e.originalSize,o=e.originalPosition,h=e.axis,a="number"==typeof i.grid?[i.grid,i.grid]:i.grid,r=a[0 
+]||1,l=a[1]||1,u=Math.round((s.width-n.width)/r)*r,p=Math.round((s.height-n.height)/l)*l,d=n.width+u,c=n.height+p,f=i.maxWidth&&i.maxWidthd,s=i.minHeight&&i.minHeight>c;i.grid=a,m&&(d+=r),s&&(c+=l),f&&(d-=r),g&&(c-=l),/^(se|s|e)$/.test(h)?(e.size.width=d,e.size.height=c):/^(ne)$/.test(h)?(e.size.width=d,e.size.height=c,e.position.top=o.top-p):/^(sw)$/.test(h)?(e.size.width=d,e.size.height=c,e.position.left=o.left-u):((c-l<=0||d-r<=0)&&(t=e._getPaddingPlusBorderDimensions(this)),0=f[g]?0:Math.min(f[g],n));!a&&1=f[g]?0:Math.min(f[g],n));!a&&1-1){targetElements.on(evt+EVENT_NAMESPACE,function elementToggle(event){$.powerTip.toggle(this,event)})}else{targetElements.on(evt+EVENT_NAMESPACE,function elementOpen(event){$.powerTip.show(this,event)})}});$.each(options.closeEvents,function(idx,evt){if($.inArray(evt,options.openEvents)<0){targetElements.on(evt+EVENT_NAMESPACE,function elementClose(event){$.powerTip.hide(this,!isMouseEvent(event))})}});targetElements.on("keydown"+EVENT_NAMESPACE,function elementKeyDown(event){if(event.keyCode===27){$.powerTip.hide(this,true)}})}return 
targetElements};$.fn.powerTip.defaults={fadeInTime:200,fadeOutTime:100,followMouse:false,popupId:"powerTip",popupClass:null,intentSensitivity:7,intentPollInterval:100,closeDelay:100,placement:"n",smartPlacement:false,offset:10,mouseOnToPopup:false,manual:false,openEvents:["mouseenter","focus"],closeEvents:["mouseleave","blur"]};$.fn.powerTip.smartPlacementLists={n:["n","ne","nw","s"],e:["e","ne","se","w","nw","sw","n","s","e"],s:["s","se","sw","n"],w:["w","nw","sw","e","ne","se","n","s","w"],nw:["nw","w","sw","n","s","se","nw"],ne:["ne","e","se","n","s","sw","ne"],sw:["sw","w","nw","s","n","ne","sw"],se:["se","e","ne","s","n","nw","se"],"nw-alt":["nw-alt","n","ne-alt","sw-alt","s","se-alt","w","e"],"ne-alt":["ne-alt","n","nw-alt","se-alt","s","sw-alt","e","w"],"sw-alt":["sw-alt","s","se-alt","nw-alt","n","ne-alt","w","e"],"se-alt":["se-alt","s","sw-alt","ne-alt","n","nw-alt","e","w"]};$.powerTip={show:function apiShowTip(element,event){if(isMouseEvent(event)){trackMouse(event);session.previousX=event.pageX;session.previousY=event.pageY;$(element).data(DATA_DISPLAYCONTROLLER).show()}else{$(element).first().data(DATA_DISPLAYCONTROLLER).show(true,true)}return element},reposition:function apiResetPosition(element){$(element).first().data(DATA_DISPLAYCONTROLLER).resetPosition();return element},hide:function apiCloseTip(element,immediate){var displayController;immediate=element?immediate:true;if(element){displayController=$(element).first().data(DATA_DISPLAYCONTROLLER)}else if(session.activeHover){displayController=session.activeHover.data(DATA_DISPLAYCONTROLLER)}if(displayController){displayController.hide(immediate)}return element},toggle:function apiToggle(element,event){if(session.activeHover&&session.activeHover.is(element)){$.powerTip.hide(element,!isMouseEvent(event))}else{$.powerTip.show(element,event)}return element}};$.powerTip.showTip=$.powerTip.show;$.powerTip.closeTip=$.powerTip.hide;function CSSCoordinates(){var 
me=this;me.top="auto";me.left="auto";me.right="auto";me.bottom="auto";me.set=function(property,value){if($.isNumeric(value)){me[property]=Math.round(value)}}}function DisplayController(element,options,tipController){var hoverTimer=null,myCloseDelay=null;function openTooltip(immediate,forceOpen){cancelTimer();if(!element.data(DATA_HASACTIVEHOVER)){if(!immediate){session.tipOpenImminent=true;hoverTimer=setTimeout(function intentDelay(){hoverTimer=null;checkForIntent()},options.intentPollInterval)}else{if(forceOpen){element.data(DATA_FORCEDOPEN,true)}closeAnyDelayed();tipController.showTip(element)}}else{cancelClose()}}function closeTooltip(disableDelay){if(myCloseDelay){myCloseDelay=session.closeDelayTimeout=clearTimeout(myCloseDelay);session.delayInProgress=false}cancelTimer();session.tipOpenImminent=false;if(element.data(DATA_HASACTIVEHOVER)){element.data(DATA_FORCEDOPEN,false);if(!disableDelay){session.delayInProgress=true;session.closeDelayTimeout=setTimeout(function closeDelay(){session.closeDelayTimeout=null;tipController.hideTip(element);session.delayInProgress=false;myCloseDelay=null},options.closeDelay);myCloseDelay=session.closeDelayTimeout}else{tipController.hideTip(element)}}}function checkForIntent(){var xDifference=Math.abs(session.previousX-session.currentX),yDifference=Math.abs(session.previousY-session.currentY),totalDifference=xDifference+yDifference;if(totalDifference",{id:options.popupId});if($body.length===0){$body=$("body")}$body.append(tipElement);session.tooltips=session.tooltips?session.tooltips.add(tipElement):tipElement}if(options.followMouse){if(!tipElement.data(DATA_HASMOUSEMOVE)){$document.on("mousemove"+EVENT_NAMESPACE,positionTipOnCursor);$window.on("scroll"+EVENT_NAMESPACE,positionTipOnCursor);tipElement.data(DATA_HASMOUSEMOVE,true)}}function beginShowTip(element){element.data(DATA_HASACTIVEHOVER,true);tipElement.queue(function queueTipInit(next){showTip(element);next()})}function showTip(element){var 
tipContent;if(!element.data(DATA_HASACTIVEHOVER)){return}if(session.isTipOpen){if(!session.isClosing){hideTip(session.activeHover)}tipElement.delay(100).queue(function queueTipAgain(next){showTip(element);next()});return}element.trigger("powerTipPreRender");tipContent=getTooltipContent(element);if(tipContent){tipElement.empty().append(tipContent)}else{return}element.trigger("powerTipRender");session.activeHover=element;session.isTipOpen=true;tipElement.data(DATA_MOUSEONTOTIP,options.mouseOnToPopup);tipElement.addClass(options.popupClass);if(!options.followMouse||element.data(DATA_FORCEDOPEN)){positionTipOnElement(element);session.isFixedTipOpen=true}else{positionTipOnCursor()}if(!element.data(DATA_FORCEDOPEN)&&!options.followMouse){$document.on("click"+EVENT_NAMESPACE,function documentClick(event){var target=event.target;if(target!==element[0]){if(options.mouseOnToPopup){if(target!==tipElement[0]&&!$.contains(tipElement[0],target)){$.powerTip.hide()}}else{$.powerTip.hide()}}})}if(options.mouseOnToPopup&&!options.manual){tipElement.on("mouseenter"+EVENT_NAMESPACE,function tipMouseEnter(){if(session.activeHover){session.activeHover.data(DATA_DISPLAYCONTROLLER).cancel()}});tipElement.on("mouseleave"+EVENT_NAMESPACE,function tipMouseLeave(){if(session.activeHover){session.activeHover.data(DATA_DISPLAYCONTROLLER).hide()}})}tipElement.fadeIn(options.fadeInTime,function fadeInCallback(){if(!session.desyncTimeout){session.desyncTimeout=setInterval(closeDesyncedTip,500)}element.trigger("powerTipOpen")})}function hideTip(element){session.isClosing=true;session.isTipOpen=false;session.desyncTimeout=clearInterval(session.desyncTimeout);element.data(DATA_HASACTIVEHOVER,false);element.data(DATA_FORCEDOPEN,false);$document.off("click"+EVENT_NAMESPACE);tipElement.off(EVENT_NAMESPACE);tipElement.fadeOut(options.fadeOutTime,function fadeOutCallback(){var coords=new 
CSSCoordinates;session.activeHover=null;session.isClosing=false;session.isFixedTipOpen=false;tipElement.removeClass();coords.set("top",session.currentY+options.offset);coords.set("left",session.currentX+options.offset);tipElement.css(coords);element.trigger("powerTipClose")})}function positionTipOnCursor(){var tipWidth,tipHeight,coords,collisions,collisionCount;if(!session.isFixedTipOpen&&(session.isTipOpen||session.tipOpenImminent&&tipElement.data(DATA_HASMOUSEMOVE))){tipWidth=tipElement.outerWidth();tipHeight=tipElement.outerHeight();coords=new CSSCoordinates;coords.set("top",session.currentY+options.offset);coords.set("left",session.currentX+options.offset);collisions=getViewportCollisions(coords,tipWidth,tipHeight);if(collisions!==Collision.none){collisionCount=countFlags(collisions);if(collisionCount===1){if(collisions===Collision.right){coords.set("left",session.scrollLeft+session.windowWidth-tipWidth)}else if(collisions===Collision.bottom){coords.set("top",session.scrollTop+session.windowHeight-tipHeight)}}else{coords.set("left",session.currentX-tipWidth-options.offset);coords.set("top",session.currentY-tipHeight-options.offset)}}tipElement.css(coords)}}function positionTipOnElement(element){var priorityList,finalPlacement;if(options.smartPlacement||options.followMouse&&element.data(DATA_FORCEDOPEN)){priorityList=$.fn.powerTip.smartPlacementLists[options.placement];$.each(priorityList,function(idx,pos){var collisions=getViewportCollisions(placeTooltip(element,pos),tipElement.outerWidth(),tipElement.outerHeight());finalPlacement=pos;return collisions!==Collision.none})}else{placeTooltip(element,options.placement);finalPlacement=options.placement}tipElement.removeClass("w nw sw e ne se n s w se-alt sw-alt ne-alt nw-alt");tipElement.addClass(finalPlacement)}function placeTooltip(element,placement){var iterationCount=0,tipWidth,tipHeight,coords=new 
CSSCoordinates;coords.set("top",0);coords.set("left",0);tipElement.css(coords);do{tipWidth=tipElement.outerWidth();tipHeight=tipElement.outerHeight();coords=placementCalculator.compute(element,placement,tipWidth,tipHeight,options.offset);tipElement.css(coords)}while(++iterationCount<=5&&(tipWidth!==tipElement.outerWidth()||tipHeight!==tipElement.outerHeight()));return coords}function closeDesyncedTip(){var isDesynced=false,hasDesyncableCloseEvent=$.grep(["mouseleave","mouseout","blur","focusout"],function(eventType){return $.inArray(eventType,options.closeEvents)!==-1}).length>0;if(session.isTipOpen&&!session.isClosing&&!session.delayInProgress&&hasDesyncableCloseEvent){if(session.activeHover.data(DATA_HASACTIVEHOVER)===false||session.activeHover.is(":disabled")){isDesynced=true}else if(!isMouseOver(session.activeHover)&&!session.activeHover.is(":focus")&&!session.activeHover.data(DATA_FORCEDOPEN)){if(tipElement.data(DATA_MOUSEONTOTIP)){if(!isMouseOver(tipElement)){isDesynced=true}}else{isDesynced=true}}if(isDesynced){hideTip(session.activeHover)}}}this.showTip=beginShowTip;this.hideTip=hideTip;this.resetPosition=positionTipOnElement}function isSvgElement(element){return Boolean(window.SVGElement&&element[0]instanceof SVGElement)}function isMouseEvent(event){return Boolean(event&&$.inArray(event.type,MOUSE_EVENTS)>-1&&typeof event.pageX==="number")}function initTracking(){if(!session.mouseTrackingActive){session.mouseTrackingActive=true;getViewportDimensions();$(getViewportDimensions);$document.on("mousemove"+EVENT_NAMESPACE,trackMouse);$window.on("resize"+EVENT_NAMESPACE,trackResize);$window.on("scroll"+EVENT_NAMESPACE,trackScroll)}}function getViewportDimensions(){session.scrollLeft=$window.scrollLeft();session.scrollTop=$window.scrollTop();session.windowWidth=$window.width();session.windowHeight=$window.height()}function trackResize(){session.windowWidth=$window.width();session.windowHeight=$window.height()}function trackScroll(){var 
x=$window.scrollLeft(),y=$window.scrollTop();if(x!==session.scrollLeft){session.currentX+=x-session.scrollLeft;session.scrollLeft=x}if(y!==session.scrollTop){session.currentY+=y-session.scrollTop;session.scrollTop=y}}function trackMouse(event){session.currentX=event.pageX;session.currentY=event.pageY}function isMouseOver(element){var elementPosition=element.offset(),elementBox=element[0].getBoundingClientRect(),elementWidth=elementBox.right-elementBox.left,elementHeight=elementBox.bottom-elementBox.top;return session.currentX>=elementPosition.left&&session.currentX<=elementPosition.left+elementWidth&&session.currentY>=elementPosition.top&&session.currentY<=elementPosition.top+elementHeight}function getTooltipContent(element){var tipText=element.data(DATA_POWERTIP),tipObject=element.data(DATA_POWERTIPJQ),tipTarget=element.data(DATA_POWERTIPTARGET),targetElement,content;if(tipText){if($.isFunction(tipText)){tipText=tipText.call(element[0])}content=tipText}else if(tipObject){if($.isFunction(tipObject)){tipObject=tipObject.call(element[0])}if(tipObject.length>0){content=tipObject.clone(true,true)}}else if(tipTarget){targetElement=$("#"+tipTarget);if(targetElement.length>0){content=targetElement.html()}}return content}function getViewportCollisions(coords,elementWidth,elementHeight){var viewportTop=session.scrollTop,viewportLeft=session.scrollLeft,viewportBottom=viewportTop+session.windowHeight,viewportRight=viewportLeft+session.windowWidth,collisions=Collision.none;if(coords.topviewportBottom||Math.abs(coords.bottom-session.windowHeight)>viewportBottom){collisions|=Collision.bottom}if(coords.leftviewportRight){collisions|=Collision.left}if(coords.left+elementWidth>viewportRight||coords.right-1){ +targetElements.on(evt+EVENT_NAMESPACE,function elementToggle(event){$.powerTip.toggle(this,event)})}else{targetElements.on(evt+EVENT_NAMESPACE,function 
elementOpen(event){$.powerTip.show(this,event)})}});$.each(options.closeEvents,function(idx,evt){if($.inArray(evt,options.openEvents)<0){targetElements.on(evt+EVENT_NAMESPACE,function elementClose(event){$.powerTip.hide(this,!isMouseEvent(event))})}});targetElements.on("keydown"+EVENT_NAMESPACE,function elementKeyDown(event){if(event.keyCode===27){$.powerTip.hide(this,true)}})}return targetElements};$.fn.powerTip.defaults={fadeInTime:200,fadeOutTime:100,followMouse:false,popupId:"powerTip",popupClass:null,intentSensitivity:7,intentPollInterval:100,closeDelay:100,placement:"n",smartPlacement:false,offset:10,mouseOnToPopup:false,manual:false,openEvents:["mouseenter","focus"],closeEvents:["mouseleave","blur"]};$.fn.powerTip.smartPlacementLists={n:["n","ne","nw","s"],e:["e","ne","se","w","nw","sw","n","s","e"],s:["s","se","sw","n"],w:["w","nw","sw","e","ne","se", +"n","s","w"],nw:["nw","w","sw","n","s","se","nw"],ne:["ne","e","se","n","s","sw","ne"],sw:["sw","w","nw","s","n","ne","sw"],se:["se","e","ne","s","n","nw","se"],"nw-alt":["nw-alt","n","ne-alt","sw-alt","s","se-alt","w","e"],"ne-alt":["ne-alt","n","nw-alt","se-alt","s","sw-alt","e","w"],"sw-alt":["sw-alt","s","se-alt","nw-alt","n","ne-alt","w","e"],"se-alt":["se-alt","s","sw-alt","ne-alt","n","nw-alt","e","w"]};$.powerTip={show:function apiShowTip(element,event){if(isMouseEvent(event)){trackMouse(event);session.previousX=event.pageX;session.previousY=event.pageY;$(element).data(DATA_DISPLAYCONTROLLER).show()}else{$(element).first().data(DATA_DISPLAYCONTROLLER).show(true,true)}return element},reposition:function apiResetPosition(element){$(element).first().data(DATA_DISPLAYCONTROLLER).resetPosition();return element},hide:function apiCloseTip(element,immediate){var displayController;immediate=element?immediate:true;if(element){displayController=$(element).first().data(DATA_DISPLAYCONTROLLER)}else if( 
+session.activeHover){displayController=session.activeHover.data(DATA_DISPLAYCONTROLLER)}if(displayController){displayController.hide(immediate)}return element},toggle:function apiToggle(element,event){if(session.activeHover&&session.activeHover.is(element)){$.powerTip.hide(element,!isMouseEvent(event))}else{$.powerTip.show(element,event)}return element}};$.powerTip.showTip=$.powerTip.show;$.powerTip.closeTip=$.powerTip.hide;function CSSCoordinates(){var me=this;me.top="auto";me.left="auto";me.right="auto";me.bottom="auto";me.set=function(property,value){if($.isNumeric(value)){me[property]=Math.round(value)}}}function DisplayController(element,options,tipController){var hoverTimer=null,myCloseDelay=null;function openTooltip(immediate,forceOpen){cancelTimer();if(!element.data(DATA_HASACTIVEHOVER)){if(!immediate){session.tipOpenImminent=true;hoverTimer=setTimeout(function intentDelay(){hoverTimer=null;checkForIntent()},options.intentPollInterval)}else{if(forceOpen){element.data(DATA_FORCEDOPEN,true)} +closeAnyDelayed();tipController.showTip(element)}}else{cancelClose()}}function closeTooltip(disableDelay){if(myCloseDelay){myCloseDelay=session.closeDelayTimeout=clearTimeout(myCloseDelay);session.delayInProgress=false}cancelTimer();session.tipOpenImminent=false;if(element.data(DATA_HASACTIVEHOVER)){element.data(DATA_FORCEDOPEN,false);if(!disableDelay){session.delayInProgress=true;session.closeDelayTimeout=setTimeout(function closeDelay(){session.closeDelayTimeout=null;tipController.hideTip(element);session.delayInProgress=false;myCloseDelay=null},options.closeDelay);myCloseDelay=session.closeDelayTimeout}else{tipController.hideTip(element)}}}function checkForIntent(){var 
xDifference=Math.abs(session.previousX-session.currentX),yDifference=Math.abs(session.previousY-session.currentY),totalDifference=xDifference+yDifference;if(totalDifference",{id:options.popupId});if($body.length===0){$body=$("body")}$body.append(tipElement);session.tooltips=session.tooltips?session.tooltips.add(tipElement):tipElement}if(options.followMouse){if(!tipElement.data(DATA_HASMOUSEMOVE)){$document.on("mousemove"+EVENT_NAMESPACE,positionTipOnCursor);$window.on("scroll"+EVENT_NAMESPACE,positionTipOnCursor);tipElement.data(DATA_HASMOUSEMOVE,true)}}function beginShowTip(element){element.data(DATA_HASACTIVEHOVER,true);tipElement.queue(function queueTipInit(next){showTip(element);next()})}function showTip(element){var tipContent;if(!element.data(DATA_HASACTIVEHOVER)){return}if( +session.isTipOpen){if(!session.isClosing){hideTip(session.activeHover)}tipElement.delay(100).queue(function queueTipAgain(next){showTip(element);next()});return}element.trigger("powerTipPreRender");tipContent=getTooltipContent(element);if(tipContent){tipElement.empty().append(tipContent)}else{return}element.trigger("powerTipRender");session.activeHover=element;session.isTipOpen=true;tipElement.data(DATA_MOUSEONTOTIP,options.mouseOnToPopup);tipElement.addClass(options.popupClass);if(!options.followMouse||element.data(DATA_FORCEDOPEN)){positionTipOnElement(element);session.isFixedTipOpen=true}else{positionTipOnCursor()}if(!element.data(DATA_FORCEDOPEN)&&!options.followMouse){$document.on("click"+EVENT_NAMESPACE,function documentClick(event){var target=event.target;if(target!==element[0]){if(options.mouseOnToPopup){if(target!==tipElement[0]&&!$.contains(tipElement[0],target)){$.powerTip.hide()}}else{$.powerTip.hide()}}})}if(options.mouseOnToPopup&&!options.manual){tipElement.on("mouseenter"+EVENT_NAMESPACE, +function tipMouseEnter(){if(session.activeHover){session.activeHover.data(DATA_DISPLAYCONTROLLER).cancel()}});tipElement.on("mouseleave"+EVENT_NAMESPACE,function 
tipMouseLeave(){if(session.activeHover){session.activeHover.data(DATA_DISPLAYCONTROLLER).hide()}})}tipElement.fadeIn(options.fadeInTime,function fadeInCallback(){if(!session.desyncTimeout){session.desyncTimeout=setInterval(closeDesyncedTip,500)}element.trigger("powerTipOpen")})}function hideTip(element){session.isClosing=true;session.isTipOpen=false;session.desyncTimeout=clearInterval(session.desyncTimeout);element.data(DATA_HASACTIVEHOVER,false);element.data(DATA_FORCEDOPEN,false);$document.off("click"+EVENT_NAMESPACE);tipElement.off(EVENT_NAMESPACE);tipElement.fadeOut(options.fadeOutTime,function fadeOutCallback(){var coords=new CSSCoordinates;session.activeHover=null;session.isClosing=false;session.isFixedTipOpen=false;tipElement.removeClass();coords.set("top",session.currentY+options.offset);coords.set("left",session.currentX+options.offset); +tipElement.css(coords);element.trigger("powerTipClose")})}function positionTipOnCursor(){var tipWidth,tipHeight,coords,collisions,collisionCount;if(!session.isFixedTipOpen&&(session.isTipOpen||session.tipOpenImminent&&tipElement.data(DATA_HASMOUSEMOVE))){tipWidth=tipElement.outerWidth();tipHeight=tipElement.outerHeight();coords=new CSSCoordinates;coords.set("top",session.currentY+options.offset);coords.set("left",session.currentX+options.offset);collisions=getViewportCollisions(coords,tipWidth,tipHeight);if(collisions!==Collision.none){collisionCount=countFlags(collisions);if(collisionCount===1){if(collisions===Collision.right){coords.set("left",session.scrollLeft+session.windowWidth-tipWidth)}else if(collisions===Collision.bottom){coords.set("top",session.scrollTop+session.windowHeight-tipHeight)}}else{coords.set("left",session.currentX-tipWidth-options.offset);coords.set("top",session.currentY-tipHeight-options.offset)}}tipElement.css(coords)}}function positionTipOnElement(element){var priorityList, 
+finalPlacement;if(options.smartPlacement||options.followMouse&&element.data(DATA_FORCEDOPEN)){priorityList=$.fn.powerTip.smartPlacementLists[options.placement];$.each(priorityList,function(idx,pos){var collisions=getViewportCollisions(placeTooltip(element,pos),tipElement.outerWidth(),tipElement.outerHeight());finalPlacement=pos;return collisions!==Collision.none})}else{placeTooltip(element,options.placement);finalPlacement=options.placement}tipElement.removeClass("w nw sw e ne se n s w se-alt sw-alt ne-alt nw-alt");tipElement.addClass(finalPlacement)}function placeTooltip(element,placement){var iterationCount=0,tipWidth,tipHeight,coords=new CSSCoordinates;coords.set("top",0);coords.set("left",0);tipElement.css(coords);do{tipWidth=tipElement.outerWidth();tipHeight=tipElement.outerHeight();coords=placementCalculator.compute(element,placement,tipWidth,tipHeight,options.offset);tipElement.css(coords)}while(++iterationCount<=5&&(tipWidth!==tipElement.outerWidth()||tipHeight!==tipElement.outerHeight())); +return coords}function closeDesyncedTip(){var isDesynced=false,hasDesyncableCloseEvent=$.grep(["mouseleave","mouseout","blur","focusout"],function(eventType){return $.inArray(eventType,options.closeEvents)!==-1}).length>0;if(session.isTipOpen&&!session.isClosing&&!session.delayInProgress&&hasDesyncableCloseEvent){if(session.activeHover.data(DATA_HASACTIVEHOVER)===false||session.activeHover.is(":disabled")){isDesynced=true}else if(!isMouseOver(session.activeHover)&&!session.activeHover.is(":focus")&&!session.activeHover.data(DATA_FORCEDOPEN)){if(tipElement.data(DATA_MOUSEONTOTIP)){if(!isMouseOver(tipElement)){isDesynced=true}}else{isDesynced=true}}if(isDesynced){hideTip(session.activeHover)}}}this.showTip=beginShowTip;this.hideTip=hideTip;this.resetPosition=positionTipOnElement}function isSvgElement(element){return Boolean(window.SVGElement&&element[0]instanceof SVGElement)}function isMouseEvent(event){return Boolean(event&&$.inArray(event.type,MOUSE_EVENTS)>-1&&typeof 
event.pageX==="number")} +function initTracking(){if(!session.mouseTrackingActive){session.mouseTrackingActive=true;getViewportDimensions();$(getViewportDimensions);$document.on("mousemove"+EVENT_NAMESPACE,trackMouse);$window.on("resize"+EVENT_NAMESPACE,trackResize);$window.on("scroll"+EVENT_NAMESPACE,trackScroll)}}function getViewportDimensions(){session.scrollLeft=$window.scrollLeft();session.scrollTop=$window.scrollTop();session.windowWidth=$window.width();session.windowHeight=$window.height()}function trackResize(){session.windowWidth=$window.width();session.windowHeight=$window.height()}function trackScroll(){var x=$window.scrollLeft(),y=$window.scrollTop();if(x!==session.scrollLeft){session.currentX+=x-session.scrollLeft;session.scrollLeft=x}if(y!==session.scrollTop){session.currentY+=y-session.scrollTop;session.scrollTop=y}}function trackMouse(event){session.currentX=event.pageX;session.currentY=event.pageY}function isMouseOver(element){var elementPosition=element.offset(),elementBox=element[0].getBoundingClientRect(), +elementWidth=elementBox.right-elementBox.left,elementHeight=elementBox.bottom-elementBox.top;return session.currentX>=elementPosition.left&&session.currentX<=elementPosition.left+elementWidth&&session.currentY>=elementPosition.top&&session.currentY<=elementPosition.top+elementHeight}function getTooltipContent(element){var tipText=element.data(DATA_POWERTIP),tipObject=element.data(DATA_POWERTIPJQ),tipTarget=element.data(DATA_POWERTIPTARGET),targetElement,content;if(tipText){if($.isFunction(tipText)){tipText=tipText.call(element[0])}content=tipText}else if(tipObject){if($.isFunction(tipObject)){tipObject=tipObject.call(element[0])}if(tipObject.length>0){content=tipObject.clone(true,true)}}else if(tipTarget){targetElement=$("#"+tipTarget);if(targetElement.length>0){content=targetElement.html()}}return content}function getViewportCollisions(coords,elementWidth,elementHeight){var 
viewportTop=session.scrollTop,viewportLeft=session.scrollLeft,viewportBottom=viewportTop+session.windowHeight, +viewportRight=viewportLeft+session.windowWidth,collisions=Collision.none;if(coords.topviewportBottom||Math.abs(coords.bottom-session.windowHeight)>viewportBottom){collisions|=Collision.bottom}if(coords.leftviewportRight){collisions|=Collision.left}if(coords.left+elementWidth>viewportRight||coords.right1)){a.preventDefault();var c=a.originalEvent.changedTouches[0],d=document.createEvent("MouseEvents");d.initMouseEvent(b,!0,!0,window,1,c.screenX,c.screenY,c.clientX,c.clientY,!1,!1,!1,!1,0,null),a.target.dispatchEvent(d)}}if(a.support.touch="ontouchend"in document,a.support.touch){var e,b=a.ui.mouse.prototype,c=b._mouseInit,d=b._mouseDestroy;b._touchStart=function(a){var b=this;!e&&b._mouseCapture(a.originalEvent.changedTouches[0])&&(e=!0,b._touchMoved=!1,f(a,"mouseover"),f(a,"mousemove"),f(a,"mousedown"))},b._touchMove=function(a){e&&(this._touchMoved=!0,f(a,"mousemove"))},b._touchEnd=function(a){e&&(f(a,"mouseup"),f(a,"mouseout"),this._touchMoved||f(a,"click"),e=!1)},b._mouseInit=function(){var b=this;b.element.bind({touchstart:a.proxy(b,"_touchStart"),touchmove:a.proxy(b,"_touchMove"),touchend:a.proxy(b,"_touchEnd")}),c.call(b)},b._mouseDestroy=function(){var b=this;b.element.unbind({touchstart:a.proxy(b,"_touchStart"),touchmove:a.proxy(b,"_touchMove"),touchend:a.proxy(b,"_touchEnd")}),d.call(b)}}}(jQuery);/*! 
SmartMenus jQuery Plugin - v1.1.0 - September 17, 2017 + */!function(a){function f(a,b){if(!(a.originalEvent.touches.length>1)){a.preventDefault();var c=a.originalEvent.changedTouches[0],d=document.createEvent("MouseEvents");d.initMouseEvent(b,!0,!0,window,1,c.screenX,c.screenY,c.clientX,c.clientY,!1,!1,!1,!1,0,null),a.target.dispatchEvent(d)}}if(a.support.touch="ontouchend"in document,a.support.touch){var e,b=a.ui.mouse.prototype,c=b._mouseInit,d=b._mouseDestroy;b._touchStart=function(a){var b=this;!e&&b._mouseCapture(a.originalEvent.changedTouches[0])&&(e=!0,b._touchMoved=!1,f(a,"mouseover"),f(a,"mousemove"),f(a,"mousedown"))},b._touchMove=function(a){e&&(this._touchMoved=!0,f(a,"mousemove"))},b._touchEnd=function(a){e&&(f(a,"mouseup"),f(a,"mouseout"),this._touchMoved||f(a,"click"),e=!1)},b._mouseInit=function(){var b=this;b.element.bind({touchstart:a.proxy(b,"_touchStart"),touchmove:a.proxy(b,"_touchMove"),touchend:a.proxy(b,"_touchEnd")}),c.call(b)},b._mouseDestroy=function(){var b=this;b.element.unbind({touchstart:a.proxy(b,"_touchStart"),touchmove:a.proxy(b, +"_touchMove"),touchend:a.proxy(b,"_touchEnd")}),d.call(b)}}}(jQuery); +/*! SmartMenus jQuery Plugin - v1.1.0 - September 17, 2017 * http://www.smartmenus.org/ - * Copyright Vasil Dinkov, Vadikom Web Ltd. 
http://vadikom.com; Licensed MIT */(function(t){"function"==typeof define&&define.amd?define(["jquery"],t):"object"==typeof module&&"object"==typeof module.exports?module.exports=t(require("jquery")):t(jQuery)})(function($){function initMouseDetection(t){var e=".smartmenus_mouse";if(mouseDetectionEnabled||t)mouseDetectionEnabled&&t&&($(document).off(e),mouseDetectionEnabled=!1);else{var i=!0,s=null,o={mousemove:function(t){var e={x:t.pageX,y:t.pageY,timeStamp:(new Date).getTime()};if(s){var o=Math.abs(s.x-e.x),a=Math.abs(s.y-e.y);if((o>0||a>0)&&2>=o&&2>=a&&300>=e.timeStamp-s.timeStamp&&(mouse=!0,i)){var n=$(t.target).closest("a");n.is("a")&&$.each(menuTrees,function(){return $.contains(this.$root[0],n[0])?(this.itemEnter({currentTarget:n[0]}),!1):void 0}),i=!1}}s=e}};o[touchEvents?"touchstart":"pointerover pointermove pointerout MSPointerOver MSPointerMove MSPointerOut"]=function(t){isTouchEvent(t.originalEvent)&&(mouse=!1)},$(document).on(getEventsNS(o,e)),mouseDetectionEnabled=!0}}function isTouchEvent(t){return!/^(4|mouse)$/.test(t.pointerType)}function getEventsNS(t,e){e||(e="");var i={};for(var s in t)i[s.split(" ").join(e+" ")+e]=t[s];return i}var menuTrees=[],mouse=!1,touchEvents="ontouchstart"in window,mouseDetectionEnabled=!1,requestAnimationFrame=window.requestAnimationFrame||function(t){return setTimeout(t,1e3/60)},cancelAnimationFrame=window.cancelAnimationFrame||function(t){clearTimeout(t)},canAnimate=!!$.fn.animate;return $.SmartMenus=function(t,e){this.$root=$(t),this.opts=e,this.rootId="",this.accessIdPrefix="",this.$subArrow=null,this.activatedItems=[],this.visibleSubMenus=[],this.showTimeout=0,this.hideTimeout=0,this.scrollTimeout=0,this.clickActivated=!1,this.focusActivated=!1,this.zIndexInc=0,this.idInc=0,this.$firstLink=null,this.$firstSub=null,this.disabled=!1,this.$disableOverlay=null,this.$touchScrollingSub=null,this.cssTransforms3d="perspective"in t.style||"webkitPerspective"in 
t.style,this.wasCollapsible=!1,this.init()},$.extend($.SmartMenus,{hideAll:function(){$.each(menuTrees,function(){this.menuHideAll()})},destroy:function(){for(;menuTrees.length;)menuTrees[0].destroy();initMouseDetection(!0)},prototype:{init:function(t){var e=this;if(!t){menuTrees.push(this),this.rootId=((new Date).getTime()+Math.random()+"").replace(/\D/g,""),this.accessIdPrefix="sm-"+this.rootId+"-",this.$root.hasClass("sm-rtl")&&(this.opts.rightToLeftSubMenus=!0);var i=".smartmenus";this.$root.data("smartmenus",this).attr("data-smartmenus-id",this.rootId).dataSM("level",1).on(getEventsNS({"mouseover focusin":$.proxy(this.rootOver,this),"mouseout focusout":$.proxy(this.rootOut,this),keydown:$.proxy(this.rootKeyDown,this)},i)).on(getEventsNS({mouseenter:$.proxy(this.itemEnter,this),mouseleave:$.proxy(this.itemLeave,this),mousedown:$.proxy(this.itemDown,this),focus:$.proxy(this.itemFocus,this),blur:$.proxy(this.itemBlur,this),click:$.proxy(this.itemClick,this)},i),"a"),i+=this.rootId,this.opts.hideOnClick&&$(document).on(getEventsNS({touchstart:$.proxy(this.docTouchStart,this),touchmove:$.proxy(this.docTouchMove,this),touchend:$.proxy(this.docTouchEnd,this),click:$.proxy(this.docClick,this)},i)),$(window).on(getEventsNS({"resize orientationchange":$.proxy(this.winResize,this)},i)),this.opts.subIndicators&&(this.$subArrow=$("").addClass("sub-arrow"),this.opts.subIndicatorsText&&this.$subArrow.html(this.opts.subIndicatorsText)),initMouseDetection()}if(this.$firstSub=this.$root.find("ul").each(function(){e.menuInit($(this))}).eq(0),this.$firstLink=this.$root.find("a").eq(0),this.opts.markCurrentItem){var s=/(index|default)\.[^#\?\/]*/i,o=/#.*/,a=window.location.href.replace(s,""),n=a.replace(o,"");this.$root.find("a").each(function(){var 
t=this.href.replace(s,""),i=$(this);(t==a||t==n)&&(i.addClass("current"),e.opts.markCurrentTree&&i.parentsUntil("[data-smartmenus-id]","ul").each(function(){$(this).dataSM("parent-a").addClass("current")}))})}this.wasCollapsible=this.isCollapsible()},destroy:function(t){if(!t){var e=".smartmenus";this.$root.removeData("smartmenus").removeAttr("data-smartmenus-id").removeDataSM("level").off(e),e+=this.rootId,$(document).off(e),$(window).off(e),this.opts.subIndicators&&(this.$subArrow=null)}this.menuHideAll();var i=this;this.$root.find("ul").each(function(){var t=$(this);t.dataSM("scroll-arrows")&&t.dataSM("scroll-arrows").remove(),t.dataSM("shown-before")&&((i.opts.subMenusMinWidth||i.opts.subMenusMaxWidth)&&t.css({width:"",minWidth:"",maxWidth:""}).removeClass("sm-nowrap"),t.dataSM("scroll-arrows")&&t.dataSM("scroll-arrows").remove(),t.css({zIndex:"",top:"",left:"",marginLeft:"",marginTop:"",display:""})),0==(t.attr("id")||"").indexOf(i.accessIdPrefix)&&t.removeAttr("id")}).removeDataSM("in-mega").removeDataSM("shown-before").removeDataSM("scroll-arrows").removeDataSM("parent-a").removeDataSM("level").removeDataSM("beforefirstshowfired").removeAttr("role").removeAttr("aria-hidden").removeAttr("aria-labelledby").removeAttr("aria-expanded"),this.$root.find("a.has-submenu").each(function(){var 
t=$(this);0==t.attr("id").indexOf(i.accessIdPrefix)&&t.removeAttr("id")}).removeClass("has-submenu").removeDataSM("sub").removeAttr("aria-haspopup").removeAttr("aria-controls").removeAttr("aria-expanded").closest("li").removeDataSM("sub"),this.opts.subIndicators&&this.$root.find("span.sub-arrow").remove(),this.opts.markCurrentItem&&this.$root.find("a.current").removeClass("current"),t||(this.$root=null,this.$firstLink=null,this.$firstSub=null,this.$disableOverlay&&(this.$disableOverlay.remove(),this.$disableOverlay=null),menuTrees.splice($.inArray(this,menuTrees),1))},disable:function(t){if(!this.disabled){if(this.menuHideAll(),!t&&!this.opts.isPopup&&this.$root.is(":visible")){var e=this.$root.offset();this.$disableOverlay=$('
    ').css({position:"absolute",top:e.top,left:e.left,width:this.$root.outerWidth(),height:this.$root.outerHeight(),zIndex:this.getStartZIndex(!0),opacity:0}).appendTo(document.body)}this.disabled=!0}},docClick:function(t){return this.$touchScrollingSub?(this.$touchScrollingSub=null,void 0):((this.visibleSubMenus.length&&!$.contains(this.$root[0],t.target)||$(t.target).closest("a").length)&&this.menuHideAll(),void 0)},docTouchEnd:function(){if(this.lastTouch){if(!(!this.visibleSubMenus.length||void 0!==this.lastTouch.x2&&this.lastTouch.x1!=this.lastTouch.x2||void 0!==this.lastTouch.y2&&this.lastTouch.y1!=this.lastTouch.y2||this.lastTouch.target&&$.contains(this.$root[0],this.lastTouch.target))){this.hideTimeout&&(clearTimeout(this.hideTimeout),this.hideTimeout=0);var t=this;this.hideTimeout=setTimeout(function(){t.menuHideAll()},350)}this.lastTouch=null}},docTouchMove:function(t){if(this.lastTouch){var e=t.originalEvent.touches[0];this.lastTouch.x2=e.pageX,this.lastTouch.y2=e.pageY}},docTouchStart:function(t){var e=t.originalEvent.touches[0];this.lastTouch={x1:e.pageX,y1:e.pageY,target:e.target}},enable:function(){this.disabled&&(this.$disableOverlay&&(this.$disableOverlay.remove(),this.$disableOverlay=null),this.disabled=!1)},getClosestMenu:function(t){for(var e=$(t).closest("ul");e.dataSM("in-mega");)e=e.parent().closest("ul");return e[0]||null},getHeight:function(t){return this.getOffset(t,!0)},getOffset:function(t,e){var i;"none"==t.css("display")&&(i={position:t[0].style.position,visibility:t[0].style.visibility},t.css({position:"absolute",visibility:"hidden"}).show());var s=t[0].getBoundingClientRect&&t[0].getBoundingClientRect(),o=s&&(e?s.height||s.bottom-s.top:s.width||s.right-s.left);return o||0===o||(o=e?t[0].offsetHeight:t[0].offsetWidth),i&&t.hide().css(i),o},getStartZIndex:function(t){var e=parseInt(this[t?"$root":"$firstSub"].css("z-index"));return!t&&isNaN(e)&&(e=parseInt(this.$root.css("z-index"))),isNaN(e)?1:e},getTouchPoint:function(t){return 
t.touches&&t.touches[0]||t.changedTouches&&t.changedTouches[0]||t},getViewport:function(t){var e=t?"Height":"Width",i=document.documentElement["client"+e],s=window["inner"+e];return s&&(i=Math.min(i,s)),i},getViewportHeight:function(){return this.getViewport(!0)},getViewportWidth:function(){return this.getViewport()},getWidth:function(t){return this.getOffset(t)},handleEvents:function(){return!this.disabled&&this.isCSSOn()},handleItemEvents:function(t){return this.handleEvents()&&!this.isLinkInMegaMenu(t)},isCollapsible:function(){return"static"==this.$firstSub.css("position")},isCSSOn:function(){return"inline"!=this.$firstLink.css("display")},isFixed:function(){var t="fixed"==this.$root.css("position");return t||this.$root.parentsUntil("body").each(function(){return"fixed"==$(this).css("position")?(t=!0,!1):void 0}),t},isLinkInMegaMenu:function(t){return $(this.getClosestMenu(t[0])).hasClass("mega-menu")},isTouchMode:function(){return!mouse||this.opts.noMouseOver||this.isCollapsible()},itemActivate:function(t,e){var i=t.closest("ul"),s=i.dataSM("level");if(s>1&&(!this.activatedItems[s-2]||this.activatedItems[s-2][0]!=i.dataSM("parent-a")[0])){var o=this;$(i.parentsUntil("[data-smartmenus-id]","ul").get().reverse()).add(i).each(function(){o.itemActivate($(this).dataSM("parent-a"))})}if((!this.isCollapsible()||e)&&this.menuHideSubMenus(this.activatedItems[s-1]&&this.activatedItems[s-1][0]==t[0]?s:s-1),this.activatedItems[s-1]=t,this.$root.triggerHandler("activate.smapi",t[0])!==!1){var a=t.dataSM("sub");a&&(this.isTouchMode()||!this.opts.showOnClick||this.clickActivated)&&this.menuShow(a)}},itemBlur:function(t){var e=$(t.currentTarget);this.handleItemEvents(e)&&this.$root.triggerHandler("blur.smapi",e[0])},itemClick:function(t){var e=$(t.currentTarget);if(this.handleItemEvents(e)){if(this.$touchScrollingSub&&this.$touchScrollingSub[0]==e.closest("ul")[0])return 
this.$touchScrollingSub=null,t.stopPropagation(),!1;if(this.$root.triggerHandler("click.smapi",e[0])===!1)return!1;var i=$(t.target).is(".sub-arrow"),s=e.dataSM("sub"),o=s?2==s.dataSM("level"):!1,a=this.isCollapsible(),n=/toggle$/.test(this.opts.collapsibleBehavior),r=/link$/.test(this.opts.collapsibleBehavior),h=/^accordion/.test(this.opts.collapsibleBehavior);if(s&&!s.is(":visible")){if((!r||!a||i)&&(this.opts.showOnClick&&o&&(this.clickActivated=!0),this.itemActivate(e,h),s.is(":visible")))return this.focusActivated=!0,!1}else if(a&&(n||i))return this.itemActivate(e,h),this.menuHide(s),n&&(this.focusActivated=!1),!1;return this.opts.showOnClick&&o||e.hasClass("disabled")||this.$root.triggerHandler("select.smapi",e[0])===!1?!1:void 0}},itemDown:function(t){var e=$(t.currentTarget);this.handleItemEvents(e)&&e.dataSM("mousedown",!0)},itemEnter:function(t){var e=$(t.currentTarget);if(this.handleItemEvents(e)){if(!this.isTouchMode()){this.showTimeout&&(clearTimeout(this.showTimeout),this.showTimeout=0);var i=this;this.showTimeout=setTimeout(function(){i.itemActivate(e)},this.opts.showOnClick&&1==e.closest("ul").dataSM("level")?1:this.opts.showTimeout)}this.$root.triggerHandler("mouseenter.smapi",e[0])}},itemFocus:function(t){var e=$(t.currentTarget);this.handleItemEvents(e)&&(!this.focusActivated||this.isTouchMode()&&e.dataSM("mousedown")||this.activatedItems.length&&this.activatedItems[this.activatedItems.length-1][0]==e[0]||this.itemActivate(e,!0),this.$root.triggerHandler("focus.smapi",e[0]))},itemLeave:function(t){var e=$(t.currentTarget);this.handleItemEvents(e)&&(this.isTouchMode()||(e[0].blur(),this.showTimeout&&(clearTimeout(this.showTimeout),this.showTimeout=0)),e.removeDataSM("mousedown"),this.$root.triggerHandler("mouseleave.smapi",e[0]))},menuHide:function(t){if(this.$root.triggerHandler("beforehide.smapi",t[0])!==!1&&(canAnimate&&t.stop(!0,!0),"none"!=t.css("display"))){var 
e=function(){t.css("z-index","")};this.isCollapsible()?canAnimate&&this.opts.collapsibleHideFunction?this.opts.collapsibleHideFunction.call(this,t,e):t.hide(this.opts.collapsibleHideDuration,e):canAnimate&&this.opts.hideFunction?this.opts.hideFunction.call(this,t,e):t.hide(this.opts.hideDuration,e),t.dataSM("scroll")&&(this.menuScrollStop(t),t.css({"touch-action":"","-ms-touch-action":"","-webkit-transform":"",transform:""}).off(".smartmenus_scroll").removeDataSM("scroll").dataSM("scroll-arrows").hide()),t.dataSM("parent-a").removeClass("highlighted").attr("aria-expanded","false"),t.attr({"aria-expanded":"false","aria-hidden":"true"});var i=t.dataSM("level");this.activatedItems.splice(i-1,1),this.visibleSubMenus.splice($.inArray(t,this.visibleSubMenus),1),this.$root.triggerHandler("hide.smapi",t[0])}},menuHideAll:function(){this.showTimeout&&(clearTimeout(this.showTimeout),this.showTimeout=0);for(var t=this.opts.isPopup?1:0,e=this.visibleSubMenus.length-1;e>=t;e--)this.menuHide(this.visibleSubMenus[e]);this.opts.isPopup&&(canAnimate&&this.$root.stop(!0,!0),this.$root.is(":visible")&&(canAnimate&&this.opts.hideFunction?this.opts.hideFunction.call(this,this.$root):this.$root.hide(this.opts.hideDuration))),this.activatedItems=[],this.visibleSubMenus=[],this.clickActivated=!1,this.focusActivated=!1,this.zIndexInc=0,this.$root.triggerHandler("hideAll.smapi")},menuHideSubMenus:function(t){for(var e=this.activatedItems.length-1;e>=t;e--){var i=this.activatedItems[e].dataSM("sub");i&&this.menuHide(i)}},menuInit:function(t){if(!t.dataSM("in-mega")){t.hasClass("mega-menu")&&t.find("ul").dataSM("in-mega",!0);for(var e=2,i=t[0];(i=i.parentNode.parentNode)!=this.$root[0];)e++;var s=t.prevAll("a").eq(-1);s.length||(s=t.prevAll().find("a").eq(-1)),s.addClass("has-submenu").dataSM("sub",t),t.dataSM("parent-a",s).dataSM("level",e).parent().dataSM("sub",t);var o=s.attr("id")||this.accessIdPrefix+ ++this.idInc,a=t.attr("id")||this.accessIdPrefix+ 
++this.idInc;s.attr({id:o,"aria-haspopup":"true","aria-controls":a,"aria-expanded":"false"}),t.attr({id:a,role:"group","aria-hidden":"true","aria-labelledby":o,"aria-expanded":"false"}),this.opts.subIndicators&&s[this.opts.subIndicatorsPos](this.$subArrow.clone())}},menuPosition:function(t){var e,i,s=t.dataSM("parent-a"),o=s.closest("li"),a=o.parent(),n=t.dataSM("level"),r=this.getWidth(t),h=this.getHeight(t),u=s.offset(),l=u.left,c=u.top,d=this.getWidth(s),m=this.getHeight(s),p=$(window),f=p.scrollLeft(),v=p.scrollTop(),b=this.getViewportWidth(),S=this.getViewportHeight(),g=a.parent().is("[data-sm-horizontal-sub]")||2==n&&!a.hasClass("sm-vertical"),M=this.opts.rightToLeftSubMenus&&!o.is("[data-sm-reverse]")||!this.opts.rightToLeftSubMenus&&o.is("[data-sm-reverse]"),w=2==n?this.opts.mainMenuSubOffsetX:this.opts.subMenusSubOffsetX,T=2==n?this.opts.mainMenuSubOffsetY:this.opts.subMenusSubOffsetY;if(g?(e=M?d-r-w:w,i=this.opts.bottomToTopSubMenus?-h-T:m+T):(e=M?w-r:d-w,i=this.opts.bottomToTopSubMenus?m-T-h:T),this.opts.keepInViewport){var y=l+e,I=c+i;if(M&&f>y?e=g?f-y+e:d-w:!M&&y+r>f+b&&(e=g?f+b-r-y+e:w-r),g||(S>h&&I+h>v+S?i+=v+S-h-I:(h>=S||v>I)&&(i+=v-I)),g&&(I+h>v+S+.49||v>I)||!g&&h>S+.49){var x=this;t.dataSM("scroll-arrows")||t.dataSM("scroll-arrows",$([$('')[0],$('')[0]]).on({mouseenter:function(){t.dataSM("scroll").up=$(this).hasClass("scroll-up"),x.menuScroll(t)},mouseleave:function(e){x.menuScrollStop(t),x.menuScrollOut(t,e)},"mousewheel DOMMouseScroll":function(t){t.preventDefault()}}).insertAfter(t));var A=".smartmenus_scroll";if(t.dataSM("scroll",{y:this.cssTransforms3d?0:i-m,step:1,itemH:m,subH:h,arrowDownH:this.getHeight(t.dataSM("scroll-arrows").eq(1))}).on(getEventsNS({mouseover:function(e){x.menuScrollOver(t,e)},mouseout:function(e){x.menuScrollOut(t,e)},"mousewheel 
DOMMouseScroll":function(e){x.menuScrollMousewheel(t,e)}},A)).dataSM("scroll-arrows").css({top:"auto",left:"0",marginLeft:e+(parseInt(t.css("border-left-width"))||0),width:r-(parseInt(t.css("border-left-width"))||0)-(parseInt(t.css("border-right-width"))||0),zIndex:t.css("z-index")}).eq(g&&this.opts.bottomToTopSubMenus?0:1).show(),this.isFixed()){var C={};C[touchEvents?"touchstart touchmove touchend":"pointerdown pointermove pointerup MSPointerDown MSPointerMove MSPointerUp"]=function(e){x.menuScrollTouch(t,e)},t.css({"touch-action":"none","-ms-touch-action":"none"}).on(getEventsNS(C,A))}}}t.css({top:"auto",left:"0",marginLeft:e,marginTop:i-m})},menuScroll:function(t,e,i){var s,o=t.dataSM("scroll"),a=t.dataSM("scroll-arrows"),n=o.up?o.upEnd:o.downEnd;if(!e&&o.momentum){if(o.momentum*=.92,s=o.momentum,.5>s)return this.menuScrollStop(t),void 0}else s=i||(e||!this.opts.scrollAccelerate?this.opts.scrollStep:Math.floor(o.step));var r=t.dataSM("level");if(this.activatedItems[r-1]&&this.activatedItems[r-1].dataSM("sub")&&this.activatedItems[r-1].dataSM("sub").is(":visible")&&this.menuHideSubMenus(r-1),o.y=o.up&&o.y>=n||!o.up&&n>=o.y?o.y:Math.abs(n-o.y)>s?o.y+(o.up?s:-s):n,t.css(this.cssTransforms3d?{"-webkit-transform":"translate3d(0, "+o.y+"px, 0)",transform:"translate3d(0, "+o.y+"px, 0)"}:{marginTop:o.y}),mouse&&(o.up&&o.y>o.downEnd||!o.up&&o.y0;t.dataSM("scroll-arrows").eq(i?0:1).is(":visible")&&(t.dataSM("scroll").up=i,this.menuScroll(t,!0))}e.preventDefault()},menuScrollOut:function(t,e){mouse&&(/^scroll-(up|down)/.test((e.relatedTarget||"").className)||(t[0]==e.relatedTarget||$.contains(t[0],e.relatedTarget))&&this.getClosestMenu(e.relatedTarget)==t[0]||t.dataSM("scroll-arrows").css("visibility","hidden"))},menuScrollOver:function(t,e){if(mouse&&!/^scroll-(up|down)/.test(e.target.className)&&this.getClosestMenu(e.target)==t[0]){this.menuScrollRefreshData(t);var 
i=t.dataSM("scroll"),s=$(window).scrollTop()-t.dataSM("parent-a").offset().top-i.itemH;t.dataSM("scroll-arrows").eq(0).css("margin-top",s).end().eq(1).css("margin-top",s+this.getViewportHeight()-i.arrowDownH).end().css("visibility","visible")}},menuScrollRefreshData:function(t){var e=t.dataSM("scroll"),i=$(window).scrollTop()-t.dataSM("parent-a").offset().top-e.itemH;this.cssTransforms3d&&(i=-(parseFloat(t.css("margin-top"))-i)),$.extend(e,{upEnd:i,downEnd:i+this.getViewportHeight()-e.subH})},menuScrollStop:function(t){return this.scrollTimeout?(cancelAnimationFrame(this.scrollTimeout),this.scrollTimeout=0,t.dataSM("scroll").step=1,!0):void 0},menuScrollTouch:function(t,e){if(e=e.originalEvent,isTouchEvent(e)){var i=this.getTouchPoint(e);if(this.getClosestMenu(i.target)==t[0]){var s=t.dataSM("scroll");if(/(start|down)$/i.test(e.type))this.menuScrollStop(t)?(e.preventDefault(),this.$touchScrollingSub=t):this.$touchScrollingSub=null,this.menuScrollRefreshData(t),$.extend(s,{touchStartY:i.pageY,touchStartTime:e.timeStamp});else if(/move$/i.test(e.type)){var o=void 0!==s.touchY?s.touchY:s.touchStartY;if(void 0!==o&&o!=i.pageY){this.$touchScrollingSub=t;var a=i.pageY>o;void 0!==s.up&&s.up!=a&&$.extend(s,{touchStartY:i.pageY,touchStartTime:e.timeStamp}),$.extend(s,{up:a,touchY:i.pageY}),this.menuScroll(t,!0,Math.abs(i.pageY-o))}e.preventDefault()}else void 0!==s.touchY&&((s.momentum=15*Math.pow(Math.abs(i.pageY-s.touchStartY)/(e.timeStamp-s.touchStartTime),2))&&(this.menuScrollStop(t),this.menuScroll(t),e.preventDefault()),delete s.touchY)}}},menuShow:function(t){if((t.dataSM("beforefirstshowfired")||(t.dataSM("beforefirstshowfired",!0),this.$root.triggerHandler("beforefirstshow.smapi",t[0])!==!1))&&this.$root.triggerHandler("beforeshow.smapi",t[0])!==!1&&(t.dataSM("shown-before",!0),canAnimate&&t.stop(!0,!0),!t.is(":visible"))){var 
e=t.dataSM("parent-a"),i=this.isCollapsible();if((this.opts.keepHighlighted||i)&&e.addClass("highlighted"),i)t.removeClass("sm-nowrap").css({zIndex:"",width:"auto",minWidth:"",maxWidth:"",top:"",left:"",marginLeft:"",marginTop:""});else{if(t.css("z-index",this.zIndexInc=(this.zIndexInc||this.getStartZIndex())+1),(this.opts.subMenusMinWidth||this.opts.subMenusMaxWidth)&&(t.css({width:"auto",minWidth:"",maxWidth:""}).addClass("sm-nowrap"),this.opts.subMenusMinWidth&&t.css("min-width",this.opts.subMenusMinWidth),this.opts.subMenusMaxWidth)){var s=this.getWidth(t);t.css("max-width",this.opts.subMenusMaxWidth),s>this.getWidth(t)&&t.removeClass("sm-nowrap").css("width",this.opts.subMenusMaxWidth)}this.menuPosition(t)}var o=function(){t.css("overflow","")};i?canAnimate&&this.opts.collapsibleShowFunction?this.opts.collapsibleShowFunction.call(this,t,o):t.show(this.opts.collapsibleShowDuration,o):canAnimate&&this.opts.showFunction?this.opts.showFunction.call(this,t,o):t.show(this.opts.showDuration,o),e.attr("aria-expanded","true"),t.attr({"aria-expanded":"true","aria-hidden":"false"}),this.visibleSubMenus.push(t),this.$root.triggerHandler("show.smapi",t[0])}},popupHide:function(t){this.hideTimeout&&(clearTimeout(this.hideTimeout),this.hideTimeout=0);var e=this;this.hideTimeout=setTimeout(function(){e.menuHideAll()},t?1:this.opts.hideTimeout)},popupShow:function(t,e){if(!this.opts.isPopup)return alert('SmartMenus jQuery Error:\n\nIf you want to show this menu via the "popupShow" method, set the isPopup:true option.'),void 0;if(this.hideTimeout&&(clearTimeout(this.hideTimeout),this.hideTimeout=0),this.$root.dataSM("shown-before",!0),canAnimate&&this.$root.stop(!0,!0),!this.$root.is(":visible")){this.$root.css({left:t,top:e});var 
i=this,s=function(){i.$root.css("overflow","")};canAnimate&&this.opts.showFunction?this.opts.showFunction.call(this,this.$root,s):this.$root.show(this.opts.showDuration,s),this.visibleSubMenus[0]=this.$root}},refresh:function(){this.destroy(!0),this.init(!0)},rootKeyDown:function(t){if(this.handleEvents())switch(t.keyCode){case 27:var e=this.activatedItems[0];if(e){this.menuHideAll(),e[0].focus();var i=e.dataSM("sub");i&&this.menuHide(i)}break;case 32:var s=$(t.target);if(s.is("a")&&this.handleItemEvents(s)){var i=s.dataSM("sub");i&&!i.is(":visible")&&(this.itemClick({currentTarget:t.target}),t.preventDefault())}}},rootOut:function(t){if(this.handleEvents()&&!this.isTouchMode()&&t.target!=this.$root[0]&&(this.hideTimeout&&(clearTimeout(this.hideTimeout),this.hideTimeout=0),!this.opts.showOnClick||!this.opts.hideOnClick)){var e=this;this.hideTimeout=setTimeout(function(){e.menuHideAll()},this.opts.hideTimeout)}},rootOver:function(t){this.handleEvents()&&!this.isTouchMode()&&t.target!=this.$root[0]&&this.hideTimeout&&(clearTimeout(this.hideTimeout),this.hideTimeout=0)},winResize:function(t){if(this.handleEvents()){if(!("onorientationchange"in window)||"orientationchange"==t.type){var e=this.isCollapsible();this.wasCollapsible&&e||(this.activatedItems.length&&this.activatedItems[this.activatedItems.length-1][0].blur(),this.menuHideAll()),this.wasCollapsible=e}}else if(this.$disableOverlay){var i=this.$root.offset();this.$disableOverlay.css({top:i.top,left:i.left,width:this.$root.outerWidth(),height:this.$root.outerHeight()})}}}}),$.fn.dataSM=function(t,e){return e?this.data(t+"_smartmenus",e):this.data(t+"_smartmenus")},$.fn.removeDataSM=function(t){return this.removeData(t+"_smartmenus")},$.fn.smartmenus=function(options){if("string"==typeof options){var args=arguments,method=options;return Array.prototype.shift.call(args),this.each(function(){var t=$(this).data("smartmenus");t&&t[method]&&t[method].apply(t,args)})}return this.each(function(){var 
dataOpts=$(this).data("sm-options")||null;if(dataOpts)try{dataOpts=eval("("+dataOpts+")")}catch(e){dataOpts=null,alert('ERROR\n\nSmartMenus jQuery init:\nInvalid "data-sm-options" attribute value syntax.')}new $.SmartMenus(this,$.extend({},$.fn.smartmenus.defaults,options,dataOpts))})},$.fn.smartmenus.defaults={isPopup:!1,mainMenuSubOffsetX:0,mainMenuSubOffsetY:0,subMenusSubOffsetX:0,subMenusSubOffsetY:0,subMenusMinWidth:"10em",subMenusMaxWidth:"20em",subIndicators:!0,subIndicatorsPos:"append",subIndicatorsText:"",scrollStep:30,scrollAccelerate:!0,showTimeout:250,hideTimeout:500,showDuration:0,showFunction:null,hideDuration:0,hideFunction:function(t,e){t.fadeOut(200,e)},collapsibleShowDuration:0,collapsibleShowFunction:function(t,e){t.slideDown(200,e)},collapsibleHideDuration:0,collapsibleHideFunction:function(t,e){t.slideUp(200,e)},showOnClick:!1,hideOnClick:!0,noMouseOver:!1,keepInViewport:!0,keepHighlighted:!0,markCurrentItem:!1,markCurrentTree:!0,rightToLeftSubMenus:!1,bottomToTopSubMenus:!1,collapsibleBehavior:"default"},$}); \ No newline at end of file + * Copyright Vasil Dinkov, Vadikom Web Ltd. 
http://vadikom.com; Licensed MIT */(function(t){"function"==typeof define&&define.amd?define(["jquery"],t):"object"==typeof module&&"object"==typeof module.exports?module.exports=t(require("jquery")):t(jQuery)})(function($){function initMouseDetection(t){var e=".smartmenus_mouse";if(mouseDetectionEnabled||t)mouseDetectionEnabled&&t&&($(document).off(e),mouseDetectionEnabled=!1);else{var i=!0,s=null,o={mousemove:function(t){var e={x:t.pageX,y:t.pageY,timeStamp:(new Date).getTime()};if(s){var o=Math.abs(s.x-e.x),a=Math.abs(s.y-e.y);if((o>0||a>0)&&2>=o&&2>=a&&300>=e.timeStamp-s.timeStamp&&(mouse=!0,i)){var n=$(t.target).closest("a");n.is("a")&&$.each(menuTrees,function(){return $.contains(this.$root[0],n[0])?(this.itemEnter({currentTarget:n[0]}),!1):void 0}),i=!1}}s=e}};o[touchEvents?"touchstart":"pointerover pointermove pointerout MSPointerOver MSPointerMove MSPointerOut"]=function(t){isTouchEvent(t.originalEvent)&&(mouse=!1)},$(document).on(getEventsNS(o,e)), +mouseDetectionEnabled=!0}}function isTouchEvent(t){return!/^(4|mouse)$/.test(t.pointerType)}function getEventsNS(t,e){e||(e="");var i={};for(var s in t)i[s.split(" ").join(e+" ")+e]=t[s];return i}var menuTrees=[],mouse=!1,touchEvents="ontouchstart"in window,mouseDetectionEnabled=!1,requestAnimationFrame=window.requestAnimationFrame||function(t){return setTimeout(t,1e3/60)},cancelAnimationFrame=window.cancelAnimationFrame||function(t){clearTimeout(t)},canAnimate=!!$.fn.animate;return $.SmartMenus=function(t,e){this.$root=$(t),this.opts=e,this.rootId="",this.accessIdPrefix="",this.$subArrow=null,this.activatedItems=[],this.visibleSubMenus=[],this.showTimeout=0,this.hideTimeout=0,this.scrollTimeout=0,this.clickActivated=!1,this.focusActivated=!1,this.zIndexInc=0,this.idInc=0,this.$firstLink=null,this.$firstSub=null,this.disabled=!1,this.$disableOverlay=null,this.$touchScrollingSub=null,this.cssTransforms3d="perspective"in t.style||"webkitPerspective"in t.style,this.wasCollapsible=!1,this.init()},$.extend( 
+$.SmartMenus,{hideAll:function(){$.each(menuTrees,function(){this.menuHideAll()})},destroy:function(){for(;menuTrees.length;)menuTrees[0].destroy();initMouseDetection(!0)},prototype:{init:function(t){var e=this;if(!t){menuTrees.push(this),this.rootId=((new Date).getTime()+Math.random()+"").replace(/\D/g,""),this.accessIdPrefix="sm-"+this.rootId+"-",this.$root.hasClass("sm-rtl")&&(this.opts.rightToLeftSubMenus=!0);var i=".smartmenus";this.$root.data("smartmenus",this).attr("data-smartmenus-id",this.rootId).dataSM("level",1).on(getEventsNS({"mouseover focusin":$.proxy(this.rootOver,this),"mouseout focusout":$.proxy(this.rootOut,this),keydown:$.proxy(this.rootKeyDown,this)},i)).on(getEventsNS({mouseenter:$.proxy(this.itemEnter,this),mouseleave:$.proxy(this.itemLeave,this),mousedown:$.proxy(this.itemDown,this),focus:$.proxy(this.itemFocus,this),blur:$.proxy(this.itemBlur,this),click:$.proxy(this.itemClick,this)},i),"a"),i+=this.rootId,this.opts.hideOnClick&&$(document).on(getEventsNS({touchstart:$.proxy( +this.docTouchStart,this),touchmove:$.proxy(this.docTouchMove,this),touchend:$.proxy(this.docTouchEnd,this),click:$.proxy(this.docClick,this)},i)),$(window).on(getEventsNS({"resize orientationchange":$.proxy(this.winResize,this)},i)),this.opts.subIndicators&&(this.$subArrow=$("").addClass("sub-arrow"),this.opts.subIndicatorsText&&this.$subArrow.html(this.opts.subIndicatorsText)),initMouseDetection()}if(this.$firstSub=this.$root.find("ul").each(function(){e.menuInit($(this))}).eq(0),this.$firstLink=this.$root.find("a").eq(0),this.opts.markCurrentItem){var s=/(index|default)\.[^#\?\/]*/i,o=/#.*/,a=window.location.href.replace(s,""),n=a.replace(o,"");this.$root.find("a").each(function(){var t=this.href.replace(s,""),i=$(this);(t==a||t==n)&&(i.addClass("current"),e.opts.markCurrentTree&&i.parentsUntil("[data-smartmenus-id]","ul").each(function(){$(this).dataSM("parent-a").addClass("current")}))})}this.wasCollapsible=this.isCollapsible()},destroy:function(t){if(!t){var 
e=".smartmenus";this.$root.removeData( +"smartmenus").removeAttr("data-smartmenus-id").removeDataSM("level").off(e),e+=this.rootId,$(document).off(e),$(window).off(e),this.opts.subIndicators&&(this.$subArrow=null)}this.menuHideAll();var i=this;this.$root.find("ul").each(function(){var t=$(this);t.dataSM("scroll-arrows")&&t.dataSM("scroll-arrows").remove(),t.dataSM("shown-before")&&((i.opts.subMenusMinWidth||i.opts.subMenusMaxWidth)&&t.css({width:"",minWidth:"",maxWidth:""}).removeClass("sm-nowrap"),t.dataSM("scroll-arrows")&&t.dataSM("scroll-arrows").remove(),t.css({zIndex:"",top:"",left:"",marginLeft:"",marginTop:"",display:""})),0==(t.attr("id")||"").indexOf(i.accessIdPrefix)&&t.removeAttr("id")}).removeDataSM("in-mega").removeDataSM("shown-before").removeDataSM("scroll-arrows").removeDataSM("parent-a").removeDataSM("level").removeDataSM("beforefirstshowfired").removeAttr("role").removeAttr("aria-hidden").removeAttr("aria-labelledby").removeAttr("aria-expanded"),this.$root.find("a.has-submenu").each(function(){var t=$(this);0==t.attr("id" +).indexOf(i.accessIdPrefix)&&t.removeAttr("id")}).removeClass("has-submenu").removeDataSM("sub").removeAttr("aria-haspopup").removeAttr("aria-controls").removeAttr("aria-expanded").closest("li").removeDataSM("sub"),this.opts.subIndicators&&this.$root.find("span.sub-arrow").remove(),this.opts.markCurrentItem&&this.$root.find("a.current").removeClass("current"),t||(this.$root=null,this.$firstLink=null,this.$firstSub=null,this.$disableOverlay&&(this.$disableOverlay.remove(),this.$disableOverlay=null),menuTrees.splice($.inArray(this,menuTrees),1))},disable:function(t){if(!this.disabled){if(this.menuHideAll(),!t&&!this.opts.isPopup&&this.$root.is(":visible")){var e=this.$root.offset();this.$disableOverlay=$('
    ').css({position:"absolute",top:e.top,left:e.left,width:this.$root.outerWidth(),height:this.$root.outerHeight(),zIndex:this.getStartZIndex(!0),opacity:0}).appendTo(document.body)}this.disabled=!0}},docClick:function(t){return this.$touchScrollingSub?( +this.$touchScrollingSub=null,void 0):((this.visibleSubMenus.length&&!$.contains(this.$root[0],t.target)||$(t.target).closest("a").length)&&this.menuHideAll(),void 0)},docTouchEnd:function(){if(this.lastTouch){if(!(!this.visibleSubMenus.length||void 0!==this.lastTouch.x2&&this.lastTouch.x1!=this.lastTouch.x2||void 0!==this.lastTouch.y2&&this.lastTouch.y1!=this.lastTouch.y2||this.lastTouch.target&&$.contains(this.$root[0],this.lastTouch.target))){this.hideTimeout&&(clearTimeout(this.hideTimeout),this.hideTimeout=0);var t=this;this.hideTimeout=setTimeout(function(){t.menuHideAll()},350)}this.lastTouch=null}},docTouchMove:function(t){if(this.lastTouch){var e=t.originalEvent.touches[0];this.lastTouch.x2=e.pageX,this.lastTouch.y2=e.pageY}},docTouchStart:function(t){var e=t.originalEvent.touches[0];this.lastTouch={x1:e.pageX,y1:e.pageY,target:e.target}},enable:function(){this.disabled&&(this.$disableOverlay&&(this.$disableOverlay.remove(),this.$disableOverlay=null),this.disabled=!1)},getClosestMenu:function(t){for( +var e=$(t).closest("ul");e.dataSM("in-mega");)e=e.parent().closest("ul");return e[0]||null},getHeight:function(t){return this.getOffset(t,!0)},getOffset:function(t,e){var i;"none"==t.css("display")&&(i={position:t[0].style.position,visibility:t[0].style.visibility},t.css({position:"absolute",visibility:"hidden"}).show());var s=t[0].getBoundingClientRect&&t[0].getBoundingClientRect(),o=s&&(e?s.height||s.bottom-s.top:s.width||s.right-s.left);return o||0===o||(o=e?t[0].offsetHeight:t[0].offsetWidth),i&&t.hide().css(i),o},getStartZIndex:function(t){var e=parseInt(this[t?"$root":"$firstSub"].css("z-index"));return!t&&isNaN(e)&&(e=parseInt(this.$root.css("z-index"))),isNaN(e)?1:e},getTouchPoint:function(t){return 
t.touches&&t.touches[0]||t.changedTouches&&t.changedTouches[0]||t},getViewport:function(t){var e=t?"Height":"Width",i=document.documentElement["client"+e],s=window["inner"+e];return s&&(i=Math.min(i,s)),i},getViewportHeight:function(){return this.getViewport(!0)},getViewportWidth:function(){ +return this.getViewport()},getWidth:function(t){return this.getOffset(t)},handleEvents:function(){return!this.disabled&&this.isCSSOn()},handleItemEvents:function(t){return this.handleEvents()&&!this.isLinkInMegaMenu(t)},isCollapsible:function(){return"static"==this.$firstSub.css("position")},isCSSOn:function(){return"inline"!=this.$firstLink.css("display")},isFixed:function(){var t="fixed"==this.$root.css("position");return t||this.$root.parentsUntil("body").each(function(){return"fixed"==$(this).css("position")?(t=!0,!1):void 0}),t},isLinkInMegaMenu:function(t){return $(this.getClosestMenu(t[0])).hasClass("mega-menu")},isTouchMode:function(){return!mouse||this.opts.noMouseOver||this.isCollapsible()},itemActivate:function(t,e){var i=t.closest("ul"),s=i.dataSM("level");if(s>1&&(!this.activatedItems[s-2]||this.activatedItems[s-2][0]!=i.dataSM("parent-a")[0])){var o=this;$(i.parentsUntil("[data-smartmenus-id]","ul").get().reverse()).add(i).each(function(){o.itemActivate($(this).dataSM("parent-a"))})}if(( +!this.isCollapsible()||e)&&this.menuHideSubMenus(this.activatedItems[s-1]&&this.activatedItems[s-1][0]==t[0]?s:s-1),this.activatedItems[s-1]=t,this.$root.triggerHandler("activate.smapi",t[0])!==!1){var a=t.dataSM("sub");a&&(this.isTouchMode()||!this.opts.showOnClick||this.clickActivated)&&this.menuShow(a)}},itemBlur:function(t){var e=$(t.currentTarget);this.handleItemEvents(e)&&this.$root.triggerHandler("blur.smapi",e[0])},itemClick:function(t){var e=$(t.currentTarget);if(this.handleItemEvents(e)){if(this.$touchScrollingSub&&this.$touchScrollingSub[0]==e.closest("ul")[0])return 
this.$touchScrollingSub=null,t.stopPropagation(),!1;if(this.$root.triggerHandler("click.smapi",e[0])===!1)return!1;var i=$(t.target).is(".sub-arrow"),s=e.dataSM("sub"),o=s?2==s.dataSM("level"):!1,a=this.isCollapsible(),n=/toggle$/.test(this.opts.collapsibleBehavior),r=/link$/.test(this.opts.collapsibleBehavior),h=/^accordion/.test(this.opts.collapsibleBehavior);if(s&&!s.is(":visible")){if((!r||!a||i)&&(this.opts.showOnClick&&o&&( +this.clickActivated=!0),this.itemActivate(e,h),s.is(":visible")))return this.focusActivated=!0,!1}else if(a&&(n||i))return this.itemActivate(e,h),this.menuHide(s),n&&(this.focusActivated=!1),!1;return this.opts.showOnClick&&o||e.hasClass("disabled")||this.$root.triggerHandler("select.smapi",e[0])===!1?!1:void 0}},itemDown:function(t){var e=$(t.currentTarget);this.handleItemEvents(e)&&e.dataSM("mousedown",!0)},itemEnter:function(t){var e=$(t.currentTarget);if(this.handleItemEvents(e)){if(!this.isTouchMode()){this.showTimeout&&(clearTimeout(this.showTimeout),this.showTimeout=0);var i=this;this.showTimeout=setTimeout(function(){i.itemActivate(e)},this.opts.showOnClick&&1==e.closest("ul").dataSM("level")?1:this.opts.showTimeout)}this.$root.triggerHandler("mouseenter.smapi",e[0])}},itemFocus:function(t){var e=$(t.currentTarget);this.handleItemEvents(e)&&(!this.focusActivated||this.isTouchMode()&&e.dataSM("mousedown")||this.activatedItems.length&&this.activatedItems[this.activatedItems.length-1][0]==e[0 +]||this.itemActivate(e,!0),this.$root.triggerHandler("focus.smapi",e[0]))},itemLeave:function(t){var e=$(t.currentTarget);this.handleItemEvents(e)&&(this.isTouchMode()||(e[0].blur(),this.showTimeout&&(clearTimeout(this.showTimeout),this.showTimeout=0)),e.removeDataSM("mousedown"),this.$root.triggerHandler("mouseleave.smapi",e[0]))},menuHide:function(t){if(this.$root.triggerHandler("beforehide.smapi",t[0])!==!1&&(canAnimate&&t.stop(!0,!0),"none"!=t.css("display"))){var 
e=function(){t.css("z-index","")};this.isCollapsible()?canAnimate&&this.opts.collapsibleHideFunction?this.opts.collapsibleHideFunction.call(this,t,e):t.hide(this.opts.collapsibleHideDuration,e):canAnimate&&this.opts.hideFunction?this.opts.hideFunction.call(this,t,e):t.hide(this.opts.hideDuration,e),t.dataSM("scroll")&&(this.menuScrollStop(t),t.css({"touch-action":"","-ms-touch-action":"","-webkit-transform":"",transform:""}).off(".smartmenus_scroll").removeDataSM("scroll").dataSM("scroll-arrows").hide()),t.dataSM("parent-a").removeClass( +"highlighted").attr("aria-expanded","false"),t.attr({"aria-expanded":"false","aria-hidden":"true"});var i=t.dataSM("level");this.activatedItems.splice(i-1,1),this.visibleSubMenus.splice($.inArray(t,this.visibleSubMenus),1),this.$root.triggerHandler("hide.smapi",t[0])}},menuHideAll:function(){this.showTimeout&&(clearTimeout(this.showTimeout),this.showTimeout=0);for(var t=this.opts.isPopup?1:0,e=this.visibleSubMenus.length-1;e>=t;e--)this.menuHide(this.visibleSubMenus[e]);this.opts.isPopup&&(canAnimate&&this.$root.stop(!0,!0),this.$root.is(":visible")&&(canAnimate&&this.opts.hideFunction?this.opts.hideFunction.call(this,this.$root):this.$root.hide(this.opts.hideDuration))),this.activatedItems=[],this.visibleSubMenus=[],this.clickActivated=!1,this.focusActivated=!1,this.zIndexInc=0,this.$root.triggerHandler("hideAll.smapi")},menuHideSubMenus:function(t){for(var e=this.activatedItems.length-1;e>=t;e--){var i=this.activatedItems[e].dataSM("sub");i&&this.menuHide(i)}},menuInit:function(t){if(!t.dataSM("in-mega")){ +t.hasClass("mega-menu")&&t.find("ul").dataSM("in-mega",!0);for(var e=2,i=t[0];(i=i.parentNode.parentNode)!=this.$root[0];)e++;var s=t.prevAll("a").eq(-1);s.length||(s=t.prevAll().find("a").eq(-1)),s.addClass("has-submenu").dataSM("sub",t),t.dataSM("parent-a",s).dataSM("level",e).parent().dataSM("sub",t);var o=s.attr("id")||this.accessIdPrefix+ ++this.idInc,a=t.attr("id")||this.accessIdPrefix+ 
++this.idInc;s.attr({id:o,"aria-haspopup":"true","aria-controls":a,"aria-expanded":"false"}),t.attr({id:a,role:"group","aria-hidden":"true","aria-labelledby":o,"aria-expanded":"false"}),this.opts.subIndicators&&s[this.opts.subIndicatorsPos](this.$subArrow.clone())}},menuPosition:function(t){var e,i,s=t.dataSM("parent-a"),o=s.closest("li"),a=o.parent(),n=t.dataSM("level"),r=this.getWidth(t),h=this.getHeight(t),u=s.offset(),l=u.left,c=u.top,d=this.getWidth(s),m=this.getHeight(s),p=$(window),f=p.scrollLeft(),v=p.scrollTop(),b=this.getViewportWidth(),S=this.getViewportHeight(),g=a.parent().is("[data-sm-horizontal-sub]" +)||2==n&&!a.hasClass("sm-vertical"),M=this.opts.rightToLeftSubMenus&&!o.is("[data-sm-reverse]")||!this.opts.rightToLeftSubMenus&&o.is("[data-sm-reverse]"),w=2==n?this.opts.mainMenuSubOffsetX:this.opts.subMenusSubOffsetX,T=2==n?this.opts.mainMenuSubOffsetY:this.opts.subMenusSubOffsetY;if(g?(e=M?d-r-w:w,i=this.opts.bottomToTopSubMenus?-h-T:m+T):(e=M?w-r:d-w,i=this.opts.bottomToTopSubMenus?m-T-h:T),this.opts.keepInViewport){var y=l+e,I=c+i;if(M&&f>y?e=g?f-y+e:d-w:!M&&y+r>f+b&&(e=g?f+b-r-y+e:w-r),g||(S>h&&I+h>v+S?i+=v+S-h-I:(h>=S||v>I)&&(i+=v-I)),g&&(I+h>v+S+.49||v>I)||!g&&h>S+.49){var x=this;t.dataSM("scroll-arrows")||t.dataSM("scroll-arrows",$([$('')[0],$('')[0]]).on({mouseenter:function(){t.dataSM("scroll").up=$(this).hasClass("scroll-up"),x.menuScroll(t)},mouseleave:function(e){x.menuScrollStop(t),x.menuScrollOut(t,e)},"mousewheel DOMMouseScroll":function(t){ +t.preventDefault()}}).insertAfter(t));var A=".smartmenus_scroll";if(t.dataSM("scroll",{y:this.cssTransforms3d?0:i-m,step:1,itemH:m,subH:h,arrowDownH:this.getHeight(t.dataSM("scroll-arrows").eq(1))}).on(getEventsNS({mouseover:function(e){x.menuScrollOver(t,e)},mouseout:function(e){x.menuScrollOut(t,e)},"mousewheel 
DOMMouseScroll":function(e){x.menuScrollMousewheel(t,e)}},A)).dataSM("scroll-arrows").css({top:"auto",left:"0",marginLeft:e+(parseInt(t.css("border-left-width"))||0),width:r-(parseInt(t.css("border-left-width"))||0)-(parseInt(t.css("border-right-width"))||0),zIndex:t.css("z-index")}).eq(g&&this.opts.bottomToTopSubMenus?0:1).show(),this.isFixed()){var C={};C[touchEvents?"touchstart touchmove touchend":"pointerdown pointermove pointerup MSPointerDown MSPointerMove MSPointerUp"]=function(e){x.menuScrollTouch(t,e)},t.css({"touch-action":"none","-ms-touch-action":"none"}).on(getEventsNS(C,A))}}}t.css({top:"auto",left:"0",marginLeft:e,marginTop:i-m})},menuScroll:function(t,e,i){var s,o=t.dataSM("scroll"), +a=t.dataSM("scroll-arrows"),n=o.up?o.upEnd:o.downEnd;if(!e&&o.momentum){if(o.momentum*=.92,s=o.momentum,.5>s)return this.menuScrollStop(t),void 0}else s=i||(e||!this.opts.scrollAccelerate?this.opts.scrollStep:Math.floor(o.step));var r=t.dataSM("level");if(this.activatedItems[r-1]&&this.activatedItems[r-1].dataSM("sub")&&this.activatedItems[r-1].dataSM("sub").is(":visible")&&this.menuHideSubMenus(r-1),o.y=o.up&&o.y>=n||!o.up&&n>=o.y?o.y:Math.abs(n-o.y)>s?o.y+(o.up?s:-s):n,t.css(this.cssTransforms3d?{"-webkit-transform":"translate3d(0, "+o.y+"px, 0)",transform:"translate3d(0, "+o.y+"px, 0)"}:{marginTop:o.y}),mouse&&(o.up&&o.y>o.downEnd||!o.up&&o.y0;t.dataSM("scroll-arrows").eq(i?0:1).is(":visible")&&(t.dataSM("scroll").up=i,this.menuScroll(t,!0))}e.preventDefault()},menuScrollOut:function(t,e){mouse&&(/^scroll-(up|down)/.test((e.relatedTarget||"").className)||(t[0]==e.relatedTarget||$.contains(t[0],e.relatedTarget))&&this.getClosestMenu(e.relatedTarget)==t[0]||t.dataSM("scroll-arrows").css("visibility","hidden"))},menuScrollOver:function(t,e){if(mouse&&!/^scroll-(up|down)/.test(e.target.className)&&this.getClosestMenu(e.target)==t[0]){this.menuScrollRefreshData(t);var 
i=t.dataSM("scroll"),s=$(window).scrollTop()-t.dataSM("parent-a").offset().top-i.itemH;t.dataSM("scroll-arrows").eq(0).css("margin-top",s).end().eq(1).css("margin-top",s+this.getViewportHeight()-i.arrowDownH).end().css("visibility","visible")}},menuScrollRefreshData:function(t){var e=t.dataSM("scroll"),i=$(window).scrollTop()-t.dataSM("parent-a").offset().top-e.itemH;this.cssTransforms3d&&(i=-(parseFloat(t.css("margin-top"))-i)),$.extend(e,{upEnd:i, +downEnd:i+this.getViewportHeight()-e.subH})},menuScrollStop:function(t){return this.scrollTimeout?(cancelAnimationFrame(this.scrollTimeout),this.scrollTimeout=0,t.dataSM("scroll").step=1,!0):void 0},menuScrollTouch:function(t,e){if(e=e.originalEvent,isTouchEvent(e)){var i=this.getTouchPoint(e);if(this.getClosestMenu(i.target)==t[0]){var s=t.dataSM("scroll");if(/(start|down)$/i.test(e.type))this.menuScrollStop(t)?(e.preventDefault(),this.$touchScrollingSub=t):this.$touchScrollingSub=null,this.menuScrollRefreshData(t),$.extend(s,{touchStartY:i.pageY,touchStartTime:e.timeStamp});else if(/move$/i.test(e.type)){var o=void 0!==s.touchY?s.touchY:s.touchStartY;if(void 0!==o&&o!=i.pageY){this.$touchScrollingSub=t;var a=i.pageY>o;void 0!==s.up&&s.up!=a&&$.extend(s,{touchStartY:i.pageY,touchStartTime:e.timeStamp}),$.extend(s,{up:a,touchY:i.pageY}),this.menuScroll(t,!0,Math.abs(i.pageY-o))}e.preventDefault()}else void 0!==s.touchY&&((s.momentum=15*Math.pow(Math.abs(i.pageY-s.touchStartY)/(e.timeStamp-s.touchStartTime),2) +)&&(this.menuScrollStop(t),this.menuScroll(t),e.preventDefault()),delete s.touchY)}}},menuShow:function(t){if((t.dataSM("beforefirstshowfired")||(t.dataSM("beforefirstshowfired",!0),this.$root.triggerHandler("beforefirstshow.smapi",t[0])!==!1))&&this.$root.triggerHandler("beforeshow.smapi",t[0])!==!1&&(t.dataSM("shown-before",!0),canAnimate&&t.stop(!0,!0),!t.is(":visible"))){var 
e=t.dataSM("parent-a"),i=this.isCollapsible();if((this.opts.keepHighlighted||i)&&e.addClass("highlighted"),i)t.removeClass("sm-nowrap").css({zIndex:"",width:"auto",minWidth:"",maxWidth:"",top:"",left:"",marginLeft:"",marginTop:""});else{if(t.css("z-index",this.zIndexInc=(this.zIndexInc||this.getStartZIndex())+1),(this.opts.subMenusMinWidth||this.opts.subMenusMaxWidth)&&(t.css({width:"auto",minWidth:"",maxWidth:""}).addClass("sm-nowrap"),this.opts.subMenusMinWidth&&t.css("min-width",this.opts.subMenusMinWidth),this.opts.subMenusMaxWidth)){var s=this.getWidth(t);t.css("max-width",this.opts.subMenusMaxWidth),s>this.getWidth(t +)&&t.removeClass("sm-nowrap").css("width",this.opts.subMenusMaxWidth)}this.menuPosition(t)}var o=function(){t.css("overflow","")};i?canAnimate&&this.opts.collapsibleShowFunction?this.opts.collapsibleShowFunction.call(this,t,o):t.show(this.opts.collapsibleShowDuration,o):canAnimate&&this.opts.showFunction?this.opts.showFunction.call(this,t,o):t.show(this.opts.showDuration,o),e.attr("aria-expanded","true"),t.attr({"aria-expanded":"true","aria-hidden":"false"}),this.visibleSubMenus.push(t),this.$root.triggerHandler("show.smapi",t[0])}},popupHide:function(t){this.hideTimeout&&(clearTimeout(this.hideTimeout),this.hideTimeout=0);var e=this;this.hideTimeout=setTimeout(function(){e.menuHideAll()},t?1:this.opts.hideTimeout)},popupShow:function(t,e){if(!this.opts.isPopup)return alert('SmartMenus jQuery Error:\n\nIf you want to show this menu via the "popupShow" method, set the isPopup:true option.'),void 0;if(this.hideTimeout&&(clearTimeout(this.hideTimeout),this.hideTimeout=0),this.$root.dataSM("shown-before",!0), +canAnimate&&this.$root.stop(!0,!0),!this.$root.is(":visible")){this.$root.css({left:t,top:e});var 
i=this,s=function(){i.$root.css("overflow","")};canAnimate&&this.opts.showFunction?this.opts.showFunction.call(this,this.$root,s):this.$root.show(this.opts.showDuration,s),this.visibleSubMenus[0]=this.$root}},refresh:function(){this.destroy(!0),this.init(!0)},rootKeyDown:function(t){if(this.handleEvents())switch(t.keyCode){case 27:var e=this.activatedItems[0];if(e){this.menuHideAll(),e[0].focus();var i=e.dataSM("sub");i&&this.menuHide(i)}break;case 32:var s=$(t.target);if(s.is("a")&&this.handleItemEvents(s)){var i=s.dataSM("sub");i&&!i.is(":visible")&&(this.itemClick({currentTarget:t.target}),t.preventDefault())}}},rootOut:function(t){if(this.handleEvents()&&!this.isTouchMode()&&t.target!=this.$root[0]&&(this.hideTimeout&&(clearTimeout(this.hideTimeout),this.hideTimeout=0),!this.opts.showOnClick||!this.opts.hideOnClick)){var e=this;this.hideTimeout=setTimeout(function(){e.menuHideAll()},this.opts.hideTimeout)}}, +rootOver:function(t){this.handleEvents()&&!this.isTouchMode()&&t.target!=this.$root[0]&&this.hideTimeout&&(clearTimeout(this.hideTimeout),this.hideTimeout=0)},winResize:function(t){if(this.handleEvents()){if(!("onorientationchange"in window)||"orientationchange"==t.type){var e=this.isCollapsible();this.wasCollapsible&&e||(this.activatedItems.length&&this.activatedItems[this.activatedItems.length-1][0].blur(),this.menuHideAll()),this.wasCollapsible=e}}else if(this.$disableOverlay){var i=this.$root.offset();this.$disableOverlay.css({top:i.top,left:i.left,width:this.$root.outerWidth(),height:this.$root.outerHeight()})}}}}),$.fn.dataSM=function(t,e){return e?this.data(t+"_smartmenus",e):this.data(t+"_smartmenus")},$.fn.removeDataSM=function(t){return this.removeData(t+"_smartmenus")},$.fn.smartmenus=function(options){if("string"==typeof options){var args=arguments,method=options;return Array.prototype.shift.call(args),this.each(function(){var t=$(this).data("smartmenus");t&&t[method]&&t[method].apply(t,args)})} +return this.each(function(){var 
dataOpts=$(this).data("sm-options")||null;if(dataOpts)try{dataOpts=eval("("+dataOpts+")")}catch(e){dataOpts=null,alert('ERROR\n\nSmartMenus jQuery init:\nInvalid "data-sm-options" attribute value syntax.')}new $.SmartMenus(this,$.extend({},$.fn.smartmenus.defaults,options,dataOpts))})},$.fn.smartmenus.defaults={isPopup:!1,mainMenuSubOffsetX:0,mainMenuSubOffsetY:0,subMenusSubOffsetX:0,subMenusSubOffsetY:0,subMenusMinWidth:"10em",subMenusMaxWidth:"20em",subIndicators:!0,subIndicatorsPos:"append",subIndicatorsText:"",scrollStep:30,scrollAccelerate:!0,showTimeout:250,hideTimeout:500,showDuration:0,showFunction:null,hideDuration:0,hideFunction:function(t,e){t.fadeOut(200,e)},collapsibleShowDuration:0,collapsibleShowFunction:function(t,e){t.slideDown(200,e)},collapsibleHideDuration:0,collapsibleHideFunction:function(t,e){t.slideUp(200,e)},showOnClick:!1,hideOnClick:!0,noMouseOver:!1,keepInViewport:!0,keepHighlighted:!0,markCurrentItem:!1,markCurrentTree:!0,rightToLeftSubMenus:!1, +bottomToTopSubMenus:!1,collapsibleBehavior:"default"},$}); diff --git a/depends/mimalloc/docs/mimalloc-doc_8h_source.html b/depends/mimalloc/docs/mimalloc-doc_8h_source.html index 8935de8a8890..3c1ad4fbaed1 100644 --- a/depends/mimalloc/docs/mimalloc-doc_8h_source.html +++ b/depends/mimalloc/docs/mimalloc-doc_8h_source.html @@ -1,24 +1,26 @@ - + - - + + mi-malloc: mimalloc-doc.h Source File + - + + @@ -29,20 +31,16 @@
    - + - -
    -
    mi-malloc -  1.7/2.0 +
    +
    mi-malloc 1.8/2.1
    +
    - -   + @@ -56,10 +54,15 @@
    - + +
    @@ -74,8 +77,8 @@
    @@ -88,473 +91,584 @@
    - +
    +
    +
    +
    +
    Loading...
    +
    Searching...
    +
    No Matches
    +
    +
    +
    -
    -
    mimalloc-doc.h
    +
    mimalloc-doc.h
    -
    1 /* ----------------------------------------------------------------------------
    -
    2 Copyright (c) 2018-2021, Microsoft Research, Daan Leijen
    -
    3 This is free software; you can redistribute it and/or modify it under the
    -
    4 terms of the MIT license. A copy of the license can be found in the file
    -
    5 "LICENSE" at the root of this distribution.
    -
    6 -----------------------------------------------------------------------------*/
    -
    7 
    -
    8 #error "documentation file only!"
    -
    9 
    -
    10 
    -
    94 
    -
    95 
    -
    99 void mi_free(void* p);
    -
    100 
    -
    105 void* mi_malloc(size_t size);
    -
    106 
    -
    111 void* mi_zalloc(size_t size);
    -
    112 
    -
    122 void* mi_calloc(size_t count, size_t size);
    -
    123 
    -
    136 void* mi_realloc(void* p, size_t newsize);
    -
    137 
    -
    148 void* mi_recalloc(void* p, size_t count, size_t size);
    -
    149 
    -
    163 void* mi_expand(void* p, size_t newsize);
    -
    164 
    -
    174 void* mi_mallocn(size_t count, size_t size);
    -
    175 
    -
    185 void* mi_reallocn(void* p, size_t count, size_t size);
    -
    186 
    -
    203 void* mi_reallocf(void* p, size_t newsize);
    -
    204 
    -
    205 
    -
    214 char* mi_strdup(const char* s);
    -
    215 
    -
    225 char* mi_strndup(const char* s, size_t n);
    -
    226 
    -
    239 char* mi_realpath(const char* fname, char* resolved_name);
    -
    240 
    -
    242 
    -
    243 // ------------------------------------------------------
    -
    244 // Extended functionality
    -
    245 // ------------------------------------------------------
    -
    246 
    -
    250 
    -
    253 #define MI_SMALL_SIZE_MAX (128*sizeof(void*))
    -
    254 
    -
    262 void* mi_malloc_small(size_t size);
    -
    263 
    -
    271 void* mi_zalloc_small(size_t size);
    -
    272 
    -
    287 size_t mi_usable_size(void* p);
    -
    288 
    -
    298 size_t mi_good_size(size_t size);
    -
    299 
    -
    307 void mi_collect(bool force);
    -
    308 
    -
    313 void mi_stats_print(void* out);
    -
    314 
    -
    320 void mi_stats_print_out(mi_output_fun* out, void* arg);
    -
    321 
    -
    323 void mi_stats_reset(void);
    -
    324 
    -
    326 void mi_stats_merge(void);
    -
    327 
    -
    331 void mi_thread_init(void);
    -
    332 
    -
    337 void mi_thread_done(void);
    -
    338 
    - -
    345 
    -
    352 typedef void (mi_deferred_free_fun)(bool force, unsigned long long heartbeat, void* arg);
    -
    353 
    -
    369 void mi_register_deferred_free(mi_deferred_free_fun* deferred_free, void* arg);
    -
    370 
    -
    376 typedef void (mi_output_fun)(const char* msg, void* arg);
    -
    377 
    -
    384 void mi_register_output(mi_output_fun* out, void* arg);
    -
    385 
    -
    391 typedef void (mi_error_fun)(int err, void* arg);
    -
    392 
    -
    408 void mi_register_error(mi_error_fun* errfun, void* arg);
    -
    409 
    -
    414 bool mi_is_in_heap_region(const void* p);
    -
    415 
    -
    424 int mi_reserve_os_memory(size_t size, bool commit, bool allow_large);
    -
    425 
    -
    437 bool mi_manage_os_memory(void* start, size_t size, bool is_committed, bool is_large, bool is_zero, int numa_node);
    -
    438 
    -
    451 int mi_reserve_huge_os_pages_interleave(size_t pages, size_t numa_nodes, size_t timeout_msecs);
    -
    452 
    -
    465 int mi_reserve_huge_os_pages_at(size_t pages, int numa_node, size_t timeout_msecs);
    -
    466 
    -
    467 
    - -
    473 
    -
    487 void mi_process_info(size_t* elapsed_msecs, size_t* user_msecs, size_t* system_msecs, size_t* current_rss, size_t* peak_rss, size_t* current_commit, size_t* peak_commit, size_t* page_faults);
    -
    488 
    -
    490 
    -
    491 // ------------------------------------------------------
    -
    492 // Aligned allocation
    -
    493 // ------------------------------------------------------
    -
    494 
    -
    500 
    -
    502 #define MI_ALIGNMENT_MAX (1024*1024UL)
    -
    503 
    -
    516 void* mi_malloc_aligned(size_t size, size_t alignment);
    -
    517 void* mi_zalloc_aligned(size_t size, size_t alignment);
    -
    518 void* mi_calloc_aligned(size_t count, size_t size, size_t alignment);
    -
    519 void* mi_realloc_aligned(void* p, size_t newsize, size_t alignment);
    -
    520 
    -
    531 void* mi_malloc_aligned_at(size_t size, size_t alignment, size_t offset);
    -
    532 void* mi_zalloc_aligned_at(size_t size, size_t alignment, size_t offset);
    -
    533 void* mi_calloc_aligned_at(size_t count, size_t size, size_t alignment, size_t offset);
    -
    534 void* mi_realloc_aligned_at(void* p, size_t newsize, size_t alignment, size_t offset);
    -
    535 
    -
    537 
    -
    543 
    -
    548 struct mi_heap_s;
    -
    549 
    -
    554 typedef struct mi_heap_s mi_heap_t;
    -
    555 
    - -
    558 
    - -
    567 
    - -
    576 
    - -
    581 
    - -
    585 
    - -
    592 
    -
    594 void mi_heap_collect(mi_heap_t* heap, bool force);
    -
    595 
    -
    598 void* mi_heap_malloc(mi_heap_t* heap, size_t size);
    -
    599 
    -
    603 void* mi_heap_malloc_small(mi_heap_t* heap, size_t size);
    -
    604 
    -
    607 void* mi_heap_zalloc(mi_heap_t* heap, size_t size);
    -
    608 
    -
    611 void* mi_heap_calloc(mi_heap_t* heap, size_t count, size_t size);
    -
    612 
    -
    615 void* mi_heap_mallocn(mi_heap_t* heap, size_t count, size_t size);
    -
    616 
    -
    619 char* mi_heap_strdup(mi_heap_t* heap, const char* s);
    -
    620 
    -
    623 char* mi_heap_strndup(mi_heap_t* heap, const char* s, size_t n);
    -
    624 
    -
    627 char* mi_heap_realpath(mi_heap_t* heap, const char* fname, char* resolved_name);
    -
    628 
    -
    629 void* mi_heap_realloc(mi_heap_t* heap, void* p, size_t newsize);
    -
    630 void* mi_heap_reallocn(mi_heap_t* heap, void* p, size_t count, size_t size);
    -
    631 void* mi_heap_reallocf(mi_heap_t* heap, void* p, size_t newsize);
    -
    632 
    -
    633 void* mi_heap_malloc_aligned(mi_heap_t* heap, size_t size, size_t alignment);
    -
    634 void* mi_heap_malloc_aligned_at(mi_heap_t* heap, size_t size, size_t alignment, size_t offset);
    -
    635 void* mi_heap_zalloc_aligned(mi_heap_t* heap, size_t size, size_t alignment);
    -
    636 void* mi_heap_zalloc_aligned_at(mi_heap_t* heap, size_t size, size_t alignment, size_t offset);
    -
    637 void* mi_heap_calloc_aligned(mi_heap_t* heap, size_t count, size_t size, size_t alignment);
    -
    638 void* mi_heap_calloc_aligned_at(mi_heap_t* heap, size_t count, size_t size, size_t alignment, size_t offset);
    -
    639 void* mi_heap_realloc_aligned(mi_heap_t* heap, void* p, size_t newsize, size_t alignment);
    -
    640 void* mi_heap_realloc_aligned_at(mi_heap_t* heap, void* p, size_t newsize, size_t alignment, size_t offset);
    -
    641 
    -
    643 
    -
    644 
    -
    653 
    -
    654 void* mi_rezalloc(void* p, size_t newsize);
    -
    655 void* mi_recalloc(void* p, size_t newcount, size_t size) ;
    -
    656 
    -
    657 void* mi_rezalloc_aligned(void* p, size_t newsize, size_t alignment);
    -
    658 void* mi_rezalloc_aligned_at(void* p, size_t newsize, size_t alignment, size_t offset);
    -
    659 void* mi_recalloc_aligned(void* p, size_t newcount, size_t size, size_t alignment);
    -
    660 void* mi_recalloc_aligned_at(void* p, size_t newcount, size_t size, size_t alignment, size_t offset);
    -
    661 
    -
    662 void* mi_heap_rezalloc(mi_heap_t* heap, void* p, size_t newsize);
    -
    663 void* mi_heap_recalloc(mi_heap_t* heap, void* p, size_t newcount, size_t size);
    -
    664 
    -
    665 void* mi_heap_rezalloc_aligned(mi_heap_t* heap, void* p, size_t newsize, size_t alignment);
    -
    666 void* mi_heap_rezalloc_aligned_at(mi_heap_t* heap, void* p, size_t newsize, size_t alignment, size_t offset);
    -
    667 void* mi_heap_recalloc_aligned(mi_heap_t* heap, void* p, size_t newcount, size_t size, size_t alignment);
    -
    668 void* mi_heap_recalloc_aligned_at(mi_heap_t* heap, void* p, size_t newcount, size_t size, size_t alignment, size_t offset);
    -
    669 
    -
    671 
    -
    680 
    -
    692 #define mi_malloc_tp(tp) ((tp*)mi_malloc(sizeof(tp)))
    -
    693 
    -
    695 #define mi_zalloc_tp(tp) ((tp*)mi_zalloc(sizeof(tp)))
    -
    696 
    -
    698 #define mi_calloc_tp(tp,count) ((tp*)mi_calloc(count,sizeof(tp)))
    -
    699 
    -
    701 #define mi_mallocn_tp(tp,count) ((tp*)mi_mallocn(count,sizeof(tp)))
    -
    702 
    -
    704 #define mi_reallocn_tp(p,tp,count) ((tp*)mi_reallocn(p,count,sizeof(tp)))
    -
    705 
    -
    707 #define mi_heap_malloc_tp(hp,tp) ((tp*)mi_heap_malloc(hp,sizeof(tp)))
    -
    708 
    -
    710 #define mi_heap_zalloc_tp(hp,tp) ((tp*)mi_heap_zalloc(hp,sizeof(tp)))
    -
    711 
    -
    713 #define mi_heap_calloc_tp(hp,tp,count) ((tp*)mi_heap_calloc(hp,count,sizeof(tp)))
    -
    714 
    -
    716 #define mi_heap_mallocn_tp(hp,tp,count) ((tp*)mi_heap_mallocn(hp,count,sizeof(tp)))
    -
    717 
    -
    719 #define mi_heap_reallocn_tp(hp,p,tp,count) ((tp*)mi_heap_reallocn(p,count,sizeof(tp)))
    -
    720 
    -
    722 #define mi_heap_recalloc_tp(hp,p,tp,count) ((tp*)mi_heap_recalloc(p,count,sizeof(tp)))
    -
    723 
    -
    725 
    -
    731 
    -
    738 bool mi_heap_contains_block(mi_heap_t* heap, const void* p);
    -
    739 
    -
    748 bool mi_heap_check_owned(mi_heap_t* heap, const void* p);
    -
    749 
    -
    757 bool mi_check_owned(const void* p);
    -
    758 
    -
    761 typedef struct mi_heap_area_s {
    -
    762  void* blocks;
    -
    763  size_t reserved;
    -
    764  size_t committed;
    -
    765  size_t used;
    -
    766  size_t block_size;
    - -
    768 
    -
    776 typedef bool (mi_block_visit_fun)(const mi_heap_t* heap, const mi_heap_area_t* area, void* block, size_t block_size, void* arg);
    -
    777 
    -
    789 bool mi_heap_visit_blocks(const mi_heap_t* heap, bool visit_all_blocks, mi_block_visit_fun* visitor, void* arg);
    -
    790 
    -
    792 
    -
    798 
    -
    800 typedef enum mi_option_e {
    -
    801  // stable options
    - - - -
    805  // the following options are experimental
    - - - - - - - - - - - - - - - -
    821 
    -
    822 
    - - - -
    826 void mi_option_set_enabled(mi_option_t option, bool enable);
    -
    827 void mi_option_set_enabled_default(mi_option_t option, bool enable);
    -
    828 
    - -
    830 void mi_option_set(mi_option_t option, long value);
    -
    831 void mi_option_set_default(mi_option_t option, long value);
    -
    832 
    -
    833 
    -
    835 
    -
    842 
    -
    843 void* mi_recalloc(void* p, size_t count, size_t size);
    -
    844 size_t mi_malloc_size(const void* p);
    -
    845 size_t mi_malloc_usable_size(const void *p);
    -
    846 
    -
    848 void mi_cfree(void* p);
    -
    849 
    -
    850 int mi_posix_memalign(void** p, size_t alignment, size_t size);
    -
    851 int mi__posix_memalign(void** p, size_t alignment, size_t size);
    -
    852 void* mi_memalign(size_t alignment, size_t size);
    -
    853 void* mi_valloc(size_t size);
    -
    854 
    -
    855 void* mi_pvalloc(size_t size);
    -
    856 void* mi_aligned_alloc(size_t alignment, size_t size);
    -
    857 
    -
    860 void* mi_reallocarray(void* p, size_t count, size_t size);
    -
    861 
    -
    863 int mi_reallocarr(void* p, size_t count, size_t size);
    -
    864 
    -
    865 void mi_free_size(void* p, size_t size);
    -
    866 void mi_free_size_aligned(void* p, size_t size, size_t alignment);
    -
    867 void mi_free_aligned(void* p, size_t alignment);
    -
    868 
    -
    870 
    -
    883 
    -
    885 void* mi_new(std::size_t n) noexcept(false);
    -
    886 
    -
    888 void* mi_new_n(size_t count, size_t size) noexcept(false);
    -
    889 
    -
    891 void* mi_new_aligned(std::size_t n, std::align_val_t alignment) noexcept(false);
    -
    892 
    -
    894 void* mi_new_nothrow(size_t n);
    -
    895 
    -
    897 void* mi_new_aligned_nothrow(size_t n, size_t alignment);
    -
    898 
    -
    900 void* mi_new_realloc(void* p, size_t newsize);
    -
    901 
    -
    903 void* mi_new_reallocn(void* p, size_t newcount, size_t size);
    -
    904 
    -
    912 template<class T> struct mi_stl_allocator { }
    -
    913 
    -
    915 
    -
    void * mi_calloc_aligned_at(size_t count, size_t size, size_t alignment, size_t offset)
    -
    void * mi_zalloc_aligned(size_t size, size_t alignment)
    -
    void * mi_realloc_aligned(void *p, size_t newsize, size_t alignment)
    -
    void * mi_calloc_aligned(size_t count, size_t size, size_t alignment)
    -
    void * mi_malloc_aligned_at(size_t size, size_t alignment, size_t offset)
    Allocate size bytes aligned by alignment at a specified offset.
    -
    void * mi_zalloc_aligned_at(size_t size, size_t alignment, size_t offset)
    -
    void * mi_malloc_aligned(size_t size, size_t alignment)
    Allocate size bytes aligned by alignment.
    -
    void * mi_realloc_aligned_at(void *p, size_t newsize, size_t alignment, size_t offset)
    -
    size_t block_size
    size in bytes of one block
    Definition: mimalloc-doc.h:766
    -
    size_t committed
    current committed bytes of this area
    Definition: mimalloc-doc.h:764
    -
    size_t used
    bytes in use by allocated blocks
    Definition: mimalloc-doc.h:765
    -
    void * blocks
    start of the area containing heap blocks
    Definition: mimalloc-doc.h:762
    -
    size_t reserved
    bytes reserved for this area
    Definition: mimalloc-doc.h:763
    +
    1/* ----------------------------------------------------------------------------
    +
    2Copyright (c) 2018-2025, Microsoft Research, Daan Leijen
    +
    3This is free software; you can redistribute it and/or modify it under the
    +
    4terms of the MIT license. A copy of the license can be found in the file
    +
    5"LICENSE" at the root of this distribution.
    +
    6-----------------------------------------------------------------------------*/
    +
    7
    +
    8#error "documentation file only!"
    +
    9
    +
    10
    +
    92
    +
    93
    +
    97
    +
    98
    +
    102void mi_free(void* p);
    +
    103
    +
    108void* mi_malloc(size_t size);
    +
    109
    +
    114void* mi_zalloc(size_t size);
    +
    115
    +
    125void* mi_calloc(size_t count, size_t size);
    +
    126
    +
    139void* mi_realloc(void* p, size_t newsize);
    +
    140
    +
    151void* mi_recalloc(void* p, size_t count, size_t size);
    +
    152
    +
    166void* mi_expand(void* p, size_t newsize);
    +
    167
    +
    177void* mi_mallocn(size_t count, size_t size);
    +
    178
    +
    188void* mi_reallocn(void* p, size_t count, size_t size);
    +
    189
    +
    206void* mi_reallocf(void* p, size_t newsize);
    +
    207
    +
    208
    +
    217char* mi_strdup(const char* s);
    +
    218
    +
    228char* mi_strndup(const char* s, size_t n);
    +
    229
    +
    242char* mi_realpath(const char* fname, char* resolved_name);
    +
    243
    +
    245
    +
    246// ------------------------------------------------------
    +
    247// Extended functionality
    +
    248// ------------------------------------------------------
    +
    249
    +
    253
    +
    256#define MI_SMALL_SIZE_MAX (128*sizeof(void*))
    +
    257
    +
    265void* mi_malloc_small(size_t size);
    +
    266
    +
    274void* mi_zalloc_small(size_t size);
    +
    275
    +
    289size_t mi_usable_size(void* p);
    +
    290
    +
    300size_t mi_good_size(size_t size);
    +
    301
    +
    309void mi_collect(bool force);
    +
    310
    +
    315void mi_stats_print(void* out);
    +
    316
    +
    322void mi_stats_print_out(mi_output_fun* out, void* arg);
    +
    323
    +
    325void mi_stats_reset(void);
    +
    326
    +
    328void mi_stats_merge(void);
    +
    329
    +
    333void mi_thread_init(void);
    +
    334
    +
    339void mi_thread_done(void);
    +
    340
    + +
    347
    +
    354typedef void (mi_deferred_free_fun)(bool force, unsigned long long heartbeat, void* arg);
    +
    355
    +
    371void mi_register_deferred_free(mi_deferred_free_fun* deferred_free, void* arg);
    +
    372
    +
    378typedef void (mi_output_fun)(const char* msg, void* arg);
    +
    379
    +
    386void mi_register_output(mi_output_fun* out, void* arg);
    +
    387
    +
    393typedef void (mi_error_fun)(int err, void* arg);
    +
    394
    +
    410void mi_register_error(mi_error_fun* errfun, void* arg);
    +
    411
    +
    416bool mi_is_in_heap_region(const void* p);
    +
    417
    +
    426int mi_reserve_os_memory(size_t size, bool commit, bool allow_large);
    +
    427
    +
    439bool mi_manage_os_memory(void* start, size_t size, bool is_committed, bool is_large, bool is_zero, int numa_node);
    +
    440
    +
    453int mi_reserve_huge_os_pages_interleave(size_t pages, size_t numa_nodes, size_t timeout_msecs);
    +
    454
    +
    467int mi_reserve_huge_os_pages_at(size_t pages, int numa_node, size_t timeout_msecs);
    +
    468
    +
    469
    + +
    475
    +
    489void mi_process_info(size_t* elapsed_msecs, size_t* user_msecs, size_t* system_msecs, size_t* current_rss, size_t* peak_rss, size_t* current_commit, size_t* peak_commit, size_t* page_faults);
    +
    490
    +
    495void mi_debug_show_arenas(bool show_inuse, bool show_abandoned, bool show_purge);
    +
    496
    +
    499typedef int mi_arena_id_t;
    +
    500
    +
    505void* mi_arena_area(mi_arena_id_t arena_id, size_t* size);
    +
    506
    +
    514int mi_reserve_huge_os_pages_at_ex(size_t pages, int numa_node, size_t timeout_msecs, bool exclusive, mi_arena_id_t* arena_id);
    +
    515
    +
    523int mi_reserve_os_memory_ex(size_t size, bool commit, bool allow_large, bool exclusive, mi_arena_id_t* arena_id);
    +
    524
    +
    535bool mi_manage_os_memory_ex(void* start, size_t size, bool is_committed, bool is_large, bool is_zero, int numa_node, bool exclusive, mi_arena_id_t* arena_id);
    +
    536
    + +
    541
    +
    552mi_heap_t* mi_heap_new_ex(int heap_tag, bool allow_destroy, mi_arena_id_t arena_id);
    +
    553
    +
    557typedef void* mi_subproc_id_t;
    +
    558
    + +
    561
    + +
    565
    + +
    570
    + +
    574
    +
    575
    +
    577
    +
    578// ------------------------------------------------------
    +
    579// Aligned allocation
    +
    580// ------------------------------------------------------
    +
    581
    +
    589
    +
    607void* mi_malloc_aligned(size_t size, size_t alignment);
    +
    608void* mi_zalloc_aligned(size_t size, size_t alignment);
    +
    609void* mi_calloc_aligned(size_t count, size_t size, size_t alignment);
    +
    610void* mi_realloc_aligned(void* p, size_t newsize, size_t alignment);
    +
    611
    +
    623void* mi_malloc_aligned_at(size_t size, size_t alignment, size_t offset);
    +
    624void* mi_zalloc_aligned_at(size_t size, size_t alignment, size_t offset);
    +
    625void* mi_calloc_aligned_at(size_t count, size_t size, size_t alignment, size_t offset);
    +
    626void* mi_realloc_aligned_at(void* p, size_t newsize, size_t alignment, size_t offset);
    +
    627
    +
    629
    +
    635
    +
    640struct mi_heap_s;
    +
    641
    +
    646typedef struct mi_heap_s mi_heap_t;
    +
    647
    + +
    650
    + +
    659
    + +
    668
    + +
    673
    + +
    677
    + +
    684
    +
    686void mi_heap_collect(mi_heap_t* heap, bool force);
    +
    687
    +
    690void* mi_heap_malloc(mi_heap_t* heap, size_t size);
    +
    691
    +
    695void* mi_heap_malloc_small(mi_heap_t* heap, size_t size);
    +
    696
    +
    699void* mi_heap_zalloc(mi_heap_t* heap, size_t size);
    +
    700
    +
    703void* mi_heap_calloc(mi_heap_t* heap, size_t count, size_t size);
    +
    704
    +
    707void* mi_heap_mallocn(mi_heap_t* heap, size_t count, size_t size);
    +
    708
    +
    711char* mi_heap_strdup(mi_heap_t* heap, const char* s);
    +
    712
    +
    715char* mi_heap_strndup(mi_heap_t* heap, const char* s, size_t n);
    +
    716
    +
    719char* mi_heap_realpath(mi_heap_t* heap, const char* fname, char* resolved_name);
    +
    720
    +
    721void* mi_heap_realloc(mi_heap_t* heap, void* p, size_t newsize);
    +
    722void* mi_heap_reallocn(mi_heap_t* heap, void* p, size_t count, size_t size);
    +
    723void* mi_heap_reallocf(mi_heap_t* heap, void* p, size_t newsize);
    +
    724
    +
    725void* mi_heap_malloc_aligned(mi_heap_t* heap, size_t size, size_t alignment);
    +
    726void* mi_heap_malloc_aligned_at(mi_heap_t* heap, size_t size, size_t alignment, size_t offset);
    +
    727void* mi_heap_zalloc_aligned(mi_heap_t* heap, size_t size, size_t alignment);
    +
    728void* mi_heap_zalloc_aligned_at(mi_heap_t* heap, size_t size, size_t alignment, size_t offset);
    +
    729void* mi_heap_calloc_aligned(mi_heap_t* heap, size_t count, size_t size, size_t alignment);
    +
    730void* mi_heap_calloc_aligned_at(mi_heap_t* heap, size_t count, size_t size, size_t alignment, size_t offset);
    +
    731void* mi_heap_realloc_aligned(mi_heap_t* heap, void* p, size_t newsize, size_t alignment);
    +
    732void* mi_heap_realloc_aligned_at(mi_heap_t* heap, void* p, size_t newsize, size_t alignment, size_t offset);
    +
    733
    +
    735
    +
    736
    +
    745
    +
    746void* mi_rezalloc(void* p, size_t newsize);
    +
    747void* mi_recalloc(void* p, size_t newcount, size_t size) ;
    +
    748
    +
    749void* mi_rezalloc_aligned(void* p, size_t newsize, size_t alignment);
    +
    750void* mi_rezalloc_aligned_at(void* p, size_t newsize, size_t alignment, size_t offset);
    +
    751void* mi_recalloc_aligned(void* p, size_t newcount, size_t size, size_t alignment);
    +
    752void* mi_recalloc_aligned_at(void* p, size_t newcount, size_t size, size_t alignment, size_t offset);
    +
    753
    +
    754void* mi_heap_rezalloc(mi_heap_t* heap, void* p, size_t newsize);
    +
    755void* mi_heap_recalloc(mi_heap_t* heap, void* p, size_t newcount, size_t size);
    +
    756
    +
    757void* mi_heap_rezalloc_aligned(mi_heap_t* heap, void* p, size_t newsize, size_t alignment);
    +
    758void* mi_heap_rezalloc_aligned_at(mi_heap_t* heap, void* p, size_t newsize, size_t alignment, size_t offset);
    +
    759void* mi_heap_recalloc_aligned(mi_heap_t* heap, void* p, size_t newcount, size_t size, size_t alignment);
    +
    760void* mi_heap_recalloc_aligned_at(mi_heap_t* heap, void* p, size_t newcount, size_t size, size_t alignment, size_t offset);
    +
    761
    +
    763
    +
    772
    +
    784#define mi_malloc_tp(tp) ((tp*)mi_malloc(sizeof(tp)))
    +
    785
    +
    787#define mi_zalloc_tp(tp) ((tp*)mi_zalloc(sizeof(tp)))
    +
    788
    +
    790#define mi_calloc_tp(tp,count) ((tp*)mi_calloc(count,sizeof(tp)))
    +
    791
    +
    793#define mi_mallocn_tp(tp,count) ((tp*)mi_mallocn(count,sizeof(tp)))
    +
    794
    +
    796#define mi_reallocn_tp(p,tp,count) ((tp*)mi_reallocn(p,count,sizeof(tp)))
    +
    797
    +
    799#define mi_heap_malloc_tp(hp,tp) ((tp*)mi_heap_malloc(hp,sizeof(tp)))
    +
    800
    +
    802#define mi_heap_zalloc_tp(hp,tp) ((tp*)mi_heap_zalloc(hp,sizeof(tp)))
    +
    803
    +
    805#define mi_heap_calloc_tp(hp,tp,count) ((tp*)mi_heap_calloc(hp,count,sizeof(tp)))
    +
    806
    +
    808#define mi_heap_mallocn_tp(hp,tp,count) ((tp*)mi_heap_mallocn(hp,count,sizeof(tp)))
    +
    809
    +
    811#define mi_heap_reallocn_tp(hp,p,tp,count) ((tp*)mi_heap_reallocn(p,count,sizeof(tp)))
    +
    812
    +
    814#define mi_heap_recalloc_tp(hp,p,tp,count) ((tp*)mi_heap_recalloc(p,count,sizeof(tp)))
    +
    815
    +
    817
    +
    823
    +
    830bool mi_heap_contains_block(mi_heap_t* heap, const void* p);
    +
    831
    +
    840bool mi_heap_check_owned(mi_heap_t* heap, const void* p);
    +
    841
    +
    849bool mi_check_owned(const void* p);
    +
    850
    +
    +
    853typedef struct mi_heap_area_s {
    +
    854 void* blocks;
    +
    855 size_t reserved;
    +
    856 size_t committed;
    +
    857 size_t used;
    +
    858 size_t block_size;
    + + + +
    +
    862
    +
    870typedef bool (mi_block_visit_fun)(const mi_heap_t* heap, const mi_heap_area_t* area, void* block, size_t block_size, void* arg);
    +
    871
    +
    883bool mi_heap_visit_blocks(const mi_heap_t* heap, bool visit_all_blocks, mi_block_visit_fun* visitor, void* arg);
    +
    884
    +
    900bool mi_abandoned_visit_blocks(mi_subproc_id_t subproc_id, int heap_tag, bool visit_blocks, mi_block_visit_fun* visitor, void* arg);
    +
    901
    +
    903
    +
    909
    + +
    948
    +
    949
    + + + +
    953void mi_option_set_enabled(mi_option_t option, bool enable);
    + +
    955
    + +
    957long mi_option_get_clamp(mi_option_t option, long min, long max);
    + +
    959
    +
    960void mi_option_set(mi_option_t option, long value);
    +
    961void mi_option_set_default(mi_option_t option, long value);
    +
    962
    +
    963
    +
    965
    +
    972
    +
    974void mi_cfree(void* p);
    +
    975void* mi__expand(void* p, size_t newsize);
    +
    976
    +
    977void* mi_recalloc(void* p, size_t count, size_t size);
    +
    978size_t mi_malloc_size(const void* p);
    +
    979size_t mi_malloc_good_size(size_t size);
    +
    980size_t mi_malloc_usable_size(const void *p);
    +
    981
    +
    982int mi_posix_memalign(void** p, size_t alignment, size_t size);
    +
    983int mi__posix_memalign(void** p, size_t alignment, size_t size);
    +
    984void* mi_memalign(size_t alignment, size_t size);
    +
    985void* mi_valloc(size_t size);
    +
    986void* mi_pvalloc(size_t size);
    +
    987void* mi_aligned_alloc(size_t alignment, size_t size);
    +
    988
    +
    989unsigned short* mi_wcsdup(const unsigned short* s);
    +
    990unsigned char* mi_mbsdup(const unsigned char* s);
    +
    991int mi_dupenv_s(char** buf, size_t* size, const char* name);
    +
    992int mi_wdupenv_s(unsigned short** buf, size_t* size, const unsigned short* name);
    +
    993
    +
    996void* mi_reallocarray(void* p, size_t count, size_t size);
    +
    997
    +
    999int mi_reallocarr(void* p, size_t count, size_t size);
    +
    1000
    +
    1001void* mi_aligned_recalloc(void* p, size_t newcount, size_t size, size_t alignment);
    +
    1002void* mi_aligned_offset_recalloc(void* p, size_t newcount, size_t size, size_t alignment, size_t offset);
    +
    1003
    +
    1004void mi_free_size(void* p, size_t size);
    +
    1005void mi_free_size_aligned(void* p, size_t size, size_t alignment);
    +
    1006void mi_free_aligned(void* p, size_t alignment);
    +
    1007
    +
    1009
    +
    1022
    +
    1024void* mi_new(std::size_t n) noexcept(false);
    +
    1025
    +
    1027void* mi_new_n(size_t count, size_t size) noexcept(false);
    +
    1028
    +
    1030void* mi_new_aligned(std::size_t n, std::align_val_t alignment) noexcept(false);
    +
    1031
    +
    1033void* mi_new_nothrow(size_t n);
    +
    1034
    +
    1036void* mi_new_aligned_nothrow(size_t n, size_t alignment);
    +
    1037
    +
    1039void* mi_new_realloc(void* p, size_t newsize);
    +
    1040
    +
    1042void* mi_new_reallocn(void* p, size_t newcount, size_t size);
    +
    1043
    +
    1051template<class T> struct mi_stl_allocator { }
    +
    1052
    +
    1054
    +
    1113
    +
    1198
    +
    1263
    +
    1433
    +
    void * mi_malloc_aligned_at(size_t size, size_t alignment, size_t offset)
    Allocate size bytes aligned by alignment at a specified offset.
    +
    void * mi_calloc_aligned(size_t count, size_t size, size_t alignment)
    +
    void * mi_realloc_aligned(void *p, size_t newsize, size_t alignment)
    +
    void * mi_malloc_aligned(size_t size, size_t alignment)
    Allocate size bytes aligned by alignment.
    +
    void * mi_zalloc_aligned_at(size_t size, size_t alignment, size_t offset)
    +
    void * mi_calloc_aligned_at(size_t count, size_t size, size_t alignment, size_t offset)
    +
    void * mi_zalloc_aligned(size_t size, size_t alignment)
    +
    void * mi_realloc_aligned_at(void *p, size_t newsize, size_t alignment, size_t offset)
    +
    int heap_tag
    heap tag associated with this area (see mi_heap_new_ex)
    Definition mimalloc-doc.h:860
    +
    size_t block_size
    size in bytes of one block
    Definition mimalloc-doc.h:858
    +
    size_t committed
    current committed bytes of this area
    Definition mimalloc-doc.h:856
    +
    size_t full_block_size
    size in bytes of a full block including padding and metadata.
    Definition mimalloc-doc.h:859
    +
    size_t used
    bytes in use by allocated blocks
    Definition mimalloc-doc.h:857
    +
    void * blocks
    start of the area containing heap blocks
    Definition mimalloc-doc.h:854
    +
    size_t reserved
    bytes reserved for this area
    Definition mimalloc-doc.h:855
    bool mi_heap_check_owned(mi_heap_t *heap, const void *p)
    Check safely if any pointer is part of a heap.
    bool mi_check_owned(const void *p)
    Check safely if any pointer is part of the default heap of this thread.
    +
    bool mi_abandoned_visit_blocks(mi_subproc_id_t subproc_id, int heap_tag, bool visit_blocks, mi_block_visit_fun *visitor, void *arg)
    Visit all areas and blocks in abandoned heaps.
    bool mi_heap_visit_blocks(const mi_heap_t *heap, bool visit_all_blocks, mi_block_visit_fun *visitor, void *arg)
    Visit all areas and blocks in a heap.
    +
    bool mi_block_visit_fun(const mi_heap_t *heap, const mi_heap_area_t *area, void *block, size_t block_size, void *arg)
    Visitor function passed to mi_heap_visit_blocks()
    Definition mimalloc-doc.h:870
    bool mi_heap_contains_block(mi_heap_t *heap, const void *p)
    Does a heap contain a pointer to a previously allocated block?
    -
    bool() mi_block_visit_fun(const mi_heap_t *heap, const mi_heap_area_t *area, void *block, size_t block_size, void *arg)
    Visitor function passed to mi_heap_visit_blocks()
    Definition: mimalloc-doc.h:776
    -
    An area of heap space contains blocks of a single size.
    Definition: mimalloc-doc.h:761
    -
    void * mi_new_reallocn(void *p, size_t newcount, size_t size)
    like mi_reallocn(), but when out of memory, use std::get_new_handler and raise std::bad_alloc excepti...
    -
    void * mi_new_realloc(void *p, size_t newsize)
    like mi_realloc(), but when out of memory, use std::get_new_handler and raise std::bad_alloc exceptio...
    -
    void * mi_new(std::size_t n) noexcept(false)
    like mi_malloc(), but when out of memory, use std::get_new_handler and raise std::bad_alloc exception...
    -
    void * mi_new_aligned_nothrow(size_t n, size_t alignment)
    like mi_malloc_aligned, but when out of memory, use std::get_new_handler but return NULL on failure.
    -
    void * mi_new_n(size_t count, size_t size) noexcept(false)
    like mi_mallocn(), but when out of memory, use std::get_new_handler and raise std::bad_alloc exceptio...
    -
    void * mi_new_nothrow(size_t n)
    like mi_malloc, but when out of memory, use std::get_new_handler but return NULL on failure.
    -
    void * mi_new_aligned(std::size_t n, std::align_val_t alignment) noexcept(false)
    like mi_malloc_aligned(), but when out of memory, use std::get_new_handler and raise std::bad_alloc e...
    -
    std::allocator implementation for mimalloc for use in STL containers.
    Definition: mimalloc-doc.h:912
    +
    An area of heap space contains blocks of a single size.
    Definition mimalloc-doc.h:853
    +
    void * mi_new_nothrow(size_t n)
    like mi_malloc, but when out of memory, use std::get_new_handler but return NULL on failure.
    +
    void * mi_new(std::size_t n) noexcept(false)
    like mi_malloc(), but when out of memory, use std::get_new_handler and raise std::bad_alloc exception...
    +
    void * mi_new_realloc(void *p, size_t newsize)
    like mi_realloc(), but when out of memory, use std::get_new_handler and raise std::bad_alloc exceptio...
    +
    void * mi_new_aligned(std::size_t n, std::align_val_t alignment) noexcept(false)
    like mi_malloc_aligned(), but when out of memory, use std::get_new_handler and raise std::bad_alloc e...
    +
    void * mi_new_aligned_nothrow(size_t n, size_t alignment)
    like mi_malloc_aligned, but when out of memory, use std::get_new_handler but return NULL on failure.
    +
    void * mi_new_reallocn(void *p, size_t newcount, size_t size)
    like mi_reallocn(), but when out of memory, use std::get_new_handler and raise std::bad_alloc excepti...
    +
    void * mi_new_n(size_t count, size_t size) noexcept(false)
    like mi_mallocn(), but when out of memory, use std::get_new_handler and raise std::bad_alloc exceptio...
    +
    std::allocator implementation for mimalloc for use in STL containers.
    Definition mimalloc-doc.h:1051
    int mi_reserve_os_memory(size_t size, bool commit, bool allow_large)
    Reserve OS memory for use by mimalloc.
    size_t mi_usable_size(void *p)
    Return the available bytes in a memory block.
    void mi_thread_done(void)
    Uninitialize mimalloc on a thread.
    -
    void * mi_zalloc_small(size_t size)
    Allocate a zero initialized small object.
    -
    void() mi_error_fun(int err, void *arg)
    Type of error callback functions.
    Definition: mimalloc-doc.h:391
    -
    void() mi_deferred_free_fun(bool force, unsigned long long heartbeat, void *arg)
    Type of deferred free functions.
    Definition: mimalloc-doc.h:352
    +
    void mi_deferred_free_fun(bool force, unsigned long long heartbeat, void *arg)
    Type of deferred free functions.
    Definition mimalloc-doc.h:354
    void mi_stats_print(void *out)
    Deprecated.
    +
    mi_subproc_id_t mi_subproc_main(void)
    Get the main sub-process identifier.
    int mi_reserve_huge_os_pages_interleave(size_t pages, size_t numa_nodes, size_t timeout_msecs)
    Reserve pages of huge OS pages (1GiB) evenly divided over numa_nodes nodes, but stops after at most t...
    +
    int mi_reserve_os_memory_ex(size_t size, bool commit, bool allow_large, bool exclusive, mi_arena_id_t *arena_id)
    Reserve OS memory to be managed in an arena.
    void mi_register_deferred_free(mi_deferred_free_fun *deferred_free, void *arg)
    Register a deferred free function.
    +
    mi_heap_t * mi_heap_new_ex(int heap_tag, bool allow_destroy, mi_arena_id_t arena_id)
    Create a new heap.
    void mi_stats_reset(void)
    Reset statistics.
    +
    bool mi_manage_os_memory_ex(void *start, size_t size, bool is_committed, bool is_large, bool is_zero, int numa_node, bool exclusive, mi_arena_id_t *arena_id)
    Manage externally allocated memory as a mimalloc arena.
    void mi_collect(bool force)
    Eagerly free memory.
    bool mi_manage_os_memory(void *start, size_t size, bool is_committed, bool is_large, bool is_zero, int numa_node)
    Manage a particular memory area for use by mimalloc.
    +
    void * mi_zalloc_small(size_t size)
    Allocate a zero initialized small object.
    void mi_stats_print_out(mi_output_fun *out, void *arg)
    Print the main statistics.
    +
    int mi_reserve_huge_os_pages_at_ex(size_t pages, int numa_node, size_t timeout_msecs, bool exclusive, mi_arena_id_t *arena_id)
    Reserve huge OS pages (1GiB) into a single arena.
    bool mi_is_in_heap_region(const void *p)
    Is a pointer part of our heap?
    -
    void * mi_malloc_small(size_t size)
    Allocate a small object.
    int mi_reserve_huge_os_pages_at(size_t pages, int numa_node, size_t timeout_msecs)
    Reserve pages of huge OS pages (1GiB) at a specific numa_node, but stops after at most timeout_msecs ...
    void mi_process_info(size_t *elapsed_msecs, size_t *user_msecs, size_t *system_msecs, size_t *current_rss, size_t *peak_rss, size_t *current_commit, size_t *peak_commit, size_t *page_faults)
    Return process information (time and memory usage).
    +
    void * mi_malloc_small(size_t size)
    Allocate a small object.
    +
    mi_subproc_id_t mi_subproc_new(void)
    Create a fresh sub-process (with no associated threads yet).
    +
    void mi_error_fun(int err, void *arg)
    Type of error callback functions.
    Definition mimalloc-doc.h:393
    void mi_stats_merge(void)
    Merge thread local statistics with the main statistics and reset.
    +
    void * mi_subproc_id_t
    A process can associate threads with sub-processes.
    Definition mimalloc-doc.h:557
    +
    int mi_arena_id_t
    Mimalloc uses large (virtual) memory areas, called "arena"s, from the OS to manage its memory.
    Definition mimalloc-doc.h:499
    +
    void * mi_arena_area(mi_arena_id_t arena_id, size_t *size)
    Return the size of an arena.
    void mi_register_error(mi_error_fun *errfun, void *arg)
    Register an error callback function.
    +
    void mi_subproc_delete(mi_subproc_id_t subproc)
    Delete a previously created sub-process.
    bool mi_is_redirected()
    Is the C runtime malloc API redirected?
    +
    mi_heap_t * mi_heap_new_in_arena(mi_arena_id_t arena_id)
    Create a new heap that only allocates in the specified arena.
    void mi_thread_stats_print_out(mi_output_fun *out, void *arg)
    Print out heap statistics for this thread.
    size_t mi_good_size(size_t size)
    Return the used allocation size.
    -
    void() mi_output_fun(const char *msg, void *arg)
    Type of output functions.
    Definition: mimalloc-doc.h:376
    +
    void mi_debug_show_arenas(bool show_inuse, bool show_abandoned, bool show_purge)
    Show all current arena's.
    +
    void mi_subproc_add_current_thread(mi_subproc_id_t subproc)
    Add the current thread to the given sub-process.
    +
    void mi_output_fun(const char *msg, void *arg)
    Type of output functions.
    Definition mimalloc-doc.h:378
    void mi_register_output(mi_output_fun *out, void *arg)
    Register an output function.
    void mi_thread_init(void)
    Initialize mimalloc on a thread.
    -
    char * mi_heap_realpath(mi_heap_t *heap, const char *fname, char *resolved_name)
    Resolve a file path name using a specific heap to allocate the result.
    -
    void * mi_heap_calloc_aligned_at(mi_heap_t *heap, size_t count, size_t size, size_t alignment, size_t offset)
    -
    char * mi_heap_strdup(mi_heap_t *heap, const char *s)
    Duplicate a string in a specific heap.
    -
    void * mi_heap_malloc_aligned_at(mi_heap_t *heap, size_t size, size_t alignment, size_t offset)
    +
    void * mi_heap_malloc_small(mi_heap_t *heap, size_t size)
    Allocate a small object in a specific heap.
    +
    mi_heap_t * mi_heap_get_default()
    Get the default heap that is used for mi_malloc() et al.
    void mi_heap_delete(mi_heap_t *heap)
    Delete a previously allocated heap.
    -
    struct mi_heap_s mi_heap_t
    Type of first-class heaps.
    Definition: mimalloc-doc.h:554
    -
    void * mi_heap_zalloc_aligned_at(mi_heap_t *heap, size_t size, size_t alignment, size_t offset)
    -
    void * mi_heap_reallocf(mi_heap_t *heap, void *p, size_t newsize)
    -
    void * mi_heap_calloc_aligned(mi_heap_t *heap, size_t count, size_t size, size_t alignment)
    -
    mi_heap_t * mi_heap_get_backing()
    Get the backing heap.
    -
    mi_heap_t * mi_heap_new()
    Create a new heap that can be used for allocation.
    +
    void * mi_heap_malloc_aligned(mi_heap_t *heap, size_t size, size_t alignment)
    +
    mi_heap_t * mi_heap_set_default(mi_heap_t *heap)
    Set the default heap to use in the current thread for mi_malloc() et al.
    +
    struct mi_heap_s mi_heap_t
    Type of first-class heaps.
    Definition mimalloc-doc.h:646
    +
    void * mi_heap_zalloc_aligned_at(mi_heap_t *heap, size_t size, size_t alignment, size_t offset)
    +
    char * mi_heap_realpath(mi_heap_t *heap, const char *fname, char *resolved_name)
    Resolve a file path name using a specific heap to allocate the result.
    +
    char * mi_heap_strdup(mi_heap_t *heap, const char *s)
    Duplicate a string in a specific heap.
    +
    void * mi_heap_zalloc_aligned(mi_heap_t *heap, size_t size, size_t alignment)
    +
    void * mi_heap_realloc_aligned_at(mi_heap_t *heap, void *p, size_t newsize, size_t alignment, size_t offset)
    void mi_heap_collect(mi_heap_t *heap, bool force)
    Release outstanding resources in a specific heap.
    -
    void * mi_heap_mallocn(mi_heap_t *heap, size_t count, size_t size)
    Allocate count elements in a specific heap.
    -
    mi_heap_t * mi_heap_get_default()
    Get the default heap that is used for mi_malloc() et al.
    -
    char * mi_heap_strndup(mi_heap_t *heap, const char *s, size_t n)
    Duplicate a string of at most length n in a specific heap.
    -
    void * mi_heap_zalloc(mi_heap_t *heap, size_t size)
    Allocate zero-initialized in a specific heap.
    -
    void * mi_heap_malloc(mi_heap_t *heap, size_t size)
    Allocate in a specific heap.
    void mi_heap_destroy(mi_heap_t *heap)
    Destroy a heap, freeing all its still allocated blocks.
    -
    void * mi_heap_malloc_small(mi_heap_t *heap, size_t size)
    Allocate a small object in a specific heap.
    -
    void * mi_heap_zalloc_aligned(mi_heap_t *heap, size_t size, size_t alignment)
    -
    void * mi_heap_calloc(mi_heap_t *heap, size_t count, size_t size)
    Allocate count zero-initialized elements in a specific heap.
    -
    void * mi_heap_realloc(mi_heap_t *heap, void *p, size_t newsize)
    -
    void * mi_heap_malloc_aligned(mi_heap_t *heap, size_t size, size_t alignment)
    -
    mi_heap_t * mi_heap_set_default(mi_heap_t *heap)
    Set the default heap to use for mi_malloc() et al.
    -
    void * mi_heap_reallocn(mi_heap_t *heap, void *p, size_t count, size_t size)
    -
    void * mi_heap_realloc_aligned_at(mi_heap_t *heap, void *p, size_t newsize, size_t alignment, size_t offset)
    -
    void * mi_heap_realloc_aligned(mi_heap_t *heap, void *p, size_t newsize, size_t alignment)
    -
    char * mi_realpath(const char *fname, char *resolved_name)
    Resolve a file path name.
    -
    void * mi_mallocn(size_t count, size_t size)
    Allocate count elements of size bytes.
    +
    void * mi_heap_calloc_aligned_at(mi_heap_t *heap, size_t count, size_t size, size_t alignment, size_t offset)
    +
    mi_heap_t * mi_heap_new()
    Create a new heap that can be used for allocation.
    +
    void * mi_heap_mallocn(mi_heap_t *heap, size_t count, size_t size)
    Allocate count elements in a specific heap.
    +
    void * mi_heap_malloc(mi_heap_t *heap, size_t size)
    Allocate in a specific heap.
    +
    void * mi_heap_zalloc(mi_heap_t *heap, size_t size)
    Allocate zero-initialized in a specific heap.
    +
    void * mi_heap_calloc(mi_heap_t *heap, size_t count, size_t size)
    Allocate count zero-initialized elements in a specific heap.
    +
    void * mi_heap_realloc(mi_heap_t *heap, void *p, size_t newsize)
    +
    mi_heap_t * mi_heap_get_backing()
    Get the backing heap.
    +
    void * mi_heap_calloc_aligned(mi_heap_t *heap, size_t count, size_t size, size_t alignment)
    +
    void * mi_heap_reallocn(mi_heap_t *heap, void *p, size_t count, size_t size)
    +
    void * mi_heap_realloc_aligned(mi_heap_t *heap, void *p, size_t newsize, size_t alignment)
    +
    char * mi_heap_strndup(mi_heap_t *heap, const char *s, size_t n)
    Duplicate a string of at most length n in a specific heap.
    +
    void * mi_heap_reallocf(mi_heap_t *heap, void *p, size_t newsize)
    +
    void * mi_heap_malloc_aligned_at(mi_heap_t *heap, size_t size, size_t alignment, size_t offset)
    +
    void * mi_realloc(void *p, size_t newsize)
    Re-allocate memory to newsize bytes.
    +
    void * mi_expand(void *p, size_t newsize)
    Try to re-allocate memory to newsize bytes in place.
    void * mi_recalloc(void *p, size_t count, size_t size)
    Re-allocate memory to count elements of size bytes, with extra memory initialized to zero.
    -
    void * mi_malloc(size_t size)
    Allocate size bytes.
    -
    void * mi_reallocn(void *p, size_t count, size_t size)
    Re-allocate memory to count elements of size bytes.
    -
    void * mi_calloc(size_t count, size_t size)
    Allocate zero-initialized count elements of size bytes.
    -
    char * mi_strndup(const char *s, size_t n)
    Allocate and duplicate a string up to n bytes.
    -
    void * mi_expand(void *p, size_t newsize)
    Try to re-allocate memory to newsize bytes in place.
    -
    char * mi_strdup(const char *s)
    Allocate and duplicate a string.
    -
    void * mi_realloc(void *p, size_t newsize)
    Re-allocate memory to newsize bytes.
    +
    char * mi_strdup(const char *s)
    Allocate and duplicate a string.
    +
    char * mi_strndup(const char *s, size_t n)
    Allocate and duplicate a string up to n bytes.
    +
    void * mi_reallocf(void *p, size_t newsize)
    Re-allocate memory to newsize bytes,.
    +
    void * mi_mallocn(size_t count, size_t size)
    Allocate count elements of size bytes.
    +
    void * mi_calloc(size_t count, size_t size)
    Allocate zero-initialized count elements of size bytes.
    +
    void * mi_reallocn(void *p, size_t count, size_t size)
    Re-allocate memory to count elements of size bytes.
    +
    char * mi_realpath(const char *fname, char *resolved_name)
    Resolve a file path name.
    +
    void * mi_malloc(size_t size)
    Allocate size bytes.
    +
    void * mi_zalloc(size_t size)
    Allocate zero-initialized size bytes.
    void mi_free(void *p)
    Free previously allocated memory.
    -
    void * mi_zalloc(size_t size)
    Allocate zero-initialized size bytes.
    -
    void * mi_reallocf(void *p, size_t newsize)
    Re-allocate memory to newsize bytes,.
    void mi_option_enable(mi_option_t option)
    +
    size_t mi_option_get_size(mi_option_t option)
    bool mi_option_is_enabled(mi_option_t option)
    void mi_option_set_enabled_default(mi_option_t option, bool enable)
    long mi_option_get(mi_option_t option)
    void mi_option_set_default(mi_option_t option, long value)
    +
    long mi_option_get_clamp(mi_option_t option, long min, long max)
    void mi_option_set_enabled(mi_option_t option, bool enable)
    void mi_option_disable(mi_option_t option)
    void mi_option_set(mi_option_t option, long value)
    -
    mi_option_t
    Runtime options.
    Definition: mimalloc-doc.h:800
    -
    @ mi_option_show_stats
    Print statistics to stderr when the program is done.
    Definition: mimalloc-doc.h:803
    -
    @ mi_option_use_numa_nodes
    Pretend there are at most N NUMA nodes.
    Definition: mimalloc-doc.h:815
    -
    @ mi_option_reset_delay
    Delay in milli-seconds before resetting a page (100ms by default)
    Definition: mimalloc-doc.h:814
    -
    @ mi_option_eager_commit_delay
    Experimental.
    Definition: mimalloc-doc.h:817
    -
    @ mi_option_eager_commit
    Eagerly commit segments (4MiB) (enabled by default).
    Definition: mimalloc-doc.h:806
    -
    @ mi_option_segment_cache
    The number of segments per thread to keep cached.
    Definition: mimalloc-doc.h:811
    -
    @ mi_option_eager_region_commit
    Eagerly commit large (256MiB) memory regions (enabled by default, except on Windows)
    Definition: mimalloc-doc.h:807
    -
    @ mi_option_large_os_pages
    Use large OS pages (2MiB in size) if possible.
    Definition: mimalloc-doc.h:808
    -
    @ mi_option_os_tag
    OS tag to assign to mimalloc'd memory.
    Definition: mimalloc-doc.h:818
    -
    @ _mi_option_last
    Definition: mimalloc-doc.h:819
    -
    @ mi_option_verbose
    Print verbose messages to stderr.
    Definition: mimalloc-doc.h:804
    -
    @ mi_option_reserve_huge_os_pages_at
    Reserve huge OS pages at node N.
    Definition: mimalloc-doc.h:810
    -
    @ mi_option_reset_decommits
    Experimental.
    Definition: mimalloc-doc.h:816
    -
    @ mi_option_reserve_huge_os_pages
    The number of huge OS pages (1GiB in size) to reserve at the start of the program.
    Definition: mimalloc-doc.h:809
    -
    @ mi_option_page_reset
    Reset page memory after mi_option_reset_delay milliseconds when it becomes free.
    Definition: mimalloc-doc.h:812
    -
    @ mi_option_segment_reset
    Experimental.
    Definition: mimalloc-doc.h:813
    -
    @ mi_option_show_errors
    Print error messages to stderr.
    Definition: mimalloc-doc.h:802
    +
    mi_option_t
    Runtime options.
    Definition mimalloc-doc.h:911
    +
    @ mi_option_abandoned_reclaim_on_free
    allow to reclaim an abandoned segment on a free (=1)
    Definition mimalloc-doc.h:941
    +
    @ mi_option_purge_extend_delay
    extend purge delay on each subsequent delay (=1)
    Definition mimalloc-doc.h:942
    +
    @ mi_option_show_stats
    Print statistics on termination.
    Definition mimalloc-doc.h:914
    +
    @ mi_option_use_numa_nodes
    0 = use all available numa nodes, otherwise use at most N nodes.
    Definition mimalloc-doc.h:935
    +
    @ mi_option_abandoned_page_purge
    immediately purge delayed purges on thread termination
    Definition mimalloc-doc.h:933
    +
    @ mi_option_eager_commit_delay
    the first N segments per thread are not eagerly committed (but per page in the segment on demand)
    Definition mimalloc-doc.h:931
    +
    @ mi_option_eager_commit
    eager commit segments? (after eager_commit_delay segments) (enabled by default).
    Definition mimalloc-doc.h:930
    +
    @ mi_option_visit_abandoned
    allow visiting heap blocks from abandoned threads (=0)
    Definition mimalloc-doc.h:944
    +
    @ mi_option_os_tag
    tag used for OS logging (macOS only for now) (=100)
    Definition mimalloc-doc.h:926
    +
    @ _mi_option_last
    Definition mimalloc-doc.h:946
    +
    @ mi_option_destroy_on_exit
    if set, release all memory on exit; sometimes used for dynamic unloading but can be unsafe
    Definition mimalloc-doc.h:939
    +
    @ mi_option_verbose
    Print verbose messages.
    Definition mimalloc-doc.h:915
    +
    @ mi_option_allow_large_os_pages
    allow large (2 or 4 MiB) OS pages, implies eager commit. If false, also disables THP for the process.
    Definition mimalloc-doc.h:923
    +
    @ mi_option_arena_purge_mult
    multiplier for purge_delay for the purging delay for arenas (=10)
    Definition mimalloc-doc.h:940
    +
    @ mi_option_retry_on_oom
    retry on out-of-memory for N milli seconds (=400), set to 0 to disable retries. (only on windows)
    Definition mimalloc-doc.h:927
    +
    @ mi_option_purge_decommits
    should a memory purge decommit? (=1). Set to 0 to use memory reset on a purge (instead of decommit)
    Definition mimalloc-doc.h:924
    +
    @ mi_option_limit_os_alloc
    If set to 1, do not use OS memory for allocation (but only pre-reserved arenas)
    Definition mimalloc-doc.h:937
    +
    @ mi_option_reserve_huge_os_pages_at
    Reserve N huge OS pages at a specific NUMA node N.
    Definition mimalloc-doc.h:921
    +
    @ mi_option_max_segment_reclaim
    max. percentage of the abandoned segments can be reclaimed per try (=10%)
    Definition mimalloc-doc.h:938
    +
    @ mi_option_arena_reserve
    initial memory size for arena reservation (= 1 GiB on 64-bit) (internally, this value is in KiB; use ...
    Definition mimalloc-doc.h:925
    +
    @ mi_option_reserve_huge_os_pages
    reserve N huge OS pages (1GiB pages) at startup
    Definition mimalloc-doc.h:920
    +
    @ mi_option_disallow_os_alloc
    1 = do not use OS memory for allocation (but only programmatically reserved arenas)
    Definition mimalloc-doc.h:936
    +
    @ mi_option_purge_delay
    memory purging is delayed by N milli seconds; use 0 for immediate purging or -1 for no purging at all...
    Definition mimalloc-doc.h:934
    +
    @ mi_option_disallow_arena_alloc
    1 = do not use arena's for allocation (except if using specific arena id's)
    Definition mimalloc-doc.h:943
    +
    @ mi_option_max_errors
    issue at most N error messages
    Definition mimalloc-doc.h:916
    +
    @ mi_option_max_warnings
    issue at most N warning messages
    Definition mimalloc-doc.h:917
    +
    @ mi_option_show_errors
    Print error messages.
    Definition mimalloc-doc.h:913
    +
    @ mi_option_reserve_os_memory
    reserve specified amount of OS memory in an arena at startup (internally, this value is in KiB; use m...
    Definition mimalloc-doc.h:922
    +
    @ mi_option_arena_eager_commit
    eager commit arenas? Use 2 to enable just on overcommit systems (=2)
    Definition mimalloc-doc.h:932
    size_t mi_malloc_usable_size(const void *p)
    void mi_free_aligned(void *p, size_t alignment)
    -
    void * mi_aligned_alloc(size_t alignment, size_t size)
    +
    void * mi_aligned_offset_recalloc(void *p, size_t newcount, size_t size, size_t alignment, size_t offset)
    +
    void * mi_aligned_alloc(size_t alignment, size_t size)
    size_t mi_malloc_size(const void *p)
    -
    void * mi_reallocarray(void *p, size_t count, size_t size)
    Correspond s to reallocarray in FreeBSD.
    +
    void * mi_valloc(size_t size)
    +
    void * mi_pvalloc(size_t size)
    +
    void * mi__expand(void *p, size_t newsize)
    +
    int mi_wdupenv_s(unsigned short **buf, size_t *size, const unsigned short *name)
    void mi_cfree(void *p)
    Just as free but also checks if the pointer p belongs to our heap.
    +
    void * mi_memalign(size_t alignment, size_t size)
    void mi_free_size_aligned(void *p, size_t size, size_t alignment)
    -
    void * mi_valloc(size_t size)
    +
    unsigned char * mi_mbsdup(const unsigned char *s)
    int mi_reallocarr(void *p, size_t count, size_t size)
    Corresponds to reallocarr in NetBSD.
    -
    void * mi_memalign(size_t alignment, size_t size)
    +
    size_t mi_malloc_good_size(size_t size)
    +
    unsigned short * mi_wcsdup(const unsigned short *s)
    +
    int mi_dupenv_s(char **buf, size_t *size, const char *name)
    int mi_posix_memalign(void **p, size_t alignment, size_t size)
    int mi__posix_memalign(void **p, size_t alignment, size_t size)
    +
    void * mi_reallocarray(void *p, size_t count, size_t size)
    Correspond s to reallocarray in FreeBSD.
    void mi_free_size(void *p, size_t size)
    -
    void * mi_pvalloc(size_t size)
    -
    void * mi_heap_rezalloc_aligned(mi_heap_t *heap, void *p, size_t newsize, size_t alignment)
    -
    void * mi_recalloc_aligned(void *p, size_t newcount, size_t size, size_t alignment)
    -
    void * mi_heap_recalloc_aligned_at(mi_heap_t *heap, void *p, size_t newcount, size_t size, size_t alignment, size_t offset)
    -
    void * mi_recalloc_aligned_at(void *p, size_t newcount, size_t size, size_t alignment, size_t offset)
    -
    void * mi_heap_recalloc(mi_heap_t *heap, void *p, size_t newcount, size_t size)
    -
    void * mi_rezalloc(void *p, size_t newsize)
    -
    void * mi_heap_recalloc_aligned(mi_heap_t *heap, void *p, size_t newcount, size_t size, size_t alignment)
    -
    void * mi_heap_rezalloc_aligned_at(mi_heap_t *heap, void *p, size_t newsize, size_t alignment, size_t offset)
    -
    void * mi_rezalloc_aligned(void *p, size_t newsize, size_t alignment)
    -
    void * mi_heap_rezalloc(mi_heap_t *heap, void *p, size_t newsize)
    -
    void * mi_rezalloc_aligned_at(void *p, size_t newsize, size_t alignment, size_t offset)
    +
    void * mi_aligned_recalloc(void *p, size_t newcount, size_t size, size_t alignment)
    +
    void * mi_heap_recalloc_aligned_at(mi_heap_t *heap, void *p, size_t newcount, size_t size, size_t alignment, size_t offset)
    +
    void * mi_heap_rezalloc_aligned_at(mi_heap_t *heap, void *p, size_t newsize, size_t alignment, size_t offset)
    +
    void * mi_recalloc_aligned(void *p, size_t newcount, size_t size, size_t alignment)
    +
    void * mi_rezalloc_aligned(void *p, size_t newsize, size_t alignment)
    +
    void * mi_heap_rezalloc_aligned(mi_heap_t *heap, void *p, size_t newsize, size_t alignment)
    +
    void * mi_rezalloc_aligned_at(void *p, size_t newsize, size_t alignment, size_t offset)
    +
    void * mi_heap_recalloc_aligned(mi_heap_t *heap, void *p, size_t newcount, size_t size, size_t alignment)
    +
    void * mi_heap_rezalloc(mi_heap_t *heap, void *p, size_t newsize)
    +
    void * mi_recalloc_aligned_at(void *p, size_t newcount, size_t size, size_t alignment, size_t offset)
    +
    void * mi_heap_recalloc(mi_heap_t *heap, void *p, size_t newcount, size_t size)
    +
    void * mi_rezalloc(void *p, size_t newsize)
    diff --git a/depends/mimalloc/docs/mimalloc-doxygen.css b/depends/mimalloc/docs/mimalloc-doxygen.css index b24f5643268f..c889a8d2c326 100644 --- a/depends/mimalloc/docs/mimalloc-doxygen.css +++ b/depends/mimalloc/docs/mimalloc-doxygen.css @@ -47,3 +47,14 @@ div.fragment { #nav-sync img { display: none; } +h1,h2,h3,h4,h5,h6 { + transition:none; +} +.memtitle { + background-image: none; + background-color: #EEE; +} +table.memproto, .memproto { + text-shadow: none; + font-size: 110%; +} diff --git a/depends/mimalloc/docs/modules.html b/depends/mimalloc/docs/modules.html index 7457cb9fd23f..0129057cd1a7 100644 --- a/depends/mimalloc/docs/modules.html +++ b/depends/mimalloc/docs/modules.html @@ -43,8 +43,8 @@ onmouseout="return searchBox.OnSearchSelectHide()" alt=""/> @@ -69,7 +69,7 @@

    -
    @@ -88,7 +88,7 @@
    -
    diff --git a/depends/mimalloc/docs/navtree.css b/depends/mimalloc/docs/navtree.css index 046366ca230a..5ec66982ce29 100644 --- a/depends/mimalloc/docs/navtree.css +++ b/depends/mimalloc/docs/navtree.css @@ -22,10 +22,15 @@ #nav-tree .selected { background-image: url('tab_a.png'); background-repeat:repeat-x; - color: #fff; + color: white; text-shadow: 0px 1px 1px rgba(0, 0, 0, 1.0); } +#nav-tree .selected .arrow { + color: #5B6364; + text-shadow: none; +} + #nav-tree img { margin:0px; padding:0px; @@ -37,7 +42,6 @@ text-decoration:none; padding:0px; margin:0px; - outline:none; } #nav-tree .label { @@ -52,7 +56,7 @@ #nav-tree .selected a { text-decoration:none; - color:#fff; + color:white; } #nav-tree .children_ul { @@ -67,7 +71,6 @@ #nav-tree { padding: 0px 0px; - background-color: #FAFAFF; font-size:14px; overflow:auto; } @@ -86,7 +89,8 @@ display:block; position: absolute; left: 0px; - width: 180px; + width: $width; + overflow : hidden; } .ui-resizable .ui-resizable-handle { @@ -94,7 +98,7 @@ } .ui-resizable-e { - background-image:url("splitbar.png"); + background-image:url('splitbar.png'); background-size:100%; background-repeat:repeat-y; background-attachment: scroll; @@ -117,7 +121,6 @@ } #nav-tree { - background-image:url('nav_h.png'); background-repeat:repeat-x; background-color: #F2F3F3; -webkit-overflow-scrolling : touch; /* iOS 5+ */ diff --git a/depends/mimalloc/docs/navtree.js b/depends/mimalloc/docs/navtree.js index 1e272d31d4aa..2d4fa84a55d2 100644 --- a/depends/mimalloc/docs/navtree.js +++ b/depends/mimalloc/docs/navtree.js @@ -22,525 +22,462 @@ @licend The above is the entire license notice for the JavaScript code in this file */ -var navTreeSubIndices = new Array(); -var arrowDown = '▼'; -var arrowRight = '►'; - -function getData(varName) -{ - var i = varName.lastIndexOf('/'); - var n = i>=0 ? 
varName.substring(i+1) : varName; - return eval(n.replace(/\-/g,'_')); -} -function stripPath(uri) -{ - return uri.substring(uri.lastIndexOf('/')+1); -} +function initNavTree(toroot,relpath) { + let navTreeSubIndices = []; + const ARROW_DOWN = '▼'; + const ARROW_RIGHT = '►'; + const NAVPATH_COOKIE_NAME = ''+'navpath'; -function stripPath2(uri) -{ - var i = uri.lastIndexOf('/'); - var s = uri.substring(i+1); - var m = uri.substring(0,i+1).match(/\/d\w\/d\w\w\/$/); - return m ? uri.substring(i-6) : s; -} + const getData = function(varName) { + const i = varName.lastIndexOf('/'); + const n = i>=0 ? varName.substring(i+1) : varName; + return eval(n.replace(/-/g,'_')); + } -function hashValue() -{ - return $(location).attr('hash').substring(1).replace(/[^\w\-]/g,''); -} + const stripPath = function(uri) { + return uri.substring(uri.lastIndexOf('/')+1); + } -function hashUrl() -{ - return '#'+hashValue(); -} + const stripPath2 = function(uri) { + const i = uri.lastIndexOf('/'); + const s = uri.substring(i+1); + const m = uri.substring(0,i+1).match(/\/d\w\/d\w\w\/$/); + return m ? 
uri.substring(i-6) : s; + } -function pathName() -{ - return $(location).attr('pathname').replace(/[^-A-Za-z0-9+&@#/%?=~_|!:,.;\(\)]/g, ''); -} + const hashValue = function() { + return $(location).attr('hash').substring(1).replace(/[^\w-]/g,''); + } -function localStorageSupported() -{ - try { - return 'localStorage' in window && window['localStorage'] !== null && window.localStorage.getItem; + const hashUrl = function() { + return '#'+hashValue(); } - catch(e) { - return false; + + const pathName = function() { + return $(location).attr('pathname').replace(/[^-A-Za-z0-9+&@#/%?=~_|!:,.;()]/g, ''); } -} -function storeLink(link) -{ - if (!$("#nav-sync").hasClass('sync') && localStorageSupported()) { - window.localStorage.setItem('navpath',link); + const storeLink = function(link) { + if (!$("#nav-sync").hasClass('sync')) { + Cookie.writeSetting(NAVPATH_COOKIE_NAME,link,0); + } } -} -function deleteLink() -{ - if (localStorageSupported()) { - window.localStorage.setItem('navpath',''); + const deleteLink = function() { + Cookie.eraseSetting(NAVPATH_COOKIE_NAME); } -} -function cachedLink() -{ - if (localStorageSupported()) { - return window.localStorage.getItem('navpath'); - } else { - return ''; + const cachedLink = function() { + return Cookie.readSetting(NAVPATH_COOKIE_NAME,''); } -} -function getScript(scriptName,func,show) -{ - var head = document.getElementsByTagName("head")[0]; - var script = document.createElement('script'); - script.id = scriptName; - script.type = 'text/javascript'; - script.onload = func; - script.src = scriptName+'.js'; - head.appendChild(script); -} + const getScript = function(scriptName,func) { + const head = document.getElementsByTagName("head")[0]; + const script = document.createElement('script'); + script.id = scriptName; + script.type = 'text/javascript'; + script.onload = func; + script.src = scriptName+'.js'; + head.appendChild(script); + } -function createIndent(o,domNode,node,level) -{ - var level=-1; - var n = node; - while 
(n.parentNode) { level++; n=n.parentNode; } - if (node.childrenData) { - var imgNode = document.createElement("span"); - imgNode.className = 'arrow'; - imgNode.style.paddingLeft=(16*level).toString()+'px'; - imgNode.innerHTML=arrowRight; - node.plus_img = imgNode; - node.expandToggle = document.createElement("a"); - node.expandToggle.href = "javascript:void(0)"; - node.expandToggle.onclick = function() { - if (node.expanded) { - $(node.getChildrenUL()).slideUp("fast"); - node.plus_img.innerHTML=arrowRight; - node.expanded = false; - } else { - expandNode(o, node, false, false); + const createIndent = function(o,domNode,node) { + let level=-1; + let n = node; + while (n.parentNode) { level++; n=n.parentNode; } + if (node.childrenData) { + const imgNode = document.createElement("span"); + imgNode.className = 'arrow'; + imgNode.style.paddingLeft=(16*level).toString()+'px'; + imgNode.innerHTML=ARROW_RIGHT; + node.plus_img = imgNode; + node.expandToggle = document.createElement("a"); + node.expandToggle.href = "javascript:void(0)"; + node.expandToggle.onclick = function() { + if (node.expanded) { + $(node.getChildrenUL()).slideUp("fast"); + node.plus_img.innerHTML=ARROW_RIGHT; + node.expanded = false; + } else { + expandNode(o, node, false, true); + } } + node.expandToggle.appendChild(imgNode); + domNode.appendChild(node.expandToggle); + } else { + let span = document.createElement("span"); + span.className = 'arrow'; + span.style.width = 16*(level+1)+'px'; + span.innerHTML = ' '; + domNode.appendChild(span); } - node.expandToggle.appendChild(imgNode); - domNode.appendChild(node.expandToggle); - } else { - var span = document.createElement("span"); - span.className = 'arrow'; - span.style.width = 16*(level+1)+'px'; - span.innerHTML = ' '; - domNode.appendChild(span); } -} -var animationInProgress = false; - -function gotoAnchor(anchor,aname,updateLocation) -{ - var pos, docContent = $('#doc-content'); - var ancParent = $(anchor.parent()); - if 
(ancParent.hasClass('memItemLeft') || - ancParent.hasClass('memtitle') || - ancParent.hasClass('fieldname') || - ancParent.hasClass('fieldtype') || - ancParent.is(':header')) - { - pos = ancParent.position().top; - } else if (anchor.position()) { - pos = anchor.position().top; - } - if (pos) { - var dist = Math.abs(Math.min( - pos-docContent.offset().top, - docContent[0].scrollHeight- - docContent.height()-docContent.scrollTop())); - animationInProgress=true; - docContent.animate({ - scrollTop: pos + docContent.scrollTop() - docContent.offset().top - },Math.max(50,Math.min(500,dist)),function(){ - if (updateLocation) window.location.href=aname; - animationInProgress=false; - }); - } -} - -function newNode(o, po, text, link, childrenData, lastNode) -{ - var node = new Object(); - node.children = Array(); - node.childrenData = childrenData; - node.depth = po.depth + 1; - node.relpath = po.relpath; - node.isLast = lastNode; - - node.li = document.createElement("li"); - po.getChildrenUL().appendChild(node.li); - node.parentNode = po; - - node.itemDiv = document.createElement("div"); - node.itemDiv.className = "item"; - - node.labelSpan = document.createElement("span"); - node.labelSpan.className = "label"; - - createIndent(o,node.itemDiv,node,0); - node.itemDiv.appendChild(node.labelSpan); - node.li.appendChild(node.itemDiv); - - var a = document.createElement("a"); - node.labelSpan.appendChild(a); - node.label = document.createTextNode(text); - node.expanded = false; - a.appendChild(node.label); - if (link) { - var url; - if (link.substring(0,1)=='^') { - url = link.substring(1); - link = url; - } else { - url = node.relpath+link; + let animationInProgress = false; + + const gotoAnchor = function(anchor,aname) { + let pos, docContent = $('#doc-content'); + let ancParent = $(anchor.parent()); + if (ancParent.hasClass('memItemLeft') || ancParent.hasClass('memtitle') || + ancParent.hasClass('fieldname') || ancParent.hasClass('fieldtype') || + ancParent.is(':header')) { + 
pos = ancParent.offset().top; + } else if (anchor.position()) { + pos = anchor.offset().top; } - a.className = stripPath(link.replace('#',':')); - if (link.indexOf('#')!=-1) { - var aname = '#'+link.split('#')[1]; - var srcPage = stripPath(pathName()); - var targetPage = stripPath(link.split('#')[0]); - a.href = srcPage!=targetPage ? url : "javascript:void(0)"; - a.onclick = function(){ - storeLink(link); - if (!$(a).parent().parent().hasClass('selected')) - { - $('.item').removeClass('selected'); - $('.item').removeAttr('id'); - $(a).parent().parent().addClass('selected'); - $(a).parent().parent().attr('id','selected'); + if (pos) { + const dcOffset = docContent.offset().top; + const dcHeight = docContent.height(); + const dcScrHeight = docContent[0].scrollHeight + const dcScrTop = docContent.scrollTop(); + let dist = Math.abs(Math.min(pos-dcOffset,dcScrHeight-dcHeight-dcScrTop)); + animationInProgress = true; + docContent.animate({ + scrollTop: pos + dcScrTop - dcOffset + },Math.max(50,Math.min(500,dist)),function() { + animationInProgress=false; + if (anchor.parent().attr('class')=='memItemLeft') { + let rows = $('.memberdecls tr[class$="'+hashValue()+'"]'); + glowEffect(rows.children(),300); // member without details + } else if (anchor.parent().attr('class')=='fieldname') { + glowEffect(anchor.parent().parent(),1000); // enum value + } else if (anchor.parent().attr('class')=='fieldtype') { + glowEffect(anchor.parent().parent(),1000); // struct field + } else if (anchor.parent().is(":header")) { + glowEffect(anchor.parent(),1000); // section header + } else { + glowEffect(anchor.next(),1000); // normal member } - var anchor = $(aname); - gotoAnchor(anchor,aname,true); - }; - } else { - a.href = url; - a.onclick = function() { storeLink(link); } + }); } - } else { - if (childrenData != null) - { + } + + const newNode = function(o, po, text, link, childrenData, lastNode) { + const node = { + children : [], + childrenData : childrenData, + depth : po.depth + 1, + 
relpath : po.relpath, + isLast : lastNode, + li : document.createElement("li"), + parentNode : po, + itemDiv : document.createElement("div"), + labelSpan : document.createElement("span"), + label : document.createTextNode(text), + expanded : false, + childrenUL : null, + getChildrenUL : function() { + if (!this.childrenUL) { + this.childrenUL = document.createElement("ul"); + this.childrenUL.className = "children_ul"; + this.childrenUL.style.display = "none"; + this.li.appendChild(node.childrenUL); + } + return node.childrenUL; + }, + }; + + node.itemDiv.className = "item"; + node.labelSpan.className = "label"; + createIndent(o,node.itemDiv,node); + node.itemDiv.appendChild(node.labelSpan); + node.li.appendChild(node.itemDiv); + + const a = document.createElement("a"); + node.labelSpan.appendChild(a); + po.getChildrenUL().appendChild(node.li); + a.appendChild(node.label); + if (link) { + let url; + if (link.substring(0,1)=='^') { + url = link.substring(1); + link = url; + } else { + url = node.relpath+link; + } + a.className = stripPath(link.replace('#',':')); + if (link.indexOf('#')!=-1) { + const aname = '#'+link.split('#')[1]; + const srcPage = stripPath(pathName()); + const targetPage = stripPath(link.split('#')[0]); + a.href = srcPage!=targetPage ? 
url : aname; + a.onclick = function() { + storeLink(link); + aPPar = $(a).parent().parent(); + if (!aPPar.hasClass('selected')) { + $('.item').removeClass('selected'); + $('.item').removeAttr('id'); + aPPar.addClass('selected'); + aPPar.attr('id','selected'); + } + const anchor = $(aname); + gotoAnchor(anchor,aname); + }; + } else { + a.href = url; + a.onclick = () => storeLink(link); + } + } else if (childrenData != null) { a.className = "nolink"; a.href = "javascript:void(0)"; a.onclick = node.expandToggle.onclick; } + return node; } - node.childrenUL = null; - node.getChildrenUL = function() { - if (!node.childrenUL) { - node.childrenUL = document.createElement("ul"); - node.childrenUL.className = "children_ul"; - node.childrenUL.style.display = "none"; - node.li.appendChild(node.childrenUL); - } - return node.childrenUL; - }; - - return node; -} - -function showRoot() -{ - var headerHeight = $("#top").height(); - var footerHeight = $("#nav-path").height(); - var windowHeight = $(window).height() - headerHeight - footerHeight; - (function (){ // retry until we can scroll to the selected item - try { - var navtree=$('#nav-tree'); - navtree.scrollTo('#selected',100,{offset:-windowHeight/2}); - } catch (err) { - setTimeout(arguments.callee, 0); - } - })(); -} + const showRoot = function() { + const headerHeight = $("#top").height(); + const footerHeight = $("#nav-path").height(); + const windowHeight = $(window).height() - headerHeight - footerHeight; + (function() { // retry until we can scroll to the selected item + try { + const navtree=$('#nav-tree'); + navtree.scrollTo('#selected',100,{offset:-windowHeight/2}); + } catch (err) { + setTimeout(arguments.callee, 0); + } + })(); + } -function expandNode(o, node, imm, showRoot) -{ - if (node.childrenData && !node.expanded) { - if (typeof(node.childrenData)==='string') { - var varName = node.childrenData; - getScript(node.relpath+varName,function(){ - node.childrenData = getData(varName); - expandNode(o, node, imm, 
showRoot); - }, showRoot); - } else { - if (!node.childrenVisited) { - getNode(o, node); + const expandNode = function(o, node, imm, setFocus) { + if (node.childrenData && !node.expanded) { + if (typeof(node.childrenData)==='string') { + const varName = node.childrenData; + getScript(node.relpath+varName,function() { + node.childrenData = getData(varName); + expandNode(o, node, imm, setFocus); + }); + } else { + if (!node.childrenVisited) { + getNode(o, node); + } + $(node.getChildrenUL()).slideDown("fast"); + node.plus_img.innerHTML = ARROW_DOWN; + node.expanded = true; + if (setFocus) { + $(node.expandToggle).focus(); + } } - $(node.getChildrenUL()).slideDown("fast"); - node.plus_img.innerHTML = arrowDown; - node.expanded = true; } } -} -function glowEffect(n,duration) -{ - n.addClass('glow').delay(duration).queue(function(next){ - $(this).removeClass('glow');next(); - }); -} - -function highlightAnchor() -{ - var aname = hashUrl(); - var anchor = $(aname); - if (anchor.parent().attr('class')=='memItemLeft'){ - var rows = $('.memberdecls tr[class$="'+hashValue()+'"]'); - glowEffect(rows.children(),300); // member without details - } else if (anchor.parent().attr('class')=='fieldname'){ - glowEffect(anchor.parent().parent(),1000); // enum value - } else if (anchor.parent().attr('class')=='fieldtype'){ - glowEffect(anchor.parent().parent(),1000); // struct field - } else if (anchor.parent().is(":header")) { - glowEffect(anchor.parent(),1000); // section header - } else { - glowEffect(anchor.next(),1000); // normal member + const glowEffect = function(n,duration) { + n.addClass('glow').delay(duration).queue(function(next) { + $(this).removeClass('glow');next(); + }); } -} -function selectAndHighlight(hash,n) -{ - var a; - if (hash) { - var link=stripPath(pathName())+':'+hash.substring(1); - a=$('.item a[class$="'+link+'"]'); + const highlightAnchor = function() { + const aname = hashUrl(); + const anchor = $(aname); + gotoAnchor(anchor,aname); } - if (a && a.length) 
{ - a.parent().parent().addClass('selected'); - a.parent().parent().attr('id','selected'); - highlightAnchor(); - } else if (n) { - $(n.itemDiv).addClass('selected'); - $(n.itemDiv).attr('id','selected'); - } - if ($('#nav-tree-contents .item:first').hasClass('selected')) { - $('#nav-sync').css('top','30px'); - } else { - $('#nav-sync').css('top','5px'); + + const selectAndHighlight = function(hash,n) { + let a; + if (hash) { + const link=stripPath(pathName())+':'+hash.substring(1); + a=$('.item a[class$="'+link+'"]'); + } + if (a && a.length) { + a.parent().parent().addClass('selected'); + a.parent().parent().attr('id','selected'); + highlightAnchor(); + } else if (n) { + $(n.itemDiv).addClass('selected'); + $(n.itemDiv).attr('id','selected'); + } + let topOffset=5; + if ($('#nav-tree-contents .item:first').hasClass('selected')) { + topOffset+=25; + } + $('#nav-sync').css('top',topOffset+'px'); + showRoot(); } - showRoot(); -} -function showNode(o, node, index, hash) -{ - if (node && node.childrenData) { - if (typeof(node.childrenData)==='string') { - var varName = node.childrenData; - getScript(node.relpath+varName,function(){ - node.childrenData = getData(varName); - showNode(o,node,index,hash); - },true); - } else { - if (!node.childrenVisited) { - getNode(o, node); - } - $(node.getChildrenUL()).css({'display':'block'}); - node.plus_img.innerHTML = arrowDown; - node.expanded = true; - var n = node.children[o.breadcrumbs[index]]; - if (index+11) hash = '#'+parts[1].replace(/[^\w\-]/g,''); - else hash=''; + const getNode = function(o, po) { + const insertFunction = removeToInsertLater(po.li); + po.childrenVisited = true; + const l = po.childrenData.length-1; + for (let i in po.childrenData) { + const nodeData = po.childrenData[i]; + po.children[i] = newNode(o, po, nodeData[0], nodeData[1], nodeData[2], i==l); + } + insertFunction(); } - if (hash.match(/^#l\d+$/)) { - var anchor=$('a[name='+hash.substring(1)+']'); - glowEffect(anchor.parent(),1000); // line number 
- hash=''; // strip line number anchors + + const gotoNode = function(o,subIndex,root,hash,relpath) { + const nti = navTreeSubIndices[subIndex][root+hash]; + o.breadcrumbs = $.extend(true, [], nti ? nti : navTreeSubIndices[subIndex][root]); + if (!o.breadcrumbs && root!=NAVTREE[0][1]) { // fallback: show index + navTo(o,NAVTREE[0][1],"",relpath); + $('.item').removeClass('selected'); + $('.item').removeAttr('id'); + } + if (o.breadcrumbs) { + o.breadcrumbs.unshift(0); // add 0 for root node + showNode(o, o.node, 0, hash); + } } - var url=root+hash; - var i=-1; - while (NAVTREEINDEX[i+1]<=url) i++; - if (i==-1) { i=0; root=NAVTREE[0][1]; } // fallback: show index - if (navTreeSubIndices[i]) { - gotoNode(o,i,root,hash,relpath) - } else { - getScript(relpath+'navtreeindex'+i,function(){ - navTreeSubIndices[i] = eval('NAVTREEINDEX'+i); - if (navTreeSubIndices[i]) { - gotoNode(o,i,root,hash,relpath); - } - },true); + + const navTo = function(o,root,hash,relpath) { + const link = cachedLink(); + if (link) { + const parts = link.split('#'); + root = parts[0]; + hash = parts.length>1 ? 
'#'+parts[1].replace(/[^\w-]/g,'') : ''; + } + if (hash.match(/^#l\d+$/)) { + const anchor=$('a[name='+hash.substring(1)+']'); + glowEffect(anchor.parent(),1000); // line number + hash=''; // strip line number anchors + } + const url=root+hash; + let i=-1; + while (NAVTREEINDEX[i+1]<=url) i++; + if (i==-1) { i=0; root=NAVTREE[0][1]; } // fallback: show index + if (navTreeSubIndices[i]) { + gotoNode(o,i,root,hash,relpath) + } else { + getScript(relpath+'navtreeindex'+i,function() { + navTreeSubIndices[i] = eval('NAVTREEINDEX'+i); + if (navTreeSubIndices[i]) { + gotoNode(o,i,root,hash,relpath); + } + }); + } } -} -function showSyncOff(n,relpath) -{ + const showSyncOff = function(n,relpath) { n.html(''); -} + } -function showSyncOn(n,relpath) -{ + const showSyncOn = function(n,relpath) { n.html(''); -} + } -function toggleSyncButton(relpath) -{ - var navSync = $('#nav-sync'); - if (navSync.hasClass('sync')) { - navSync.removeClass('sync'); + const o = { + toroot : toroot, + node : { + childrenData : NAVTREE, + children : [], + childrenUL : document.createElement("ul"), + getChildrenUL : function() { return this.childrenUL }, + li : document.getElementById("nav-tree-contents"), + depth : 0, + relpath : relpath, + expanded : false, + isLast : true, + plus_img : document.createElement("span"), + }, + }; + o.node.li.appendChild(o.node.childrenUL); + o.node.plus_img.className = 'arrow'; + o.node.plus_img.innerHTML = ARROW_RIGHT; + + const navSync = $('#nav-sync'); + if (cachedLink()) { showSyncOff(navSync,relpath); - storeLink(stripPath2(pathName())+hashUrl()); + navSync.removeClass('sync'); } else { - navSync.addClass('sync'); showSyncOn(navSync,relpath); - deleteLink(); - } -} - -var loadTriggered = false; -var readyTriggered = false; -var loadObject,loadToRoot,loadUrl,loadRelPath; - -$(window).on('load',function(){ - if (readyTriggered) { // ready first - navTo(loadObject,loadToRoot,loadUrl,loadRelPath); - showRoot(); } - loadTriggered=true; -}); - -function 
initNavTree(toroot,relpath) -{ - var o = new Object(); - o.toroot = toroot; - o.node = new Object(); - o.node.li = document.getElementById("nav-tree-contents"); - o.node.childrenData = NAVTREE; - o.node.children = new Array(); - o.node.childrenUL = document.createElement("ul"); - o.node.getChildrenUL = function() { return o.node.childrenUL; }; - o.node.li.appendChild(o.node.childrenUL); - o.node.depth = 0; - o.node.relpath = relpath; - o.node.expanded = false; - o.node.isLast = true; - o.node.plus_img = document.createElement("span"); - o.node.plus_img.className = 'arrow'; - o.node.plus_img.innerHTML = arrowRight; - if (localStorageSupported()) { - var navSync = $('#nav-sync'); - if (cachedLink()) { - showSyncOff(navSync,relpath); + navSync.click(() => { + const navSync = $('#nav-sync'); + if (navSync.hasClass('sync')) { navSync.removeClass('sync'); + showSyncOff(navSync,relpath); + storeLink(stripPath2(pathName())+hashUrl()); } else { + navSync.addClass('sync'); showSyncOn(navSync,relpath); + deleteLink(); } - navSync.click(function(){ toggleSyncButton(relpath); }); - } + }); - if (loadTriggered) { // load before ready - navTo(o,toroot,hashUrl(),relpath); - showRoot(); - } else { // ready before load - loadObject = o; - loadToRoot = toroot; - loadUrl = hashUrl(); - loadRelPath = relpath; - readyTriggered=true; - } + navTo(o,toroot,hashUrl(),relpath); + showRoot(); - $(window).bind('hashchange', function(){ - if (window.location.hash && window.location.hash.length>1){ - var a; - if ($(location).attr('hash')){ - var clslink=stripPath(pathName())+':'+hashValue(); - a=$('.item a[class$="'+clslink.replace(/ { + if (!animationInProgress) { + if (window.location.hash && window.location.hash.length>1) { + let a; + if ($(location).attr('hash')) { + const clslink=stripPath(pathName())+':'+hashValue(); + a=$('.item a[class$="'+clslink.replace(/ - + - - + + mi-malloc: Overriding Malloc + - + + @@ -29,20 +31,16 @@
    - + - -
    -
    mi-malloc -  1.7/2.0 +
    +
    mi-malloc 1.8/2.1
    +
    - -   + @@ -56,10 +54,15 @@
    - + +

    @@ -74,8 +77,8 @@

    @@ -88,43 +91,47 @@
    - +
    +
    +
    +
    +
    Loading...
    +
    Searching...
    +
    No Matches
    +
    +
    +
    -
    -
    -
    Overriding Malloc
    +
    +
    Overriding Malloc
    -

    Overriding the standard malloc can be done either dynamically or statically.

    +

    Overriding the standard malloc (and new) can be done either dynamically or statically.

    Dynamic override

    This is the recommended way to override the standard malloc interface.

    -

    Linux, BSD

    -

    On these systems we preload the mimalloc shared library so all calls to the standard malloc interface are resolved to the mimalloc library.

    -
      -
    • env LD_PRELOAD=/usr/lib/libmimalloc.so myprogram
    • -
    -

    You can set extra environment variables to check that mimalloc is running, like:

    env MIMALLOC_VERBOSE=1 LD_PRELOAD=/usr/lib/libmimalloc.so myprogram
    -

    or run with the debug version to get detailed statistics:

    env MIMALLOC_SHOW_STATS=1 LD_PRELOAD=/usr/lib/libmimalloc-debug.so myprogram
    -

    MacOS

    -

    On macOS we can also preload the mimalloc shared library so all calls to the standard malloc interface are resolved to the mimalloc library.

    -
      -
    • env DYLD_FORCE_FLAT_NAMESPACE=1 DYLD_INSERT_LIBRARIES=/usr/lib/libmimalloc.dylib myprogram
    • -
    -

    Note that certain security restrictions may apply when doing this from the shell.

    -

    (Note: macOS support for dynamic overriding is recent, please report any issues.)

    -

    Windows

    -

    Overriding on Windows is robust and has the particular advantage to be able to redirect all malloc/free calls that go through the (dynamic) C runtime allocator, including those from other DLL's or libraries.

    -

    The overriding on Windows requires that you link your program explicitly with the mimalloc DLL and use the C-runtime library as a DLL (using the /MD or /MDd switch). Also, the mimalloc-redirect.dll (or mimalloc-redirect32.dll) must be available in the same folder as the main mimalloc-override.dll at runtime (as it is a dependency). The redirection DLL ensures that all calls to the C runtime malloc API get redirected to mimalloc (in mimalloc-override.dll).

    -

    To ensure the mimalloc DLL is loaded at run-time it is easiest to insert some call to the mimalloc API in the main function, like mi_version() (or use the /INCLUDE:mi_version switch on the linker). See the mimalloc-override-test project for an example on how to use this. For best performance on Windows with C++, it is also recommended to also override the new/delete operations (by including mimalloc-new-delete.h a single(!) source file in your project without linking to the mimalloc library).

    +

    Dynamic Override on Linux, BSD

    +

    On these ELF-based systems we preload the mimalloc shared library so all calls to the standard malloc interface are resolved to the mimalloc library.

    > env LD_PRELOAD=/usr/lib/libmimalloc.so myprogram
    +

    You can set extra environment variables to check that mimalloc is running, like:

    > env MIMALLOC_VERBOSE=1 LD_PRELOAD=/usr/lib/libmimalloc.so myprogram
    +

    or run with the debug version to get detailed statistics:

    > env MIMALLOC_SHOW_STATS=1 LD_PRELOAD=/usr/lib/libmimalloc-debug.so myprogram
    +

    Dynamic Override on MacOS

    +

    On macOS we can also preload the mimalloc shared library so all calls to the standard malloc interface are resolved to the mimalloc library.

    > env DYLD_INSERT_LIBRARIES=/usr/lib/libmimalloc.dylib myprogram
    +

    Note that certain security restrictions may apply when doing this from the shell.

    +

    Dynamic Override on Windows

    +

    Dynamically overriding on mimalloc on Windows is robust and has the particular advantage to be able to redirect all malloc/free calls that go through the (dynamic) C runtime allocator, including those from other DLL's or libraries. As it intercepts all allocation calls on a low level, it can be used reliably on large programs that include other 3rd party components. There are four requirements to make the overriding work well:

    +
      +
    1. Use the C-runtime library as a DLL (using the /MD or /MDd switch).
    2. +
    3. Link your program explicitly with the mimalloc.lib export library for the mimalloc.dll. (which must be compiled with -DMI_OVERRIDE=ON, which is the default though). To ensure the mimalloc.dll is actually loaded at run-time it is easiest to insert some call to the mimalloc API in the main function, like mi_version() (or use the /include:mi_version switch on the linker command, or similarly, #pragma comment(linker, "/include:mi_version") in some source file). See the mimalloc-test-override project for an example on how to use this.
    4. +
    5. The mimalloc-redirect.dll must be put in the same directory as the main mimalloc.dll at runtime (as it is a dependency of that DLL). The redirection DLL ensures that all calls to the C runtime malloc API get redirected to mimalloc functions (which reside in mimalloc.dll).
    6. +
    7. Ensure the mimalloc.dll comes as early as possible in the import list of the final executable (so it can intercept all potential allocations). You can use minject -l <exe> to check this if needed.
    8. +
    +

    For best performance on Windows with C++, it is also recommended to also override the new/delete operations (by including mimalloc-new-delete.h a single(!) source file in your project).

    The environment variable MIMALLOC_DISABLE_REDIRECT=1 can be used to disable dynamic overriding at run-time. Use MIMALLOC_VERBOSE=1 to check if mimalloc was successfully redirected.

    -

    (Note: in principle, it is possible to even patch existing executables without any recompilation if they are linked with the dynamic C runtime (ucrtbase.dll) – just put the mimalloc-override.dll into the import table (and put mimalloc-redirect.dll in the same folder) Such patching can be done for example with CFF Explorer).

    +

    For different platforms than x64, you may need a specific [redirection dll](bin). Furthermore, we cannot always re-link an executable or ensure mimalloc.dll comes first in the import table. In such cases the [minject](bin) tool can be used to patch the executable's import tables.

    Static override

    -

    On Unix systems, you can also statically link with mimalloc to override the standard malloc interface. The recommended way is to link the final program with the mimalloc single object file (mimalloc-override.o). We use an object file instead of a library file as linkers give preference to that over archives to resolve symbols. To ensure that the standard malloc interface resolves to the mimalloc library, link it as the first object file. For example:

    -
    gcc -o myprogram mimalloc-override.o myfile1.c ...
    -

    List of Overrides:

    +

    On Unix-like systems, you can also statically link with mimalloc to override the standard malloc interface. The recommended way is to link the final program with the mimalloc single object file (mimalloc.o). We use an object file instead of a library file as linkers give preference to that over archives to resolve symbols. To ensure that the standard malloc interface resolves to the mimalloc library, link it as the first object file. For example:

    > gcc -o myprogram mimalloc.o myfile1.c ...
    +

    Another way to override statically that works on all platforms, is to link statically to mimalloc (as shown in the introduction) and include a header file in each source file that re-defines malloc etc. to mi_malloc. This is provided by mimalloc-override.h. This only works reliably though if all sources are under your control or otherwise mixing of pointers from different heaps may occur!

    +

    List of Overrides:

    The specific functions that get redirected to the mimalloc library are:

    // C
    void* malloc(size_t size);
    @@ -142,10 +149,10 @@

    Static override

    void operator delete(void* p);
    void operator delete[](void* p);
    -
    void* operator new(std::size_t n) noexcept(false);
    -
    void* operator new[](std::size_t n) noexcept(false);
    -
    void* operator new( std::size_t n, std::align_val_t align) noexcept(false);
    -
    void* operator new[]( std::size_t n, std::align_val_t align) noexcept(false);
    +
    void* operator new(std::size_t n) noexcept(false);
    +
    void* operator new[](std::size_t n) noexcept(false);
    +
    void* operator new( std::size_t n, std::align_val_t align) noexcept(false);
    +
    void* operator new[]( std::size_t n, std::align_val_t align) noexcept(false);
    void* operator new ( std::size_t count, const std::nothrow_t& tag);
    void* operator new[]( std::size_t count, const std::nothrow_t& tag);
    @@ -191,7 +198,7 @@

    Static override

    diff --git a/depends/mimalloc/docs/pages.html b/depends/mimalloc/docs/pages.html index 6999a810d037..b03b05f74c51 100644 --- a/depends/mimalloc/docs/pages.html +++ b/depends/mimalloc/docs/pages.html @@ -1,24 +1,26 @@ - + - - + + mi-malloc: Related Pages + - + + @@ -29,20 +31,16 @@
    - + - -
    -
    mi-malloc -  1.7/2.0 +
    +
    mi-malloc 1.8/2.1
    +
    - -   + @@ -56,10 +54,15 @@
    - + +
    @@ -74,8 +77,8 @@
    @@ -88,22 +91,28 @@
    - +
    +
    +
    +
    +
    Loading...
    +
    Searching...
    +
    No Matches
    +
    +
    +
    -
    -
    Related Pages
    +
    Related Pages
    Here is a list of all related documentation pages:
    @@ -112,7 +121,7 @@ diff --git a/depends/mimalloc/docs/resize.js b/depends/mimalloc/docs/resize.js index e1ad0fe3ba04..a571744065a0 100644 --- a/depends/mimalloc/docs/resize.js +++ b/depends/mimalloc/docs/resize.js @@ -22,119 +22,126 @@ @licend The above is the entire license notice for the JavaScript code in this file */ -function initResizable() -{ - var cookie_namespace = 'doxygen'; - var sidenav,navtree,content,header,collapsed,collapsedWidth=0,barWidth=6,desktop_vp=768,titleHeight; - function readCookie(cookie) - { - var myCookie = cookie_namespace+"_"+cookie+"="; - if (document.cookie) { - var index = document.cookie.indexOf(myCookie); - if (index != -1) { - var valStart = index + myCookie.length; - var valEnd = document.cookie.indexOf(";", valStart); - if (valEnd == -1) { - valEnd = document.cookie.length; - } - var val = document.cookie.substring(valStart, valEnd); - return val; - } - } - return 0; - } - - function writeCookie(cookie, val, expiration) - { - if (val==undefined) return; - if (expiration == null) { - var date = new Date(); - date.setTime(date.getTime()+(10*365*24*60*60*1000)); // default expiration is one week - expiration = date.toGMTString(); - } - document.cookie = cookie_namespace + "_" + cookie + "=" + val + "; expires=" + expiration+"; path=/"; - } +function initResizable(treeview) { + let sidenav,navtree,content,header,footer,barWidth=6; + const RESIZE_COOKIE_NAME = ''+'width'; - function resizeWidth() - { - var windowWidth = $(window).width() + "px"; - var sidenavWidth = $(sidenav).outerWidth(); + function resizeWidth() { + const sidenavWidth = $(sidenav).outerWidth(); content.css({marginLeft:parseInt(sidenavWidth)+"px"}); - writeCookie('width',sidenavWidth-barWidth, null); + if (typeof page_layout!=='undefined' && page_layout==1) { + footer.css({marginLeft:parseInt(sidenavWidth)+"px"}); + } + Cookie.writeSetting(RESIZE_COOKIE_NAME,sidenavWidth-barWidth); } - function restoreWidth(navWidth) - { - var windowWidth = $(window).width() 
+ "px"; + function restoreWidth(navWidth) { content.css({marginLeft:parseInt(navWidth)+barWidth+"px"}); + if (typeof page_layout!=='undefined' && page_layout==1) { + footer.css({marginLeft:parseInt(navWidth)+barWidth+"px"}); + } sidenav.css({width:navWidth + "px"}); } - function resizeHeight() - { - var headerHeight = header.outerHeight(); - var footerHeight = footer.outerHeight(); - var windowHeight = $(window).height() - headerHeight - footerHeight; - content.css({height:windowHeight + "px"}); - navtree.css({height:windowHeight + "px"}); - sidenav.css({height:windowHeight + "px"}); - var width=$(window).width(); - if (width!=collapsedWidth) { - if (width=desktop_vp) { - if (!collapsed) { - collapseExpand(); - } - } else if (width>desktop_vp && collapsedWidth0) { - restoreWidth(0); - collapsed=true; - } - else { - var width = readCookie('width'); - if (width>200 && width<$(window).width()) { restoreWidth(width); } else { restoreWidth(200); } - collapsed=false; + newWidth=0; + } else { + const width = Cookie.readSetting(RESIZE_COOKIE_NAME,180); + newWidth = (width>180 && width<$(window).width()) ? 
width : 180; } + restoreWidth(newWidth); + const sidenavWidth = $(sidenav).outerWidth(); + Cookie.writeSetting(RESIZE_COOKIE_NAME,sidenavWidth-barWidth); } header = $("#top"); - sidenav = $("#side-nav"); content = $("#doc-content"); - navtree = $("#nav-tree"); footer = $("#nav-path"); - $(".side-nav-resizable").resizable({resize: function(e, ui) { resizeWidth(); } }); - $(sidenav).resizable({ minWidth: 0 }); - $(window).resize(function() { resizeHeight(); }); - var device = navigator.userAgent.toLowerCase(); - var touch_device = device.match(/(iphone|ipod|ipad|android)/); - if (touch_device) { /* wider split bar for touch only devices */ - $(sidenav).css({ paddingRight:'20px' }); - $('.ui-resizable-e').css({ width:'20px' }); - $('#nav-sync').css({ right:'34px' }); - barWidth=20; + sidenav = $("#side-nav"); + if (!treeview) { +// title = $("#titlearea"); +// titleH = $(title).height(); +// let animating = false; +// content.on("scroll", function() { +// slideOpts = { duration: 200, +// step: function() { +// contentHeight = $(window).height() - header.outerHeight(); +// content.css({ height : contentHeight + "px" }); +// }, +// done: function() { animating=false; } +// }; +// if (content.scrollTop()>titleH && title.css('display')!='none' && !animating) { +// title.slideUp(slideOpts); +// animating=true; +// } else if (content.scrollTop()<=titleH && title.css('display')=='none' && !animating) { +// title.slideDown(slideOpts); +// animating=true; +// } +// }); + } else { + navtree = $("#nav-tree"); + $(".side-nav-resizable").resizable({resize: function(e, ui) { resizeWidth(); } }); + $(sidenav).resizable({ minWidth: 0 }); } - var width = readCookie('width'); - if (width) { restoreWidth(width); } else { resizeWidth(); } - resizeHeight(); - var url = location.href; - var i=url.indexOf("#"); + $(window).resize(function() { resizeHeight(treeview); }); + if (treeview) + { + const device = navigator.userAgent.toLowerCase(); + const touch_device = 
device.match(/(iphone|ipod|ipad|android)/); + if (touch_device) { /* wider split bar for touch only devices */ + $(sidenav).css({ paddingRight:'20px' }); + $('.ui-resizable-e').css({ width:'20px' }); + $('#nav-sync').css({ right:'34px' }); + barWidth=20; + } + const width = Cookie.readSetting(RESIZE_COOKIE_NAME,180); + if (width) { restoreWidth(width); } else { resizeWidth(); } + } + resizeHeight(treeview); + const url = location.href; + const i=url.indexOf("#"); if (i>=0) window.location.hash=url.substr(i); - var _preventDefault = function(evt) { evt.preventDefault(); }; - $("#splitbar").bind("dragstart", _preventDefault).bind("selectstart", _preventDefault); - $(".ui-resizable-handle").dblclick(collapseExpand); - $(window).on('load',resizeHeight); + const _preventDefault = function(evt) { evt.preventDefault(); }; + if (treeview) + { + $("#splitbar").bind("dragstart", _preventDefault).bind("selectstart", _preventDefault); + $(".ui-resizable-handle").dblclick(collapseExpand); + // workaround for firefox + $("body").css({overflow: "hidden"}); + } + $(window).on('load',function() { resizeHeight(treeview); }); } /* @license-end */ diff --git a/depends/mimalloc/docs/search/all_1.js b/depends/mimalloc/docs/search/all_1.js index 7f1097c09520..d371a39915e0 100644 --- a/depends/mimalloc/docs/search/all_1.js +++ b/depends/mimalloc/docs/search/all_1.js @@ -1,4 +1,6 @@ var searchData= [ - ['aligned_20allocation_1',['Aligned Allocation',['../group__aligned.html',1,'']]] + ['aligned_20allocation_0',['Aligned Allocation',['../group__aligned.html',1,'']]], + ['allocation_1',['Allocation',['../group__aligned.html',1,'Aligned Allocation'],['../group__malloc.html',1,'Basic Allocation'],['../group__heap.html',1,'Heap Allocation']]], + ['allocation_2',['Zero initialized re-allocation',['../group__zeroinit.html',1,'']]] ]; diff --git a/depends/mimalloc/docs/search/all_10.js b/depends/mimalloc/docs/search/all_10.js new file mode 100644 index 000000000000..1437d04ac002 --- /dev/null +++ 
b/depends/mimalloc/docs/search/all_10.js @@ -0,0 +1,4 @@ +var searchData= +[ + ['zero_20initialized_20re_20allocation_0',['Zero initialized re-allocation',['../group__zeroinit.html',1,'']]] +]; diff --git a/depends/mimalloc/docs/search/all_2.js b/depends/mimalloc/docs/search/all_2.js index 00576d78a20d..6a297a47bfaf 100644 --- a/depends/mimalloc/docs/search/all_2.js +++ b/depends/mimalloc/docs/search/all_2.js @@ -1,7 +1,7 @@ var searchData= [ - ['basic_20allocation_2',['Basic Allocation',['../group__malloc.html',1,'']]], - ['block_5fsize_3',['block_size',['../group__analysis.html#a332a6c14d736a99699d5453a1cb04b41',1,'mi_heap_area_t']]], - ['blocks_4',['blocks',['../group__analysis.html#ae0085e6e1cf059a4eb7767e30e9991b8',1,'mi_heap_area_t']]], - ['building_5',['Building',['../build.html',1,'']]] + ['basic_20allocation_0',['Basic Allocation',['../group__malloc.html',1,'']]], + ['block_5fsize_1',['block_size',['../group__analysis.html#a332a6c14d736a99699d5453a1cb04b41',1,'mi_heap_area_t']]], + ['blocks_2',['blocks',['../group__analysis.html#ae0085e6e1cf059a4eb7767e30e9991b8',1,'mi_heap_area_t']]], + ['building_3',['Building',['../build.html',1,'']]] ]; diff --git a/depends/mimalloc/docs/search/all_3.js b/depends/mimalloc/docs/search/all_3.js index 9a029ee0b764..76374a610538 100644 --- a/depends/mimalloc/docs/search/all_3.js +++ b/depends/mimalloc/docs/search/all_3.js @@ -1,5 +1,5 @@ var searchData= [ - ['c_2b_2b_20wrappers_6',['C++ wrappers',['../group__cpp.html',1,'']]], - ['committed_7',['committed',['../group__analysis.html#ab47526df656d8837ec3e97f11b83f835',1,'mi_heap_area_t']]] + ['c_20wrappers_0',['C++ wrappers',['../group__cpp.html',1,'']]], + ['committed_1',['committed',['../group__analysis.html#ab47526df656d8837ec3e97f11b83f835',1,'mi_heap_area_t']]] ]; diff --git a/depends/mimalloc/docs/search/all_4.js b/depends/mimalloc/docs/search/all_4.js index 5dc5128610b7..a2c108b7efbf 100644 --- a/depends/mimalloc/docs/search/all_4.js +++ 
b/depends/mimalloc/docs/search/all_4.js @@ -1,5 +1,5 @@ var searchData= [ - ['environment_20options_8',['Environment Options',['../environment.html',1,'']]], - ['extended_20functions_9',['Extended Functions',['../group__extended.html',1,'']]] + ['environment_20options_0',['Environment Options',['../environment.html',1,'']]], + ['extended_20functions_1',['Extended Functions',['../group__extended.html',1,'']]] ]; diff --git a/depends/mimalloc/docs/search/all_5.js b/depends/mimalloc/docs/search/all_5.js index 7441d85349c2..6f8d30e9f2b4 100644 --- a/depends/mimalloc/docs/search/all_5.js +++ b/depends/mimalloc/docs/search/all_5.js @@ -1,5 +1,5 @@ var searchData= [ - ['heap_20allocation_10',['Heap Allocation',['../group__heap.html',1,'']]], - ['heap_20introspection_11',['Heap Introspection',['../group__analysis.html',1,'']]] + ['full_5fblock_5fsize_0',['full_block_size',['../group__analysis.html#ab53664e31d7fe2564f8d42041ef75cb3',1,'mi_heap_area_t']]], + ['functions_1',['Extended Functions',['../group__extended.html',1,'']]] ]; diff --git a/depends/mimalloc/docs/search/all_6.js b/depends/mimalloc/docs/search/all_6.js index 6d32b7b1dba9..5a38d8485110 100644 --- a/depends/mimalloc/docs/search/all_6.js +++ b/depends/mimalloc/docs/search/all_6.js @@ -1,153 +1,6 @@ var searchData= [ - ['mi_5f_5fposix_5fmemalign_12',['mi__posix_memalign',['../group__posix.html#gad5a69c8fea96aa2b7a7c818c2130090a',1,'mimalloc-doc.h']]], - ['mi_5faligned_5falloc_13',['mi_aligned_alloc',['../group__posix.html#ga1326d2e4388630b5f81ca7206318b8e5',1,'mimalloc-doc.h']]], - ['mi_5falignment_5fmax_14',['MI_ALIGNMENT_MAX',['../group__aligned.html#ga83c03016066b438f51a8095e9140be06',1,'mimalloc-doc.h']]], - ['mi_5fblock_5fvisit_5ffun_15',['mi_block_visit_fun',['../group__analysis.html#gadfa01e2900f0e5d515ad5506b26f6d65',1,'mimalloc-doc.h']]], - ['mi_5fcalloc_16',['mi_calloc',['../group__malloc.html#ga97fedb4f7107c592fd7f0f0a8949a57d',1,'mimalloc-doc.h']]], - 
['mi_5fcalloc_5faligned_17',['mi_calloc_aligned',['../group__aligned.html#ga53dddb4724042a90315b94bc268fb4c9',1,'mimalloc-doc.h']]], - ['mi_5fcalloc_5faligned_5fat_18',['mi_calloc_aligned_at',['../group__aligned.html#ga08647c4593f3b2eef24a919a73eba3a3',1,'mimalloc-doc.h']]], - ['mi_5fcalloc_5ftp_19',['mi_calloc_tp',['../group__typed.html#gae80c47c9d4cab10961fff1a8ac98fc07',1,'mimalloc-doc.h']]], - ['mi_5fcfree_20',['mi_cfree',['../group__posix.html#ga705dc7a64bffacfeeb0141501a5c35d7',1,'mimalloc-doc.h']]], - ['mi_5fcheck_5fowned_21',['mi_check_owned',['../group__analysis.html#ga628c237489c2679af84a4d0d143b3dd5',1,'mimalloc-doc.h']]], - ['mi_5fcollect_22',['mi_collect',['../group__extended.html#ga421430e2226d7d468529cec457396756',1,'mimalloc-doc.h']]], - ['mi_5fdeferred_5ffree_5ffun_23',['mi_deferred_free_fun',['../group__extended.html#ga299dae78d25ce112e384a98b7309c5be',1,'mimalloc-doc.h']]], - ['mi_5ferror_5ffun_24',['mi_error_fun',['../group__extended.html#ga251d369cda3f1c2a955c555486ed90e5',1,'mimalloc-doc.h']]], - ['mi_5fexpand_25',['mi_expand',['../group__malloc.html#gaaee66a1d483c3e28f585525fb96707e4',1,'mimalloc-doc.h']]], - ['mi_5ffree_26',['mi_free',['../group__malloc.html#gaf2c7b89c327d1f60f59e68b9ea644d95',1,'mimalloc-doc.h']]], - ['mi_5ffree_5faligned_27',['mi_free_aligned',['../group__posix.html#ga0d28d5cf61e6bfbb18c63092939fe5c9',1,'mimalloc-doc.h']]], - ['mi_5ffree_5fsize_28',['mi_free_size',['../group__posix.html#gae01389eedab8d67341ff52e2aad80ebb',1,'mimalloc-doc.h']]], - ['mi_5ffree_5fsize_5faligned_29',['mi_free_size_aligned',['../group__posix.html#ga72e9d7ffb5fe94d69bc722c8506e27bc',1,'mimalloc-doc.h']]], - ['mi_5fgood_5fsize_30',['mi_good_size',['../group__extended.html#gac057927cd06c854b45fe7847e921bd47',1,'mimalloc-doc.h']]], - ['mi_5fheap_5farea_5ft_31',['mi_heap_area_t',['../group__analysis.html#structmi__heap__area__t',1,'']]], - 
['mi_5fheap_5fcalloc_32',['mi_heap_calloc',['../group__heap.html#gaa6702b3c48e9e53e50e81b36f5011d55',1,'mimalloc-doc.h']]], - ['mi_5fheap_5fcalloc_5faligned_33',['mi_heap_calloc_aligned',['../group__heap.html#ga4af03a6e2b93fae77424d93f889705c3',1,'mimalloc-doc.h']]], - ['mi_5fheap_5fcalloc_5faligned_5fat_34',['mi_heap_calloc_aligned_at',['../group__heap.html#ga08ca6419a5c057a4d965868998eef487',1,'mimalloc-doc.h']]], - ['mi_5fheap_5fcalloc_5ftp_35',['mi_heap_calloc_tp',['../group__typed.html#ga4e5d1f1707c90e5f55e023ac5f45fe74',1,'mimalloc-doc.h']]], - ['mi_5fheap_5fcheck_5fowned_36',['mi_heap_check_owned',['../group__analysis.html#ga0d67c1789faaa15ff366c024fcaf6377',1,'mimalloc-doc.h']]], - ['mi_5fheap_5fcollect_37',['mi_heap_collect',['../group__heap.html#ga7922f7495cde30b1984d0e6072419298',1,'mimalloc-doc.h']]], - ['mi_5fheap_5fcontains_5fblock_38',['mi_heap_contains_block',['../group__analysis.html#gaa862aa8ed8d57d84cae41fc1022d71af',1,'mimalloc-doc.h']]], - ['mi_5fheap_5fdelete_39',['mi_heap_delete',['../group__heap.html#ga2ab1af8d438819b55319c7ef51d1e409',1,'mimalloc-doc.h']]], - ['mi_5fheap_5fdestroy_40',['mi_heap_destroy',['../group__heap.html#ga9f9c0844edb9717f4feacd79116b8e0d',1,'mimalloc-doc.h']]], - ['mi_5fheap_5fget_5fbacking_41',['mi_heap_get_backing',['../group__heap.html#ga5d03fbe062ffcf38f0f417fd968357fc',1,'mimalloc-doc.h']]], - ['mi_5fheap_5fget_5fdefault_42',['mi_heap_get_default',['../group__heap.html#ga8db4cbb87314a989a9a187464d6b5e05',1,'mimalloc-doc.h']]], - ['mi_5fheap_5fmalloc_43',['mi_heap_malloc',['../group__heap.html#ga9cbed01e42c0647907295de92c3fa296',1,'mimalloc-doc.h']]], - ['mi_5fheap_5fmalloc_5faligned_44',['mi_heap_malloc_aligned',['../group__heap.html#gab5b87e1805306f70df38789fcfcf6653',1,'mimalloc-doc.h']]], - ['mi_5fheap_5fmalloc_5faligned_5fat_45',['mi_heap_malloc_aligned_at',['../group__heap.html#ga23acd7680fb0976dde3783254c6c874b',1,'mimalloc-doc.h']]], - 
['mi_5fheap_5fmalloc_5fsmall_46',['mi_heap_malloc_small',['../group__heap.html#gaa1a1c7a1f4da6826b5a25b70ef878368',1,'mimalloc-doc.h']]], - ['mi_5fheap_5fmalloc_5ftp_47',['mi_heap_malloc_tp',['../group__typed.html#ga653bcb24ac495bc19940ecd6898f9cd7',1,'mimalloc-doc.h']]], - ['mi_5fheap_5fmallocn_48',['mi_heap_mallocn',['../group__heap.html#ga851da6c43fe0b71c1376cee8aef90db0',1,'mimalloc-doc.h']]], - ['mi_5fheap_5fmallocn_5ftp_49',['mi_heap_mallocn_tp',['../group__typed.html#ga6b75cb9c4b9c647661d0924552dc6e83',1,'mimalloc-doc.h']]], - ['mi_5fheap_5fnew_50',['mi_heap_new',['../group__heap.html#ga766f672ba56f2fbfeb9d9dbb0b7f6b11',1,'mimalloc-doc.h']]], - ['mi_5fheap_5frealloc_51',['mi_heap_realloc',['../group__heap.html#gaaef3395f66be48f37bdc8322509c5d81',1,'mimalloc-doc.h']]], - ['mi_5fheap_5frealloc_5faligned_52',['mi_heap_realloc_aligned',['../group__heap.html#gafc603b696bd14cae6da28658f950d98c',1,'mimalloc-doc.h']]], - ['mi_5fheap_5frealloc_5faligned_5fat_53',['mi_heap_realloc_aligned_at',['../group__heap.html#gaf96c788a1bf553fe2d371de9365e047c',1,'mimalloc-doc.h']]], - ['mi_5fheap_5freallocf_54',['mi_heap_reallocf',['../group__heap.html#ga4a21070eb4e7cce018133c8d5f4b0527',1,'mimalloc-doc.h']]], - ['mi_5fheap_5freallocn_55',['mi_heap_reallocn',['../group__heap.html#gac74e94ad9b0c9b57c1c4d88b8825b7a8',1,'mimalloc-doc.h']]], - ['mi_5fheap_5freallocn_5ftp_56',['mi_heap_reallocn_tp',['../group__typed.html#gaf213d5422ec35e7f6caad827c79bc948',1,'mimalloc-doc.h']]], - ['mi_5fheap_5frealpath_57',['mi_heap_realpath',['../group__heap.html#ga00e95ba1e01acac3cfd95bb7a357a6f0',1,'mimalloc-doc.h']]], - ['mi_5fheap_5frecalloc_58',['mi_heap_recalloc',['../group__zeroinit.html#ga8648c5fbb22a80f0262859099f06dfbd',1,'mimalloc-doc.h']]], - ['mi_5fheap_5frecalloc_5faligned_59',['mi_heap_recalloc_aligned',['../group__zeroinit.html#ga9f3f999396c8f77ca5e80e7b40ac29e3',1,'mimalloc-doc.h']]], - 
['mi_5fheap_5frecalloc_5faligned_5fat_60',['mi_heap_recalloc_aligned_at',['../group__zeroinit.html#ga496452c96f1de8c500be9fddf52edaf7',1,'mimalloc-doc.h']]], - ['mi_5fheap_5frecalloc_5ftp_61',['mi_heap_recalloc_tp',['../group__typed.html#ga3e50a1600958fcaf1a7f3560c9174f9e',1,'mimalloc-doc.h']]], - ['mi_5fheap_5frezalloc_62',['mi_heap_rezalloc',['../group__zeroinit.html#gacfad83f14eb5d6a42a497a898e19fc76',1,'mimalloc-doc.h']]], - ['mi_5fheap_5frezalloc_5faligned_63',['mi_heap_rezalloc_aligned',['../group__zeroinit.html#ga375fa8a611c51905e592d5d467c49664',1,'mimalloc-doc.h']]], - ['mi_5fheap_5frezalloc_5faligned_5fat_64',['mi_heap_rezalloc_aligned_at',['../group__zeroinit.html#gac90da54fa7e5d10bdc97ce0b51dce2eb',1,'mimalloc-doc.h']]], - ['mi_5fheap_5fset_5fdefault_65',['mi_heap_set_default',['../group__heap.html#gab8631ec88c8d26641b68b5d25dcd4422',1,'mimalloc-doc.h']]], - ['mi_5fheap_5fstrdup_66',['mi_heap_strdup',['../group__heap.html#ga139d6b09dbf50c3c2523d0f4d1cfdeb5',1,'mimalloc-doc.h']]], - ['mi_5fheap_5fstrndup_67',['mi_heap_strndup',['../group__heap.html#ga8e3dbd46650dd26573cf307a2c8f1f5a',1,'mimalloc-doc.h']]], - ['mi_5fheap_5ft_68',['mi_heap_t',['../group__heap.html#ga34a47cde5a5b38c29f1aa3c5e76943c2',1,'mimalloc-doc.h']]], - ['mi_5fheap_5fvisit_5fblocks_69',['mi_heap_visit_blocks',['../group__analysis.html#ga70c46687dc6e9dc98b232b02646f8bed',1,'mimalloc-doc.h']]], - ['mi_5fheap_5fzalloc_70',['mi_heap_zalloc',['../group__heap.html#ga903104592c8ed53417a3762da6241133',1,'mimalloc-doc.h']]], - ['mi_5fheap_5fzalloc_5faligned_71',['mi_heap_zalloc_aligned',['../group__heap.html#gaa450a59c6c7ae5fdbd1c2b80a8329ef0',1,'mimalloc-doc.h']]], - ['mi_5fheap_5fzalloc_5faligned_5fat_72',['mi_heap_zalloc_aligned_at',['../group__heap.html#ga45fb43a62776fbebbdf1edd99b527954',1,'mimalloc-doc.h']]], - ['mi_5fheap_5fzalloc_5ftp_73',['mi_heap_zalloc_tp',['../group__typed.html#gad6e87e86e994aa14416ae9b5d4c188fe',1,'mimalloc-doc.h']]], - 
['mi_5fis_5fin_5fheap_5fregion_74',['mi_is_in_heap_region',['../group__extended.html#ga5f071b10d4df1c3658e04e7fd67a94e6',1,'mimalloc-doc.h']]], - ['mi_5fis_5fredirected_75',['mi_is_redirected',['../group__extended.html#gaad25050b19f30cd79397b227e0157a3f',1,'mimalloc-doc.h']]], - ['mi_5fmalloc_76',['mi_malloc',['../group__malloc.html#ga3406e8b168bc74c8637b11571a6da83a',1,'mimalloc-doc.h']]], - ['mi_5fmalloc_5faligned_77',['mi_malloc_aligned',['../group__aligned.html#ga68930196751fa2cca9e1fd0d71bade56',1,'mimalloc-doc.h']]], - ['mi_5fmalloc_5faligned_5fat_78',['mi_malloc_aligned_at',['../group__aligned.html#ga5850da130c936bd77db039dcfbc8295d',1,'mimalloc-doc.h']]], - ['mi_5fmalloc_5fsize_79',['mi_malloc_size',['../group__posix.html#ga4531c9e775bb3ae12db57c1ba8a5d7de',1,'mimalloc-doc.h']]], - ['mi_5fmalloc_5fsmall_80',['mi_malloc_small',['../group__extended.html#ga7136c2e55cb22c98ecf95d08d6debb99',1,'mimalloc-doc.h']]], - ['mi_5fmalloc_5ftp_81',['mi_malloc_tp',['../group__typed.html#ga0619a62c5fd886f1016030abe91f0557',1,'mimalloc-doc.h']]], - ['mi_5fmalloc_5fusable_5fsize_82',['mi_malloc_usable_size',['../group__posix.html#ga06d07cf357bbac5c73ba5d0c0c421e17',1,'mimalloc-doc.h']]], - ['mi_5fmallocn_83',['mi_mallocn',['../group__malloc.html#ga0b05e2bf0f73e7401ae08597ff782ac6',1,'mimalloc-doc.h']]], - ['mi_5fmallocn_5ftp_84',['mi_mallocn_tp',['../group__typed.html#gae5cb6e0fafc9f23169c5622e077afe8b',1,'mimalloc-doc.h']]], - ['mi_5fmanage_5fos_5fmemory_85',['mi_manage_os_memory',['../group__extended.html#ga4c6486a1fdcd7a423b5f25fe4be8e0cf',1,'mimalloc-doc.h']]], - ['mi_5fmemalign_86',['mi_memalign',['../group__posix.html#gaab7fa71ea93b96873f5d9883db57d40e',1,'mimalloc-doc.h']]], - ['mi_5fnew_87',['mi_new',['../group__cpp.html#gaad048a9fce3d02c5909cd05c6ec24545',1,'mimalloc-doc.h']]], - ['mi_5fnew_5faligned_88',['mi_new_aligned',['../group__cpp.html#gaef2c2bdb4f70857902d3c8903ac095f3',1,'mimalloc-doc.h']]], - 
['mi_5fnew_5faligned_5fnothrow_89',['mi_new_aligned_nothrow',['../group__cpp.html#gab5e29558926d934c3f1cae8c815f942c',1,'mimalloc-doc.h']]], - ['mi_5fnew_5fn_90',['mi_new_n',['../group__cpp.html#gae7bc4f56cd57ed3359060ff4f38bda81',1,'mimalloc-doc.h']]], - ['mi_5fnew_5fnothrow_91',['mi_new_nothrow',['../group__cpp.html#gaeaded64eda71ed6b1d569d3e723abc4a',1,'mimalloc-doc.h']]], - ['mi_5fnew_5frealloc_92',['mi_new_realloc',['../group__cpp.html#gaab78a32f55149e9fbf432d5288e38e1e',1,'mimalloc-doc.h']]], - ['mi_5fnew_5freallocn_93',['mi_new_reallocn',['../group__cpp.html#ga756f4b2bc6a7ecd0a90baea8e90c7907',1,'mimalloc-doc.h']]], - ['mi_5foption_5fdisable_94',['mi_option_disable',['../group__options.html#gaebf6ff707a2e688ebb1a2296ca564054',1,'mimalloc-doc.h']]], - ['mi_5foption_5feager_5fcommit_95',['mi_option_eager_commit',['../group__options.html#ggafebf7ed116adb38ae5218bc3ce06884ca1e8de72c93da7ff22d91e1e27b52ac2b',1,'mimalloc-doc.h']]], - ['mi_5foption_5feager_5fcommit_5fdelay_96',['mi_option_eager_commit_delay',['../group__options.html#ggafebf7ed116adb38ae5218bc3ce06884ca17a190c25be381142d87e0468c4c068c',1,'mimalloc-doc.h']]], - ['mi_5foption_5feager_5fregion_5fcommit_97',['mi_option_eager_region_commit',['../group__options.html#ggafebf7ed116adb38ae5218bc3ce06884ca32ce97ece29f69e82579679cf8a307ad',1,'mimalloc-doc.h']]], - ['mi_5foption_5fenable_98',['mi_option_enable',['../group__options.html#ga04180ae41b0d601421dd62ced40ca050',1,'mimalloc-doc.h']]], - ['mi_5foption_5fget_99',['mi_option_get',['../group__options.html#ga7e8af195cc81d3fa64ccf2662caa565a',1,'mimalloc-doc.h']]], - ['mi_5foption_5fis_5fenabled_100',['mi_option_is_enabled',['../group__options.html#ga459ad98f18b3fc9275474807fe0ca188',1,'mimalloc-doc.h']]], - ['mi_5foption_5flarge_5fos_5fpages_101',['mi_option_large_os_pages',['../group__options.html#ggafebf7ed116adb38ae5218bc3ce06884ca4192d491200d0055df0554d4cf65054e',1,'mimalloc-doc.h']]], - 
['mi_5foption_5fos_5ftag_102',['mi_option_os_tag',['../group__options.html#ggafebf7ed116adb38ae5218bc3ce06884ca4b74ae2a69e445de6c2361b73c1d14bf',1,'mimalloc-doc.h']]], - ['mi_5foption_5fpage_5freset_103',['mi_option_page_reset',['../group__options.html#ggafebf7ed116adb38ae5218bc3ce06884cada854dd272c66342f18a93ee254a2968',1,'mimalloc-doc.h']]], - ['mi_5foption_5freserve_5fhuge_5fos_5fpages_104',['mi_option_reserve_huge_os_pages',['../group__options.html#ggafebf7ed116adb38ae5218bc3ce06884caca7ed041be3b0b9d0b82432c7bf41af2',1,'mimalloc-doc.h']]], - ['mi_5foption_5freserve_5fhuge_5fos_5fpages_5fat_105',['mi_option_reserve_huge_os_pages_at',['../group__options.html#ggafebf7ed116adb38ae5218bc3ce06884caa13e7926d4339d2aa6fbf61d4473fd5c',1,'mimalloc-doc.h']]], - ['mi_5foption_5freset_5fdecommits_106',['mi_option_reset_decommits',['../group__options.html#ggafebf7ed116adb38ae5218bc3ce06884cac81ee965b130fa81238913a3c239d536',1,'mimalloc-doc.h']]], - ['mi_5foption_5freset_5fdelay_107',['mi_option_reset_delay',['../group__options.html#ggafebf7ed116adb38ae5218bc3ce06884ca154fe170131d5212cff57e22b99523c5',1,'mimalloc-doc.h']]], - ['mi_5foption_5fsegment_5fcache_108',['mi_option_segment_cache',['../group__options.html#ggafebf7ed116adb38ae5218bc3ce06884ca2ecbe7ef32f5c84de3739aa4f0b805a1',1,'mimalloc-doc.h']]], - ['mi_5foption_5fsegment_5freset_109',['mi_option_segment_reset',['../group__options.html#ggafebf7ed116adb38ae5218bc3ce06884cafb121d30d87591850d5410ccc3a95c6d',1,'mimalloc-doc.h']]], - ['mi_5foption_5fset_110',['mi_option_set',['../group__options.html#gaf84921c32375e25754dc2ee6a911fa60',1,'mimalloc-doc.h']]], - ['mi_5foption_5fset_5fdefault_111',['mi_option_set_default',['../group__options.html#ga7ef623e440e6e5545cb08c94e71e4b90',1,'mimalloc-doc.h']]], - ['mi_5foption_5fset_5fenabled_112',['mi_option_set_enabled',['../group__options.html#ga9a13d05fcb77489cb06d4d017ebd8bed',1,'mimalloc-doc.h']]], - 
['mi_5foption_5fset_5fenabled_5fdefault_113',['mi_option_set_enabled_default',['../group__options.html#ga65518b69ec5d32336b50e07f74b3f629',1,'mimalloc-doc.h']]], - ['mi_5foption_5fshow_5ferrors_114',['mi_option_show_errors',['../group__options.html#ggafebf7ed116adb38ae5218bc3ce06884cafbf4822e5c00732c5984b32a032837f0',1,'mimalloc-doc.h']]], - ['mi_5foption_5fshow_5fstats_115',['mi_option_show_stats',['../group__options.html#ggafebf7ed116adb38ae5218bc3ce06884ca0957ef73b2550764b4840edf48422fda',1,'mimalloc-doc.h']]], - ['mi_5foption_5ft_116',['mi_option_t',['../group__options.html#gafebf7ed116adb38ae5218bc3ce06884c',1,'mimalloc-doc.h']]], - ['mi_5foption_5fuse_5fnuma_5fnodes_117',['mi_option_use_numa_nodes',['../group__options.html#ggafebf7ed116adb38ae5218bc3ce06884ca0ac33a18f6b659fcfaf44efb0bab1b74',1,'mimalloc-doc.h']]], - ['mi_5foption_5fverbose_118',['mi_option_verbose',['../group__options.html#ggafebf7ed116adb38ae5218bc3ce06884ca7c8b7bf5281c581bad64f5daa6442777',1,'mimalloc-doc.h']]], - ['mi_5foutput_5ffun_119',['mi_output_fun',['../group__extended.html#gad823d23444a4b77a40f66bf075a98a0c',1,'mimalloc-doc.h']]], - ['mi_5fposix_5fmemalign_120',['mi_posix_memalign',['../group__posix.html#gacff84f226ba9feb2031b8992e5579447',1,'mimalloc-doc.h']]], - ['mi_5fprocess_5finfo_121',['mi_process_info',['../group__extended.html#ga7d862c2affd5790381da14eb102a364d',1,'mimalloc-doc.h']]], - ['mi_5fpvalloc_122',['mi_pvalloc',['../group__posix.html#gaeb325c39b887d3b90d85d1eb1712fb1e',1,'mimalloc-doc.h']]], - ['mi_5frealloc_123',['mi_realloc',['../group__malloc.html#gaf11eb497da57bdfb2de65eb191c69db6',1,'mimalloc-doc.h']]], - ['mi_5frealloc_5faligned_124',['mi_realloc_aligned',['../group__aligned.html#ga4028d1cf4aa4c87c880747044a8322ae',1,'mimalloc-doc.h']]], - ['mi_5frealloc_5faligned_5fat_125',['mi_realloc_aligned_at',['../group__aligned.html#gaf66a9ae6c6f08bd6be6fb6ea771faffb',1,'mimalloc-doc.h']]], - 
['mi_5freallocarr_126',['mi_reallocarr',['../group__posix.html#ga7e1934d60a3e697950eeb48e042bfad5',1,'mimalloc-doc.h']]], - ['mi_5freallocarray_127',['mi_reallocarray',['../group__posix.html#ga48fad8648a2f1dab9c87ea9448a52088',1,'mimalloc-doc.h']]], - ['mi_5freallocf_128',['mi_reallocf',['../group__malloc.html#gafe68ac7c5e24a65cd55c9d6b152211a0',1,'mimalloc-doc.h']]], - ['mi_5freallocn_129',['mi_reallocn',['../group__malloc.html#ga61d57b4144ba24fba5c1e9b956d13853',1,'mimalloc-doc.h']]], - ['mi_5freallocn_5ftp_130',['mi_reallocn_tp',['../group__typed.html#ga1158b49a55dfa81f58a4426a7578f523',1,'mimalloc-doc.h']]], - ['mi_5frealpath_131',['mi_realpath',['../group__malloc.html#ga08cec32dd5bbe7da91c78d19f1b5bebe',1,'mimalloc-doc.h']]], - ['mi_5frecalloc_132',['mi_recalloc',['../group__malloc.html#ga23a0fbb452b5dce8e31fab1a1958cacc',1,'mimalloc-doc.h']]], - ['mi_5frecalloc_5faligned_133',['mi_recalloc_aligned',['../group__zeroinit.html#ga3e7e5c291acf1c7fd7ffd9914a9f945f',1,'mimalloc-doc.h']]], - ['mi_5frecalloc_5faligned_5fat_134',['mi_recalloc_aligned_at',['../group__zeroinit.html#ga4ff5e92ad73585418a072c9d059e5cf9',1,'mimalloc-doc.h']]], - ['mi_5fregister_5fdeferred_5ffree_135',['mi_register_deferred_free',['../group__extended.html#ga3460a6ca91af97be4058f523d3cb8ece',1,'mimalloc-doc.h']]], - ['mi_5fregister_5ferror_136',['mi_register_error',['../group__extended.html#gaa1d55e0e894be240827e5d87ec3a1f45',1,'mimalloc-doc.h']]], - ['mi_5fregister_5foutput_137',['mi_register_output',['../group__extended.html#gae5b17ff027cd2150b43a33040250cf3f',1,'mimalloc-doc.h']]], - ['mi_5freserve_5fhuge_5fos_5fpages_5fat_138',['mi_reserve_huge_os_pages_at',['../group__extended.html#ga7795a13d20087447281858d2c771cca1',1,'mimalloc-doc.h']]], - ['mi_5freserve_5fhuge_5fos_5fpages_5finterleave_139',['mi_reserve_huge_os_pages_interleave',['../group__extended.html#ga3132f521fb756fc0e8ec0b74fb58df50',1,'mimalloc-doc.h']]], - 
['mi_5freserve_5fos_5fmemory_140',['mi_reserve_os_memory',['../group__extended.html#ga00ec3324b6b2591c7fe3677baa30a767',1,'mimalloc-doc.h']]], - ['mi_5frezalloc_141',['mi_rezalloc',['../group__zeroinit.html#ga8c292e142110229a2980b37ab036dbc6',1,'mimalloc-doc.h']]], - ['mi_5frezalloc_5faligned_142',['mi_rezalloc_aligned',['../group__zeroinit.html#gacd71a7bce96aab38ae6de17af2eb2cf0',1,'mimalloc-doc.h']]], - ['mi_5frezalloc_5faligned_5fat_143',['mi_rezalloc_aligned_at',['../group__zeroinit.html#gae8b358c417e61d5307da002702b0a8e1',1,'mimalloc-doc.h']]], - ['mi_5fsmall_5fsize_5fmax_144',['MI_SMALL_SIZE_MAX',['../group__extended.html#ga1ea64283508718d9d645c38efc2f4305',1,'mimalloc-doc.h']]], - ['mi_5fstats_5fmerge_145',['mi_stats_merge',['../group__extended.html#ga854b1de8cb067c7316286c28b2fcd3d1',1,'mimalloc-doc.h']]], - ['mi_5fstats_5fprint_146',['mi_stats_print',['../group__extended.html#ga2d126e5c62d3badc35445e5d84166df2',1,'mimalloc-doc.h']]], - ['mi_5fstats_5fprint_5fout_147',['mi_stats_print_out',['../group__extended.html#ga537f13b299ddf801e49a5a94fde02c79',1,'mimalloc-doc.h']]], - ['mi_5fstats_5freset_148',['mi_stats_reset',['../group__extended.html#ga3bb8468b8cfcc6e2a61d98aee85c5f99',1,'mimalloc-doc.h']]], - ['mi_5fstl_5fallocator_149',['mi_stl_allocator',['../group__cpp.html#structmi__stl__allocator',1,'']]], - ['mi_5fstrdup_150',['mi_strdup',['../group__malloc.html#gac7cffe13f1f458ed16789488bf92b9b2',1,'mimalloc-doc.h']]], - ['mi_5fstrndup_151',['mi_strndup',['../group__malloc.html#gaaabf971c2571891433477e2d21a35266',1,'mimalloc-doc.h']]], - ['mi_5fthread_5fdone_152',['mi_thread_done',['../group__extended.html#ga0ae4581e85453456a0d658b2b98bf7bf',1,'mimalloc-doc.h']]], - ['mi_5fthread_5finit_153',['mi_thread_init',['../group__extended.html#gaf8e73efc2cbca9ebfdfb166983a04c17',1,'mimalloc-doc.h']]], - ['mi_5fthread_5fstats_5fprint_5fout_154',['mi_thread_stats_print_out',['../group__extended.html#gab1dac8476c46cb9eecab767eb40c1525',1,'mimalloc-doc.h']]], - 
['mi_5fusable_5fsize_155',['mi_usable_size',['../group__extended.html#ga089c859d9eddc5f9b4bd946cd53cebee',1,'mimalloc-doc.h']]], - ['mi_5fvalloc_156',['mi_valloc',['../group__posix.html#ga73baaf5951f5165ba0763d0c06b6a93b',1,'mimalloc-doc.h']]], - ['mi_5fzalloc_157',['mi_zalloc',['../group__malloc.html#gafdd9d8bb2986e668ba9884f28af38000',1,'mimalloc-doc.h']]], - ['mi_5fzalloc_5faligned_158',['mi_zalloc_aligned',['../group__aligned.html#ga0cadbcf5b89a7b6fb171bc8df8734819',1,'mimalloc-doc.h']]], - ['mi_5fzalloc_5faligned_5fat_159',['mi_zalloc_aligned_at',['../group__aligned.html#ga5f8c2353766db522565e642fafd8a3f8',1,'mimalloc-doc.h']]], - ['mi_5fzalloc_5fsmall_160',['mi_zalloc_small',['../group__extended.html#ga220f29f40a44404b0061c15bc1c31152',1,'mimalloc-doc.h']]], - ['mi_5fzalloc_5ftp_161',['mi_zalloc_tp',['../group__typed.html#gac77a61bdaf680a803785fe307820b48c',1,'mimalloc-doc.h']]] + ['heap_20allocation_0',['Heap Allocation',['../group__heap.html',1,'']]], + ['heap_20introspection_1',['Heap Introspection',['../group__analysis.html',1,'']]], + ['heap_5ftag_2',['heap_tag',['../group__analysis.html#a2b7a0c92ece8daf46b558efc990ebdc1',1,'mi_heap_area_t']]] ]; diff --git a/depends/mimalloc/docs/search/all_7.js b/depends/mimalloc/docs/search/all_7.js index 8f296aa53e1b..6d20843280ff 100644 --- a/depends/mimalloc/docs/search/all_7.js +++ b/depends/mimalloc/docs/search/all_7.js @@ -1,4 +1,5 @@ var searchData= [ - ['overriding_20malloc_162',['Overriding Malloc',['../overrides.html',1,'']]] + ['initialized_20re_20allocation_0',['Zero initialized re-allocation',['../group__zeroinit.html',1,'']]], + ['introspection_1',['Heap Introspection',['../group__analysis.html',1,'']]] ]; diff --git a/depends/mimalloc/docs/search/all_8.js b/depends/mimalloc/docs/search/all_8.js index a9caa77c7bfd..5071ed69e8a2 100644 --- a/depends/mimalloc/docs/search/all_8.js +++ b/depends/mimalloc/docs/search/all_8.js @@ -1,5 +1,4 @@ var searchData= [ - 
['performance_163',['Performance',['../bench.html',1,'']]], - ['posix_164',['Posix',['../group__posix.html',1,'']]] + ['library_0',['Using the library',['../using.html',1,'']]] ]; diff --git a/depends/mimalloc/docs/search/all_9.js b/depends/mimalloc/docs/search/all_9.js index f6b4ba352ef6..400cd0239def 100644 --- a/depends/mimalloc/docs/search/all_9.js +++ b/depends/mimalloc/docs/search/all_9.js @@ -1,5 +1,192 @@ var searchData= [ - ['reserved_165',['reserved',['../group__analysis.html#ae848a3e6840414891035423948ca0383',1,'mi_heap_area_t']]], - ['runtime_20options_166',['Runtime Options',['../group__options.html',1,'']]] + ['macros_0',['Typed Macros',['../group__typed.html',1,'']]], + ['malloc_1',['Overriding Malloc',['../overrides.html',1,'']]], + ['malloc_2',['mi-malloc',['../index.html',1,'']]], + ['mi_20malloc_3',['mi-malloc',['../index.html',1,'']]], + ['mi_5f_5fexpand_4',['mi__expand',['../group__posix.html#ga66bcfeb4faedbb42b796bc680821ef84',1,'mimalloc-doc.h']]], + ['mi_5f_5fposix_5fmemalign_5',['mi__posix_memalign',['../group__posix.html#gad5a69c8fea96aa2b7a7c818c2130090a',1,'mimalloc-doc.h']]], + ['mi_5fabandoned_5fvisit_5fblocks_6',['mi_abandoned_visit_blocks',['../group__analysis.html#ga6a4865a887b2ec5247854af61562503c',1,'mimalloc-doc.h']]], + ['mi_5faligned_5falloc_7',['mi_aligned_alloc',['../group__posix.html#ga430ed1513f0571ff83be00ec58a98ee0',1,'mimalloc-doc.h']]], + ['mi_5faligned_5foffset_5frecalloc_8',['mi_aligned_offset_recalloc',['../group__posix.html#ga16570deddd559001b44953eedbad0084',1,'mimalloc-doc.h']]], + ['mi_5faligned_5frecalloc_9',['mi_aligned_recalloc',['../group__posix.html#gaf82cbb4b4f24acf723348628451798d3',1,'mimalloc-doc.h']]], + ['mi_5farena_5farea_10',['mi_arena_area',['../group__extended.html#ga9a25a00a22151619a0be91a10af7787f',1,'mimalloc-doc.h']]], + ['mi_5farena_5fid_5ft_11',['mi_arena_id_t',['../group__extended.html#ga99fe38650d0b02e0e0f89ee024db91d3',1,'mimalloc-doc.h']]], + 
['mi_5fblock_5fvisit_5ffun_12',['mi_block_visit_fun',['../group__analysis.html#ga8255dc9371e6b299d9802a610c4e34ec',1,'mimalloc-doc.h']]], + ['mi_5fcalloc_13',['mi_calloc',['../group__malloc.html#ga6686568014b54d1e6c7ac64a076e4f56',1,'mimalloc-doc.h']]], + ['mi_5fcalloc_5faligned_14',['mi_calloc_aligned',['../group__aligned.html#ga424ef386fb1f9f8e0a86ab53f16eaaf1',1,'mimalloc-doc.h']]], + ['mi_5fcalloc_5faligned_5fat_15',['mi_calloc_aligned_at',['../group__aligned.html#ga977f96bd2c5c141bcd70e6685c90d6c3',1,'mimalloc-doc.h']]], + ['mi_5fcalloc_5ftp_16',['mi_calloc_tp',['../group__typed.html#gae80c47c9d4cab10961fff1a8ac98fc07',1,'mimalloc-doc.h']]], + ['mi_5fcfree_17',['mi_cfree',['../group__posix.html#ga705dc7a64bffacfeeb0141501a5c35d7',1,'mimalloc-doc.h']]], + ['mi_5fcheck_5fowned_18',['mi_check_owned',['../group__analysis.html#ga628c237489c2679af84a4d0d143b3dd5',1,'mimalloc-doc.h']]], + ['mi_5fcollect_19',['mi_collect',['../group__extended.html#ga421430e2226d7d468529cec457396756',1,'mimalloc-doc.h']]], + ['mi_5fdebug_5fshow_5farenas_20',['mi_debug_show_arenas',['../group__extended.html#gad7439207f8f71fb6c382a9ea20b997e7',1,'mimalloc-doc.h']]], + ['mi_5fdeferred_5ffree_5ffun_21',['mi_deferred_free_fun',['../group__extended.html#ga292a45f7dbc7cd23c5352ce1f0002816',1,'mimalloc-doc.h']]], + ['mi_5fdupenv_5fs_22',['mi_dupenv_s',['../group__posix.html#gab41369c1a1da7504013a7a0b1d4dd958',1,'mimalloc-doc.h']]], + ['mi_5ferror_5ffun_23',['mi_error_fun',['../group__extended.html#ga83fc6a688b322261e1c2deab000b0591',1,'mimalloc-doc.h']]], + ['mi_5fexpand_24',['mi_expand',['../group__malloc.html#ga19299856216cfbb08e2628593654dfb0',1,'mimalloc-doc.h']]], + ['mi_5ffree_25',['mi_free',['../group__malloc.html#gaf2c7b89c327d1f60f59e68b9ea644d95',1,'mimalloc-doc.h']]], + ['mi_5ffree_5faligned_26',['mi_free_aligned',['../group__posix.html#ga0d28d5cf61e6bfbb18c63092939fe5c9',1,'mimalloc-doc.h']]], + 
['mi_5ffree_5fsize_27',['mi_free_size',['../group__posix.html#gae01389eedab8d67341ff52e2aad80ebb',1,'mimalloc-doc.h']]], + ['mi_5ffree_5fsize_5faligned_28',['mi_free_size_aligned',['../group__posix.html#ga72e9d7ffb5fe94d69bc722c8506e27bc',1,'mimalloc-doc.h']]], + ['mi_5fgood_5fsize_29',['mi_good_size',['../group__extended.html#gac057927cd06c854b45fe7847e921bd47',1,'mimalloc-doc.h']]], + ['mi_5fheap_5farea_5ft_30',['mi_heap_area_t',['../group__analysis.html#structmi__heap__area__t',1,'']]], + ['mi_5fheap_5fcalloc_31',['mi_heap_calloc',['../group__heap.html#gac0098aaf231d3e9586c73136d5df95da',1,'mimalloc-doc.h']]], + ['mi_5fheap_5fcalloc_5faligned_32',['mi_heap_calloc_aligned',['../group__heap.html#gacafcc26df827c7a7de5e850217566108',1,'mimalloc-doc.h']]], + ['mi_5fheap_5fcalloc_5faligned_5fat_33',['mi_heap_calloc_aligned_at',['../group__heap.html#gaa42ec2079989c4374f2c331d9b35f4e4',1,'mimalloc-doc.h']]], + ['mi_5fheap_5fcalloc_5ftp_34',['mi_heap_calloc_tp',['../group__typed.html#ga4e5d1f1707c90e5f55e023ac5f45fe74',1,'mimalloc-doc.h']]], + ['mi_5fheap_5fcheck_5fowned_35',['mi_heap_check_owned',['../group__analysis.html#ga0d67c1789faaa15ff366c024fcaf6377',1,'mimalloc-doc.h']]], + ['mi_5fheap_5fcollect_36',['mi_heap_collect',['../group__heap.html#ga7922f7495cde30b1984d0e6072419298',1,'mimalloc-doc.h']]], + ['mi_5fheap_5fcontains_5fblock_37',['mi_heap_contains_block',['../group__analysis.html#gaa862aa8ed8d57d84cae41fc1022d71af',1,'mimalloc-doc.h']]], + ['mi_5fheap_5fdelete_38',['mi_heap_delete',['../group__heap.html#ga2ab1af8d438819b55319c7ef51d1e409',1,'mimalloc-doc.h']]], + ['mi_5fheap_5fdestroy_39',['mi_heap_destroy',['../group__heap.html#ga9f9c0844edb9717f4feacd79116b8e0d',1,'mimalloc-doc.h']]], + ['mi_5fheap_5fget_5fbacking_40',['mi_heap_get_backing',['../group__heap.html#gac6ac9f0e7be9ab4ff70acfc8dad1235a',1,'mimalloc-doc.h']]], + ['mi_5fheap_5fget_5fdefault_41',['mi_heap_get_default',['../group__heap.html#ga14c667a6e2c5d28762d8cb7d4e057909',1,'mimalloc-doc.h']]], 
+ ['mi_5fheap_5fmalloc_42',['mi_heap_malloc',['../group__heap.html#gab374e206c7034e0d899fb934e4f4a863',1,'mimalloc-doc.h']]], + ['mi_5fheap_5fmalloc_5faligned_43',['mi_heap_malloc_aligned',['../group__heap.html#ga33f4f05b7fea7af2113c62a4bf882cc5',1,'mimalloc-doc.h']]], + ['mi_5fheap_5fmalloc_5faligned_5fat_44',['mi_heap_malloc_aligned_at',['../group__heap.html#gae7ffc045c3996497a7f3a5f6fe7b8aaa',1,'mimalloc-doc.h']]], + ['mi_5fheap_5fmalloc_5fsmall_45',['mi_heap_malloc_small',['../group__heap.html#ga012c5c8abe22b10043de39ff95909541',1,'mimalloc-doc.h']]], + ['mi_5fheap_5fmalloc_5ftp_46',['mi_heap_malloc_tp',['../group__typed.html#ga653bcb24ac495bc19940ecd6898f9cd7',1,'mimalloc-doc.h']]], + ['mi_5fheap_5fmallocn_47',['mi_heap_mallocn',['../group__heap.html#gab0f755c0b21c387fe8e9024200faa372',1,'mimalloc-doc.h']]], + ['mi_5fheap_5fmallocn_5ftp_48',['mi_heap_mallocn_tp',['../group__typed.html#ga6b75cb9c4b9c647661d0924552dc6e83',1,'mimalloc-doc.h']]], + ['mi_5fheap_5fnew_49',['mi_heap_new',['../group__heap.html#gaa718bb226ec0546ba6d1b6cb32179f3a',1,'mimalloc-doc.h']]], + ['mi_5fheap_5fnew_5fex_50',['mi_heap_new_ex',['../group__extended.html#ga3ae360583f4351aa5267ee7e43008faf',1,'mimalloc-doc.h']]], + ['mi_5fheap_5fnew_5fin_5farena_51',['mi_heap_new_in_arena',['../group__extended.html#gaaf2d9976576d5efd5544be12848af949',1,'mimalloc-doc.h']]], + ['mi_5fheap_5frealloc_52',['mi_heap_realloc',['../group__heap.html#gac5252d6a2e510bd349e4fcb452e6a93a',1,'mimalloc-doc.h']]], + ['mi_5fheap_5frealloc_5faligned_53',['mi_heap_realloc_aligned',['../group__heap.html#gaccf8c249872f30bf1c2493a09197d734',1,'mimalloc-doc.h']]], + ['mi_5fheap_5frealloc_5faligned_5fat_54',['mi_heap_realloc_aligned_at',['../group__heap.html#ga6df988a7219d5707f010d5f3eb0dc3f5',1,'mimalloc-doc.h']]], + ['mi_5fheap_5freallocf_55',['mi_heap_reallocf',['../group__heap.html#gae7cd171425bee04c683c65a3701f0b4a',1,'mimalloc-doc.h']]], + 
['mi_5fheap_5freallocn_56',['mi_heap_reallocn',['../group__heap.html#gaccf7bfe10ce510a000d3547d9cf7fa29',1,'mimalloc-doc.h']]], + ['mi_5fheap_5freallocn_5ftp_57',['mi_heap_reallocn_tp',['../group__typed.html#gaf213d5422ec35e7f6caad827c79bc948',1,'mimalloc-doc.h']]], + ['mi_5fheap_5frealpath_58',['mi_heap_realpath',['../group__heap.html#ga55545a3ec6da29c5b4f62e540ecac1e2',1,'mimalloc-doc.h']]], + ['mi_5fheap_5frecalloc_59',['mi_heap_recalloc',['../group__zeroinit.html#gad1a0d325d930eeb80f25e3fea37aacde',1,'mimalloc-doc.h']]], + ['mi_5fheap_5frecalloc_5faligned_60',['mi_heap_recalloc_aligned',['../group__zeroinit.html#ga87ddd674bf1c67237d780d0b9e0f0f32',1,'mimalloc-doc.h']]], + ['mi_5fheap_5frecalloc_5faligned_5fat_61',['mi_heap_recalloc_aligned_at',['../group__zeroinit.html#ga07b5bcbaf00d0d2e598c232982588496',1,'mimalloc-doc.h']]], + ['mi_5fheap_5frecalloc_5ftp_62',['mi_heap_recalloc_tp',['../group__typed.html#ga3e50a1600958fcaf1a7f3560c9174f9e',1,'mimalloc-doc.h']]], + ['mi_5fheap_5frezalloc_63',['mi_heap_rezalloc',['../group__zeroinit.html#ga8d8b7ebb24b513cd84d1a696048da60d',1,'mimalloc-doc.h']]], + ['mi_5fheap_5frezalloc_5faligned_64',['mi_heap_rezalloc_aligned',['../group__zeroinit.html#ga5129f6dc46ee1613d918820a8a0533a7',1,'mimalloc-doc.h']]], + ['mi_5fheap_5frezalloc_5faligned_5fat_65',['mi_heap_rezalloc_aligned_at',['../group__zeroinit.html#ga2bafa79c3f98ea74882349d44cffa5d9',1,'mimalloc-doc.h']]], + ['mi_5fheap_5fset_5fdefault_66',['mi_heap_set_default',['../group__heap.html#ga349b677dec7da5eacdbc7a385bd62a4a',1,'mimalloc-doc.h']]], + ['mi_5fheap_5fstrdup_67',['mi_heap_strdup',['../group__heap.html#ga5754e09ccc51dd6bc73885bb6ea21b7a',1,'mimalloc-doc.h']]], + ['mi_5fheap_5fstrndup_68',['mi_heap_strndup',['../group__heap.html#gad224df78f1fbee942df8adf023e12cf3',1,'mimalloc-doc.h']]], + ['mi_5fheap_5ft_69',['mi_heap_t',['../group__heap.html#ga34a47cde5a5b38c29f1aa3c5e76943c2',1,'mimalloc-doc.h']]], + 
['mi_5fheap_5fvisit_5fblocks_70',['mi_heap_visit_blocks',['../group__analysis.html#ga70c46687dc6e9dc98b232b02646f8bed',1,'mimalloc-doc.h']]], + ['mi_5fheap_5fzalloc_71',['mi_heap_zalloc',['../group__heap.html#gabebc796399619d964d8db77aa835e8c1',1,'mimalloc-doc.h']]], + ['mi_5fheap_5fzalloc_5faligned_72',['mi_heap_zalloc_aligned',['../group__heap.html#ga6466bde8b5712aa34e081a8317f9f471',1,'mimalloc-doc.h']]], + ['mi_5fheap_5fzalloc_5faligned_5fat_73',['mi_heap_zalloc_aligned_at',['../group__heap.html#ga484e3d01cd174f78c7e53370e5a7c819',1,'mimalloc-doc.h']]], + ['mi_5fheap_5fzalloc_5ftp_74',['mi_heap_zalloc_tp',['../group__typed.html#gad6e87e86e994aa14416ae9b5d4c188fe',1,'mimalloc-doc.h']]], + ['mi_5fis_5fin_5fheap_5fregion_75',['mi_is_in_heap_region',['../group__extended.html#ga5f071b10d4df1c3658e04e7fd67a94e6',1,'mimalloc-doc.h']]], + ['mi_5fis_5fredirected_76',['mi_is_redirected',['../group__extended.html#gaad25050b19f30cd79397b227e0157a3f',1,'mimalloc-doc.h']]], + ['mi_5fmalloc_77',['mi_malloc',['../group__malloc.html#gae1dd97b542420c87ae085e822b1229e8',1,'mimalloc-doc.h']]], + ['mi_5fmalloc_5faligned_78',['mi_malloc_aligned',['../group__aligned.html#ga69578ff1a98ca16e1dcd02c0995cd65c',1,'mimalloc-doc.h']]], + ['mi_5fmalloc_5faligned_5fat_79',['mi_malloc_aligned_at',['../group__aligned.html#ga2022f71b95a7cd6cce1b6e07752ae8ca',1,'mimalloc-doc.h']]], + ['mi_5fmalloc_5fgood_5fsize_80',['mi_malloc_good_size',['../group__posix.html#ga9d23ac7885fed7413c11d8e0ffa31071',1,'mimalloc-doc.h']]], + ['mi_5fmalloc_5fsize_81',['mi_malloc_size',['../group__posix.html#ga4531c9e775bb3ae12db57c1ba8a5d7de',1,'mimalloc-doc.h']]], + ['mi_5fmalloc_5fsmall_82',['mi_malloc_small',['../group__extended.html#ga7f050bc6b897da82692174f5fce59cde',1,'mimalloc-doc.h']]], + ['mi_5fmalloc_5ftp_83',['mi_malloc_tp',['../group__typed.html#ga0619a62c5fd886f1016030abe91f0557',1,'mimalloc-doc.h']]], + 
['mi_5fmalloc_5fusable_5fsize_84',['mi_malloc_usable_size',['../group__posix.html#ga06d07cf357bbac5c73ba5d0c0c421e17',1,'mimalloc-doc.h']]], + ['mi_5fmallocn_85',['mi_mallocn',['../group__malloc.html#ga61f46bade3db76ca24aaafedc40de7b6',1,'mimalloc-doc.h']]], + ['mi_5fmallocn_5ftp_86',['mi_mallocn_tp',['../group__typed.html#gae5cb6e0fafc9f23169c5622e077afe8b',1,'mimalloc-doc.h']]], + ['mi_5fmanage_5fos_5fmemory_87',['mi_manage_os_memory',['../group__extended.html#ga4c6486a1fdcd7a423b5f25fe4be8e0cf',1,'mimalloc-doc.h']]], + ['mi_5fmanage_5fos_5fmemory_5fex_88',['mi_manage_os_memory_ex',['../group__extended.html#ga41ce8525d77bbb60f618fa1029994f6e',1,'mimalloc-doc.h']]], + ['mi_5fmbsdup_89',['mi_mbsdup',['../group__posix.html#ga7b82a44094fdec4d2084eb4288a979b0',1,'mimalloc-doc.h']]], + ['mi_5fmemalign_90',['mi_memalign',['../group__posix.html#ga726867f13fd29ca36064954c0285b1d8',1,'mimalloc-doc.h']]], + ['mi_5fnew_91',['mi_new',['../group__cpp.html#ga633d96e3bc7011f960df9f3b2731fc6a',1,'mimalloc-doc.h']]], + ['mi_5fnew_5faligned_92',['mi_new_aligned',['../group__cpp.html#ga79c54da0b4b4ce9fcc11d2f6ef6675f8',1,'mimalloc-doc.h']]], + ['mi_5fnew_5faligned_5fnothrow_93',['mi_new_aligned_nothrow',['../group__cpp.html#ga92ae00b6dd64406c7e64557711ec04b7',1,'mimalloc-doc.h']]], + ['mi_5fnew_5fn_94',['mi_new_n',['../group__cpp.html#gadd11b85c15d21d308386844b5233856c',1,'mimalloc-doc.h']]], + ['mi_5fnew_5fnothrow_95',['mi_new_nothrow',['../group__cpp.html#ga5cb4f120d1f7296074256215aa9a9e54',1,'mimalloc-doc.h']]], + ['mi_5fnew_5frealloc_96',['mi_new_realloc',['../group__cpp.html#ga6867d89baf992728e0cc20a1f47db4d0',1,'mimalloc-doc.h']]], + ['mi_5fnew_5freallocn_97',['mi_new_reallocn',['../group__cpp.html#gaace912ce086682d56f3ce9f7638d9d67',1,'mimalloc-doc.h']]], + ['mi_5foption_5fabandoned_5fpage_5fpurge_98',['mi_option_abandoned_page_purge',['../group__options.html#ggafebf7ed116adb38ae5218bc3ce06884ca11e62ed69200a489a5be955582078c0c',1,'mimalloc-doc.h']]], + 
['mi_5foption_5fabandoned_5freclaim_5fon_5ffree_99',['mi_option_abandoned_reclaim_on_free',['../group__options.html#ggafebf7ed116adb38ae5218bc3ce06884ca009e4b5684922ce664d73d2a8e1698d9',1,'mimalloc-doc.h']]], + ['mi_5foption_5fallow_5flarge_5fos_5fpages_100',['mi_option_allow_large_os_pages',['../group__options.html#ggafebf7ed116adb38ae5218bc3ce06884ca7cc4804ced69004fa42a9a136a9ba556',1,'mimalloc-doc.h']]], + ['mi_5foption_5farena_5feager_5fcommit_101',['mi_option_arena_eager_commit',['../group__options.html#ggafebf7ed116adb38ae5218bc3ce06884cafd0c5ddbc4b59fd8b5216871728167a5',1,'mimalloc-doc.h']]], + ['mi_5foption_5farena_5fpurge_5fmult_102',['mi_option_arena_purge_mult',['../group__options.html#ggafebf7ed116adb38ae5218bc3ce06884ca8236501f1ab45d26e6fd885d191a2b5e',1,'mimalloc-doc.h']]], + ['mi_5foption_5farena_5freserve_103',['mi_option_arena_reserve',['../group__options.html#ggafebf7ed116adb38ae5218bc3ce06884cab1c88e23ae290bbeec824038a97959de',1,'mimalloc-doc.h']]], + ['mi_5foption_5fdestroy_5fon_5fexit_104',['mi_option_destroy_on_exit',['../group__options.html#ggafebf7ed116adb38ae5218bc3ce06884ca6364331e305e7d3c0218b058ff3afc88',1,'mimalloc-doc.h']]], + ['mi_5foption_5fdisable_105',['mi_option_disable',['../group__options.html#gaebf6ff707a2e688ebb1a2296ca564054',1,'mimalloc-doc.h']]], + ['mi_5foption_5fdisallow_5farena_5falloc_106',['mi_option_disallow_arena_alloc',['../group__options.html#ggafebf7ed116adb38ae5218bc3ce06884caeae1696100e4057ffc4182730cc04e40',1,'mimalloc-doc.h']]], + ['mi_5foption_5fdisallow_5fos_5falloc_107',['mi_option_disallow_os_alloc',['../group__options.html#ggafebf7ed116adb38ae5218bc3ce06884cadcfb5a09580361b1be65901d2d812de6',1,'mimalloc-doc.h']]], + ['mi_5foption_5feager_5fcommit_108',['mi_option_eager_commit',['../group__options.html#ggafebf7ed116adb38ae5218bc3ce06884ca1e8de72c93da7ff22d91e1e27b52ac2b',1,'mimalloc-doc.h']]], + 
['mi_5foption_5feager_5fcommit_5fdelay_109',['mi_option_eager_commit_delay',['../group__options.html#ggafebf7ed116adb38ae5218bc3ce06884ca17a190c25be381142d87e0468c4c068c',1,'mimalloc-doc.h']]], + ['mi_5foption_5fenable_110',['mi_option_enable',['../group__options.html#ga04180ae41b0d601421dd62ced40ca050',1,'mimalloc-doc.h']]], + ['mi_5foption_5fget_111',['mi_option_get',['../group__options.html#ga7e8af195cc81d3fa64ccf2662caa565a',1,'mimalloc-doc.h']]], + ['mi_5foption_5fget_5fclamp_112',['mi_option_get_clamp',['../group__options.html#ga96ad9c406338bd314cfe878cfc9bf723',1,'mimalloc-doc.h']]], + ['mi_5foption_5fget_5fsize_113',['mi_option_get_size',['../group__options.html#ga274db5a6ac87cc24ef0b23e7006ed02c',1,'mimalloc-doc.h']]], + ['mi_5foption_5fis_5fenabled_114',['mi_option_is_enabled',['../group__options.html#ga459ad98f18b3fc9275474807fe0ca188',1,'mimalloc-doc.h']]], + ['mi_5foption_5flimit_5fos_5falloc_115',['mi_option_limit_os_alloc',['../group__options.html#ggafebf7ed116adb38ae5218bc3ce06884ca9fa61bd9668479f8452d2195759444cc',1,'mimalloc-doc.h']]], + ['mi_5foption_5fmax_5ferrors_116',['mi_option_max_errors',['../group__options.html#ggafebf7ed116adb38ae5218bc3ce06884caec6ecbe29d46a48205ed8823a8a52a6a',1,'mimalloc-doc.h']]], + ['mi_5foption_5fmax_5fsegment_5freclaim_117',['mi_option_max_segment_reclaim',['../group__options.html#ggafebf7ed116adb38ae5218bc3ce06884caa9ad9005d7017c8c30ad2d6ba31db909',1,'mimalloc-doc.h']]], + ['mi_5foption_5fmax_5fwarnings_118',['mi_option_max_warnings',['../group__options.html#ggafebf7ed116adb38ae5218bc3ce06884caf9595921087e942602ee079158762665',1,'mimalloc-doc.h']]], + ['mi_5foption_5fos_5ftag_119',['mi_option_os_tag',['../group__options.html#ggafebf7ed116adb38ae5218bc3ce06884ca4b74ae2a69e445de6c2361b73c1d14bf',1,'mimalloc-doc.h']]], + ['mi_5foption_5fpurge_5fdecommits_120',['mi_option_purge_decommits',['../group__options.html#ggafebf7ed116adb38ae5218bc3ce06884ca9d15c5e3d2115eef681c17e4dd5ab9a4',1,'mimalloc-doc.h']]], + 
['mi_5foption_5fpurge_5fdelay_121',['mi_option_purge_delay',['../group__options.html#ggafebf7ed116adb38ae5218bc3ce06884cadd351e615acd8563529c20a347be7290',1,'mimalloc-doc.h']]], + ['mi_5foption_5fpurge_5fextend_5fdelay_122',['mi_option_purge_extend_delay',['../group__options.html#ggafebf7ed116adb38ae5218bc3ce06884ca02005f164bdf03f5f00c5be726adf487',1,'mimalloc-doc.h']]], + ['mi_5foption_5freserve_5fhuge_5fos_5fpages_123',['mi_option_reserve_huge_os_pages',['../group__options.html#ggafebf7ed116adb38ae5218bc3ce06884caca7ed041be3b0b9d0b82432c7bf41af2',1,'mimalloc-doc.h']]], + ['mi_5foption_5freserve_5fhuge_5fos_5fpages_5fat_124',['mi_option_reserve_huge_os_pages_at',['../group__options.html#ggafebf7ed116adb38ae5218bc3ce06884caa13e7926d4339d2aa6fbf61d4473fd5c',1,'mimalloc-doc.h']]], + ['mi_5foption_5freserve_5fos_5fmemory_125',['mi_option_reserve_os_memory',['../group__options.html#ggafebf7ed116adb38ae5218bc3ce06884cafbf4999c828cf79a0fb2de65d23f7333',1,'mimalloc-doc.h']]], + ['mi_5foption_5fretry_5fon_5foom_126',['mi_option_retry_on_oom',['../group__options.html#ggafebf7ed116adb38ae5218bc3ce06884ca8f51df355bf6651db899e6085b54865e',1,'mimalloc-doc.h']]], + ['mi_5foption_5fset_127',['mi_option_set',['../group__options.html#gaf84921c32375e25754dc2ee6a911fa60',1,'mimalloc-doc.h']]], + ['mi_5foption_5fset_5fdefault_128',['mi_option_set_default',['../group__options.html#ga7ef623e440e6e5545cb08c94e71e4b90',1,'mimalloc-doc.h']]], + ['mi_5foption_5fset_5fenabled_129',['mi_option_set_enabled',['../group__options.html#ga9a13d05fcb77489cb06d4d017ebd8bed',1,'mimalloc-doc.h']]], + ['mi_5foption_5fset_5fenabled_5fdefault_130',['mi_option_set_enabled_default',['../group__options.html#ga65518b69ec5d32336b50e07f74b3f629',1,'mimalloc-doc.h']]], + ['mi_5foption_5fshow_5ferrors_131',['mi_option_show_errors',['../group__options.html#ggafebf7ed116adb38ae5218bc3ce06884cafbf4822e5c00732c5984b32a032837f0',1,'mimalloc-doc.h']]], + 
['mi_5foption_5fshow_5fstats_132',['mi_option_show_stats',['../group__options.html#ggafebf7ed116adb38ae5218bc3ce06884ca0957ef73b2550764b4840edf48422fda',1,'mimalloc-doc.h']]], + ['mi_5foption_5ft_133',['mi_option_t',['../group__options.html#gafebf7ed116adb38ae5218bc3ce06884c',1,'mimalloc-doc.h']]], + ['mi_5foption_5fuse_5fnuma_5fnodes_134',['mi_option_use_numa_nodes',['../group__options.html#ggafebf7ed116adb38ae5218bc3ce06884ca0ac33a18f6b659fcfaf44efb0bab1b74',1,'mimalloc-doc.h']]], + ['mi_5foption_5fverbose_135',['mi_option_verbose',['../group__options.html#ggafebf7ed116adb38ae5218bc3ce06884ca7c8b7bf5281c581bad64f5daa6442777',1,'mimalloc-doc.h']]], + ['mi_5foption_5fvisit_5fabandoned_136',['mi_option_visit_abandoned',['../group__options.html#ggafebf7ed116adb38ae5218bc3ce06884ca38c67733a3956a1f4eeaca89fab9e78e',1,'mimalloc-doc.h']]], + ['mi_5foutput_5ffun_137',['mi_output_fun',['../group__extended.html#gadf31cea7d0332a81c8b882cbbdbadb8d',1,'mimalloc-doc.h']]], + ['mi_5fposix_5fmemalign_138',['mi_posix_memalign',['../group__posix.html#gacff84f226ba9feb2031b8992e5579447',1,'mimalloc-doc.h']]], + ['mi_5fprocess_5finfo_139',['mi_process_info',['../group__extended.html#ga7d862c2affd5790381da14eb102a364d',1,'mimalloc-doc.h']]], + ['mi_5fpvalloc_140',['mi_pvalloc',['../group__posix.html#ga644bebccdbb2821542dd8c7e7641f476',1,'mimalloc-doc.h']]], + ['mi_5frealloc_141',['mi_realloc',['../group__malloc.html#ga0621af6a5e3aa384e6a1b548958bf583',1,'mimalloc-doc.h']]], + ['mi_5frealloc_5faligned_142',['mi_realloc_aligned',['../group__aligned.html#ga5d7a46d054b4d7abe9d8d2474add2edf',1,'mimalloc-doc.h']]], + ['mi_5frealloc_5faligned_5fat_143',['mi_realloc_aligned_at',['../group__aligned.html#gad06dcf2bb8faadb2c8ea61ee5d24bbf6',1,'mimalloc-doc.h']]], + ['mi_5freallocarr_144',['mi_reallocarr',['../group__posix.html#ga7e1934d60a3e697950eeb48e042bfad5',1,'mimalloc-doc.h']]], + 
['mi_5freallocarray_145',['mi_reallocarray',['../group__posix.html#gadfeccb72748a2f6305474a37d9d57bce',1,'mimalloc-doc.h']]], + ['mi_5freallocf_146',['mi_reallocf',['../group__malloc.html#ga4dc3a4067037b151a64629fe8a332641',1,'mimalloc-doc.h']]], + ['mi_5freallocn_147',['mi_reallocn',['../group__malloc.html#ga8bddfb4a1270a0854bbcf44cb3980467',1,'mimalloc-doc.h']]], + ['mi_5freallocn_5ftp_148',['mi_reallocn_tp',['../group__typed.html#ga1158b49a55dfa81f58a4426a7578f523',1,'mimalloc-doc.h']]], + ['mi_5frealpath_149',['mi_realpath',['../group__malloc.html#ga94c3afcc086e85d75a57e9f76b9b71dd',1,'mimalloc-doc.h']]], + ['mi_5frecalloc_150',['mi_recalloc',['../group__malloc.html#ga23a0fbb452b5dce8e31fab1a1958cacc',1,'mimalloc-doc.h']]], + ['mi_5frecalloc_5faligned_151',['mi_recalloc_aligned',['../group__zeroinit.html#ga3e2169b48683aa0ab64f813fd68d839e',1,'mimalloc-doc.h']]], + ['mi_5frecalloc_5faligned_5fat_152',['mi_recalloc_aligned_at',['../group__zeroinit.html#gaae25e4ddedd4e0fb61b1a8bd5d452750',1,'mimalloc-doc.h']]], + ['mi_5fregister_5fdeferred_5ffree_153',['mi_register_deferred_free',['../group__extended.html#ga3460a6ca91af97be4058f523d3cb8ece',1,'mimalloc-doc.h']]], + ['mi_5fregister_5ferror_154',['mi_register_error',['../group__extended.html#gaa1d55e0e894be240827e5d87ec3a1f45',1,'mimalloc-doc.h']]], + ['mi_5fregister_5foutput_155',['mi_register_output',['../group__extended.html#gae5b17ff027cd2150b43a33040250cf3f',1,'mimalloc-doc.h']]], + ['mi_5freserve_5fhuge_5fos_5fpages_5fat_156',['mi_reserve_huge_os_pages_at',['../group__extended.html#ga7795a13d20087447281858d2c771cca1',1,'mimalloc-doc.h']]], + ['mi_5freserve_5fhuge_5fos_5fpages_5fat_5fex_157',['mi_reserve_huge_os_pages_at_ex',['../group__extended.html#ga591aab1c2bc2ca920e33f0f9f9cb5c52',1,'mimalloc-doc.h']]], + ['mi_5freserve_5fhuge_5fos_5fpages_5finterleave_158',['mi_reserve_huge_os_pages_interleave',['../group__extended.html#ga3132f521fb756fc0e8ec0b74fb58df50',1,'mimalloc-doc.h']]], + 
['mi_5freserve_5fos_5fmemory_159',['mi_reserve_os_memory',['../group__extended.html#ga00ec3324b6b2591c7fe3677baa30a767',1,'mimalloc-doc.h']]], + ['mi_5freserve_5fos_5fmemory_5fex_160',['mi_reserve_os_memory_ex',['../group__extended.html#ga32f519797fd9a81acb4f52d36e6d751b',1,'mimalloc-doc.h']]], + ['mi_5frezalloc_161',['mi_rezalloc',['../group__zeroinit.html#gadfd34cd7b4f2bbda7ae06367a6360756',1,'mimalloc-doc.h']]], + ['mi_5frezalloc_5faligned_162',['mi_rezalloc_aligned',['../group__zeroinit.html#ga4d02404fe1e7db00beb65f185e012caa',1,'mimalloc-doc.h']]], + ['mi_5frezalloc_5faligned_5fat_163',['mi_rezalloc_aligned_at',['../group__zeroinit.html#ga6843a88285bbfcc3bdfccc60aafd1270',1,'mimalloc-doc.h']]], + ['mi_5fsmall_5fsize_5fmax_164',['MI_SMALL_SIZE_MAX',['../group__extended.html#ga1ea64283508718d9d645c38efc2f4305',1,'mimalloc-doc.h']]], + ['mi_5fstats_5fmerge_165',['mi_stats_merge',['../group__extended.html#ga854b1de8cb067c7316286c28b2fcd3d1',1,'mimalloc-doc.h']]], + ['mi_5fstats_5fprint_166',['mi_stats_print',['../group__extended.html#ga2d126e5c62d3badc35445e5d84166df2',1,'mimalloc-doc.h']]], + ['mi_5fstats_5fprint_5fout_167',['mi_stats_print_out',['../group__extended.html#ga537f13b299ddf801e49a5a94fde02c79',1,'mimalloc-doc.h']]], + ['mi_5fstats_5freset_168',['mi_stats_reset',['../group__extended.html#ga3bb8468b8cfcc6e2a61d98aee85c5f99',1,'mimalloc-doc.h']]], + ['mi_5fstl_5fallocator_169',['mi_stl_allocator',['../group__cpp.html#structmi__stl__allocator',1,'']]], + ['mi_5fstrdup_170',['mi_strdup',['../group__malloc.html#ga245ac90ebc2cfdd17de599e5fea59889',1,'mimalloc-doc.h']]], + ['mi_5fstrndup_171',['mi_strndup',['../group__malloc.html#ga486d0d26b3b3794f6d1cdb41a9aed92d',1,'mimalloc-doc.h']]], + ['mi_5fsubproc_5fadd_5fcurrent_5fthread_172',['mi_subproc_add_current_thread',['../group__extended.html#gadbc53414eb68b275588ec001ce1ddc7c',1,'mimalloc-doc.h']]], + 
['mi_5fsubproc_5fdelete_173',['mi_subproc_delete',['../group__extended.html#gaa7d263e9429bac9ac8345c9d25de610e',1,'mimalloc-doc.h']]], + ['mi_5fsubproc_5fid_5ft_174',['mi_subproc_id_t',['../group__extended.html#ga8c0bcd1fee27c7641e9c3c0d991b3b7d',1,'mimalloc-doc.h']]], + ['mi_5fsubproc_5fmain_175',['mi_subproc_main',['../group__extended.html#ga2ecba0d7ebdc99e71bb985c4a1609806',1,'mimalloc-doc.h']]], + ['mi_5fsubproc_5fnew_176',['mi_subproc_new',['../group__extended.html#ga8068cac328e41fa2170faef707315243',1,'mimalloc-doc.h']]], + ['mi_5fthread_5fdone_177',['mi_thread_done',['../group__extended.html#ga0ae4581e85453456a0d658b2b98bf7bf',1,'mimalloc-doc.h']]], + ['mi_5fthread_5finit_178',['mi_thread_init',['../group__extended.html#gaf8e73efc2cbca9ebfdfb166983a04c17',1,'mimalloc-doc.h']]], + ['mi_5fthread_5fstats_5fprint_5fout_179',['mi_thread_stats_print_out',['../group__extended.html#gab1dac8476c46cb9eecab767eb40c1525',1,'mimalloc-doc.h']]], + ['mi_5fusable_5fsize_180',['mi_usable_size',['../group__extended.html#ga089c859d9eddc5f9b4bd946cd53cebee',1,'mimalloc-doc.h']]], + ['mi_5fvalloc_181',['mi_valloc',['../group__posix.html#ga50cafb9722020402f065de93799f64ca',1,'mimalloc-doc.h']]], + ['mi_5fwcsdup_182',['mi_wcsdup',['../group__posix.html#gaa9fd7f25c9ac3a20e89b33bd6e383fcf',1,'mimalloc-doc.h']]], + ['mi_5fwdupenv_5fs_183',['mi_wdupenv_s',['../group__posix.html#ga6ac6a6a8f3c96f1af24bb8d0439cbbd1',1,'mimalloc-doc.h']]], + ['mi_5fzalloc_184',['mi_zalloc',['../group__malloc.html#gae6e38c4403247a7b40d80419e093bfb8',1,'mimalloc-doc.h']]], + ['mi_5fzalloc_5faligned_185',['mi_zalloc_aligned',['../group__aligned.html#gaac7d0beb782f9b9ac31f47492b130f82',1,'mimalloc-doc.h']]], + ['mi_5fzalloc_5faligned_5fat_186',['mi_zalloc_aligned_at',['../group__aligned.html#ga7c1778805ce50ebbf02ccbd5e39d5dba',1,'mimalloc-doc.h']]], + ['mi_5fzalloc_5fsmall_187',['mi_zalloc_small',['../group__extended.html#ga51c47637e81df0e2f13a2d7a2dec123e',1,'mimalloc-doc.h']]], + 
['mi_5fzalloc_5ftp_188',['mi_zalloc_tp',['../group__typed.html#gac77a61bdaf680a803785fe307820b48c',1,'mimalloc-doc.h']]] ]; diff --git a/depends/mimalloc/docs/search/all_a.js b/depends/mimalloc/docs/search/all_a.js index 699b5456e717..dee0ab7ccfc4 100644 --- a/depends/mimalloc/docs/search/all_a.js +++ b/depends/mimalloc/docs/search/all_a.js @@ -1,4 +1,5 @@ var searchData= [ - ['typed_20macros_167',['Typed Macros',['../group__typed.html',1,'']]] + ['options_0',['Options',['../environment.html',1,'Environment Options'],['../group__options.html',1,'Runtime Options']]], + ['overriding_20malloc_1',['Overriding Malloc',['../overrides.html',1,'']]] ]; diff --git a/depends/mimalloc/docs/search/all_b.js b/depends/mimalloc/docs/search/all_b.js index 73a2671d9261..44ef0a69f929 100644 --- a/depends/mimalloc/docs/search/all_b.js +++ b/depends/mimalloc/docs/search/all_b.js @@ -1,5 +1,5 @@ var searchData= [ - ['used_168',['used',['../group__analysis.html#ab820302c5cd0df133eb8e51650a008b4',1,'mi_heap_area_t']]], - ['using_20the_20library_169',['Using the library',['../using.html',1,'']]] + ['performance_0',['Performance',['../bench.html',1,'']]], + ['posix_1',['Posix',['../group__posix.html',1,'']]] ]; diff --git a/depends/mimalloc/docs/search/all_c.js b/depends/mimalloc/docs/search/all_c.js index 192fb1cb78b0..d0a080fe2dcf 100644 --- a/depends/mimalloc/docs/search/all_c.js +++ b/depends/mimalloc/docs/search/all_c.js @@ -1,4 +1,6 @@ var searchData= [ - ['zero_20initialized_20re_2dallocation_170',['Zero initialized re-allocation',['../group__zeroinit.html',1,'']]] + ['re_20allocation_0',['Zero initialized re-allocation',['../group__zeroinit.html',1,'']]], + ['reserved_1',['reserved',['../group__analysis.html#ae848a3e6840414891035423948ca0383',1,'mi_heap_area_t']]], + ['runtime_20options_2',['Runtime Options',['../group__options.html',1,'']]] ]; diff --git a/depends/mimalloc/docs/search/all_d.js b/depends/mimalloc/docs/search/all_d.js index 2b9b4cea09de..2cce101acf5c 100644 --- 
a/depends/mimalloc/docs/search/all_d.js +++ b/depends/mimalloc/docs/search/all_d.js @@ -1,4 +1,5 @@ var searchData= [ - ['zero_20initialized_20re_2dallocation',['Zero initialized re-allocation',['../group__zeroinit.html',1,'']]] + ['the_20library_0',['Using the library',['../using.html',1,'']]], + ['typed_20macros_1',['Typed Macros',['../group__typed.html',1,'']]] ]; diff --git a/depends/mimalloc/docs/search/all_e.js b/depends/mimalloc/docs/search/all_e.js new file mode 100644 index 000000000000..4c8c2e890ffe --- /dev/null +++ b/depends/mimalloc/docs/search/all_e.js @@ -0,0 +1,5 @@ +var searchData= +[ + ['used_0',['used',['../group__analysis.html#ab820302c5cd0df133eb8e51650a008b4',1,'mi_heap_area_t']]], + ['using_20the_20library_1',['Using the library',['../using.html',1,'']]] +]; diff --git a/depends/mimalloc/docs/search/all_f.js b/depends/mimalloc/docs/search/all_f.js new file mode 100644 index 000000000000..8f445b9fc8c2 --- /dev/null +++ b/depends/mimalloc/docs/search/all_f.js @@ -0,0 +1,4 @@ +var searchData= +[ + ['wrappers_0',['C++ wrappers',['../group__cpp.html',1,'']]] +]; diff --git a/depends/mimalloc/docs/search/classes_0.js b/depends/mimalloc/docs/search/classes_0.js index e3770fb49170..5ba187064f9a 100644 --- a/depends/mimalloc/docs/search/classes_0.js +++ b/depends/mimalloc/docs/search/classes_0.js @@ -1,5 +1,5 @@ var searchData= [ - ['mi_5fheap_5farea_5ft_171',['mi_heap_area_t',['../group__analysis.html#structmi__heap__area__t',1,'']]], - ['mi_5fstl_5fallocator_172',['mi_stl_allocator',['../group__cpp.html#structmi__stl__allocator',1,'']]] + ['mi_5fheap_5farea_5ft_0',['mi_heap_area_t',['../group__analysis.html#structmi__heap__area__t',1,'']]], + ['mi_5fstl_5fallocator_1',['mi_stl_allocator',['../group__cpp.html#structmi__stl__allocator',1,'']]] ]; diff --git a/depends/mimalloc/docs/search/close.svg b/depends/mimalloc/docs/search/close.svg new file mode 100644 index 000000000000..337d6cc13298 --- /dev/null +++ b/depends/mimalloc/docs/search/close.svg @@ 
-0,0 +1,18 @@ + + + + + + diff --git a/depends/mimalloc/docs/search/enums_0.js b/depends/mimalloc/docs/search/enums_0.js index 6f1f38338b6c..9bc2f56b2b0c 100644 --- a/depends/mimalloc/docs/search/enums_0.js +++ b/depends/mimalloc/docs/search/enums_0.js @@ -1,4 +1,4 @@ var searchData= [ - ['mi_5foption_5ft_296',['mi_option_t',['../group__options.html#gafebf7ed116adb38ae5218bc3ce06884c',1,'mimalloc-doc.h']]] + ['mi_5foption_5ft_0',['mi_option_t',['../group__options.html#gafebf7ed116adb38ae5218bc3ce06884c',1,'mimalloc-doc.h']]] ]; diff --git a/depends/mimalloc/docs/search/enumvalues_0.js b/depends/mimalloc/docs/search/enumvalues_0.js index 1aca63bbee65..cd7bb4196726 100644 --- a/depends/mimalloc/docs/search/enumvalues_0.js +++ b/depends/mimalloc/docs/search/enumvalues_0.js @@ -1,4 +1,4 @@ var searchData= [ - ['_5fmi_5foption_5flast_297',['_mi_option_last',['../group__options.html#ggafebf7ed116adb38ae5218bc3ce06884ca5b4357b74be0d87568036c32eb1a2e4a',1,'mimalloc-doc.h']]] + ['_5fmi_5foption_5flast_0',['_mi_option_last',['../group__options.html#ggafebf7ed116adb38ae5218bc3ce06884ca5b4357b74be0d87568036c32eb1a2e4a',1,'mimalloc-doc.h']]] ]; diff --git a/depends/mimalloc/docs/search/enumvalues_1.js b/depends/mimalloc/docs/search/enumvalues_1.js index bd525bb854f3..d40f943b33d6 100644 --- a/depends/mimalloc/docs/search/enumvalues_1.js +++ b/depends/mimalloc/docs/search/enumvalues_1.js @@ -1,19 +1,31 @@ var searchData= [ - ['mi_5foption_5feager_5fcommit_298',['mi_option_eager_commit',['../group__options.html#ggafebf7ed116adb38ae5218bc3ce06884ca1e8de72c93da7ff22d91e1e27b52ac2b',1,'mimalloc-doc.h']]], - ['mi_5foption_5feager_5fcommit_5fdelay_299',['mi_option_eager_commit_delay',['../group__options.html#ggafebf7ed116adb38ae5218bc3ce06884ca17a190c25be381142d87e0468c4c068c',1,'mimalloc-doc.h']]], - 
['mi_5foption_5feager_5fregion_5fcommit_300',['mi_option_eager_region_commit',['../group__options.html#ggafebf7ed116adb38ae5218bc3ce06884ca32ce97ece29f69e82579679cf8a307ad',1,'mimalloc-doc.h']]], - ['mi_5foption_5flarge_5fos_5fpages_301',['mi_option_large_os_pages',['../group__options.html#ggafebf7ed116adb38ae5218bc3ce06884ca4192d491200d0055df0554d4cf65054e',1,'mimalloc-doc.h']]], - ['mi_5foption_5fos_5ftag_302',['mi_option_os_tag',['../group__options.html#ggafebf7ed116adb38ae5218bc3ce06884ca4b74ae2a69e445de6c2361b73c1d14bf',1,'mimalloc-doc.h']]], - ['mi_5foption_5fpage_5freset_303',['mi_option_page_reset',['../group__options.html#ggafebf7ed116adb38ae5218bc3ce06884cada854dd272c66342f18a93ee254a2968',1,'mimalloc-doc.h']]], - ['mi_5foption_5freserve_5fhuge_5fos_5fpages_304',['mi_option_reserve_huge_os_pages',['../group__options.html#ggafebf7ed116adb38ae5218bc3ce06884caca7ed041be3b0b9d0b82432c7bf41af2',1,'mimalloc-doc.h']]], - ['mi_5foption_5freserve_5fhuge_5fos_5fpages_5fat_305',['mi_option_reserve_huge_os_pages_at',['../group__options.html#ggafebf7ed116adb38ae5218bc3ce06884caa13e7926d4339d2aa6fbf61d4473fd5c',1,'mimalloc-doc.h']]], - ['mi_5foption_5freset_5fdecommits_306',['mi_option_reset_decommits',['../group__options.html#ggafebf7ed116adb38ae5218bc3ce06884cac81ee965b130fa81238913a3c239d536',1,'mimalloc-doc.h']]], - ['mi_5foption_5freset_5fdelay_307',['mi_option_reset_delay',['../group__options.html#ggafebf7ed116adb38ae5218bc3ce06884ca154fe170131d5212cff57e22b99523c5',1,'mimalloc-doc.h']]], - ['mi_5foption_5fsegment_5fcache_308',['mi_option_segment_cache',['../group__options.html#ggafebf7ed116adb38ae5218bc3ce06884ca2ecbe7ef32f5c84de3739aa4f0b805a1',1,'mimalloc-doc.h']]], - ['mi_5foption_5fsegment_5freset_309',['mi_option_segment_reset',['../group__options.html#ggafebf7ed116adb38ae5218bc3ce06884cafb121d30d87591850d5410ccc3a95c6d',1,'mimalloc-doc.h']]], - 
['mi_5foption_5fshow_5ferrors_310',['mi_option_show_errors',['../group__options.html#ggafebf7ed116adb38ae5218bc3ce06884cafbf4822e5c00732c5984b32a032837f0',1,'mimalloc-doc.h']]], - ['mi_5foption_5fshow_5fstats_311',['mi_option_show_stats',['../group__options.html#ggafebf7ed116adb38ae5218bc3ce06884ca0957ef73b2550764b4840edf48422fda',1,'mimalloc-doc.h']]], - ['mi_5foption_5fuse_5fnuma_5fnodes_312',['mi_option_use_numa_nodes',['../group__options.html#ggafebf7ed116adb38ae5218bc3ce06884ca0ac33a18f6b659fcfaf44efb0bab1b74',1,'mimalloc-doc.h']]], - ['mi_5foption_5fverbose_313',['mi_option_verbose',['../group__options.html#ggafebf7ed116adb38ae5218bc3ce06884ca7c8b7bf5281c581bad64f5daa6442777',1,'mimalloc-doc.h']]] + ['mi_5foption_5fabandoned_5fpage_5fpurge_0',['mi_option_abandoned_page_purge',['../group__options.html#ggafebf7ed116adb38ae5218bc3ce06884ca11e62ed69200a489a5be955582078c0c',1,'mimalloc-doc.h']]], + ['mi_5foption_5fabandoned_5freclaim_5fon_5ffree_1',['mi_option_abandoned_reclaim_on_free',['../group__options.html#ggafebf7ed116adb38ae5218bc3ce06884ca009e4b5684922ce664d73d2a8e1698d9',1,'mimalloc-doc.h']]], + ['mi_5foption_5fallow_5flarge_5fos_5fpages_2',['mi_option_allow_large_os_pages',['../group__options.html#ggafebf7ed116adb38ae5218bc3ce06884ca7cc4804ced69004fa42a9a136a9ba556',1,'mimalloc-doc.h']]], + ['mi_5foption_5farena_5feager_5fcommit_3',['mi_option_arena_eager_commit',['../group__options.html#ggafebf7ed116adb38ae5218bc3ce06884cafd0c5ddbc4b59fd8b5216871728167a5',1,'mimalloc-doc.h']]], + ['mi_5foption_5farena_5fpurge_5fmult_4',['mi_option_arena_purge_mult',['../group__options.html#ggafebf7ed116adb38ae5218bc3ce06884ca8236501f1ab45d26e6fd885d191a2b5e',1,'mimalloc-doc.h']]], + ['mi_5foption_5farena_5freserve_5',['mi_option_arena_reserve',['../group__options.html#ggafebf7ed116adb38ae5218bc3ce06884cab1c88e23ae290bbeec824038a97959de',1,'mimalloc-doc.h']]], + 
['mi_5foption_5fdestroy_5fon_5fexit_6',['mi_option_destroy_on_exit',['../group__options.html#ggafebf7ed116adb38ae5218bc3ce06884ca6364331e305e7d3c0218b058ff3afc88',1,'mimalloc-doc.h']]], + ['mi_5foption_5fdisallow_5farena_5falloc_7',['mi_option_disallow_arena_alloc',['../group__options.html#ggafebf7ed116adb38ae5218bc3ce06884caeae1696100e4057ffc4182730cc04e40',1,'mimalloc-doc.h']]], + ['mi_5foption_5fdisallow_5fos_5falloc_8',['mi_option_disallow_os_alloc',['../group__options.html#ggafebf7ed116adb38ae5218bc3ce06884cadcfb5a09580361b1be65901d2d812de6',1,'mimalloc-doc.h']]], + ['mi_5foption_5feager_5fcommit_9',['mi_option_eager_commit',['../group__options.html#ggafebf7ed116adb38ae5218bc3ce06884ca1e8de72c93da7ff22d91e1e27b52ac2b',1,'mimalloc-doc.h']]], + ['mi_5foption_5feager_5fcommit_5fdelay_10',['mi_option_eager_commit_delay',['../group__options.html#ggafebf7ed116adb38ae5218bc3ce06884ca17a190c25be381142d87e0468c4c068c',1,'mimalloc-doc.h']]], + ['mi_5foption_5flimit_5fos_5falloc_11',['mi_option_limit_os_alloc',['../group__options.html#ggafebf7ed116adb38ae5218bc3ce06884ca9fa61bd9668479f8452d2195759444cc',1,'mimalloc-doc.h']]], + ['mi_5foption_5fmax_5ferrors_12',['mi_option_max_errors',['../group__options.html#ggafebf7ed116adb38ae5218bc3ce06884caec6ecbe29d46a48205ed8823a8a52a6a',1,'mimalloc-doc.h']]], + ['mi_5foption_5fmax_5fsegment_5freclaim_13',['mi_option_max_segment_reclaim',['../group__options.html#ggafebf7ed116adb38ae5218bc3ce06884caa9ad9005d7017c8c30ad2d6ba31db909',1,'mimalloc-doc.h']]], + ['mi_5foption_5fmax_5fwarnings_14',['mi_option_max_warnings',['../group__options.html#ggafebf7ed116adb38ae5218bc3ce06884caf9595921087e942602ee079158762665',1,'mimalloc-doc.h']]], + ['mi_5foption_5fos_5ftag_15',['mi_option_os_tag',['../group__options.html#ggafebf7ed116adb38ae5218bc3ce06884ca4b74ae2a69e445de6c2361b73c1d14bf',1,'mimalloc-doc.h']]], + 
['mi_5foption_5fpurge_5fdecommits_16',['mi_option_purge_decommits',['../group__options.html#ggafebf7ed116adb38ae5218bc3ce06884ca9d15c5e3d2115eef681c17e4dd5ab9a4',1,'mimalloc-doc.h']]], + ['mi_5foption_5fpurge_5fdelay_17',['mi_option_purge_delay',['../group__options.html#ggafebf7ed116adb38ae5218bc3ce06884cadd351e615acd8563529c20a347be7290',1,'mimalloc-doc.h']]], + ['mi_5foption_5fpurge_5fextend_5fdelay_18',['mi_option_purge_extend_delay',['../group__options.html#ggafebf7ed116adb38ae5218bc3ce06884ca02005f164bdf03f5f00c5be726adf487',1,'mimalloc-doc.h']]], + ['mi_5foption_5freserve_5fhuge_5fos_5fpages_19',['mi_option_reserve_huge_os_pages',['../group__options.html#ggafebf7ed116adb38ae5218bc3ce06884caca7ed041be3b0b9d0b82432c7bf41af2',1,'mimalloc-doc.h']]], + ['mi_5foption_5freserve_5fhuge_5fos_5fpages_5fat_20',['mi_option_reserve_huge_os_pages_at',['../group__options.html#ggafebf7ed116adb38ae5218bc3ce06884caa13e7926d4339d2aa6fbf61d4473fd5c',1,'mimalloc-doc.h']]], + ['mi_5foption_5freserve_5fos_5fmemory_21',['mi_option_reserve_os_memory',['../group__options.html#ggafebf7ed116adb38ae5218bc3ce06884cafbf4999c828cf79a0fb2de65d23f7333',1,'mimalloc-doc.h']]], + ['mi_5foption_5fretry_5fon_5foom_22',['mi_option_retry_on_oom',['../group__options.html#ggafebf7ed116adb38ae5218bc3ce06884ca8f51df355bf6651db899e6085b54865e',1,'mimalloc-doc.h']]], + ['mi_5foption_5fshow_5ferrors_23',['mi_option_show_errors',['../group__options.html#ggafebf7ed116adb38ae5218bc3ce06884cafbf4822e5c00732c5984b32a032837f0',1,'mimalloc-doc.h']]], + ['mi_5foption_5fshow_5fstats_24',['mi_option_show_stats',['../group__options.html#ggafebf7ed116adb38ae5218bc3ce06884ca0957ef73b2550764b4840edf48422fda',1,'mimalloc-doc.h']]], + ['mi_5foption_5fuse_5fnuma_5fnodes_25',['mi_option_use_numa_nodes',['../group__options.html#ggafebf7ed116adb38ae5218bc3ce06884ca0ac33a18f6b659fcfaf44efb0bab1b74',1,'mimalloc-doc.h']]], + 
['mi_5foption_5fverbose_26',['mi_option_verbose',['../group__options.html#ggafebf7ed116adb38ae5218bc3ce06884ca7c8b7bf5281c581bad64f5daa6442777',1,'mimalloc-doc.h']]], + ['mi_5foption_5fvisit_5fabandoned_27',['mi_option_visit_abandoned',['../group__options.html#ggafebf7ed116adb38ae5218bc3ce06884ca38c67733a3956a1f4eeaca89fab9e78e',1,'mimalloc-doc.h']]] ]; diff --git a/depends/mimalloc/docs/search/functions_0.js b/depends/mimalloc/docs/search/functions_0.js index b44917a5be23..8899d6e4ba47 100644 --- a/depends/mimalloc/docs/search/functions_0.js +++ b/depends/mimalloc/docs/search/functions_0.js @@ -1,116 +1,138 @@ var searchData= [ - ['mi_5f_5fposix_5fmemalign_173',['mi__posix_memalign',['../group__posix.html#gad5a69c8fea96aa2b7a7c818c2130090a',1,'mimalloc-doc.h']]], - ['mi_5faligned_5falloc_174',['mi_aligned_alloc',['../group__posix.html#ga1326d2e4388630b5f81ca7206318b8e5',1,'mimalloc-doc.h']]], - ['mi_5fcalloc_175',['mi_calloc',['../group__malloc.html#ga97fedb4f7107c592fd7f0f0a8949a57d',1,'mimalloc-doc.h']]], - ['mi_5fcalloc_5faligned_176',['mi_calloc_aligned',['../group__aligned.html#ga53dddb4724042a90315b94bc268fb4c9',1,'mimalloc-doc.h']]], - ['mi_5fcalloc_5faligned_5fat_177',['mi_calloc_aligned_at',['../group__aligned.html#ga08647c4593f3b2eef24a919a73eba3a3',1,'mimalloc-doc.h']]], - ['mi_5fcfree_178',['mi_cfree',['../group__posix.html#ga705dc7a64bffacfeeb0141501a5c35d7',1,'mimalloc-doc.h']]], - ['mi_5fcheck_5fowned_179',['mi_check_owned',['../group__analysis.html#ga628c237489c2679af84a4d0d143b3dd5',1,'mimalloc-doc.h']]], - ['mi_5fcollect_180',['mi_collect',['../group__extended.html#ga421430e2226d7d468529cec457396756',1,'mimalloc-doc.h']]], - ['mi_5fexpand_181',['mi_expand',['../group__malloc.html#gaaee66a1d483c3e28f585525fb96707e4',1,'mimalloc-doc.h']]], - ['mi_5ffree_182',['mi_free',['../group__malloc.html#gaf2c7b89c327d1f60f59e68b9ea644d95',1,'mimalloc-doc.h']]], - 
['mi_5ffree_5faligned_183',['mi_free_aligned',['../group__posix.html#ga0d28d5cf61e6bfbb18c63092939fe5c9',1,'mimalloc-doc.h']]], - ['mi_5ffree_5fsize_184',['mi_free_size',['../group__posix.html#gae01389eedab8d67341ff52e2aad80ebb',1,'mimalloc-doc.h']]], - ['mi_5ffree_5fsize_5faligned_185',['mi_free_size_aligned',['../group__posix.html#ga72e9d7ffb5fe94d69bc722c8506e27bc',1,'mimalloc-doc.h']]], - ['mi_5fgood_5fsize_186',['mi_good_size',['../group__extended.html#gac057927cd06c854b45fe7847e921bd47',1,'mimalloc-doc.h']]], - ['mi_5fheap_5fcalloc_187',['mi_heap_calloc',['../group__heap.html#gaa6702b3c48e9e53e50e81b36f5011d55',1,'mimalloc-doc.h']]], - ['mi_5fheap_5fcalloc_5faligned_188',['mi_heap_calloc_aligned',['../group__heap.html#ga4af03a6e2b93fae77424d93f889705c3',1,'mimalloc-doc.h']]], - ['mi_5fheap_5fcalloc_5faligned_5fat_189',['mi_heap_calloc_aligned_at',['../group__heap.html#ga08ca6419a5c057a4d965868998eef487',1,'mimalloc-doc.h']]], - ['mi_5fheap_5fcheck_5fowned_190',['mi_heap_check_owned',['../group__analysis.html#ga0d67c1789faaa15ff366c024fcaf6377',1,'mimalloc-doc.h']]], - ['mi_5fheap_5fcollect_191',['mi_heap_collect',['../group__heap.html#ga7922f7495cde30b1984d0e6072419298',1,'mimalloc-doc.h']]], - ['mi_5fheap_5fcontains_5fblock_192',['mi_heap_contains_block',['../group__analysis.html#gaa862aa8ed8d57d84cae41fc1022d71af',1,'mimalloc-doc.h']]], - ['mi_5fheap_5fdelete_193',['mi_heap_delete',['../group__heap.html#ga2ab1af8d438819b55319c7ef51d1e409',1,'mimalloc-doc.h']]], - ['mi_5fheap_5fdestroy_194',['mi_heap_destroy',['../group__heap.html#ga9f9c0844edb9717f4feacd79116b8e0d',1,'mimalloc-doc.h']]], - ['mi_5fheap_5fget_5fbacking_195',['mi_heap_get_backing',['../group__heap.html#ga5d03fbe062ffcf38f0f417fd968357fc',1,'mimalloc-doc.h']]], - ['mi_5fheap_5fget_5fdefault_196',['mi_heap_get_default',['../group__heap.html#ga8db4cbb87314a989a9a187464d6b5e05',1,'mimalloc-doc.h']]], - 
['mi_5fheap_5fmalloc_197',['mi_heap_malloc',['../group__heap.html#ga9cbed01e42c0647907295de92c3fa296',1,'mimalloc-doc.h']]], - ['mi_5fheap_5fmalloc_5faligned_198',['mi_heap_malloc_aligned',['../group__heap.html#gab5b87e1805306f70df38789fcfcf6653',1,'mimalloc-doc.h']]], - ['mi_5fheap_5fmalloc_5faligned_5fat_199',['mi_heap_malloc_aligned_at',['../group__heap.html#ga23acd7680fb0976dde3783254c6c874b',1,'mimalloc-doc.h']]], - ['mi_5fheap_5fmalloc_5fsmall_200',['mi_heap_malloc_small',['../group__heap.html#gaa1a1c7a1f4da6826b5a25b70ef878368',1,'mimalloc-doc.h']]], - ['mi_5fheap_5fmallocn_201',['mi_heap_mallocn',['../group__heap.html#ga851da6c43fe0b71c1376cee8aef90db0',1,'mimalloc-doc.h']]], - ['mi_5fheap_5fnew_202',['mi_heap_new',['../group__heap.html#ga766f672ba56f2fbfeb9d9dbb0b7f6b11',1,'mimalloc-doc.h']]], - ['mi_5fheap_5frealloc_203',['mi_heap_realloc',['../group__heap.html#gaaef3395f66be48f37bdc8322509c5d81',1,'mimalloc-doc.h']]], - ['mi_5fheap_5frealloc_5faligned_204',['mi_heap_realloc_aligned',['../group__heap.html#gafc603b696bd14cae6da28658f950d98c',1,'mimalloc-doc.h']]], - ['mi_5fheap_5frealloc_5faligned_5fat_205',['mi_heap_realloc_aligned_at',['../group__heap.html#gaf96c788a1bf553fe2d371de9365e047c',1,'mimalloc-doc.h']]], - ['mi_5fheap_5freallocf_206',['mi_heap_reallocf',['../group__heap.html#ga4a21070eb4e7cce018133c8d5f4b0527',1,'mimalloc-doc.h']]], - ['mi_5fheap_5freallocn_207',['mi_heap_reallocn',['../group__heap.html#gac74e94ad9b0c9b57c1c4d88b8825b7a8',1,'mimalloc-doc.h']]], - ['mi_5fheap_5frealpath_208',['mi_heap_realpath',['../group__heap.html#ga00e95ba1e01acac3cfd95bb7a357a6f0',1,'mimalloc-doc.h']]], - ['mi_5fheap_5frecalloc_209',['mi_heap_recalloc',['../group__zeroinit.html#ga8648c5fbb22a80f0262859099f06dfbd',1,'mimalloc-doc.h']]], - ['mi_5fheap_5frecalloc_5faligned_210',['mi_heap_recalloc_aligned',['../group__zeroinit.html#ga9f3f999396c8f77ca5e80e7b40ac29e3',1,'mimalloc-doc.h']]], - 
['mi_5fheap_5frecalloc_5faligned_5fat_211',['mi_heap_recalloc_aligned_at',['../group__zeroinit.html#ga496452c96f1de8c500be9fddf52edaf7',1,'mimalloc-doc.h']]], - ['mi_5fheap_5frezalloc_212',['mi_heap_rezalloc',['../group__zeroinit.html#gacfad83f14eb5d6a42a497a898e19fc76',1,'mimalloc-doc.h']]], - ['mi_5fheap_5frezalloc_5faligned_213',['mi_heap_rezalloc_aligned',['../group__zeroinit.html#ga375fa8a611c51905e592d5d467c49664',1,'mimalloc-doc.h']]], - ['mi_5fheap_5frezalloc_5faligned_5fat_214',['mi_heap_rezalloc_aligned_at',['../group__zeroinit.html#gac90da54fa7e5d10bdc97ce0b51dce2eb',1,'mimalloc-doc.h']]], - ['mi_5fheap_5fset_5fdefault_215',['mi_heap_set_default',['../group__heap.html#gab8631ec88c8d26641b68b5d25dcd4422',1,'mimalloc-doc.h']]], - ['mi_5fheap_5fstrdup_216',['mi_heap_strdup',['../group__heap.html#ga139d6b09dbf50c3c2523d0f4d1cfdeb5',1,'mimalloc-doc.h']]], - ['mi_5fheap_5fstrndup_217',['mi_heap_strndup',['../group__heap.html#ga8e3dbd46650dd26573cf307a2c8f1f5a',1,'mimalloc-doc.h']]], - ['mi_5fheap_5fvisit_5fblocks_218',['mi_heap_visit_blocks',['../group__analysis.html#ga70c46687dc6e9dc98b232b02646f8bed',1,'mimalloc-doc.h']]], - ['mi_5fheap_5fzalloc_219',['mi_heap_zalloc',['../group__heap.html#ga903104592c8ed53417a3762da6241133',1,'mimalloc-doc.h']]], - ['mi_5fheap_5fzalloc_5faligned_220',['mi_heap_zalloc_aligned',['../group__heap.html#gaa450a59c6c7ae5fdbd1c2b80a8329ef0',1,'mimalloc-doc.h']]], - ['mi_5fheap_5fzalloc_5faligned_5fat_221',['mi_heap_zalloc_aligned_at',['../group__heap.html#ga45fb43a62776fbebbdf1edd99b527954',1,'mimalloc-doc.h']]], - ['mi_5fis_5fin_5fheap_5fregion_222',['mi_is_in_heap_region',['../group__extended.html#ga5f071b10d4df1c3658e04e7fd67a94e6',1,'mimalloc-doc.h']]], - ['mi_5fis_5fredirected_223',['mi_is_redirected',['../group__extended.html#gaad25050b19f30cd79397b227e0157a3f',1,'mimalloc-doc.h']]], - ['mi_5fmalloc_224',['mi_malloc',['../group__malloc.html#ga3406e8b168bc74c8637b11571a6da83a',1,'mimalloc-doc.h']]], - 
['mi_5fmalloc_5faligned_225',['mi_malloc_aligned',['../group__aligned.html#ga68930196751fa2cca9e1fd0d71bade56',1,'mimalloc-doc.h']]], - ['mi_5fmalloc_5faligned_5fat_226',['mi_malloc_aligned_at',['../group__aligned.html#ga5850da130c936bd77db039dcfbc8295d',1,'mimalloc-doc.h']]], - ['mi_5fmalloc_5fsize_227',['mi_malloc_size',['../group__posix.html#ga4531c9e775bb3ae12db57c1ba8a5d7de',1,'mimalloc-doc.h']]], - ['mi_5fmalloc_5fsmall_228',['mi_malloc_small',['../group__extended.html#ga7136c2e55cb22c98ecf95d08d6debb99',1,'mimalloc-doc.h']]], - ['mi_5fmalloc_5fusable_5fsize_229',['mi_malloc_usable_size',['../group__posix.html#ga06d07cf357bbac5c73ba5d0c0c421e17',1,'mimalloc-doc.h']]], - ['mi_5fmallocn_230',['mi_mallocn',['../group__malloc.html#ga0b05e2bf0f73e7401ae08597ff782ac6',1,'mimalloc-doc.h']]], - ['mi_5fmanage_5fos_5fmemory_231',['mi_manage_os_memory',['../group__extended.html#ga4c6486a1fdcd7a423b5f25fe4be8e0cf',1,'mimalloc-doc.h']]], - ['mi_5fmemalign_232',['mi_memalign',['../group__posix.html#gaab7fa71ea93b96873f5d9883db57d40e',1,'mimalloc-doc.h']]], - ['mi_5fnew_233',['mi_new',['../group__cpp.html#gaad048a9fce3d02c5909cd05c6ec24545',1,'mimalloc-doc.h']]], - ['mi_5fnew_5faligned_234',['mi_new_aligned',['../group__cpp.html#gaef2c2bdb4f70857902d3c8903ac095f3',1,'mimalloc-doc.h']]], - ['mi_5fnew_5faligned_5fnothrow_235',['mi_new_aligned_nothrow',['../group__cpp.html#gab5e29558926d934c3f1cae8c815f942c',1,'mimalloc-doc.h']]], - ['mi_5fnew_5fn_236',['mi_new_n',['../group__cpp.html#gae7bc4f56cd57ed3359060ff4f38bda81',1,'mimalloc-doc.h']]], - ['mi_5fnew_5fnothrow_237',['mi_new_nothrow',['../group__cpp.html#gaeaded64eda71ed6b1d569d3e723abc4a',1,'mimalloc-doc.h']]], - ['mi_5fnew_5frealloc_238',['mi_new_realloc',['../group__cpp.html#gaab78a32f55149e9fbf432d5288e38e1e',1,'mimalloc-doc.h']]], - ['mi_5fnew_5freallocn_239',['mi_new_reallocn',['../group__cpp.html#ga756f4b2bc6a7ecd0a90baea8e90c7907',1,'mimalloc-doc.h']]], - 
['mi_5foption_5fdisable_240',['mi_option_disable',['../group__options.html#gaebf6ff707a2e688ebb1a2296ca564054',1,'mimalloc-doc.h']]], - ['mi_5foption_5fenable_241',['mi_option_enable',['../group__options.html#ga04180ae41b0d601421dd62ced40ca050',1,'mimalloc-doc.h']]], - ['mi_5foption_5fget_242',['mi_option_get',['../group__options.html#ga7e8af195cc81d3fa64ccf2662caa565a',1,'mimalloc-doc.h']]], - ['mi_5foption_5fis_5fenabled_243',['mi_option_is_enabled',['../group__options.html#ga459ad98f18b3fc9275474807fe0ca188',1,'mimalloc-doc.h']]], - ['mi_5foption_5fset_244',['mi_option_set',['../group__options.html#gaf84921c32375e25754dc2ee6a911fa60',1,'mimalloc-doc.h']]], - ['mi_5foption_5fset_5fdefault_245',['mi_option_set_default',['../group__options.html#ga7ef623e440e6e5545cb08c94e71e4b90',1,'mimalloc-doc.h']]], - ['mi_5foption_5fset_5fenabled_246',['mi_option_set_enabled',['../group__options.html#ga9a13d05fcb77489cb06d4d017ebd8bed',1,'mimalloc-doc.h']]], - ['mi_5foption_5fset_5fenabled_5fdefault_247',['mi_option_set_enabled_default',['../group__options.html#ga65518b69ec5d32336b50e07f74b3f629',1,'mimalloc-doc.h']]], - ['mi_5fposix_5fmemalign_248',['mi_posix_memalign',['../group__posix.html#gacff84f226ba9feb2031b8992e5579447',1,'mimalloc-doc.h']]], - ['mi_5fprocess_5finfo_249',['mi_process_info',['../group__extended.html#ga7d862c2affd5790381da14eb102a364d',1,'mimalloc-doc.h']]], - ['mi_5fpvalloc_250',['mi_pvalloc',['../group__posix.html#gaeb325c39b887d3b90d85d1eb1712fb1e',1,'mimalloc-doc.h']]], - ['mi_5frealloc_251',['mi_realloc',['../group__malloc.html#gaf11eb497da57bdfb2de65eb191c69db6',1,'mimalloc-doc.h']]], - ['mi_5frealloc_5faligned_252',['mi_realloc_aligned',['../group__aligned.html#ga4028d1cf4aa4c87c880747044a8322ae',1,'mimalloc-doc.h']]], - ['mi_5frealloc_5faligned_5fat_253',['mi_realloc_aligned_at',['../group__aligned.html#gaf66a9ae6c6f08bd6be6fb6ea771faffb',1,'mimalloc-doc.h']]], - 
['mi_5freallocarr_254',['mi_reallocarr',['../group__posix.html#ga7e1934d60a3e697950eeb48e042bfad5',1,'mimalloc-doc.h']]], - ['mi_5freallocarray_255',['mi_reallocarray',['../group__posix.html#ga48fad8648a2f1dab9c87ea9448a52088',1,'mimalloc-doc.h']]], - ['mi_5freallocf_256',['mi_reallocf',['../group__malloc.html#gafe68ac7c5e24a65cd55c9d6b152211a0',1,'mimalloc-doc.h']]], - ['mi_5freallocn_257',['mi_reallocn',['../group__malloc.html#ga61d57b4144ba24fba5c1e9b956d13853',1,'mimalloc-doc.h']]], - ['mi_5frealpath_258',['mi_realpath',['../group__malloc.html#ga08cec32dd5bbe7da91c78d19f1b5bebe',1,'mimalloc-doc.h']]], - ['mi_5frecalloc_259',['mi_recalloc',['../group__malloc.html#ga23a0fbb452b5dce8e31fab1a1958cacc',1,'mimalloc-doc.h']]], - ['mi_5frecalloc_5faligned_260',['mi_recalloc_aligned',['../group__zeroinit.html#ga3e7e5c291acf1c7fd7ffd9914a9f945f',1,'mimalloc-doc.h']]], - ['mi_5frecalloc_5faligned_5fat_261',['mi_recalloc_aligned_at',['../group__zeroinit.html#ga4ff5e92ad73585418a072c9d059e5cf9',1,'mimalloc-doc.h']]], - ['mi_5fregister_5fdeferred_5ffree_262',['mi_register_deferred_free',['../group__extended.html#ga3460a6ca91af97be4058f523d3cb8ece',1,'mimalloc-doc.h']]], - ['mi_5fregister_5ferror_263',['mi_register_error',['../group__extended.html#gaa1d55e0e894be240827e5d87ec3a1f45',1,'mimalloc-doc.h']]], - ['mi_5fregister_5foutput_264',['mi_register_output',['../group__extended.html#gae5b17ff027cd2150b43a33040250cf3f',1,'mimalloc-doc.h']]], - ['mi_5freserve_5fhuge_5fos_5fpages_5fat_265',['mi_reserve_huge_os_pages_at',['../group__extended.html#ga7795a13d20087447281858d2c771cca1',1,'mimalloc-doc.h']]], - ['mi_5freserve_5fhuge_5fos_5fpages_5finterleave_266',['mi_reserve_huge_os_pages_interleave',['../group__extended.html#ga3132f521fb756fc0e8ec0b74fb58df50',1,'mimalloc-doc.h']]], - ['mi_5freserve_5fos_5fmemory_267',['mi_reserve_os_memory',['../group__extended.html#ga00ec3324b6b2591c7fe3677baa30a767',1,'mimalloc-doc.h']]], - 
['mi_5frezalloc_268',['mi_rezalloc',['../group__zeroinit.html#ga8c292e142110229a2980b37ab036dbc6',1,'mimalloc-doc.h']]], - ['mi_5frezalloc_5faligned_269',['mi_rezalloc_aligned',['../group__zeroinit.html#gacd71a7bce96aab38ae6de17af2eb2cf0',1,'mimalloc-doc.h']]], - ['mi_5frezalloc_5faligned_5fat_270',['mi_rezalloc_aligned_at',['../group__zeroinit.html#gae8b358c417e61d5307da002702b0a8e1',1,'mimalloc-doc.h']]], - ['mi_5fstats_5fmerge_271',['mi_stats_merge',['../group__extended.html#ga854b1de8cb067c7316286c28b2fcd3d1',1,'mimalloc-doc.h']]], - ['mi_5fstats_5fprint_272',['mi_stats_print',['../group__extended.html#ga2d126e5c62d3badc35445e5d84166df2',1,'mimalloc-doc.h']]], - ['mi_5fstats_5fprint_5fout_273',['mi_stats_print_out',['../group__extended.html#ga537f13b299ddf801e49a5a94fde02c79',1,'mimalloc-doc.h']]], - ['mi_5fstats_5freset_274',['mi_stats_reset',['../group__extended.html#ga3bb8468b8cfcc6e2a61d98aee85c5f99',1,'mimalloc-doc.h']]], - ['mi_5fstrdup_275',['mi_strdup',['../group__malloc.html#gac7cffe13f1f458ed16789488bf92b9b2',1,'mimalloc-doc.h']]], - ['mi_5fstrndup_276',['mi_strndup',['../group__malloc.html#gaaabf971c2571891433477e2d21a35266',1,'mimalloc-doc.h']]], - ['mi_5fthread_5fdone_277',['mi_thread_done',['../group__extended.html#ga0ae4581e85453456a0d658b2b98bf7bf',1,'mimalloc-doc.h']]], - ['mi_5fthread_5finit_278',['mi_thread_init',['../group__extended.html#gaf8e73efc2cbca9ebfdfb166983a04c17',1,'mimalloc-doc.h']]], - ['mi_5fthread_5fstats_5fprint_5fout_279',['mi_thread_stats_print_out',['../group__extended.html#gab1dac8476c46cb9eecab767eb40c1525',1,'mimalloc-doc.h']]], - ['mi_5fusable_5fsize_280',['mi_usable_size',['../group__extended.html#ga089c859d9eddc5f9b4bd946cd53cebee',1,'mimalloc-doc.h']]], - ['mi_5fvalloc_281',['mi_valloc',['../group__posix.html#ga73baaf5951f5165ba0763d0c06b6a93b',1,'mimalloc-doc.h']]], - ['mi_5fzalloc_282',['mi_zalloc',['../group__malloc.html#gafdd9d8bb2986e668ba9884f28af38000',1,'mimalloc-doc.h']]], - 
['mi_5fzalloc_5faligned_283',['mi_zalloc_aligned',['../group__aligned.html#ga0cadbcf5b89a7b6fb171bc8df8734819',1,'mimalloc-doc.h']]], - ['mi_5fzalloc_5faligned_5fat_284',['mi_zalloc_aligned_at',['../group__aligned.html#ga5f8c2353766db522565e642fafd8a3f8',1,'mimalloc-doc.h']]], - ['mi_5fzalloc_5fsmall_285',['mi_zalloc_small',['../group__extended.html#ga220f29f40a44404b0061c15bc1c31152',1,'mimalloc-doc.h']]] + ['mi_5f_5fexpand_0',['mi__expand',['../group__posix.html#ga66bcfeb4faedbb42b796bc680821ef84',1,'mimalloc-doc.h']]], + ['mi_5f_5fposix_5fmemalign_1',['mi__posix_memalign',['../group__posix.html#gad5a69c8fea96aa2b7a7c818c2130090a',1,'mimalloc-doc.h']]], + ['mi_5fabandoned_5fvisit_5fblocks_2',['mi_abandoned_visit_blocks',['../group__analysis.html#ga6a4865a887b2ec5247854af61562503c',1,'mimalloc-doc.h']]], + ['mi_5faligned_5falloc_3',['mi_aligned_alloc',['../group__posix.html#ga430ed1513f0571ff83be00ec58a98ee0',1,'mimalloc-doc.h']]], + ['mi_5faligned_5foffset_5frecalloc_4',['mi_aligned_offset_recalloc',['../group__posix.html#ga16570deddd559001b44953eedbad0084',1,'mimalloc-doc.h']]], + ['mi_5faligned_5frecalloc_5',['mi_aligned_recalloc',['../group__posix.html#gaf82cbb4b4f24acf723348628451798d3',1,'mimalloc-doc.h']]], + ['mi_5farena_5farea_6',['mi_arena_area',['../group__extended.html#ga9a25a00a22151619a0be91a10af7787f',1,'mimalloc-doc.h']]], + ['mi_5fcalloc_7',['mi_calloc',['../group__malloc.html#ga6686568014b54d1e6c7ac64a076e4f56',1,'mimalloc-doc.h']]], + ['mi_5fcalloc_5faligned_8',['mi_calloc_aligned',['../group__aligned.html#ga424ef386fb1f9f8e0a86ab53f16eaaf1',1,'mimalloc-doc.h']]], + ['mi_5fcalloc_5faligned_5fat_9',['mi_calloc_aligned_at',['../group__aligned.html#ga977f96bd2c5c141bcd70e6685c90d6c3',1,'mimalloc-doc.h']]], + ['mi_5fcfree_10',['mi_cfree',['../group__posix.html#ga705dc7a64bffacfeeb0141501a5c35d7',1,'mimalloc-doc.h']]], + ['mi_5fcheck_5fowned_11',['mi_check_owned',['../group__analysis.html#ga628c237489c2679af84a4d0d143b3dd5',1,'mimalloc-doc.h']]], + 
['mi_5fcollect_12',['mi_collect',['../group__extended.html#ga421430e2226d7d468529cec457396756',1,'mimalloc-doc.h']]], + ['mi_5fdebug_5fshow_5farenas_13',['mi_debug_show_arenas',['../group__extended.html#gad7439207f8f71fb6c382a9ea20b997e7',1,'mimalloc-doc.h']]], + ['mi_5fdupenv_5fs_14',['mi_dupenv_s',['../group__posix.html#gab41369c1a1da7504013a7a0b1d4dd958',1,'mimalloc-doc.h']]], + ['mi_5fexpand_15',['mi_expand',['../group__malloc.html#ga19299856216cfbb08e2628593654dfb0',1,'mimalloc-doc.h']]], + ['mi_5ffree_16',['mi_free',['../group__malloc.html#gaf2c7b89c327d1f60f59e68b9ea644d95',1,'mimalloc-doc.h']]], + ['mi_5ffree_5faligned_17',['mi_free_aligned',['../group__posix.html#ga0d28d5cf61e6bfbb18c63092939fe5c9',1,'mimalloc-doc.h']]], + ['mi_5ffree_5fsize_18',['mi_free_size',['../group__posix.html#gae01389eedab8d67341ff52e2aad80ebb',1,'mimalloc-doc.h']]], + ['mi_5ffree_5fsize_5faligned_19',['mi_free_size_aligned',['../group__posix.html#ga72e9d7ffb5fe94d69bc722c8506e27bc',1,'mimalloc-doc.h']]], + ['mi_5fgood_5fsize_20',['mi_good_size',['../group__extended.html#gac057927cd06c854b45fe7847e921bd47',1,'mimalloc-doc.h']]], + ['mi_5fheap_5fcalloc_21',['mi_heap_calloc',['../group__heap.html#gac0098aaf231d3e9586c73136d5df95da',1,'mimalloc-doc.h']]], + ['mi_5fheap_5fcalloc_5faligned_22',['mi_heap_calloc_aligned',['../group__heap.html#gacafcc26df827c7a7de5e850217566108',1,'mimalloc-doc.h']]], + ['mi_5fheap_5fcalloc_5faligned_5fat_23',['mi_heap_calloc_aligned_at',['../group__heap.html#gaa42ec2079989c4374f2c331d9b35f4e4',1,'mimalloc-doc.h']]], + ['mi_5fheap_5fcheck_5fowned_24',['mi_heap_check_owned',['../group__analysis.html#ga0d67c1789faaa15ff366c024fcaf6377',1,'mimalloc-doc.h']]], + ['mi_5fheap_5fcollect_25',['mi_heap_collect',['../group__heap.html#ga7922f7495cde30b1984d0e6072419298',1,'mimalloc-doc.h']]], + ['mi_5fheap_5fcontains_5fblock_26',['mi_heap_contains_block',['../group__analysis.html#gaa862aa8ed8d57d84cae41fc1022d71af',1,'mimalloc-doc.h']]], + 
['mi_5fheap_5fdelete_27',['mi_heap_delete',['../group__heap.html#ga2ab1af8d438819b55319c7ef51d1e409',1,'mimalloc-doc.h']]], + ['mi_5fheap_5fdestroy_28',['mi_heap_destroy',['../group__heap.html#ga9f9c0844edb9717f4feacd79116b8e0d',1,'mimalloc-doc.h']]], + ['mi_5fheap_5fget_5fbacking_29',['mi_heap_get_backing',['../group__heap.html#gac6ac9f0e7be9ab4ff70acfc8dad1235a',1,'mimalloc-doc.h']]], + ['mi_5fheap_5fget_5fdefault_30',['mi_heap_get_default',['../group__heap.html#ga14c667a6e2c5d28762d8cb7d4e057909',1,'mimalloc-doc.h']]], + ['mi_5fheap_5fmalloc_31',['mi_heap_malloc',['../group__heap.html#gab374e206c7034e0d899fb934e4f4a863',1,'mimalloc-doc.h']]], + ['mi_5fheap_5fmalloc_5faligned_32',['mi_heap_malloc_aligned',['../group__heap.html#ga33f4f05b7fea7af2113c62a4bf882cc5',1,'mimalloc-doc.h']]], + ['mi_5fheap_5fmalloc_5faligned_5fat_33',['mi_heap_malloc_aligned_at',['../group__heap.html#gae7ffc045c3996497a7f3a5f6fe7b8aaa',1,'mimalloc-doc.h']]], + ['mi_5fheap_5fmalloc_5fsmall_34',['mi_heap_malloc_small',['../group__heap.html#ga012c5c8abe22b10043de39ff95909541',1,'mimalloc-doc.h']]], + ['mi_5fheap_5fmallocn_35',['mi_heap_mallocn',['../group__heap.html#gab0f755c0b21c387fe8e9024200faa372',1,'mimalloc-doc.h']]], + ['mi_5fheap_5fnew_36',['mi_heap_new',['../group__heap.html#gaa718bb226ec0546ba6d1b6cb32179f3a',1,'mimalloc-doc.h']]], + ['mi_5fheap_5fnew_5fex_37',['mi_heap_new_ex',['../group__extended.html#ga3ae360583f4351aa5267ee7e43008faf',1,'mimalloc-doc.h']]], + ['mi_5fheap_5fnew_5fin_5farena_38',['mi_heap_new_in_arena',['../group__extended.html#gaaf2d9976576d5efd5544be12848af949',1,'mimalloc-doc.h']]], + ['mi_5fheap_5frealloc_39',['mi_heap_realloc',['../group__heap.html#gac5252d6a2e510bd349e4fcb452e6a93a',1,'mimalloc-doc.h']]], + ['mi_5fheap_5frealloc_5faligned_40',['mi_heap_realloc_aligned',['../group__heap.html#gaccf8c249872f30bf1c2493a09197d734',1,'mimalloc-doc.h']]], + 
['mi_5fheap_5frealloc_5faligned_5fat_41',['mi_heap_realloc_aligned_at',['../group__heap.html#ga6df988a7219d5707f010d5f3eb0dc3f5',1,'mimalloc-doc.h']]], + ['mi_5fheap_5freallocf_42',['mi_heap_reallocf',['../group__heap.html#gae7cd171425bee04c683c65a3701f0b4a',1,'mimalloc-doc.h']]], + ['mi_5fheap_5freallocn_43',['mi_heap_reallocn',['../group__heap.html#gaccf7bfe10ce510a000d3547d9cf7fa29',1,'mimalloc-doc.h']]], + ['mi_5fheap_5frealpath_44',['mi_heap_realpath',['../group__heap.html#ga55545a3ec6da29c5b4f62e540ecac1e2',1,'mimalloc-doc.h']]], + ['mi_5fheap_5frecalloc_45',['mi_heap_recalloc',['../group__zeroinit.html#gad1a0d325d930eeb80f25e3fea37aacde',1,'mimalloc-doc.h']]], + ['mi_5fheap_5frecalloc_5faligned_46',['mi_heap_recalloc_aligned',['../group__zeroinit.html#ga87ddd674bf1c67237d780d0b9e0f0f32',1,'mimalloc-doc.h']]], + ['mi_5fheap_5frecalloc_5faligned_5fat_47',['mi_heap_recalloc_aligned_at',['../group__zeroinit.html#ga07b5bcbaf00d0d2e598c232982588496',1,'mimalloc-doc.h']]], + ['mi_5fheap_5frezalloc_48',['mi_heap_rezalloc',['../group__zeroinit.html#ga8d8b7ebb24b513cd84d1a696048da60d',1,'mimalloc-doc.h']]], + ['mi_5fheap_5frezalloc_5faligned_49',['mi_heap_rezalloc_aligned',['../group__zeroinit.html#ga5129f6dc46ee1613d918820a8a0533a7',1,'mimalloc-doc.h']]], + ['mi_5fheap_5frezalloc_5faligned_5fat_50',['mi_heap_rezalloc_aligned_at',['../group__zeroinit.html#ga2bafa79c3f98ea74882349d44cffa5d9',1,'mimalloc-doc.h']]], + ['mi_5fheap_5fset_5fdefault_51',['mi_heap_set_default',['../group__heap.html#ga349b677dec7da5eacdbc7a385bd62a4a',1,'mimalloc-doc.h']]], + ['mi_5fheap_5fstrdup_52',['mi_heap_strdup',['../group__heap.html#ga5754e09ccc51dd6bc73885bb6ea21b7a',1,'mimalloc-doc.h']]], + ['mi_5fheap_5fstrndup_53',['mi_heap_strndup',['../group__heap.html#gad224df78f1fbee942df8adf023e12cf3',1,'mimalloc-doc.h']]], + ['mi_5fheap_5fvisit_5fblocks_54',['mi_heap_visit_blocks',['../group__analysis.html#ga70c46687dc6e9dc98b232b02646f8bed',1,'mimalloc-doc.h']]], + 
['mi_5fheap_5fzalloc_55',['mi_heap_zalloc',['../group__heap.html#gabebc796399619d964d8db77aa835e8c1',1,'mimalloc-doc.h']]], + ['mi_5fheap_5fzalloc_5faligned_56',['mi_heap_zalloc_aligned',['../group__heap.html#ga6466bde8b5712aa34e081a8317f9f471',1,'mimalloc-doc.h']]], + ['mi_5fheap_5fzalloc_5faligned_5fat_57',['mi_heap_zalloc_aligned_at',['../group__heap.html#ga484e3d01cd174f78c7e53370e5a7c819',1,'mimalloc-doc.h']]], + ['mi_5fis_5fin_5fheap_5fregion_58',['mi_is_in_heap_region',['../group__extended.html#ga5f071b10d4df1c3658e04e7fd67a94e6',1,'mimalloc-doc.h']]], + ['mi_5fis_5fredirected_59',['mi_is_redirected',['../group__extended.html#gaad25050b19f30cd79397b227e0157a3f',1,'mimalloc-doc.h']]], + ['mi_5fmalloc_60',['mi_malloc',['../group__malloc.html#gae1dd97b542420c87ae085e822b1229e8',1,'mimalloc-doc.h']]], + ['mi_5fmalloc_5faligned_61',['mi_malloc_aligned',['../group__aligned.html#ga69578ff1a98ca16e1dcd02c0995cd65c',1,'mimalloc-doc.h']]], + ['mi_5fmalloc_5faligned_5fat_62',['mi_malloc_aligned_at',['../group__aligned.html#ga2022f71b95a7cd6cce1b6e07752ae8ca',1,'mimalloc-doc.h']]], + ['mi_5fmalloc_5fgood_5fsize_63',['mi_malloc_good_size',['../group__posix.html#ga9d23ac7885fed7413c11d8e0ffa31071',1,'mimalloc-doc.h']]], + ['mi_5fmalloc_5fsize_64',['mi_malloc_size',['../group__posix.html#ga4531c9e775bb3ae12db57c1ba8a5d7de',1,'mimalloc-doc.h']]], + ['mi_5fmalloc_5fsmall_65',['mi_malloc_small',['../group__extended.html#ga7f050bc6b897da82692174f5fce59cde',1,'mimalloc-doc.h']]], + ['mi_5fmalloc_5fusable_5fsize_66',['mi_malloc_usable_size',['../group__posix.html#ga06d07cf357bbac5c73ba5d0c0c421e17',1,'mimalloc-doc.h']]], + ['mi_5fmallocn_67',['mi_mallocn',['../group__malloc.html#ga61f46bade3db76ca24aaafedc40de7b6',1,'mimalloc-doc.h']]], + ['mi_5fmanage_5fos_5fmemory_68',['mi_manage_os_memory',['../group__extended.html#ga4c6486a1fdcd7a423b5f25fe4be8e0cf',1,'mimalloc-doc.h']]], + 
['mi_5fmanage_5fos_5fmemory_5fex_69',['mi_manage_os_memory_ex',['../group__extended.html#ga41ce8525d77bbb60f618fa1029994f6e',1,'mimalloc-doc.h']]], + ['mi_5fmbsdup_70',['mi_mbsdup',['../group__posix.html#ga7b82a44094fdec4d2084eb4288a979b0',1,'mimalloc-doc.h']]], + ['mi_5fmemalign_71',['mi_memalign',['../group__posix.html#ga726867f13fd29ca36064954c0285b1d8',1,'mimalloc-doc.h']]], + ['mi_5fnew_72',['mi_new',['../group__cpp.html#ga633d96e3bc7011f960df9f3b2731fc6a',1,'mimalloc-doc.h']]], + ['mi_5fnew_5faligned_73',['mi_new_aligned',['../group__cpp.html#ga79c54da0b4b4ce9fcc11d2f6ef6675f8',1,'mimalloc-doc.h']]], + ['mi_5fnew_5faligned_5fnothrow_74',['mi_new_aligned_nothrow',['../group__cpp.html#ga92ae00b6dd64406c7e64557711ec04b7',1,'mimalloc-doc.h']]], + ['mi_5fnew_5fn_75',['mi_new_n',['../group__cpp.html#gadd11b85c15d21d308386844b5233856c',1,'mimalloc-doc.h']]], + ['mi_5fnew_5fnothrow_76',['mi_new_nothrow',['../group__cpp.html#ga5cb4f120d1f7296074256215aa9a9e54',1,'mimalloc-doc.h']]], + ['mi_5fnew_5frealloc_77',['mi_new_realloc',['../group__cpp.html#ga6867d89baf992728e0cc20a1f47db4d0',1,'mimalloc-doc.h']]], + ['mi_5fnew_5freallocn_78',['mi_new_reallocn',['../group__cpp.html#gaace912ce086682d56f3ce9f7638d9d67',1,'mimalloc-doc.h']]], + ['mi_5foption_5fdisable_79',['mi_option_disable',['../group__options.html#gaebf6ff707a2e688ebb1a2296ca564054',1,'mimalloc-doc.h']]], + ['mi_5foption_5fenable_80',['mi_option_enable',['../group__options.html#ga04180ae41b0d601421dd62ced40ca050',1,'mimalloc-doc.h']]], + ['mi_5foption_5fget_81',['mi_option_get',['../group__options.html#ga7e8af195cc81d3fa64ccf2662caa565a',1,'mimalloc-doc.h']]], + ['mi_5foption_5fget_5fclamp_82',['mi_option_get_clamp',['../group__options.html#ga96ad9c406338bd314cfe878cfc9bf723',1,'mimalloc-doc.h']]], + ['mi_5foption_5fget_5fsize_83',['mi_option_get_size',['../group__options.html#ga274db5a6ac87cc24ef0b23e7006ed02c',1,'mimalloc-doc.h']]], + 
['mi_5foption_5fis_5fenabled_84',['mi_option_is_enabled',['../group__options.html#ga459ad98f18b3fc9275474807fe0ca188',1,'mimalloc-doc.h']]], + ['mi_5foption_5fset_85',['mi_option_set',['../group__options.html#gaf84921c32375e25754dc2ee6a911fa60',1,'mimalloc-doc.h']]], + ['mi_5foption_5fset_5fdefault_86',['mi_option_set_default',['../group__options.html#ga7ef623e440e6e5545cb08c94e71e4b90',1,'mimalloc-doc.h']]], + ['mi_5foption_5fset_5fenabled_87',['mi_option_set_enabled',['../group__options.html#ga9a13d05fcb77489cb06d4d017ebd8bed',1,'mimalloc-doc.h']]], + ['mi_5foption_5fset_5fenabled_5fdefault_88',['mi_option_set_enabled_default',['../group__options.html#ga65518b69ec5d32336b50e07f74b3f629',1,'mimalloc-doc.h']]], + ['mi_5fposix_5fmemalign_89',['mi_posix_memalign',['../group__posix.html#gacff84f226ba9feb2031b8992e5579447',1,'mimalloc-doc.h']]], + ['mi_5fprocess_5finfo_90',['mi_process_info',['../group__extended.html#ga7d862c2affd5790381da14eb102a364d',1,'mimalloc-doc.h']]], + ['mi_5fpvalloc_91',['mi_pvalloc',['../group__posix.html#ga644bebccdbb2821542dd8c7e7641f476',1,'mimalloc-doc.h']]], + ['mi_5frealloc_92',['mi_realloc',['../group__malloc.html#ga0621af6a5e3aa384e6a1b548958bf583',1,'mimalloc-doc.h']]], + ['mi_5frealloc_5faligned_93',['mi_realloc_aligned',['../group__aligned.html#ga5d7a46d054b4d7abe9d8d2474add2edf',1,'mimalloc-doc.h']]], + ['mi_5frealloc_5faligned_5fat_94',['mi_realloc_aligned_at',['../group__aligned.html#gad06dcf2bb8faadb2c8ea61ee5d24bbf6',1,'mimalloc-doc.h']]], + ['mi_5freallocarr_95',['mi_reallocarr',['../group__posix.html#ga7e1934d60a3e697950eeb48e042bfad5',1,'mimalloc-doc.h']]], + ['mi_5freallocarray_96',['mi_reallocarray',['../group__posix.html#gadfeccb72748a2f6305474a37d9d57bce',1,'mimalloc-doc.h']]], + ['mi_5freallocf_97',['mi_reallocf',['../group__malloc.html#ga4dc3a4067037b151a64629fe8a332641',1,'mimalloc-doc.h']]], + ['mi_5freallocn_98',['mi_reallocn',['../group__malloc.html#ga8bddfb4a1270a0854bbcf44cb3980467',1,'mimalloc-doc.h']]], + 
['mi_5frealpath_99',['mi_realpath',['../group__malloc.html#ga94c3afcc086e85d75a57e9f76b9b71dd',1,'mimalloc-doc.h']]], + ['mi_5frecalloc_100',['mi_recalloc',['../group__malloc.html#ga23a0fbb452b5dce8e31fab1a1958cacc',1,'mimalloc-doc.h']]], + ['mi_5frecalloc_5faligned_101',['mi_recalloc_aligned',['../group__zeroinit.html#ga3e2169b48683aa0ab64f813fd68d839e',1,'mimalloc-doc.h']]], + ['mi_5frecalloc_5faligned_5fat_102',['mi_recalloc_aligned_at',['../group__zeroinit.html#gaae25e4ddedd4e0fb61b1a8bd5d452750',1,'mimalloc-doc.h']]], + ['mi_5fregister_5fdeferred_5ffree_103',['mi_register_deferred_free',['../group__extended.html#ga3460a6ca91af97be4058f523d3cb8ece',1,'mimalloc-doc.h']]], + ['mi_5fregister_5ferror_104',['mi_register_error',['../group__extended.html#gaa1d55e0e894be240827e5d87ec3a1f45',1,'mimalloc-doc.h']]], + ['mi_5fregister_5foutput_105',['mi_register_output',['../group__extended.html#gae5b17ff027cd2150b43a33040250cf3f',1,'mimalloc-doc.h']]], + ['mi_5freserve_5fhuge_5fos_5fpages_5fat_106',['mi_reserve_huge_os_pages_at',['../group__extended.html#ga7795a13d20087447281858d2c771cca1',1,'mimalloc-doc.h']]], + ['mi_5freserve_5fhuge_5fos_5fpages_5fat_5fex_107',['mi_reserve_huge_os_pages_at_ex',['../group__extended.html#ga591aab1c2bc2ca920e33f0f9f9cb5c52',1,'mimalloc-doc.h']]], + ['mi_5freserve_5fhuge_5fos_5fpages_5finterleave_108',['mi_reserve_huge_os_pages_interleave',['../group__extended.html#ga3132f521fb756fc0e8ec0b74fb58df50',1,'mimalloc-doc.h']]], + ['mi_5freserve_5fos_5fmemory_109',['mi_reserve_os_memory',['../group__extended.html#ga00ec3324b6b2591c7fe3677baa30a767',1,'mimalloc-doc.h']]], + ['mi_5freserve_5fos_5fmemory_5fex_110',['mi_reserve_os_memory_ex',['../group__extended.html#ga32f519797fd9a81acb4f52d36e6d751b',1,'mimalloc-doc.h']]], + ['mi_5frezalloc_111',['mi_rezalloc',['../group__zeroinit.html#gadfd34cd7b4f2bbda7ae06367a6360756',1,'mimalloc-doc.h']]], + 
['mi_5frezalloc_5faligned_112',['mi_rezalloc_aligned',['../group__zeroinit.html#ga4d02404fe1e7db00beb65f185e012caa',1,'mimalloc-doc.h']]], + ['mi_5frezalloc_5faligned_5fat_113',['mi_rezalloc_aligned_at',['../group__zeroinit.html#ga6843a88285bbfcc3bdfccc60aafd1270',1,'mimalloc-doc.h']]], + ['mi_5fstats_5fmerge_114',['mi_stats_merge',['../group__extended.html#ga854b1de8cb067c7316286c28b2fcd3d1',1,'mimalloc-doc.h']]], + ['mi_5fstats_5fprint_115',['mi_stats_print',['../group__extended.html#ga2d126e5c62d3badc35445e5d84166df2',1,'mimalloc-doc.h']]], + ['mi_5fstats_5fprint_5fout_116',['mi_stats_print_out',['../group__extended.html#ga537f13b299ddf801e49a5a94fde02c79',1,'mimalloc-doc.h']]], + ['mi_5fstats_5freset_117',['mi_stats_reset',['../group__extended.html#ga3bb8468b8cfcc6e2a61d98aee85c5f99',1,'mimalloc-doc.h']]], + ['mi_5fstrdup_118',['mi_strdup',['../group__malloc.html#ga245ac90ebc2cfdd17de599e5fea59889',1,'mimalloc-doc.h']]], + ['mi_5fstrndup_119',['mi_strndup',['../group__malloc.html#ga486d0d26b3b3794f6d1cdb41a9aed92d',1,'mimalloc-doc.h']]], + ['mi_5fsubproc_5fadd_5fcurrent_5fthread_120',['mi_subproc_add_current_thread',['../group__extended.html#gadbc53414eb68b275588ec001ce1ddc7c',1,'mimalloc-doc.h']]], + ['mi_5fsubproc_5fdelete_121',['mi_subproc_delete',['../group__extended.html#gaa7d263e9429bac9ac8345c9d25de610e',1,'mimalloc-doc.h']]], + ['mi_5fsubproc_5fmain_122',['mi_subproc_main',['../group__extended.html#ga2ecba0d7ebdc99e71bb985c4a1609806',1,'mimalloc-doc.h']]], + ['mi_5fsubproc_5fnew_123',['mi_subproc_new',['../group__extended.html#ga8068cac328e41fa2170faef707315243',1,'mimalloc-doc.h']]], + ['mi_5fthread_5fdone_124',['mi_thread_done',['../group__extended.html#ga0ae4581e85453456a0d658b2b98bf7bf',1,'mimalloc-doc.h']]], + ['mi_5fthread_5finit_125',['mi_thread_init',['../group__extended.html#gaf8e73efc2cbca9ebfdfb166983a04c17',1,'mimalloc-doc.h']]], + 
['mi_5fthread_5fstats_5fprint_5fout_126',['mi_thread_stats_print_out',['../group__extended.html#gab1dac8476c46cb9eecab767eb40c1525',1,'mimalloc-doc.h']]], + ['mi_5fusable_5fsize_127',['mi_usable_size',['../group__extended.html#ga089c859d9eddc5f9b4bd946cd53cebee',1,'mimalloc-doc.h']]], + ['mi_5fvalloc_128',['mi_valloc',['../group__posix.html#ga50cafb9722020402f065de93799f64ca',1,'mimalloc-doc.h']]], + ['mi_5fwcsdup_129',['mi_wcsdup',['../group__posix.html#gaa9fd7f25c9ac3a20e89b33bd6e383fcf',1,'mimalloc-doc.h']]], + ['mi_5fwdupenv_5fs_130',['mi_wdupenv_s',['../group__posix.html#ga6ac6a6a8f3c96f1af24bb8d0439cbbd1',1,'mimalloc-doc.h']]], + ['mi_5fzalloc_131',['mi_zalloc',['../group__malloc.html#gae6e38c4403247a7b40d80419e093bfb8',1,'mimalloc-doc.h']]], + ['mi_5fzalloc_5faligned_132',['mi_zalloc_aligned',['../group__aligned.html#gaac7d0beb782f9b9ac31f47492b130f82',1,'mimalloc-doc.h']]], + ['mi_5fzalloc_5faligned_5fat_133',['mi_zalloc_aligned_at',['../group__aligned.html#ga7c1778805ce50ebbf02ccbd5e39d5dba',1,'mimalloc-doc.h']]], + ['mi_5fzalloc_5fsmall_134',['mi_zalloc_small',['../group__extended.html#ga51c47637e81df0e2f13a2d7a2dec123e',1,'mimalloc-doc.h']]] ]; diff --git a/depends/mimalloc/docs/search/groups_0.js b/depends/mimalloc/docs/search/groups_0.js index 0ed99b804cea..d371a39915e0 100644 --- a/depends/mimalloc/docs/search/groups_0.js +++ b/depends/mimalloc/docs/search/groups_0.js @@ -1,4 +1,6 @@ var searchData= [ - ['aligned_20allocation_314',['Aligned Allocation',['../group__aligned.html',1,'']]] + ['aligned_20allocation_0',['Aligned Allocation',['../group__aligned.html',1,'']]], + ['allocation_1',['Allocation',['../group__aligned.html',1,'Aligned Allocation'],['../group__malloc.html',1,'Basic Allocation'],['../group__heap.html',1,'Heap Allocation']]], + ['allocation_2',['Zero initialized re-allocation',['../group__zeroinit.html',1,'']]] ]; diff --git a/depends/mimalloc/docs/search/groups_1.js b/depends/mimalloc/docs/search/groups_1.js index 
f27c5847a097..8fca27170125 100644 --- a/depends/mimalloc/docs/search/groups_1.js +++ b/depends/mimalloc/docs/search/groups_1.js @@ -1,4 +1,4 @@ var searchData= [ - ['basic_20allocation_315',['Basic Allocation',['../group__malloc.html',1,'']]] + ['basic_20allocation_0',['Basic Allocation',['../group__malloc.html',1,'']]] ]; diff --git a/depends/mimalloc/docs/search/groups_2.js b/depends/mimalloc/docs/search/groups_2.js index 6da64b68be2a..d260dec8a03d 100644 --- a/depends/mimalloc/docs/search/groups_2.js +++ b/depends/mimalloc/docs/search/groups_2.js @@ -1,4 +1,4 @@ var searchData= [ - ['c_2b_2b_20wrappers_316',['C++ wrappers',['../group__cpp.html',1,'']]] + ['c_20wrappers_0',['C++ wrappers',['../group__cpp.html',1,'']]] ]; diff --git a/depends/mimalloc/docs/search/groups_3.js b/depends/mimalloc/docs/search/groups_3.js index cdfbe640a16a..7099da010d37 100644 --- a/depends/mimalloc/docs/search/groups_3.js +++ b/depends/mimalloc/docs/search/groups_3.js @@ -1,4 +1,4 @@ var searchData= [ - ['extended_20functions_317',['Extended Functions',['../group__extended.html',1,'']]] + ['extended_20functions_0',['Extended Functions',['../group__extended.html',1,'']]] ]; diff --git a/depends/mimalloc/docs/search/groups_4.js b/depends/mimalloc/docs/search/groups_4.js index 687f1ea72825..87efcce9a042 100644 --- a/depends/mimalloc/docs/search/groups_4.js +++ b/depends/mimalloc/docs/search/groups_4.js @@ -1,5 +1,4 @@ var searchData= [ - ['heap_20allocation_318',['Heap Allocation',['../group__heap.html',1,'']]], - ['heap_20introspection_319',['Heap Introspection',['../group__analysis.html',1,'']]] + ['functions_0',['Extended Functions',['../group__extended.html',1,'']]] ]; diff --git a/depends/mimalloc/docs/search/groups_5.js b/depends/mimalloc/docs/search/groups_5.js index 43c8b1fc4743..3131f4a2ec05 100644 --- a/depends/mimalloc/docs/search/groups_5.js +++ b/depends/mimalloc/docs/search/groups_5.js @@ -1,4 +1,5 @@ var searchData= [ - 
['posix_320',['Posix',['../group__posix.html',1,'']]] + ['heap_20allocation_0',['Heap Allocation',['../group__heap.html',1,'']]], + ['heap_20introspection_1',['Heap Introspection',['../group__analysis.html',1,'']]] ]; diff --git a/depends/mimalloc/docs/search/groups_6.js b/depends/mimalloc/docs/search/groups_6.js index 346318794bc3..6d20843280ff 100644 --- a/depends/mimalloc/docs/search/groups_6.js +++ b/depends/mimalloc/docs/search/groups_6.js @@ -1,4 +1,5 @@ var searchData= [ - ['runtime_20options_321',['Runtime Options',['../group__options.html',1,'']]] + ['initialized_20re_20allocation_0',['Zero initialized re-allocation',['../group__zeroinit.html',1,'']]], + ['introspection_1',['Heap Introspection',['../group__analysis.html',1,'']]] ]; diff --git a/depends/mimalloc/docs/search/groups_7.js b/depends/mimalloc/docs/search/groups_7.js index aa150e973f4b..aafc4dae67f3 100644 --- a/depends/mimalloc/docs/search/groups_7.js +++ b/depends/mimalloc/docs/search/groups_7.js @@ -1,4 +1,4 @@ var searchData= [ - ['typed_20macros_322',['Typed Macros',['../group__typed.html',1,'']]] + ['macros_0',['Typed Macros',['../group__typed.html',1,'']]] ]; diff --git a/depends/mimalloc/docs/search/groups_8.js b/depends/mimalloc/docs/search/groups_8.js index f9c29fe3bc1d..30681f7b5cd7 100644 --- a/depends/mimalloc/docs/search/groups_8.js +++ b/depends/mimalloc/docs/search/groups_8.js @@ -1,4 +1,4 @@ var searchData= [ - ['zero_20initialized_20re_2dallocation_323',['Zero initialized re-allocation',['../group__zeroinit.html',1,'']]] + ['options_0',['Runtime Options',['../group__options.html',1,'']]] ]; diff --git a/depends/mimalloc/docs/search/groups_9.js b/depends/mimalloc/docs/search/groups_9.js new file mode 100644 index 000000000000..bc3429469219 --- /dev/null +++ b/depends/mimalloc/docs/search/groups_9.js @@ -0,0 +1,4 @@ +var searchData= +[ + ['posix_0',['Posix',['../group__posix.html',1,'']]] +]; diff --git a/depends/mimalloc/docs/search/groups_a.js 
b/depends/mimalloc/docs/search/groups_a.js new file mode 100644 index 000000000000..5f2947aafb6b --- /dev/null +++ b/depends/mimalloc/docs/search/groups_a.js @@ -0,0 +1,5 @@ +var searchData= +[ + ['re_20allocation_0',['Zero initialized re-allocation',['../group__zeroinit.html',1,'']]], + ['runtime_20options_1',['Runtime Options',['../group__options.html',1,'']]] +]; diff --git a/depends/mimalloc/docs/search/groups_b.js b/depends/mimalloc/docs/search/groups_b.js new file mode 100644 index 000000000000..56eb25eae6e9 --- /dev/null +++ b/depends/mimalloc/docs/search/groups_b.js @@ -0,0 +1,4 @@ +var searchData= +[ + ['typed_20macros_0',['Typed Macros',['../group__typed.html',1,'']]] +]; diff --git a/depends/mimalloc/docs/search/groups_c.js b/depends/mimalloc/docs/search/groups_c.js new file mode 100644 index 000000000000..8f445b9fc8c2 --- /dev/null +++ b/depends/mimalloc/docs/search/groups_c.js @@ -0,0 +1,4 @@ +var searchData= +[ + ['wrappers_0',['C++ wrappers',['../group__cpp.html',1,'']]] +]; diff --git a/depends/mimalloc/docs/search/groups_d.js b/depends/mimalloc/docs/search/groups_d.js new file mode 100644 index 000000000000..1437d04ac002 --- /dev/null +++ b/depends/mimalloc/docs/search/groups_d.js @@ -0,0 +1,4 @@ +var searchData= +[ + ['zero_20initialized_20re_20allocation_0',['Zero initialized re-allocation',['../group__zeroinit.html',1,'']]] +]; diff --git a/depends/mimalloc/docs/search/pages_0.js b/depends/mimalloc/docs/search/pages_0.js index 07922dae7add..9c92133c3c92 100644 --- a/depends/mimalloc/docs/search/pages_0.js +++ b/depends/mimalloc/docs/search/pages_0.js @@ -1,4 +1,4 @@ var searchData= [ - ['building_324',['Building',['../build.html',1,'']]] + ['building_0',['Building',['../build.html',1,'']]] ]; diff --git a/depends/mimalloc/docs/search/pages_1.js b/depends/mimalloc/docs/search/pages_1.js index 6433daeccef5..4e3b12eec4d1 100644 --- a/depends/mimalloc/docs/search/pages_1.js +++ b/depends/mimalloc/docs/search/pages_1.js @@ -1,4 +1,4 @@ var 
searchData= [ - ['environment_20options_325',['Environment Options',['../environment.html',1,'']]] + ['environment_20options_0',['Environment Options',['../environment.html',1,'']]] ]; diff --git a/depends/mimalloc/docs/search/pages_2.js b/depends/mimalloc/docs/search/pages_2.js index 7577377b9719..5071ed69e8a2 100644 --- a/depends/mimalloc/docs/search/pages_2.js +++ b/depends/mimalloc/docs/search/pages_2.js @@ -1,4 +1,4 @@ var searchData= [ - ['overriding_20malloc_326',['Overriding Malloc',['../overrides.html',1,'']]] + ['library_0',['Using the library',['../using.html',1,'']]] ]; diff --git a/depends/mimalloc/docs/search/pages_3.js b/depends/mimalloc/docs/search/pages_3.js index d62a3cfd1322..4c761a084669 100644 --- a/depends/mimalloc/docs/search/pages_3.js +++ b/depends/mimalloc/docs/search/pages_3.js @@ -1,4 +1,6 @@ var searchData= [ - ['performance_327',['Performance',['../bench.html',1,'']]] + ['malloc_0',['Overriding Malloc',['../overrides.html',1,'']]], + ['malloc_1',['mi-malloc',['../index.html',1,'']]], + ['mi_20malloc_2',['mi-malloc',['../index.html',1,'']]] ]; diff --git a/depends/mimalloc/docs/search/pages_4.js b/depends/mimalloc/docs/search/pages_4.js index 4e4e64dc489e..a0c748ac09be 100644 --- a/depends/mimalloc/docs/search/pages_4.js +++ b/depends/mimalloc/docs/search/pages_4.js @@ -1,4 +1,5 @@ var searchData= [ - ['using_20the_20library_328',['Using the library',['../using.html',1,'']]] + ['options_0',['Environment Options',['../environment.html',1,'']]], + ['overriding_20malloc_1',['Overriding Malloc',['../overrides.html',1,'']]] ]; diff --git a/depends/mimalloc/docs/search/pages_5.js b/depends/mimalloc/docs/search/pages_5.js new file mode 100644 index 000000000000..3142480928b9 --- /dev/null +++ b/depends/mimalloc/docs/search/pages_5.js @@ -0,0 +1,4 @@ +var searchData= +[ + ['performance_0',['Performance',['../bench.html',1,'']]] +]; diff --git a/depends/mimalloc/docs/search/pages_6.js b/depends/mimalloc/docs/search/pages_6.js new file mode 
100644 index 000000000000..308a1780009f --- /dev/null +++ b/depends/mimalloc/docs/search/pages_6.js @@ -0,0 +1,4 @@ +var searchData= +[ + ['the_20library_0',['Using the library',['../using.html',1,'']]] +]; diff --git a/depends/mimalloc/docs/search/pages_7.js b/depends/mimalloc/docs/search/pages_7.js new file mode 100644 index 000000000000..4806a8183e75 --- /dev/null +++ b/depends/mimalloc/docs/search/pages_7.js @@ -0,0 +1,4 @@ +var searchData= +[ + ['using_20the_20library_0',['Using the library',['../using.html',1,'']]] +]; diff --git a/depends/mimalloc/docs/search/search.css b/depends/mimalloc/docs/search/search.css index 10bd4b55d93a..190ed7fdbb7b 100644 --- a/depends/mimalloc/docs/search/search.css +++ b/depends/mimalloc/docs/search/search.css @@ -1,100 +1,111 @@ /*---------------- Search Box */ -#FSearchBox { - float: left; +#MSearchBox { + position: absolute; + right: 5px; +} +/*---------------- Search box styling */ + +.SRPage * { + font-weight: normal; + line-height: normal; +} + +dark-mode-toggle { + margin-left: 5px; + display: flex; + float: right; } #MSearchBox { + display: inline-block; white-space : nowrap; - float: none; - margin-top: 0px; - right: 0px; - width: 170px; - height: 24px; + background: white; + border-radius: 0.65em; + box-shadow: inset 0.5px 0.5px 3px 0px #555; z-index: 102; - display: inline; - position: absolute; } -#MSearchBox .left -{ - display:block; - position:absolute; - left:10px; - width:20px; - height:19px; - background:url('search_l.png') no-repeat; - background-position:right; +#MSearchBox .left { + display: inline-block; + vertical-align: middle; + height: 1.4em; } #MSearchSelect { - display:block; - position:absolute; - width:20px; - height:19px; + display: inline-block; + vertical-align: middle; + width: 20px; + height: 19px; + background-image: url('mag_sel.svg'); + margin: 0 0 0 0.3em; + padding: 0; } -.left #MSearchSelect { - left:4px; +#MSearchSelectExt { + display: inline-block; + vertical-align: middle; + width: 
10px; + height: 19px; + background-image: url('mag.svg'); + margin: 0 0 0 0.5em; + padding: 0; } -.right #MSearchSelect { - right:5px; -} #MSearchField { - display:block; - position:absolute; - height:19px; - background:url('search_m.png') repeat-x; + display: inline-block; + vertical-align: middle; + width: 7.5em; + height: 19px; + margin: 0 0.15em; + padding: 0; + line-height: 1em; border:none; - width:111px; - margin-left:20px; - padding-left:4px; color: #909090; outline: none; - font: 9pt Arial, Verdana, sans-serif; + font-family: Arial,Verdana,sans-serif; -webkit-border-radius: 0px; + border-radius: 0px; + background: none; } -#FSearchBox #MSearchField { - margin-left:15px; +@media(hover: none) { + /* to avoid zooming on iOS */ + #MSearchField { + font-size: 16px; + } } #MSearchBox .right { - display:block; - position:absolute; - right:10px; - top:0px; - width:20px; - height:19px; - background:url('search_r.png') no-repeat; - background-position:left; + display: inline-block; + vertical-align: middle; + width: 1.4em; + height: 1.4em; } #MSearchClose { display: none; - position: absolute; - top: 4px; + font-size: inherit; background : none; border: none; - margin: 0px 4px 0px 0px; - padding: 0px 0px; + margin: 0; + padding: 0; outline: none; -} -.left #MSearchClose { - left: 6px; } -.right #MSearchClose { - right: 2px; +#MSearchCloseImg { + padding: 0.3em; + margin: 0; } .MSearchBoxActive #MSearchField { - color: #000000; + color: black; } + + /*---------------- Search filter selection */ #MSearchSelectWindow { @@ -115,7 +126,7 @@ } .SelectItem { - font: 8pt Arial, Verdana, sans-serif; + font: 8pt Arial,Verdana,sans-serif; padding-left: 2px; padding-right: 12px; border: 0px; @@ -123,7 +134,7 @@ span.SelectionMark { margin-right: 4px; - font-family: monospace; + font-family: 'JetBrains Mono',Consolas,Monaco,'Andale Mono','Ubuntu Mono',monospace,fixed; outline-style: none; text-decoration: none; } @@ -131,7 +142,7 @@ span.SelectionMark { a.SelectItem { display: 
block; outline-style: none; - color: #000000; + color: black; text-decoration: none; padding-left: 6px; padding-right: 12px; @@ -139,13 +150,13 @@ a.SelectItem { a.SelectItem:focus, a.SelectItem:active { - color: #000000; + color: black; outline-style: none; text-decoration: none; } a.SelectItem:hover { - color: #FFFFFF; + color: white; background-color: #0F1010; outline-style: none; text-decoration: none; @@ -156,7 +167,7 @@ a.SelectItem:hover { /*---------------- Search results window */ iframe#MSearchResults { - width: 60ex; + /*width: 60ex;*/ height: 15em; } @@ -164,9 +175,12 @@ iframe#MSearchResults { display: none; position: absolute; left: 0; top: 0; - border: 1px solid #000; + border: 1px solid black; background-color: #DADDDE; z-index:10000; + width: 300px; + height: 400px; + overflow: auto; } /* ----------------------------------- */ @@ -174,7 +188,6 @@ iframe#MSearchResults { #SRIndex { clear:both; - padding-bottom: 15px; } .SREntry { @@ -187,8 +200,9 @@ iframe#MSearchResults { padding: 1px 5px; } -body.SRPage { +div.SRPage { margin: 5px 2px; + background-color: #DADDDE; } .SRChildren { @@ -200,17 +214,18 @@ body.SRPage { } .SRSymbol { - font-weight: bold; + font-weight: bold; color: #121414; - font-family: Arial, Verdana, sans-serif; + font-family: Arial,Verdana,sans-serif; text-decoration: none; outline: none; } a.SRScope { display: block; - color: #121414; - font-family: Arial, Verdana, sans-serif; + color: #121414; + font-family: Arial,Verdana,sans-serif; + font-size: 8pt; text-decoration: none; outline: none; } @@ -222,29 +237,27 @@ a.SRScope:focus, a.SRScope:active { span.SRScope { padding-left: 4px; + font-family: Arial,Verdana,sans-serif; } .SRPage .SRStatus { padding: 2px 5px; font-size: 8pt; font-style: italic; + font-family: Arial,Verdana,sans-serif; } .SRResult { display: none; } -DIV.searchresults { +div.searchresults { margin-left: 10px; margin-right: 10px; } /*---------------- External search page results */ -.searchresult { - 
background-color: #DFE1E2; -} - .pages b { color: white; padding: 5px 5px 3px 5px; diff --git a/depends/mimalloc/docs/search/search.js b/depends/mimalloc/docs/search/search.js index fb226f734e6d..666af01e5ea2 100644 --- a/depends/mimalloc/docs/search/search.js +++ b/depends/mimalloc/docs/search/search.js @@ -22,56 +22,9 @@ @licend The above is the entire license notice for the JavaScript code in this file */ -function convertToId(search) -{ - var result = ''; - for (i=0;i document.getElementById("MSearchField"); + this.DOMSearchSelect = () => document.getElementById("MSearchSelect"); + this.DOMSearchSelectWindow = () => document.getElementById("MSearchSelectWindow"); + this.DOMPopupSearchResults = () => document.getElementById("MSearchResults"); + this.DOMPopupSearchResultsWindow = () => document.getElementById("MSearchResultsWindow"); + this.DOMSearchClose = () => document.getElementById("MSearchClose"); + this.DOMSearchBox = () => document.getElementById("MSearchBox"); // ------------ Event Handlers // Called when focus is added or removed from the search field. 
- this.OnSearchFieldFocus = function(isActive) - { + this.OnSearchFieldFocus = function(isActive) { this.Activate(isActive); } - this.OnSearchSelectShow = function() - { - var searchSelectWindow = this.DOMSearchSelectWindow(); - var searchField = this.DOMSearchSelect(); - - if (this.insideFrame) - { - var left = getXPos(searchField); - var top = getYPos(searchField); - left += searchField.offsetWidth + 6; - top += searchField.offsetHeight; - - // show search selection popup - searchSelectWindow.style.display='block'; - left -= searchSelectWindow.offsetWidth; - searchSelectWindow.style.left = left + 'px'; - searchSelectWindow.style.top = top + 'px'; - } - else - { - var left = getXPos(searchField); - var top = getYPos(searchField); - top += searchField.offsetHeight; - - // show search selection popup - searchSelectWindow.style.display='block'; - searchSelectWindow.style.left = left + 'px'; - searchSelectWindow.style.top = top + 'px'; - } + this.OnSearchSelectShow = function() { + const searchSelectWindow = this.DOMSearchSelectWindow(); + const searchField = this.DOMSearchSelect(); + + const left = getXPos(searchField); + const top = getYPos(searchField) + searchField.offsetHeight; + + // show search selection popup + searchSelectWindow.style.display='block'; + searchSelectWindow.style.left = left + 'px'; + searchSelectWindow.style.top = top + 'px'; // stop selection hide timer - if (this.hideTimeout) - { + if (this.hideTimeout) { clearTimeout(this.hideTimeout); this.hideTimeout=0; } return false; // to avoid "image drag" default event } - this.OnSearchSelectHide = function() - { - this.hideTimeout = setTimeout(this.name +".CloseSelectionWindow()", + this.OnSearchSelectHide = function() { + this.hideTimeout = setTimeout(this.CloseSelectionWindow.bind(this), this.closeSelectionTimeout); } // Called when the content of the search field is changed. 
- this.OnSearchFieldChange = function(evt) - { - if (this.keyTimeout) // kill running timer - { + this.OnSearchFieldChange = function(evt) { + if (this.keyTimeout) { // kill running timer clearTimeout(this.keyTimeout); this.keyTimeout = 0; } - var e = (evt) ? evt : window.event; // for IE - if (e.keyCode==40 || e.keyCode==13) - { - if (e.shiftKey==1) - { + const e = evt ? evt : window.event; // for IE + if (e.keyCode==40 || e.keyCode==13) { + if (e.shiftKey==1) { this.OnSearchSelectShow(); - var win=this.DOMSearchSelectWindow(); - for (i=0;i do a search - { + const searchValue = this.DOMSearchField().value.replace(/ +/g, ""); + if (searchValue!="" && this.searchActive) { // something was found -> do a search this.Search(); } } - this.OnSearchSelectKey = function(evt) - { - var e = (evt) ? evt : window.event; // for IE - if (e.keyCode==40 && this.searchIndex0) // Up - { + } else if (e.keyCode==38 && this.searchIndex>0) { // Up this.searchIndex--; this.OnSelectItem(this.searchIndex); - } - else if (e.keyCode==13 || e.keyCode==27) - { + } else if (e.keyCode==13 || e.keyCode==27) { + e.stopPropagation(); this.OnSelectItem(this.searchIndex); this.CloseSelectionWindow(); this.DOMSearchField().focus(); @@ -314,111 +239,108 @@ function SearchBox(name, resultsPath, inFrame, label, extension) // --------- Actions // Closes the results window. - this.CloseResultsWindow = function() - { + this.CloseResultsWindow = function() { this.DOMPopupSearchResultsWindow().style.display = 'none'; this.DOMSearchClose().style.display = 'none'; this.Activate(false); } - this.CloseSelectionWindow = function() - { + this.CloseSelectionWindow = function() { this.DOMSearchSelectWindow().style.display = 'none'; } // Performs a search. 
- this.Search = function() - { + this.Search = function() { this.keyTimeout = 0; // strip leading whitespace - var searchValue = this.DOMSearchField().value.replace(/^ +/, ""); + const searchValue = this.DOMSearchField().value.replace(/^ +/, ""); - var code = searchValue.toLowerCase().charCodeAt(0); - var idxChar = searchValue.substr(0, 1).toLowerCase(); - if ( 0xD800 <= code && code <= 0xDBFF && searchValue > 1) // surrogate pair - { + const code = searchValue.toLowerCase().charCodeAt(0); + let idxChar = searchValue.substr(0, 1).toLowerCase(); + if ( 0xD800 <= code && code <= 0xDBFF && searchValue > 1) { // surrogate pair idxChar = searchValue.substr(0, 2); } - var resultsPage; - var resultsPageWithSearch; - var hasResultsPage; - - var idx = indexSectionsWithContent[this.searchIndex].indexOf(idxChar); - if (idx!=-1) - { - var hexCode=idx.toString(16); - resultsPage = this.resultsPath + '/' + indexSectionNames[this.searchIndex] + '_' + hexCode + this.extension; - resultsPageWithSearch = resultsPage+'?'+escape(searchValue); - hasResultsPage = true; + let jsFile; + let idx = indexSectionsWithContent[this.searchIndex].indexOf(idxChar); + if (idx!=-1) { + const hexCode=idx.toString(16); + jsFile = this.resultsPath + indexSectionNames[this.searchIndex] + '_' + hexCode + '.js'; + } + + const loadJS = function(url, impl, loc) { + const scriptTag = document.createElement('script'); + scriptTag.src = url; + scriptTag.onload = impl; + scriptTag.onreadystatechange = impl; + loc.appendChild(scriptTag); } - else // nothing available for this search term - { - resultsPage = this.resultsPath + '/nomatches' + this.extension; - resultsPageWithSearch = resultsPage; - hasResultsPage = false; + + const domPopupSearchResultsWindow = this.DOMPopupSearchResultsWindow(); + const domSearchBox = this.DOMSearchBox(); + const domPopupSearchResults = this.DOMPopupSearchResults(); + const domSearchClose = this.DOMSearchClose(); + const resultsPath = this.resultsPath; + + const handleResults = 
function() { + document.getElementById("Loading").style.display="none"; + if (typeof searchData !== 'undefined') { + createResults(resultsPath); + document.getElementById("NoMatches").style.display="none"; + } + + if (idx!=-1) { + searchResults.Search(searchValue); + } else { // no file with search results => force empty search results + searchResults.Search('===='); + } + + if (domPopupSearchResultsWindow.style.display!='block') { + domSearchClose.style.display = 'inline-block'; + let left = getXPos(domSearchBox) + 150; + let top = getYPos(domSearchBox) + 20; + domPopupSearchResultsWindow.style.display = 'block'; + left -= domPopupSearchResults.offsetWidth; + const maxWidth = document.body.clientWidth; + const maxHeight = document.body.clientHeight; + let width = 300; + if (left<10) left=10; + if (width+left+8>maxWidth) width=maxWidth-left-8; + let height = 400; + if (height+top+8>maxHeight) height=maxHeight-top-8; + domPopupSearchResultsWindow.style.top = top + 'px'; + domPopupSearchResultsWindow.style.left = left + 'px'; + domPopupSearchResultsWindow.style.width = width + 'px'; + domPopupSearchResultsWindow.style.height = height + 'px'; + } } - window.frames.MSearchResults.location = resultsPageWithSearch; - var domPopupSearchResultsWindow = this.DOMPopupSearchResultsWindow(); - - if (domPopupSearchResultsWindow.style.display!='block') - { - var domSearchBox = this.DOMSearchBox(); - this.DOMSearchClose().style.display = 'inline-block'; - if (this.insideFrame) - { - var domPopupSearchResults = this.DOMPopupSearchResults(); - domPopupSearchResultsWindow.style.position = 'relative'; - domPopupSearchResultsWindow.style.display = 'block'; - var width = document.body.clientWidth - 8; // the -8 is for IE :-( - domPopupSearchResultsWindow.style.width = width + 'px'; - domPopupSearchResults.style.width = width + 'px'; - } - else - { - var domPopupSearchResults = this.DOMPopupSearchResults(); - var left = getXPos(domSearchBox) + 150; // domSearchBox.offsetWidth; - var top 
= getYPos(domSearchBox) + 20; // domSearchBox.offsetHeight + 1; - domPopupSearchResultsWindow.style.display = 'block'; - left -= domPopupSearchResults.offsetWidth; - domPopupSearchResultsWindow.style.top = top + 'px'; - domPopupSearchResultsWindow.style.left = left + 'px'; - } + if (jsFile) { + loadJS(jsFile, handleResults, this.DOMPopupSearchResultsWindow()); + } else { + handleResults(); } this.lastSearchValue = searchValue; - this.lastResultsPage = resultsPage; } // -------- Activation Functions // Activates or deactivates the search panel, resetting things to // their default values if necessary. - this.Activate = function(isActive) - { + this.Activate = function(isActive) { if (isActive || // open it - this.DOMPopupSearchResultsWindow().style.display == 'block' - ) - { + this.DOMPopupSearchResultsWindow().style.display == 'block' + ) { this.DOMSearchBox().className = 'MSearchBoxActive'; - - var searchField = this.DOMSearchField(); - - if (searchField.value == this.searchLabel) // clear "Search" term upon entry - { - searchField.value = ''; - this.searchActive = true; - } - } - else if (!isActive) // directly remove the panel - { + this.searchActive = true; + } else if (!isActive) { // directly remove the panel this.DOMSearchBox().className = 'MSearchBoxInactive'; - this.DOMSearchField().value = this.searchLabel; this.searchActive = false; this.lastSearchValue = '' this.lastResultsPage = ''; + this.DOMSearchField().value = ''; } } } @@ -426,391 +348,347 @@ function SearchBox(name, resultsPath, inFrame, label, extension) // ----------------------------------------------------------------------- // The class that handles everything on the search results page. -function SearchResults(name) -{ - // The number of matches from the last run of . - this.lastMatchCount = 0; - this.lastKey = 0; - this.repeatOn = false; - - // Toggles the visibility of the passed element ID. 
- this.FindChildElement = function(id) - { - var parentElement = document.getElementById(id); - var element = parentElement.firstChild; - - while (element && element!=parentElement) - { - if (element.nodeName.toLowerCase() == 'div' && element.className == 'SRChildren') - { - return element; - } +function SearchResults() { + + function convertToId(search) { + let result = ''; + for (let i=0;i. + this.lastMatchCount = 0; + this.lastKey = 0; + this.repeatOn = false; - if (element && element!=parentElement) - { - element = element.nextSibling; - } - } + // Toggles the visibility of the passed element ID. + this.FindChildElement = function(id) { + const parentElement = document.getElementById(id); + let element = parentElement.firstChild; + + while (element && element!=parentElement) { + if (element.nodeName.toLowerCase() == 'div' && element.className == 'SRChildren') { + return element; } - } - this.Toggle = function(id) - { - var element = this.FindChildElement(id); - if (element) - { - if (element.style.display == 'block') - { - element.style.display = 'none'; + if (element.nodeName.toLowerCase() == 'div' && element.hasChildNodes()) { + element = element.firstChild; + } else if (element.nextSibling) { + element = element.nextSibling; + } else { + do { + element = element.parentNode; } - else - { - element.style.display = 'block'; + while (element && element!=parentElement && !element.nextSibling); + + if (element && element!=parentElement) { + element = element.nextSibling; } } } + } - // Searches for the passed string. If there is no parameter, - // it takes it from the URL query. - // - // Always returns true, since other documents may try to call it - // and that may or may not be possible. - this.Search = function(search) - { - if (!search) // get search word from URL - { - search = window.location.search; - search = search.substring(1); // Remove the leading '?' 
- search = unescape(search); + this.Toggle = function(id) { + const element = this.FindChildElement(id); + if (element) { + if (element.style.display == 'block') { + element.style.display = 'none'; + } else { + element.style.display = 'block'; } + } + } - search = search.replace(/^ +/, ""); // strip leading spaces - search = search.replace(/ +$/, ""); // strip trailing spaces - search = search.toLowerCase(); - search = convertToId(search); - - var resultRows = document.getElementsByTagName("div"); - var matches = 0; - - var i = 0; - while (i < resultRows.length) - { - var row = resultRows.item(i); - if (row.className == "SRResult") - { - var rowMatchName = row.id.toLowerCase(); - rowMatchName = rowMatchName.replace(/^sr\d*_/, ''); // strip 'sr123_' - - if (search.length<=rowMatchName.length && - rowMatchName.substr(0, search.length)==search) - { - row.style.display = 'block'; - matches++; - } - else - { - row.style.display = 'none'; - } + // Searches for the passed string. If there is no parameter, + // it takes it from the URL query. + // + // Always returns true, since other documents may try to call it + // and that may or may not be possible. + this.Search = function(search) { + if (!search) { // get search word from URL + search = window.location.search; + search = search.substring(1); // Remove the leading '?' 
+ search = unescape(search); + } + + search = search.replace(/^ +/, ""); // strip leading spaces + search = search.replace(/ +$/, ""); // strip trailing spaces + search = search.toLowerCase(); + search = convertToId(search); + + const resultRows = document.getElementsByTagName("div"); + let matches = 0; + + let i = 0; + while (i < resultRows.length) { + const row = resultRows.item(i); + if (row.className == "SRResult") { + let rowMatchName = row.id.toLowerCase(); + rowMatchName = rowMatchName.replace(/^sr\d*_/, ''); // strip 'sr123_' + + if (search.length<=rowMatchName.length && + rowMatchName.substr(0, search.length)==search) { + row.style.display = 'block'; + matches++; + } else { + row.style.display = 'none'; } - i++; - } - document.getElementById("Searching").style.display='none'; - if (matches == 0) // no results - { - document.getElementById("NoMatches").style.display='block'; } - else // at least one result - { - document.getElementById("NoMatches").style.display='none'; - } - this.lastMatchCount = matches; - return true; + i++; } + document.getElementById("Searching").style.display='none'; + if (matches == 0) { // no results + document.getElementById("NoMatches").style.display='block'; + } else { // at least one result + document.getElementById("NoMatches").style.display='none'; + } + this.lastMatchCount = matches; + return true; + } - // return the first item with index index or higher that is visible - this.NavNext = function(index) - { - var focusItem; - while (1) - { - var focusName = 'Item'+index; - focusItem = document.getElementById(focusName); - if (focusItem && focusItem.parentNode.parentNode.style.display=='block') - { - break; - } - else if (!focusItem) // last element - { - break; - } - focusItem=null; - index++; + // return the first item with index index or higher that is visible + this.NavNext = function(index) { + let focusItem; + for (;;) { + const focusName = 'Item'+index; + focusItem = document.getElementById(focusName); + if (focusItem 
&& focusItem.parentNode.parentNode.style.display=='block') { + break; + } else if (!focusItem) { // last element + break; } - return focusItem; + focusItem=null; + index++; } + return focusItem; + } - this.NavPrev = function(index) - { - var focusItem; - while (1) - { - var focusName = 'Item'+index; - focusItem = document.getElementById(focusName); - if (focusItem && focusItem.parentNode.parentNode.style.display=='block') - { - break; - } - else if (!focusItem) // last element - { - break; - } - focusItem=null; - index--; + this.NavPrev = function(index) { + let focusItem; + for (;;) { + const focusName = 'Item'+index; + focusItem = document.getElementById(focusName); + if (focusItem && focusItem.parentNode.parentNode.style.display=='block') { + break; + } else if (!focusItem) { // last element + break; } - return focusItem; + focusItem=null; + index--; } + return focusItem; + } - this.ProcessKeys = function(e) - { - if (e.type == "keydown") - { - this.repeatOn = false; - this.lastKey = e.keyCode; + this.ProcessKeys = function(e) { + if (e.type == "keydown") { + this.repeatOn = false; + this.lastKey = e.keyCode; + } else if (e.type == "keypress") { + if (!this.repeatOn) { + if (this.lastKey) this.repeatOn = true; + return false; // ignore first keypress after keydown } - else if (e.type == "keypress") - { - if (!this.repeatOn) - { - if (this.lastKey) this.repeatOn = true; - return false; // ignore first keypress after keydown - } - } - else if (e.type == "keyup") - { - this.lastKey = 0; - this.repeatOn = false; - } - return this.lastKey!=0; + } else if (e.type == "keyup") { + this.lastKey = 0; + this.repeatOn = false; } + return this.lastKey!=0; + } - this.Nav = function(evt,itemIndex) - { - var e = (evt) ? 
evt : window.event; // for IE - if (e.keyCode==13) return true; - if (!this.ProcessKeys(e)) return false; - - if (this.lastKey==38) // Up - { - var newIndex = itemIndex-1; - var focusItem = this.NavPrev(newIndex); - if (focusItem) - { - var child = this.FindChildElement(focusItem.parentNode.parentNode.id); - if (child && child.style.display == 'block') // children visible - { - var n=0; - var tmpElem; - while (1) // search for last child - { - tmpElem = document.getElementById('Item'+newIndex+'_c'+n); - if (tmpElem) - { - focusItem = tmpElem; - } - else // found it! - { - break; - } - n++; + this.Nav = function(evt,itemIndex) { + const e = (evt) ? evt : window.event; // for IE + if (e.keyCode==13) return true; + if (!this.ProcessKeys(e)) return false; + + if (this.lastKey==38) { // Up + const newIndex = itemIndex-1; + let focusItem = this.NavPrev(newIndex); + if (focusItem) { + let child = this.FindChildElement(focusItem.parentNode.parentNode.id); + if (child && child.style.display == 'block') { // children visible + let n=0; + let tmpElem; + for (;;) { // search for last child + tmpElem = document.getElementById('Item'+newIndex+'_c'+n); + if (tmpElem) { + focusItem = tmpElem; + } else { // found it! 
+ break; } + n++; } } - if (focusItem) - { - focusItem.focus(); - } - else // return focus to search field - { - parent.document.getElementById("MSearchField").focus(); - } - } - else if (this.lastKey==40) // Down - { - var newIndex = itemIndex+1; - var focusItem; - var item = document.getElementById('Item'+itemIndex); - var elem = this.FindChildElement(item.parentNode.parentNode.id); - if (elem && elem.style.display == 'block') // children visible - { - focusItem = document.getElementById('Item'+itemIndex+'_c0'); - } - if (!focusItem) focusItem = this.NavNext(newIndex); - if (focusItem) focusItem.focus(); } - else if (this.lastKey==39) // Right - { - var item = document.getElementById('Item'+itemIndex); - var elem = this.FindChildElement(item.parentNode.parentNode.id); - if (elem) elem.style.display = 'block'; + if (focusItem) { + focusItem.focus(); + } else { // return focus to search field + document.getElementById("MSearchField").focus(); } - else if (this.lastKey==37) // Left - { - var item = document.getElementById('Item'+itemIndex); - var elem = this.FindChildElement(item.parentNode.parentNode.id); - if (elem) elem.style.display = 'none'; + } else if (this.lastKey==40) { // Down + const newIndex = itemIndex+1; + let focusItem; + const item = document.getElementById('Item'+itemIndex); + const elem = this.FindChildElement(item.parentNode.parentNode.id); + if (elem && elem.style.display == 'block') { // children visible + focusItem = document.getElementById('Item'+itemIndex+'_c0'); } - else if (this.lastKey==27) // Escape - { - parent.searchBox.CloseResultsWindow(); - parent.document.getElementById("MSearchField").focus(); - } - else if (this.lastKey==13) // Enter - { - return true; - } - return false; + if (!focusItem) focusItem = this.NavNext(newIndex); + if (focusItem) focusItem.focus(); + } else if (this.lastKey==39) { // Right + const item = document.getElementById('Item'+itemIndex); + const elem = this.FindChildElement(item.parentNode.parentNode.id); + if 
(elem) elem.style.display = 'block'; + } else if (this.lastKey==37) { // Left + const item = document.getElementById('Item'+itemIndex); + const elem = this.FindChildElement(item.parentNode.parentNode.id); + if (elem) elem.style.display = 'none'; + } else if (this.lastKey==27) { // Escape + e.stopPropagation(); + searchBox.CloseResultsWindow(); + document.getElementById("MSearchField").focus(); + } else if (this.lastKey==13) { // Enter + return true; } + return false; + } - this.NavChild = function(evt,itemIndex,childIndex) - { - var e = (evt) ? evt : window.event; // for IE - if (e.keyCode==13) return true; - if (!this.ProcessKeys(e)) return false; - - if (this.lastKey==38) // Up - { - if (childIndex>0) - { - var newIndex = childIndex-1; - document.getElementById('Item'+itemIndex+'_c'+newIndex).focus(); - } - else // already at first child, jump to parent - { - document.getElementById('Item'+itemIndex).focus(); - } - } - else if (this.lastKey==40) // Down - { - var newIndex = childIndex+1; - var elem = document.getElementById('Item'+itemIndex+'_c'+newIndex); - if (!elem) // last child, jump to parent next parent - { - elem = this.NavNext(itemIndex+1); - } - if (elem) - { - elem.focus(); - } + this.NavChild = function(evt,itemIndex,childIndex) { + const e = (evt) ? 
evt : window.event; // for IE + if (e.keyCode==13) return true; + if (!this.ProcessKeys(e)) return false; + + if (this.lastKey==38) { // Up + if (childIndex>0) { + const newIndex = childIndex-1; + document.getElementById('Item'+itemIndex+'_c'+newIndex).focus(); + } else { // already at first child, jump to parent + document.getElementById('Item'+itemIndex).focus(); } - else if (this.lastKey==27) // Escape - { - parent.searchBox.CloseResultsWindow(); - parent.document.getElementById("MSearchField").focus(); + } else if (this.lastKey==40) { // Down + const newIndex = childIndex+1; + let elem = document.getElementById('Item'+itemIndex+'_c'+newIndex); + if (!elem) { // last child, jump to parent next parent + elem = this.NavNext(itemIndex+1); } - else if (this.lastKey==13) // Enter - { - return true; + if (elem) { + elem.focus(); } - return false; + } else if (this.lastKey==27) { // Escape + e.stopPropagation(); + searchBox.CloseResultsWindow(); + document.getElementById("MSearchField").focus(); + } else if (this.lastKey==13) { // Enter + return true; } + return false; + } } -function setKeyActions(elem,action) -{ - elem.setAttribute('onkeydown',action); - elem.setAttribute('onkeypress',action); - elem.setAttribute('onkeyup',action); -} +function createResults(resultsPath) { -function setClassAttr(elem,attr) -{ - elem.setAttribute('class',attr); - elem.setAttribute('className',attr); -} + function setKeyActions(elem,action) { + elem.setAttribute('onkeydown',action); + elem.setAttribute('onkeypress',action); + elem.setAttribute('onkeyup',action); + } + + function setClassAttr(elem,attr) { + elem.setAttribute('class',attr); + elem.setAttribute('className',attr); + } -function createResults() -{ - var results = document.getElementById("SRResults"); - for (var e=0; e { + const id = elem[0]; + const srResult = document.createElement('div'); srResult.setAttribute('id','SR_'+id); setClassAttr(srResult,'SRResult'); - var srEntry = document.createElement('div'); + const srEntry 
= document.createElement('div'); setClassAttr(srEntry,'SREntry'); - var srLink = document.createElement('a'); - srLink.setAttribute('id','Item'+e); - setKeyActions(srLink,'return searchResults.Nav(event,'+e+')'); + const srLink = document.createElement('a'); + srLink.setAttribute('id','Item'+index); + setKeyActions(srLink,'return searchResults.Nav(event,'+index+')'); setClassAttr(srLink,'SRSymbol'); - srLink.innerHTML = searchData[e][1][0]; + srLink.innerHTML = elem[1][0]; srEntry.appendChild(srLink); - if (searchData[e][1].length==2) // single result - { - srLink.setAttribute('href',searchData[e][1][1][0]); - if (searchData[e][1][1][1]) - { + if (elem[1].length==2) { // single result + srLink.setAttribute('href',resultsPath+elem[1][1][0]); + srLink.setAttribute('onclick','searchBox.CloseResultsWindow()'); + if (elem[1][1][1]) { srLink.setAttribute('target','_parent'); + } else { + srLink.setAttribute('target','_blank'); } - var srScope = document.createElement('span'); + const srScope = document.createElement('span'); setClassAttr(srScope,'SRScope'); - srScope.innerHTML = searchData[e][1][1][2]; + srScope.innerHTML = elem[1][1][2]; srEntry.appendChild(srScope); - } - else // multiple results - { + } else { // multiple results srLink.setAttribute('href','javascript:searchResults.Toggle("SR_'+id+'")'); - var srChildren = document.createElement('div'); + const srChildren = document.createElement('div'); setClassAttr(srChildren,'SRChildren'); - for (var c=0; c - + - - + + mi-malloc: Using the library + - + + @@ -29,20 +31,16 @@
    - + - -
    -
    mi-malloc -  1.7/2.0 +
    +
    mi-malloc 1.8/2.1
    +
    - -   + @@ -56,10 +54,15 @@
    - + +
    @@ -74,8 +77,8 @@
    @@ -88,24 +91,30 @@
    - +
    +
    +
    +
    +
    Loading...
    +
    Searching...
    +
    No Matches
    +
    +
    +
    -
    -
    -
    Using the library
    +
    +
    Using the library

    Build

    The preferred usage is including <mimalloc.h>, linking with the shared- or static library, and using the mi_malloc API exclusively for allocation. For example,

    gcc -o myprogram -lmimalloc myfile.c
    -

    mimalloc uses only safe OS calls (mmap and VirtualAlloc) and can co-exist with other allocators linked to the same program. If you use cmake, you can simply use:

    find_package(mimalloc 1.0 REQUIRED)
    +

    mimalloc uses only safe OS calls (mmap and VirtualAlloc) and can co-exist with other allocators linked to the same program. If you use cmake, you can simply use:

    find_package(mimalloc 2.1 REQUIRED)

    in your CMakeLists.txt to find a locally installed mimalloc. Then use either:

    target_link_libraries(myapp PUBLIC mimalloc)

    to link with the shared (dynamic) library, or:

    target_link_libraries(myapp PUBLIC mimalloc-static)

    to link with the static library. See test\CMakeLists.txt for an example.

    C++

    -

    For best performance in C++ programs, it is also recommended to override the global new and delete operators. For convience, mimalloc provides mimalloc-new-delete.h which does this for you – just include it in a single(!) source file in your project without linking to the mimalloc's library.

    +

    For best performance in C++ programs, it is also recommended to override the global new and delete operators. For convenience, mimalloc provides mimalloc-new-delete.h which does this for you – just include it in a single(!) source file in your project.

    In C++, mimalloc also provides the mi_stl_allocator struct which implements the std::allocator interface. For example:

    std::vector<some_struct, mi_stl_allocator<some_struct>> vec;
    vec.push_back(some_struct());

    Statistics

    @@ -148,7 +157,7 @@

    C++

    diff --git a/depends/mimalloc/ide/vs2017/mimalloc-override-test.vcxproj b/depends/mimalloc/ide/vs2017/mimalloc-override-test.vcxproj deleted file mode 100644 index faaa00e35477..000000000000 --- a/depends/mimalloc/ide/vs2017/mimalloc-override-test.vcxproj +++ /dev/null @@ -1,190 +0,0 @@ - - - - - Debug - Win32 - - - Release - Win32 - - - Debug - x64 - - - Release - x64 - - - - 15.0 - {FEF7868F-750E-4C21-A04D-22707CC66879} - mimalloc-override-test - 10.0.17134.0 - mimalloc-override-test - - - - Application - true - v141 - - - Application - false - v141 - true - - - Application - true - v141 - - - Application - false - v141 - true - - - - - - - - - - - - - - - - - - - - - $(ProjectDir)..\..\out\msvc-$(Platform)\$(Configuration)\ - $(ProjectDir)..\..\out\msvc-$(Platform)\$(ProjectName)\$(Configuration)\ - - - $(ProjectDir)..\..\out\msvc-$(Platform)\$(Configuration)\ - $(ProjectDir)..\..\out\msvc-$(Platform)\$(ProjectName)\$(Configuration)\ - - - $(ProjectDir)..\..\out\msvc-$(Platform)\$(Configuration)\ - $(ProjectDir)..\..\out\msvc-$(Platform)\$(ProjectName)\$(Configuration)\ - - - $(ProjectDir)..\..\out\msvc-$(Platform)\$(Configuration)\ - $(ProjectDir)..\..\out\msvc-$(Platform)\$(ProjectName)\$(Configuration)\ - - - - Level3 - Disabled - true - true - ..\..\include - MultiThreadedDebugDLL - false - Default - false - - - Console - kernel32.lib;%(AdditionalDependencies) - - - - - - - - - - Level3 - Disabled - true - true - ..\..\include - MultiThreadedDebugDLL - Sync - Default - false - - - Console - - - kernel32.lib;%(AdditionalDependencies) - - - - - - - - - - Level3 - MaxSpeed - true - true - true - true - ..\..\include - _MBCS;%(PreprocessorDefinitions);NDEBUG - MultiThreadedDLL - - - true - true - Console - kernel32.lib;user32.lib;gdi32.lib;winspool.lib;comdlg32.lib;advapi32.lib;shell32.lib;ole32.lib;oleaut32.lib;uuid.lib;odbc32.lib;odbccp32.lib;%(AdditionalDependencies) - - - - - - - - - Level3 - MaxSpeed - true - true - true - true - ..\..\include - 
_MBCS;%(PreprocessorDefinitions);NDEBUG - MultiThreadedDLL - - - true - true - Console - - - kernel32.lib;user32.lib;gdi32.lib;winspool.lib;comdlg32.lib;advapi32.lib;shell32.lib;ole32.lib;oleaut32.lib;uuid.lib;odbc32.lib;odbccp32.lib;%(AdditionalDependencies) - - - - - - - - - {abb5eae7-b3e6-432e-b636-333449892ea7} - - - - - - - - - \ No newline at end of file diff --git a/depends/mimalloc/ide/vs2017/mimalloc-override-test.vcxproj.filters b/depends/mimalloc/ide/vs2017/mimalloc-override-test.vcxproj.filters deleted file mode 100644 index eb5e70b7c25b..000000000000 --- a/depends/mimalloc/ide/vs2017/mimalloc-override-test.vcxproj.filters +++ /dev/null @@ -1,22 +0,0 @@ - - - - - {4FC737F1-C7A5-4376-A066-2A32D752A2FF} - cpp;c;cc;cxx;def;odl;idl;hpj;bat;asm;asmx - - - {93995380-89BD-4b04-88EB-625FBE52EBFB} - h;hh;hpp;hxx;hm;inl;inc;ipp;xsd - - - {67DA6AB6-F800-4c08-8B7A-83BB121AAD01} - rc;ico;cur;bmp;dlg;rc2;rct;bin;rgs;gif;jpg;jpeg;jpe;resx;tiff;tif;png;wav;mfcribbon-ms - - - - - Source Files - - - \ No newline at end of file diff --git a/depends/mimalloc/ide/vs2017/mimalloc-override.vcxproj b/depends/mimalloc/ide/vs2017/mimalloc-override.vcxproj deleted file mode 100644 index a87b69ac6887..000000000000 --- a/depends/mimalloc/ide/vs2017/mimalloc-override.vcxproj +++ /dev/null @@ -1,256 +0,0 @@ - - - - - Debug - Win32 - - - Release - Win32 - - - Debug - x64 - - - Release - x64 - - - - 15.0 - {ABB5EAE7-B3E6-432E-B636-333449892EA7} - mimalloc-override - 10.0.17134.0 - mimalloc-override - - - - DynamicLibrary - true - v141 - - - DynamicLibrary - false - v141 - - - DynamicLibrary - true - v141 - - - DynamicLibrary - false - v141 - - - - - - - - - - - - - - - - - - - - - $(SolutionDir)..\..\out\msvc-$(Platform)\$(Configuration)\ - $(SolutionDir)..\..\out\msvc-$(Platform)\$(ProjectName)\$(Configuration)\ - .dll - mimalloc-override - - - $(SolutionDir)..\..\out\msvc-$(Platform)\$(Configuration)\ - $(SolutionDir)..\..\out\msvc-$(Platform)\$(ProjectName)\$(Configuration)\ - .dll 
- mimalloc-override - - - $(SolutionDir)..\..\out\msvc-$(Platform)\$(Configuration)\ - $(SolutionDir)..\..\out\msvc-$(Platform)\$(ProjectName)\$(Configuration)\ - .dll - mimalloc-override - - - $(SolutionDir)..\..\out\msvc-$(Platform)\$(Configuration)\ - $(SolutionDir)..\..\out\msvc-$(Platform)\$(ProjectName)\$(Configuration)\ - .dll - mimalloc-override - - - - Level3 - Disabled - true - true - ../../include - _CRT_SECURE_NO_WARNINGS;MI_SHARED_LIB;MI_SHARED_LIB_EXPORT;MI_MALLOC_OVERRIDE;%(PreprocessorDefinitions); - MultiThreadedDebugDLL - false - Default - - - $(ProjectDir)\..\..\bin\mimalloc-redirect32.lib;%(AdditionalDependencies) - - - - - Default - false - - - COPY /Y $(ProjectDir)..\..\bin\mimalloc-redirect32.dll $(OutputPath) - - - Copy mimalloc-redirect32.dll to the output directory - - - - - Level3 - Disabled - true - true - ../../include - _CRT_SECURE_NO_WARNINGS;MI_SHARED_LIB;MI_SHARED_LIB_EXPORT;MI_MALLOC_OVERRIDE;%(PreprocessorDefinitions); - MultiThreadedDebugDLL - false - Default - - - $(ProjectDir)\..\..\bin\mimalloc-redirect.lib;bcrypt.lib;%(AdditionalDependencies) - - - - - Default - false - - - COPY /Y $(ProjectDir)..\..\bin\mimalloc-redirect.dll $(OutputPath) - - - copy mimalloc-redirect.dll to the output directory - - - - - Level3 - MaxSpeed - true - true - true - ../../include - _CRT_SECURE_NO_WARNINGS;MI_SHARED_LIB;MI_SHARED_LIB_EXPORT;MI_MALLOC_OVERRIDE;%(PreprocessorDefinitions);NDEBUG - AssemblyAndSourceCode - $(IntDir) - false - MultiThreadedDLL - Default - false - - - true - true - $(ProjectDir)\..\..\bin\mimalloc-redirect32.lib;%(AdditionalDependencies) - - - Default - false - - - COPY /Y $(ProjectDir)..\..\bin\mimalloc-redirect32.dll $(OutputPath) - - - Copy mimalloc-redirect32.dll to the output directory - - - - - Level3 - MaxSpeed - true - true - true - ../../include - _CRT_SECURE_NO_WARNINGS;MI_SHARED_LIB;MI_SHARED_LIB_EXPORT;MI_MALLOC_OVERRIDE;%(PreprocessorDefinitions);NDEBUG - AssemblyAndSourceCode - $(IntDir) - false - 
MultiThreadedDLL - Default - false - - - true - true - $(ProjectDir)\..\..\bin\mimalloc-redirect.lib;bcrypt.lib;%(AdditionalDependencies) - - - Default - false - - - COPY /Y $(ProjectDir)..\..\bin\mimalloc-redirect.dll $(OutputPath) - - - copy mimalloc-redirect.dll to the output directory - - - - - - - - - - - - - - false - false - false - false - - - true - true - true - true - - - - - - - - - - - true - true - true - true - - - - - - - - - - - \ No newline at end of file diff --git a/depends/mimalloc/ide/vs2017/mimalloc-override.vcxproj.filters b/depends/mimalloc/ide/vs2017/mimalloc-override.vcxproj.filters deleted file mode 100644 index d01f9311f199..000000000000 --- a/depends/mimalloc/ide/vs2017/mimalloc-override.vcxproj.filters +++ /dev/null @@ -1,86 +0,0 @@ - - - - - {4FC737F1-C7A5-4376-A066-2A32D752A2FF} - cpp;c;cc;cxx;def;odl;idl;hpj;bat;asm;asmx - - - {93995380-89BD-4b04-88EB-625FBE52EBFB} - h;hh;hpp;hxx;hm;inl;inc;ipp;xsd - - - - - Header Files - - - Header Files - - - Header Files - - - Header Files - - - Header Files - - - Header Files - - - Header Files - - - - - Source Files - - - Source Files - - - Source Files - - - Source Files - - - Source Files - - - Source Files - - - Source Files - - - Source Files - - - Source Files - - - Source Files - - - Source Files - - - Source Files - - - Source Files - - - Source Files - - - Source Files - - - Source Files - - - \ No newline at end of file diff --git a/depends/mimalloc/ide/vs2017/mimalloc-test-stress.vcxproj b/depends/mimalloc/ide/vs2017/mimalloc-test-stress.vcxproj deleted file mode 100644 index b8267d0b39b2..000000000000 --- a/depends/mimalloc/ide/vs2017/mimalloc-test-stress.vcxproj +++ /dev/null @@ -1,159 +0,0 @@ - - - - - Debug - Win32 - - - Release - Win32 - - - Debug - x64 - - - Release - x64 - - - - 15.0 - {FEF7958F-750E-4C21-A04D-22707CC66878} - mimalloc-test-stress - 10.0.17134.0 - mimalloc-test-stress - - - - Application - true - v141 - - - Application - false - v141 - true - - - Application - 
true - v141 - - - Application - false - v141 - true - - - - - - - - - - - - - - - - - - - - - $(ProjectDir)..\..\out\msvc-$(Platform)\$(Configuration)\ - $(ProjectDir)..\..\out\msvc-$(Platform)\$(ProjectName)\$(Configuration)\ - - - $(ProjectDir)..\..\out\msvc-$(Platform)\$(Configuration)\ - $(ProjectDir)..\..\out\msvc-$(Platform)\$(ProjectName)\$(Configuration)\ - - - $(ProjectDir)..\..\out\msvc-$(Platform)\$(Configuration)\ - $(ProjectDir)..\..\out\msvc-$(Platform)\$(ProjectName)\$(Configuration)\ - - - $(ProjectDir)..\..\out\msvc-$(Platform)\$(Configuration)\ - $(ProjectDir)..\..\out\msvc-$(Platform)\$(ProjectName)\$(Configuration)\ - - - - Level3 - Disabled - true - true - ..\..\include - - - Console - - - - - Level3 - Disabled - true - true - ..\..\include - - - Console - - - - - Level3 - MaxSpeed - true - true - true - true - ..\..\include - %(PreprocessorDefinitions);NDEBUG - - - true - true - Console - - - - - Level3 - MaxSpeed - true - true - true - true - ..\..\include - %(PreprocessorDefinitions);NDEBUG - - - true - true - Console - - - - - false - false - false - false - - - - - {abb5eae7-b3e6-432e-b636-333449892ea6} - - - - - - \ No newline at end of file diff --git a/depends/mimalloc/ide/vs2017/mimalloc-test-stress.vcxproj.filters b/depends/mimalloc/ide/vs2017/mimalloc-test-stress.vcxproj.filters deleted file mode 100644 index 7c5239e8271c..000000000000 --- a/depends/mimalloc/ide/vs2017/mimalloc-test-stress.vcxproj.filters +++ /dev/null @@ -1,22 +0,0 @@ - - - - - {4FC737F1-C7A5-4376-A066-2A32D752A2FF} - cpp;c;cc;cxx;def;odl;idl;hpj;bat;asm;asmx - - - {93995380-89BD-4b04-88EB-625FBE52EBFB} - h;hh;hpp;hxx;hm;inl;inc;ipp;xsd - - - {67DA6AB6-F800-4c08-8B7A-83BB121AAD01} - rc;ico;cur;bmp;dlg;rc2;rct;bin;rgs;gif;jpg;jpeg;jpe;resx;tiff;tif;png;wav;mfcribbon-ms - - - - - Source Files - - - \ No newline at end of file diff --git a/depends/mimalloc/ide/vs2017/mimalloc-test.vcxproj b/depends/mimalloc/ide/vs2017/mimalloc-test.vcxproj deleted file mode 100644 
index 27c7bb6ea44c..000000000000 --- a/depends/mimalloc/ide/vs2017/mimalloc-test.vcxproj +++ /dev/null @@ -1,158 +0,0 @@ - - - - - Debug - Win32 - - - Release - Win32 - - - Debug - x64 - - - Release - x64 - - - - 15.0 - {FEF7858F-750E-4C21-A04D-22707CC66878} - mimalloctest - 10.0.17134.0 - mimalloc-test - - - - Application - true - v141 - - - Application - false - v141 - true - - - Application - true - v141 - - - Application - false - v141 - true - - - - - - - - - - - - - - - - - - - - - $(ProjectDir)..\..\out\msvc-$(Platform)\$(Configuration)\ - $(ProjectDir)..\..\out\msvc-$(Platform)\$(ProjectName)\$(Configuration)\ - - - $(ProjectDir)..\..\out\msvc-$(Platform)\$(Configuration)\ - $(ProjectDir)..\..\out\msvc-$(Platform)\$(ProjectName)\$(Configuration)\ - - - $(ProjectDir)..\..\out\msvc-$(Platform)\$(Configuration)\ - $(ProjectDir)..\..\out\msvc-$(Platform)\$(ProjectName)\$(Configuration)\ - - - $(ProjectDir)..\..\out\msvc-$(Platform)\$(Configuration)\ - $(ProjectDir)..\..\out\msvc-$(Platform)\$(ProjectName)\$(Configuration)\ - - - - Level3 - Disabled - true - true - ..\..\include - stdcpp17 - - - Console - - - - - Level3 - Disabled - true - true - ..\..\include - stdcpp17 - - - Console - - - - - Level3 - MaxSpeed - true - true - true - true - ..\..\include - _MBCS;%(PreprocessorDefinitions);NDEBUG - stdcpp17 - - - true - true - Console - - - - - Level3 - MaxSpeed - true - true - true - true - ..\..\include - _MBCS;%(PreprocessorDefinitions);NDEBUG - stdcpp17 - - - true - true - Console - - - - - {abb5eae7-b3e6-432e-b636-333449892ea6} - - - - - - - - - \ No newline at end of file diff --git a/depends/mimalloc/ide/vs2017/mimalloc-test.vcxproj.filters b/depends/mimalloc/ide/vs2017/mimalloc-test.vcxproj.filters deleted file mode 100644 index fca75e1c3023..000000000000 --- a/depends/mimalloc/ide/vs2017/mimalloc-test.vcxproj.filters +++ /dev/null @@ -1,22 +0,0 @@ - - - - - {4FC737F1-C7A5-4376-A066-2A32D752A2FF} - cpp;c;cc;cxx;def;odl;idl;hpj;bat;asm;asmx - - - 
{93995380-89BD-4b04-88EB-625FBE52EBFB} - h;hh;hpp;hxx;hm;inl;inc;ipp;xsd - - - {67DA6AB6-F800-4c08-8B7A-83BB121AAD01} - rc;ico;cur;bmp;dlg;rc2;rct;bin;rgs;gif;jpg;jpeg;jpe;resx;tiff;tif;png;wav;mfcribbon-ms - - - - - Source Files - - - \ No newline at end of file diff --git a/depends/mimalloc/ide/vs2017/mimalloc.sln b/depends/mimalloc/ide/vs2017/mimalloc.sln deleted file mode 100644 index aeab6b88fc35..000000000000 --- a/depends/mimalloc/ide/vs2017/mimalloc.sln +++ /dev/null @@ -1,71 +0,0 @@ - -Microsoft Visual Studio Solution File, Format Version 12.00 -# Visual Studio 15 -VisualStudioVersion = 15.0.28010.2016 -MinimumVisualStudioVersion = 10.0.40219.1 -Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "mimalloc", "mimalloc.vcxproj", "{ABB5EAE7-B3E6-432E-B636-333449892EA6}" -EndProject -Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "mimalloc-test", "mimalloc-test.vcxproj", "{FEF7858F-750E-4C21-A04D-22707CC66878}" -EndProject -Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "mimalloc-override", "mimalloc-override.vcxproj", "{ABB5EAE7-B3E6-432E-B636-333449892EA7}" -EndProject -Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "mimalloc-override-test", "mimalloc-override-test.vcxproj", "{FEF7868F-750E-4C21-A04D-22707CC66879}" -EndProject -Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "mimalloc-test-stress", "mimalloc-test-stress.vcxproj", "{FEF7958F-750E-4C21-A04D-22707CC66878}" -EndProject -Global - GlobalSection(SolutionConfigurationPlatforms) = preSolution - Debug|x64 = Debug|x64 - Debug|x86 = Debug|x86 - Release|x64 = Release|x64 - Release|x86 = Release|x86 - EndGlobalSection - GlobalSection(ProjectConfigurationPlatforms) = postSolution - {ABB5EAE7-B3E6-432E-B636-333449892EA6}.Debug|x64.ActiveCfg = Debug|x64 - {ABB5EAE7-B3E6-432E-B636-333449892EA6}.Debug|x64.Build.0 = Debug|x64 - {ABB5EAE7-B3E6-432E-B636-333449892EA6}.Debug|x86.ActiveCfg = Debug|Win32 - {ABB5EAE7-B3E6-432E-B636-333449892EA6}.Debug|x86.Build.0 = Debug|Win32 - 
{ABB5EAE7-B3E6-432E-B636-333449892EA6}.Release|x64.ActiveCfg = Release|x64 - {ABB5EAE7-B3E6-432E-B636-333449892EA6}.Release|x64.Build.0 = Release|x64 - {ABB5EAE7-B3E6-432E-B636-333449892EA6}.Release|x86.ActiveCfg = Release|Win32 - {ABB5EAE7-B3E6-432E-B636-333449892EA6}.Release|x86.Build.0 = Release|Win32 - {FEF7858F-750E-4C21-A04D-22707CC66878}.Debug|x64.ActiveCfg = Debug|x64 - {FEF7858F-750E-4C21-A04D-22707CC66878}.Debug|x64.Build.0 = Debug|x64 - {FEF7858F-750E-4C21-A04D-22707CC66878}.Debug|x86.ActiveCfg = Debug|Win32 - {FEF7858F-750E-4C21-A04D-22707CC66878}.Debug|x86.Build.0 = Debug|Win32 - {FEF7858F-750E-4C21-A04D-22707CC66878}.Release|x64.ActiveCfg = Release|x64 - {FEF7858F-750E-4C21-A04D-22707CC66878}.Release|x64.Build.0 = Release|x64 - {FEF7858F-750E-4C21-A04D-22707CC66878}.Release|x86.ActiveCfg = Release|Win32 - {FEF7858F-750E-4C21-A04D-22707CC66878}.Release|x86.Build.0 = Release|Win32 - {ABB5EAE7-B3E6-432E-B636-333449892EA7}.Debug|x64.ActiveCfg = Debug|x64 - {ABB5EAE7-B3E6-432E-B636-333449892EA7}.Debug|x64.Build.0 = Debug|x64 - {ABB5EAE7-B3E6-432E-B636-333449892EA7}.Debug|x86.ActiveCfg = Debug|Win32 - {ABB5EAE7-B3E6-432E-B636-333449892EA7}.Debug|x86.Build.0 = Debug|Win32 - {ABB5EAE7-B3E6-432E-B636-333449892EA7}.Release|x64.ActiveCfg = Release|x64 - {ABB5EAE7-B3E6-432E-B636-333449892EA7}.Release|x64.Build.0 = Release|x64 - {ABB5EAE7-B3E6-432E-B636-333449892EA7}.Release|x86.ActiveCfg = Release|Win32 - {ABB5EAE7-B3E6-432E-B636-333449892EA7}.Release|x86.Build.0 = Release|Win32 - {FEF7868F-750E-4C21-A04D-22707CC66879}.Debug|x64.ActiveCfg = Debug|x64 - {FEF7868F-750E-4C21-A04D-22707CC66879}.Debug|x64.Build.0 = Debug|x64 - {FEF7868F-750E-4C21-A04D-22707CC66879}.Debug|x86.ActiveCfg = Debug|Win32 - {FEF7868F-750E-4C21-A04D-22707CC66879}.Debug|x86.Build.0 = Debug|Win32 - {FEF7868F-750E-4C21-A04D-22707CC66879}.Release|x64.ActiveCfg = Release|x64 - {FEF7868F-750E-4C21-A04D-22707CC66879}.Release|x64.Build.0 = Release|x64 - 
{FEF7868F-750E-4C21-A04D-22707CC66879}.Release|x86.ActiveCfg = Release|Win32 - {FEF7868F-750E-4C21-A04D-22707CC66879}.Release|x86.Build.0 = Release|Win32 - {FEF7958F-750E-4C21-A04D-22707CC66878}.Debug|x64.ActiveCfg = Debug|x64 - {FEF7958F-750E-4C21-A04D-22707CC66878}.Debug|x64.Build.0 = Debug|x64 - {FEF7958F-750E-4C21-A04D-22707CC66878}.Debug|x86.ActiveCfg = Debug|Win32 - {FEF7958F-750E-4C21-A04D-22707CC66878}.Debug|x86.Build.0 = Debug|Win32 - {FEF7958F-750E-4C21-A04D-22707CC66878}.Release|x64.ActiveCfg = Release|x64 - {FEF7958F-750E-4C21-A04D-22707CC66878}.Release|x64.Build.0 = Release|x64 - {FEF7958F-750E-4C21-A04D-22707CC66878}.Release|x86.ActiveCfg = Release|Win32 - {FEF7958F-750E-4C21-A04D-22707CC66878}.Release|x86.Build.0 = Release|Win32 - EndGlobalSection - GlobalSection(SolutionProperties) = preSolution - HideSolutionNode = FALSE - EndGlobalSection - GlobalSection(ExtensibilityGlobals) = postSolution - SolutionGuid = {4297F93D-486A-4243-995F-7D32F59AE82A} - EndGlobalSection -EndGlobal diff --git a/depends/mimalloc/ide/vs2017/mimalloc.vcxproj b/depends/mimalloc/ide/vs2017/mimalloc.vcxproj deleted file mode 100644 index 41fb77c1bf23..000000000000 --- a/depends/mimalloc/ide/vs2017/mimalloc.vcxproj +++ /dev/null @@ -1,262 +0,0 @@ - - - - - Debug - Win32 - - - Release - Win32 - - - Debug - x64 - - - Release - x64 - - - - 15.0 - {ABB5EAE7-B3E6-432E-B636-333449892EA6} - mimalloc - 10.0.17134.0 - mimalloc - - - - StaticLibrary - true - v141 - - - StaticLibrary - false - v141 - true - - - StaticLibrary - true - v141 - - - StaticLibrary - false - v141 - true - - - - - - - - - - - - - - - - - - - - - $(SolutionDir)..\..\out\msvc-$(Platform)\$(Configuration)\ - $(SolutionDir)..\..\out\msvc-$(Platform)\$(ProjectName)\$(Configuration)\ - .lib - mimalloc-static - - - $(SolutionDir)..\..\out\msvc-$(Platform)\$(Configuration)\ - $(SolutionDir)..\..\out\msvc-$(Platform)\$(ProjectName)\$(Configuration)\ - .lib - mimalloc-static - - - 
$(SolutionDir)..\..\out\msvc-$(Platform)\$(Configuration)\ - $(SolutionDir)..\..\out\msvc-$(Platform)\$(ProjectName)\$(Configuration)\ - .lib - mimalloc-static - - - $(SolutionDir)..\..\out\msvc-$(Platform)\$(Configuration)\ - $(SolutionDir)..\..\out\msvc-$(Platform)\$(ProjectName)\$(Configuration)\ - .lib - mimalloc-static - - - false - - - false - - - false - - - false - - - - Level3 - Disabled - true - true - ../../include - _CRT_SECURE_NO_WARNINGS;MI_DEBUG=3;%(PreprocessorDefinitions); - CompileAsC - false - stdcpp17 - - - - - - - - - - - Level4 - Disabled - true - true - ../../include - _CRT_SECURE_NO_WARNINGS;MI_DEBUG=3;%(PreprocessorDefinitions); - CompileAsC - false - stdcpp17 - - - - - - - - - - - - - - - - - - - Level3 - MaxSpeed - true - true - ../../include - _CRT_SECURE_NO_WARNINGS;%(PreprocessorDefinitions);NDEBUG - AssemblyAndSourceCode - $(IntDir) - false - false - Default - CompileAsC - true - - - true - true - - - - - - - - - - - Level4 - MaxSpeed - true - true - ../../include - _CRT_SECURE_NO_WARNINGS;%(PreprocessorDefinitions);NDEBUG - AssemblyAndSourceCode - $(IntDir) - false - false - Default - CompileAsC - true - - - true - true - - - - - - - - - - - - - - - - - false - false - false - false - - - true - true - true - true - - - true - true - true - true - - - - - - - - - - true - true - true - true - - - - - - - - - - - - - - - - - - - - - \ No newline at end of file diff --git a/depends/mimalloc/ide/vs2017/mimalloc.vcxproj.filters b/depends/mimalloc/ide/vs2017/mimalloc.vcxproj.filters deleted file mode 100644 index 054176452686..000000000000 --- a/depends/mimalloc/ide/vs2017/mimalloc.vcxproj.filters +++ /dev/null @@ -1,92 +0,0 @@ - - - - - {4FC737F1-C7A5-4376-A066-2A32D752A2FF} - cpp;c;cc;cxx;def;odl;idl;hpj;bat;asm;asmx - - - {93995380-89BD-4b04-88EB-625FBE52EBFB} - h;hh;hpp;hxx;hm;inl;inc;ipp;xsd - - - - - Source Files - - - Source Files - - - Source Files - - - Source Files - - - Source Files - - - Source Files - - - Source Files - - - 
Source Files - - - Source Files - - - Source Files - - - Source Files - - - Source Files - - - Source Files - - - Source Files - - - Source Files - - - Source Files - - - Source Files - - - Source Files - - - - - Header Files - - - Header Files - - - Header Files - - - Header Files - - - Header Files - - - Header Files - - - Header Files - - - \ No newline at end of file diff --git a/depends/mimalloc/ide/vs2019/mimalloc-override-test.vcxproj b/depends/mimalloc/ide/vs2019/mimalloc-override-test.vcxproj deleted file mode 100644 index 7a9202f1b1b6..000000000000 --- a/depends/mimalloc/ide/vs2019/mimalloc-override-test.vcxproj +++ /dev/null @@ -1,190 +0,0 @@ - - - - - Debug - Win32 - - - Release - Win32 - - - Debug - x64 - - - Release - x64 - - - - 15.0 - {FEF7868F-750E-4C21-A04D-22707CC66879} - mimalloc-override-test - 10.0 - mimalloc-override-test - - - - Application - true - v142 - - - Application - false - v142 - true - - - Application - true - v142 - - - Application - false - v142 - true - - - - - - - - - - - - - - - - - - - - - $(ProjectDir)..\..\out\msvc-$(Platform)\$(Configuration)\ - $(ProjectDir)..\..\out\msvc-$(Platform)\$(ProjectName)\$(Configuration)\ - - - $(ProjectDir)..\..\out\msvc-$(Platform)\$(Configuration)\ - $(ProjectDir)..\..\out\msvc-$(Platform)\$(ProjectName)\$(Configuration)\ - - - $(ProjectDir)..\..\out\msvc-$(Platform)\$(Configuration)\ - $(ProjectDir)..\..\out\msvc-$(Platform)\$(ProjectName)\$(Configuration)\ - - - $(ProjectDir)..\..\out\msvc-$(Platform)\$(Configuration)\ - $(ProjectDir)..\..\out\msvc-$(Platform)\$(ProjectName)\$(Configuration)\ - - - - Level3 - Disabled - true - true - ..\..\include - MultiThreadedDebugDLL - Sync - Default - false - - - Console - kernel32.lib;%(AdditionalDependencies) - - - - - - - - - - Level3 - Disabled - true - true - ..\..\include - MultiThreadedDebugDLL - Sync - Default - false - - - Console - - - kernel32.lib;%(AdditionalDependencies) - - - - - - - - - - Level3 - MaxSpeed - true - true - true - true - 
..\..\include - _MBCS;%(PreprocessorDefinitions);NDEBUG - MultiThreadedDLL - - - true - true - Console - kernel32.lib;user32.lib;gdi32.lib;winspool.lib;comdlg32.lib;advapi32.lib;shell32.lib;ole32.lib;oleaut32.lib;uuid.lib;odbc32.lib;odbccp32.lib;%(AdditionalDependencies) - - - - - - - - - Level3 - MaxSpeed - true - true - true - true - ..\..\include - _MBCS;%(PreprocessorDefinitions);NDEBUG - MultiThreadedDLL - - - true - true - Console - - - kernel32.lib;user32.lib;gdi32.lib;winspool.lib;comdlg32.lib;advapi32.lib;shell32.lib;ole32.lib;oleaut32.lib;uuid.lib;odbc32.lib;odbccp32.lib;%(AdditionalDependencies) - - - - - - - - - - - - {abb5eae7-b3e6-432e-b636-333449892ea7} - - - - - - \ No newline at end of file diff --git a/depends/mimalloc/ide/vs2019/mimalloc-override.vcxproj b/depends/mimalloc/ide/vs2019/mimalloc-override.vcxproj deleted file mode 100644 index 4136e574a83c..000000000000 --- a/depends/mimalloc/ide/vs2019/mimalloc-override.vcxproj +++ /dev/null @@ -1,256 +0,0 @@ - - - - - Debug - Win32 - - - Release - Win32 - - - Debug - x64 - - - Release - x64 - - - - 15.0 - {ABB5EAE7-B3E6-432E-B636-333449892EA7} - mimalloc-override - 10.0 - mimalloc-override - - - - DynamicLibrary - true - v142 - - - DynamicLibrary - false - v142 - - - DynamicLibrary - true - v142 - - - DynamicLibrary - false - v142 - - - - - - - - - - - - - - - - - - - - - $(SolutionDir)..\..\out\msvc-$(Platform)\$(Configuration)\ - $(SolutionDir)..\..\out\msvc-$(Platform)\$(ProjectName)\$(Configuration)\ - .dll - mimalloc-override - - - $(SolutionDir)..\..\out\msvc-$(Platform)\$(Configuration)\ - $(SolutionDir)..\..\out\msvc-$(Platform)\$(ProjectName)\$(Configuration)\ - .dll - mimalloc-override - - - $(SolutionDir)..\..\out\msvc-$(Platform)\$(Configuration)\ - $(SolutionDir)..\..\out\msvc-$(Platform)\$(ProjectName)\$(Configuration)\ - .dll - mimalloc-override - - - $(SolutionDir)..\..\out\msvc-$(Platform)\$(Configuration)\ - $(SolutionDir)..\..\out\msvc-$(Platform)\$(ProjectName)\$(Configuration)\ 
- .dll - mimalloc-override - - - - Level3 - Disabled - true - true - ../../include - MI_SHARED_LIB;MI_SHARED_LIB_EXPORT;MI_MALLOC_OVERRIDE;%(PreprocessorDefinitions); - MultiThreadedDebugDLL - false - Default - - - $(ProjectDir)\..\..\bin\mimalloc-redirect32.lib;%(AdditionalDependencies) - - - - - Default - false - - - COPY /Y $(ProjectDir)..\..\bin\mimalloc-redirect32.dll $(OutputPath) - - - Copy mimalloc-redirect32.dll to the output directory - - - - - Level3 - Disabled - true - true - ../../include - MI_DEBUG=3;MI_SHARED_LIB;MI_SHARED_LIB_EXPORT;MI_MALLOC_OVERRIDE;%(PreprocessorDefinitions); - MultiThreadedDebugDLL - false - Default - - - $(ProjectDir)\..\..\bin\mimalloc-redirect.lib;%(AdditionalDependencies) - - - - - Default - false - - - COPY /Y $(ProjectDir)..\..\bin\mimalloc-redirect.dll $(OutputPath) - - - copy mimalloc-redirect.dll to the output directory - - - - - Level3 - MaxSpeed - true - true - true - ../../include - MI_SHARED_LIB;MI_SHARED_LIB_EXPORT;MI_MALLOC_OVERRIDE;%(PreprocessorDefinitions);NDEBUG - AssemblyAndSourceCode - $(IntDir) - false - MultiThreadedDLL - Default - false - - - true - true - $(ProjectDir)\..\..\bin\mimalloc-redirect32.lib;%(AdditionalDependencies) - - - Default - false - - - COPY /Y $(ProjectDir)..\..\bin\mimalloc-redirect32.dll $(OutputPath) - - - Copy mimalloc-redirect32.dll to the output directory - - - - - Level3 - MaxSpeed - true - true - true - ../../include - MI_SHARED_LIB;MI_SHARED_LIB_EXPORT;MI_MALLOC_OVERRIDE;%(PreprocessorDefinitions);NDEBUG - AssemblyAndSourceCode - $(IntDir) - false - MultiThreadedDLL - Default - false - - - true - true - $(ProjectDir)\..\..\bin\mimalloc-redirect.lib;%(AdditionalDependencies) - - - Default - false - - - COPY /Y $(ProjectDir)..\..\bin\mimalloc-redirect.dll $(OutputPath) - - - copy mimalloc-redirect.dll to the output directory - - - - - - - - - - - - - - false - false - false - false - - - true - true - true - true - - - - - - - - - - - true - true - true - true - - - - - - - - - 
- - \ No newline at end of file diff --git a/depends/mimalloc/ide/vs2019/mimalloc-override.vcxproj.filters b/depends/mimalloc/ide/vs2019/mimalloc-override.vcxproj.filters deleted file mode 100644 index d6b7b5a966df..000000000000 --- a/depends/mimalloc/ide/vs2019/mimalloc-override.vcxproj.filters +++ /dev/null @@ -1,84 +0,0 @@ - - - - - Source Files - - - Source Files - - - Source Files - - - Source Files - - - Source Files - - - Source Files - - - Source Files - - - Source Files - - - Source Files - - - Source Files - - - Source Files - - - Source Files - - - Source Files - - - Source Files - - - Source Files - - - Source Files - - - - - Header Files - - - Header Files - - - Header Files - - - Header Files - - - Header Files - - - Header Files - - - Header Files - - - - - {f1fccf27-17b9-42dd-ba51-6070baff85c6} - - - {39cb7e38-69d0-43fb-8406-6a0f7cefc3b4} - - - \ No newline at end of file diff --git a/depends/mimalloc/ide/vs2019/mimalloc-test-api.vcxproj b/depends/mimalloc/ide/vs2019/mimalloc-test-api.vcxproj deleted file mode 100644 index 812a9cb1163c..000000000000 --- a/depends/mimalloc/ide/vs2019/mimalloc-test-api.vcxproj +++ /dev/null @@ -1,155 +0,0 @@ - - - - - Debug - Win32 - - - Release - Win32 - - - Debug - x64 - - - Release - x64 - - - - 15.0 - {FFF7958F-750E-4C21-A04D-22707CC66878} - mimalloc-test-api - 10.0 - mimalloc-test-api - - - - Application - true - v142 - - - Application - false - v142 - true - - - Application - true - v142 - - - Application - false - v142 - true - - - - - - - - - - - - - - - - - - - - - $(ProjectDir)..\..\out\msvc-$(Platform)\$(Configuration)\ - $(ProjectDir)..\..\out\msvc-$(Platform)\$(ProjectName)\$(Configuration)\ - - - $(ProjectDir)..\..\out\msvc-$(Platform)\$(Configuration)\ - $(ProjectDir)..\..\out\msvc-$(Platform)\$(ProjectName)\$(Configuration)\ - - - $(ProjectDir)..\..\out\msvc-$(Platform)\$(Configuration)\ - $(ProjectDir)..\..\out\msvc-$(Platform)\$(ProjectName)\$(Configuration)\ - - - 
$(ProjectDir)..\..\out\msvc-$(Platform)\$(Configuration)\ - $(ProjectDir)..\..\out\msvc-$(Platform)\$(ProjectName)\$(Configuration)\ - - - - Level3 - Disabled - true - true - ..\..\include - - - Console - - - - - Level3 - Disabled - true - true - ..\..\include - - - Console - - - - - Level3 - MaxSpeed - true - true - true - true - ..\..\include - %(PreprocessorDefinitions);NDEBUG - - - true - true - Console - - - - - Level3 - MaxSpeed - true - true - true - true - ..\..\include - %(PreprocessorDefinitions);NDEBUG - - - true - true - Console - - - - - - - - - {abb5eae7-b3e6-432e-b636-333449892ea6} - - - - - - diff --git a/depends/mimalloc/ide/vs2019/mimalloc-test-stress.vcxproj b/depends/mimalloc/ide/vs2019/mimalloc-test-stress.vcxproj deleted file mode 100644 index ef7ab3575a96..000000000000 --- a/depends/mimalloc/ide/vs2019/mimalloc-test-stress.vcxproj +++ /dev/null @@ -1,159 +0,0 @@ - - - - - Debug - Win32 - - - Release - Win32 - - - Debug - x64 - - - Release - x64 - - - - 15.0 - {FEF7958F-750E-4C21-A04D-22707CC66878} - mimalloc-test-stress - 10.0 - mimalloc-test-stress - - - - Application - true - v142 - - - Application - false - v142 - true - - - Application - true - v142 - - - Application - false - v142 - true - - - - - - - - - - - - - - - - - - - - - $(ProjectDir)..\..\out\msvc-$(Platform)\$(Configuration)\ - $(ProjectDir)..\..\out\msvc-$(Platform)\$(ProjectName)\$(Configuration)\ - - - $(ProjectDir)..\..\out\msvc-$(Platform)\$(Configuration)\ - $(ProjectDir)..\..\out\msvc-$(Platform)\$(ProjectName)\$(Configuration)\ - - - $(ProjectDir)..\..\out\msvc-$(Platform)\$(Configuration)\ - $(ProjectDir)..\..\out\msvc-$(Platform)\$(ProjectName)\$(Configuration)\ - - - $(ProjectDir)..\..\out\msvc-$(Platform)\$(Configuration)\ - $(ProjectDir)..\..\out\msvc-$(Platform)\$(ProjectName)\$(Configuration)\ - - - - Level3 - Disabled - true - true - ..\..\include - - - Console - - - - - Level3 - Disabled - true - true - ..\..\include - - - Console - - - - - Level3 - MaxSpeed - 
true - true - true - true - ..\..\include - %(PreprocessorDefinitions);NDEBUG - - - true - true - Console - - - - - Level3 - MaxSpeed - true - true - true - true - ..\..\include - %(PreprocessorDefinitions);NDEBUG - - - true - true - Console - - - - - false - false - false - false - - - - - {abb5eae7-b3e6-432e-b636-333449892ea6} - - - - - - \ No newline at end of file diff --git a/depends/mimalloc/ide/vs2019/mimalloc-test.vcxproj b/depends/mimalloc/ide/vs2019/mimalloc-test.vcxproj deleted file mode 100644 index 13af6ab4953b..000000000000 --- a/depends/mimalloc/ide/vs2019/mimalloc-test.vcxproj +++ /dev/null @@ -1,158 +0,0 @@ - - - - - Debug - Win32 - - - Release - Win32 - - - Debug - x64 - - - Release - x64 - - - - 15.0 - {FEF7858F-750E-4C21-A04D-22707CC66878} - mimalloctest - 10.0 - mimalloc-test - - - - Application - true - v142 - - - Application - false - v142 - true - - - Application - true - v142 - - - Application - false - v142 - true - - - - - - - - - - - - - - - - - - - - - $(ProjectDir)..\..\out\msvc-$(Platform)\$(Configuration)\ - $(ProjectDir)..\..\out\msvc-$(Platform)\$(ProjectName)\$(Configuration)\ - - - $(ProjectDir)..\..\out\msvc-$(Platform)\$(Configuration)\ - $(ProjectDir)..\..\out\msvc-$(Platform)\$(ProjectName)\$(Configuration)\ - - - $(ProjectDir)..\..\out\msvc-$(Platform)\$(Configuration)\ - $(ProjectDir)..\..\out\msvc-$(Platform)\$(ProjectName)\$(Configuration)\ - - - $(ProjectDir)..\..\out\msvc-$(Platform)\$(Configuration)\ - $(ProjectDir)..\..\out\msvc-$(Platform)\$(ProjectName)\$(Configuration)\ - - - - Level3 - Disabled - true - true - ..\..\include - stdcpp17 - - - Console - - - - - Level3 - Disabled - true - true - ..\..\include - stdcpp17 - - - Console - - - - - Level3 - MaxSpeed - true - true - true - true - ..\..\include - _MBCS;%(PreprocessorDefinitions);NDEBUG - stdcpp17 - - - true - true - Console - - - - - Level3 - MaxSpeed - true - true - true - true - ..\..\include - _MBCS;%(PreprocessorDefinitions);NDEBUG - stdcpp17 - - - true 
- true - Console - - - - - {abb5eae7-b3e6-432e-b636-333449892ea6} - - - - - - - - - \ No newline at end of file diff --git a/depends/mimalloc/ide/vs2019/mimalloc.sln b/depends/mimalloc/ide/vs2019/mimalloc.sln deleted file mode 100644 index fcb938a4fe2a..000000000000 --- a/depends/mimalloc/ide/vs2019/mimalloc.sln +++ /dev/null @@ -1,81 +0,0 @@ - -Microsoft Visual Studio Solution File, Format Version 12.00 -# Visual Studio Version 16 -VisualStudioVersion = 16.0.29709.97 -MinimumVisualStudioVersion = 10.0.40219.1 -Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "mimalloc", "mimalloc.vcxproj", "{ABB5EAE7-B3E6-432E-B636-333449892EA6}" -EndProject -Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "mimalloc-test", "mimalloc-test.vcxproj", "{FEF7858F-750E-4C21-A04D-22707CC66878}" -EndProject -Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "mimalloc-override", "mimalloc-override.vcxproj", "{ABB5EAE7-B3E6-432E-B636-333449892EA7}" -EndProject -Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "mimalloc-override-test", "mimalloc-override-test.vcxproj", "{FEF7868F-750E-4C21-A04D-22707CC66879}" -EndProject -Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "mimalloc-test-stress", "mimalloc-test-stress.vcxproj", "{FEF7958F-750E-4C21-A04D-22707CC66878}" -EndProject -Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "mimalloc-test-api", "mimalloc-test-api.vcxproj", "{FFF7958F-750E-4C21-A04D-22707CC66878}" -EndProject -Global - GlobalSection(SolutionConfigurationPlatforms) = preSolution - Debug|x64 = Debug|x64 - Debug|x86 = Debug|x86 - Release|x64 = Release|x64 - Release|x86 = Release|x86 - EndGlobalSection - GlobalSection(ProjectConfigurationPlatforms) = postSolution - {ABB5EAE7-B3E6-432E-B636-333449892EA6}.Debug|x64.ActiveCfg = Debug|x64 - {ABB5EAE7-B3E6-432E-B636-333449892EA6}.Debug|x64.Build.0 = Debug|x64 - {ABB5EAE7-B3E6-432E-B636-333449892EA6}.Debug|x86.ActiveCfg = Debug|Win32 - {ABB5EAE7-B3E6-432E-B636-333449892EA6}.Debug|x86.Build.0 = Debug|Win32 - 
{ABB5EAE7-B3E6-432E-B636-333449892EA6}.Release|x64.ActiveCfg = Release|x64 - {ABB5EAE7-B3E6-432E-B636-333449892EA6}.Release|x64.Build.0 = Release|x64 - {ABB5EAE7-B3E6-432E-B636-333449892EA6}.Release|x86.ActiveCfg = Release|Win32 - {ABB5EAE7-B3E6-432E-B636-333449892EA6}.Release|x86.Build.0 = Release|Win32 - {FEF7858F-750E-4C21-A04D-22707CC66878}.Debug|x64.ActiveCfg = Debug|x64 - {FEF7858F-750E-4C21-A04D-22707CC66878}.Debug|x64.Build.0 = Debug|x64 - {FEF7858F-750E-4C21-A04D-22707CC66878}.Debug|x86.ActiveCfg = Debug|Win32 - {FEF7858F-750E-4C21-A04D-22707CC66878}.Debug|x86.Build.0 = Debug|Win32 - {FEF7858F-750E-4C21-A04D-22707CC66878}.Release|x64.ActiveCfg = Release|x64 - {FEF7858F-750E-4C21-A04D-22707CC66878}.Release|x64.Build.0 = Release|x64 - {FEF7858F-750E-4C21-A04D-22707CC66878}.Release|x86.ActiveCfg = Release|Win32 - {FEF7858F-750E-4C21-A04D-22707CC66878}.Release|x86.Build.0 = Release|Win32 - {ABB5EAE7-B3E6-432E-B636-333449892EA7}.Debug|x64.ActiveCfg = Debug|x64 - {ABB5EAE7-B3E6-432E-B636-333449892EA7}.Debug|x64.Build.0 = Debug|x64 - {ABB5EAE7-B3E6-432E-B636-333449892EA7}.Debug|x86.ActiveCfg = Debug|Win32 - {ABB5EAE7-B3E6-432E-B636-333449892EA7}.Debug|x86.Build.0 = Debug|Win32 - {ABB5EAE7-B3E6-432E-B636-333449892EA7}.Release|x64.ActiveCfg = Release|x64 - {ABB5EAE7-B3E6-432E-B636-333449892EA7}.Release|x64.Build.0 = Release|x64 - {ABB5EAE7-B3E6-432E-B636-333449892EA7}.Release|x86.ActiveCfg = Release|Win32 - {ABB5EAE7-B3E6-432E-B636-333449892EA7}.Release|x86.Build.0 = Release|Win32 - {FEF7868F-750E-4C21-A04D-22707CC66879}.Debug|x64.ActiveCfg = Debug|x64 - {FEF7868F-750E-4C21-A04D-22707CC66879}.Debug|x64.Build.0 = Debug|x64 - {FEF7868F-750E-4C21-A04D-22707CC66879}.Debug|x86.ActiveCfg = Debug|Win32 - {FEF7868F-750E-4C21-A04D-22707CC66879}.Debug|x86.Build.0 = Debug|Win32 - {FEF7868F-750E-4C21-A04D-22707CC66879}.Release|x64.ActiveCfg = Release|x64 - {FEF7868F-750E-4C21-A04D-22707CC66879}.Release|x64.Build.0 = Release|x64 - 
{FEF7868F-750E-4C21-A04D-22707CC66879}.Release|x86.ActiveCfg = Release|Win32 - {FEF7868F-750E-4C21-A04D-22707CC66879}.Release|x86.Build.0 = Release|Win32 - {FEF7958F-750E-4C21-A04D-22707CC66878}.Debug|x64.ActiveCfg = Debug|x64 - {FEF7958F-750E-4C21-A04D-22707CC66878}.Debug|x64.Build.0 = Debug|x64 - {FEF7958F-750E-4C21-A04D-22707CC66878}.Debug|x86.ActiveCfg = Debug|Win32 - {FEF7958F-750E-4C21-A04D-22707CC66878}.Debug|x86.Build.0 = Debug|Win32 - {FEF7958F-750E-4C21-A04D-22707CC66878}.Release|x64.ActiveCfg = Release|x64 - {FEF7958F-750E-4C21-A04D-22707CC66878}.Release|x64.Build.0 = Release|x64 - {FEF7958F-750E-4C21-A04D-22707CC66878}.Release|x86.ActiveCfg = Release|Win32 - {FEF7958F-750E-4C21-A04D-22707CC66878}.Release|x86.Build.0 = Release|Win32 - {FFF7958F-750E-4C21-A04D-22707CC66878}.Debug|x64.ActiveCfg = Debug|x64 - {FFF7958F-750E-4C21-A04D-22707CC66878}.Debug|x64.Build.0 = Debug|x64 - {FFF7958F-750E-4C21-A04D-22707CC66878}.Debug|x86.ActiveCfg = Debug|Win32 - {FFF7958F-750E-4C21-A04D-22707CC66878}.Debug|x86.Build.0 = Debug|Win32 - {FFF7958F-750E-4C21-A04D-22707CC66878}.Release|x64.ActiveCfg = Release|x64 - {FFF7958F-750E-4C21-A04D-22707CC66878}.Release|x64.Build.0 = Release|x64 - {FFF7958F-750E-4C21-A04D-22707CC66878}.Release|x86.ActiveCfg = Release|Win32 - {FFF7958F-750E-4C21-A04D-22707CC66878}.Release|x86.Build.0 = Release|Win32 - EndGlobalSection - GlobalSection(SolutionProperties) = preSolution - HideSolutionNode = FALSE - EndGlobalSection - GlobalSection(ExtensibilityGlobals) = postSolution - SolutionGuid = {4297F93D-486A-4243-995F-7D32F59AE82A} - EndGlobalSection -EndGlobal diff --git a/depends/mimalloc/ide/vs2019/mimalloc.vcxproj b/depends/mimalloc/ide/vs2019/mimalloc.vcxproj deleted file mode 100644 index 9f967d94464b..000000000000 --- a/depends/mimalloc/ide/vs2019/mimalloc.vcxproj +++ /dev/null @@ -1,254 +0,0 @@ - - - - - Debug - Win32 - - - Release - Win32 - - - Debug - x64 - - - Release - x64 - - - - 15.0 - {ABB5EAE7-B3E6-432E-B636-333449892EA6} - 
mimalloc - 10.0 - mimalloc - - - - StaticLibrary - true - v142 - - - StaticLibrary - false - v142 - true - - - StaticLibrary - true - v142 - - - StaticLibrary - false - v142 - true - - - - - - - - - - - - - - - - - - - - - $(SolutionDir)..\..\out\msvc-$(Platform)\$(Configuration)\ - $(SolutionDir)..\..\out\msvc-$(Platform)\$(ProjectName)\$(Configuration)\ - .lib - mimalloc-static - - - $(SolutionDir)..\..\out\msvc-$(Platform)\$(Configuration)\ - $(SolutionDir)..\..\out\msvc-$(Platform)\$(ProjectName)\$(Configuration)\ - .lib - mimalloc-static - - - $(SolutionDir)..\..\out\msvc-$(Platform)\$(Configuration)\ - $(SolutionDir)..\..\out\msvc-$(Platform)\$(ProjectName)\$(Configuration)\ - .lib - mimalloc-static - - - $(SolutionDir)..\..\out\msvc-$(Platform)\$(Configuration)\ - $(SolutionDir)..\..\out\msvc-$(Platform)\$(ProjectName)\$(Configuration)\ - .lib - mimalloc-static - - - - Level4 - Disabled - true - true - ../../include - MI_DEBUG=3;%(PreprocessorDefinitions); - CompileAsCpp - false - Default - - - - - - - - - - - Level4 - Disabled - true - true - ../../include - MI_DEBUG=3;%(PreprocessorDefinitions); - CompileAsCpp - false - Default - - - - - - - - - - - - - - - - - - - Level4 - MaxSpeed - true - true - ../../include - %(PreprocessorDefinitions);NDEBUG - AssemblyAndSourceCode - $(IntDir) - false - false - Default - CompileAsCpp - true - Default - - - true - true - - - - - - - - - - - Level4 - MaxSpeed - true - true - ../../include - %(PreprocessorDefinitions);NDEBUG - AssemblyAndSourceCode - $(IntDir) - false - false - Default - CompileAsCpp - true - Default - - - true - true - - - - - - - - - - - - - - - - - false - false - false - false - - - true - true - true - true - - - true - true - true - true - - - - - - false - - - - - - true - true - true - true - - - - - - - - - - - - - - - - - - - - - \ No newline at end of file diff --git a/depends/mimalloc/ide/vs2019/mimalloc.vcxproj.filters b/depends/mimalloc/ide/vs2019/mimalloc.vcxproj.filters deleted file mode 
100644 index 92be7cb47e81..000000000000 --- a/depends/mimalloc/ide/vs2019/mimalloc.vcxproj.filters +++ /dev/null @@ -1,87 +0,0 @@ - - - - - Source Files - - - Source Files - - - Source Files - - - Source Files - - - Source Files - - - Source Files - - - Source Files - - - Source Files - - - Source Files - - - Source Files - - - Source Files - - - Source Files - - - Source Files - - - Source Files - - - Source Files - - - Source Files - - - Source Files - - - - - Header Files - - - Header Files - - - Header Files - - - Header Files - - - Header Files - - - Header Files - - - Source Files - - - - - {2b556b10-f559-4b2d-896e-142652adbf0c} - - - {852a14ae-6dde-4e95-8077-ca705e97e5af} - - - \ No newline at end of file diff --git a/depends/mimalloc/ide/vs2022/mimalloc-lib.vcxproj b/depends/mimalloc/ide/vs2022/mimalloc-lib.vcxproj new file mode 100644 index 000000000000..b4bf013e0890 --- /dev/null +++ b/depends/mimalloc/ide/vs2022/mimalloc-lib.vcxproj @@ -0,0 +1,500 @@ + + + + + Debug + ARM64 + + + Debug + ARM64EC + + + Debug + Win32 + + + Release + ARM64 + + + Release + ARM64EC + + + Release + Win32 + + + Debug + x64 + + + Release + x64 + + + + 15.0 + {ABB5EAE7-B3E6-432E-B636-333449892EA6} + mimalloc-lib + 10.0 + mimalloc-lib + + + + StaticLibrary + true + v143 + + + StaticLibrary + false + v143 + true + + + StaticLibrary + true + v143 + + + StaticLibrary + true + v143 + + + StaticLibrary + true + v143 + + + StaticLibrary + false + v143 + true + + + StaticLibrary + false + v143 + true + + + StaticLibrary + false + v143 + true + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + $(SolutionDir)..\..\out\msvc-$(Platform)\$(Configuration)\ + $(SolutionDir)..\..\out\msvc-$(Platform)\$(ProjectName)\$(Configuration)\ + .lib + mimalloc + + + $(SolutionDir)..\..\out\msvc-$(Platform)\$(Configuration)\ + $(SolutionDir)..\..\out\msvc-$(Platform)\$(ProjectName)\$(Configuration)\ + .lib + mimalloc + + + $(SolutionDir)..\..\out\msvc-$(Platform)\$(Configuration)\ + 
$(SolutionDir)..\..\out\msvc-$(Platform)\$(ProjectName)\$(Configuration)\ + .lib + mimalloc + + + $(SolutionDir)..\..\out\msvc-$(Platform)\$(Configuration)\ + $(SolutionDir)..\..\out\msvc-$(Platform)\$(ProjectName)\$(Configuration)\ + .lib + mimalloc + + + $(SolutionDir)..\..\out\msvc-$(Platform)\$(Configuration)\ + $(SolutionDir)..\..\out\msvc-$(Platform)\$(ProjectName)\$(Configuration)\ + .lib + mimalloc + + + $(SolutionDir)..\..\out\msvc-$(Platform)\$(Configuration)\ + $(SolutionDir)..\..\out\msvc-$(Platform)\$(ProjectName)\$(Configuration)\ + .lib + mimalloc + + + $(SolutionDir)..\..\out\msvc-$(Platform)\$(Configuration)\ + $(SolutionDir)..\..\out\msvc-$(Platform)\$(ProjectName)\$(Configuration)\ + .lib + mimalloc + + + $(SolutionDir)..\..\out\msvc-$(Platform)\$(Configuration)\ + $(SolutionDir)..\..\out\msvc-$(Platform)\$(ProjectName)\$(Configuration)\ + .lib + mimalloc + + + false + + + + Level4 + Disabled + true + Default + ../../include + MI_DEBUG=3;%(PreprocessorDefinitions); + CompileAsCpp + false + stdcpp20 + /Zc:__cplusplus %(AdditionalOptions) + + + + + + + + + + + Level4 + Disabled + true + Default + ../../include + MI_DEBUG=3;MI_GUARDED=0;%(PreprocessorDefinitions); + CompileAsCpp + false + stdcpp20 + /Zc:__cplusplus %(AdditionalOptions) + + + + + + + + + + + + + + + + + + + Level4 + Disabled + true + Default + ../../include + MI_DEBUG=3;MI_GUARDED=0;%(PreprocessorDefinitions); + CompileAsCpp + false + stdcpp20 + /Zc:__cplusplus %(AdditionalOptions) + + + + + + + + + + + + + + + + + + + Level4 + Disabled + true + Default + ../../include + MI_DEBUG=3;MI_GUARDED=0;%(PreprocessorDefinitions); + CompileAsCpp + false + stdcpp20 + /Zc:__cplusplus %(AdditionalOptions) + + + + + + + + + + + + + + + + + + + Level4 + MaxSpeed + true + Default + ../../include + %(PreprocessorDefinitions);NDEBUG + AssemblyAndSourceCode + $(IntDir) + false + false + Default + CompileAsCpp + true + stdcpp20 + /Zc:__cplusplus %(AdditionalOptions) + + + true + true + + + + + + + + + 
+ + Level4 + MaxSpeed + true + Default + ../../include + %(PreprocessorDefinitions);NDEBUG + AssemblyAndSourceCode + $(IntDir) + false + false + Default + CompileAsCpp + true + stdcpp20 + AdvancedVectorExtensions2 + /Zc:__cplusplus %(AdditionalOptions) + + + true + true + + + + + + + + + + + + + + + + + Level4 + MaxSpeed + true + Default + ../../include + %(PreprocessorDefinitions);NDEBUG + AssemblyAndSourceCode + $(IntDir) + false + false + Default + CompileAsCpp + true + stdcpp20 + CPUExtensionRequirementsARMv81 + Sync + /Zc:__cplusplus %(AdditionalOptions) + + + true + true + + + + + + + + + + + + + + + + + Level4 + MaxSpeed + true + Default + ../../include + %(PreprocessorDefinitions);NDEBUG + AssemblyAndSourceCode + $(IntDir) + false + false + Default + CompileAsCpp + true + stdcpp20 + CPUExtensionRequirementsARMv81 + Sync + /Zc:__cplusplus %(AdditionalOptions) + + + true + true + + + + + + + + + + + + + + + + + false + false + false + false + false + false + false + false + + + true + true + true + true + true + true + true + true + + + + + + + false + false + false + + + true + true + true + true + true + true + true + true + + + + + + + + true + true + true + true + true + true + true + true + + + + true + true + true + true + true + true + true + true + + + + + + + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/depends/mimalloc/ide/vs2022/mimalloc-lib.vcxproj.filters b/depends/mimalloc/ide/vs2022/mimalloc-lib.vcxproj.filters new file mode 100644 index 000000000000..6825f113fd69 --- /dev/null +++ b/depends/mimalloc/ide/vs2022/mimalloc-lib.vcxproj.filters @@ -0,0 +1,108 @@ + + + + + Sources + + + Sources + + + Sources + + + Sources + + + Sources + + + Sources + + + Sources + + + Sources + + + Sources + + + Sources + + + Sources + + + Sources + + + Sources + + + Sources + + + Sources + + + Sources + + + Sources + + + Sources + + + Sources + + + Sources + + + + + Headers + + + Headers + + + Headers + + + Headers + + + Headers + + + 
Headers + + + Headers + + + Headers + + + Headers + + + Headers + + + Headers + + + + + {1430490c-e711-4ace-a1b8-36f4d5105873} + + + {461c78ef-04b0-44d1-a0ca-7d488abaa592} + + + \ No newline at end of file diff --git a/depends/mimalloc/ide/vs2022/mimalloc-override-dll.vcxproj b/depends/mimalloc/ide/vs2022/mimalloc-override-dll.vcxproj new file mode 100644 index 000000000000..556d7926b766 --- /dev/null +++ b/depends/mimalloc/ide/vs2022/mimalloc-override-dll.vcxproj @@ -0,0 +1,515 @@ + + + + + Debug + ARM64 + + + Debug + ARM64EC + + + Debug + Win32 + + + Release + ARM64 + + + Release + ARM64EC + + + Release + Win32 + + + Debug + x64 + + + Release + x64 + + + + 15.0 + {ABB5EAE7-B3E6-432E-B636-333449892EA7} + mimalloc-override-dll + 10.0 + mimalloc-override-dll + + + + DynamicLibrary + true + v143 + + + DynamicLibrary + false + v143 + + + DynamicLibrary + true + v143 + + + DynamicLibrary + true + v143 + + + DynamicLibrary + true + v143 + + + DynamicLibrary + false + v143 + + + DynamicLibrary + false + v143 + + + DynamicLibrary + false + v143 + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + $(SolutionDir)..\..\out\msvc-$(Platform)\$(Configuration)\ + $(SolutionDir)..\..\out\msvc-$(Platform)\$(ProjectName)\$(Configuration)\ + .dll + mimalloc + + + $(SolutionDir)..\..\out\msvc-$(Platform)\$(Configuration)\ + $(SolutionDir)..\..\out\msvc-$(Platform)\$(ProjectName)\$(Configuration)\ + .dll + mimalloc + + + $(SolutionDir)..\..\out\msvc-$(Platform)\$(Configuration)\ + $(SolutionDir)..\..\out\msvc-$(Platform)\$(ProjectName)\$(Configuration)\ + .dll + mimalloc + + + $(SolutionDir)..\..\out\msvc-$(Platform)\$(Configuration)\ + $(SolutionDir)..\..\out\msvc-$(Platform)\$(ProjectName)\$(Configuration)\ + .dll + mimalloc + + + $(SolutionDir)..\..\out\msvc-$(Platform)\$(Configuration)\ + $(SolutionDir)..\..\out\msvc-$(Platform)\$(ProjectName)\$(Configuration)\ + .dll + mimalloc + + + $(SolutionDir)..\..\out\msvc-$(Platform)\$(Configuration)\ + 
$(SolutionDir)..\..\out\msvc-$(Platform)\$(ProjectName)\$(Configuration)\ + .dll + mimalloc + + + $(SolutionDir)..\..\out\msvc-$(Platform)\$(Configuration)\ + $(SolutionDir)..\..\out\msvc-$(Platform)\$(ProjectName)\$(Configuration)\ + .dll + mimalloc + + + $(SolutionDir)..\..\out\msvc-$(Platform)\$(Configuration)\ + $(SolutionDir)..\..\out\msvc-$(Platform)\$(ProjectName)\$(Configuration)\ + .dll + mimalloc + + + false + + + + Level3 + Disabled + true + true + ../../include + MI_SHARED_LIB;MI_SHARED_LIB_EXPORT;MI_MALLOC_OVERRIDE;%(PreprocessorDefinitions); + MultiThreadedDebugDLL + false + CompileAsCpp + /Zc:__cplusplus %(AdditionalOptions) + + + $(ProjectDir)\..\..\bin\mimalloc-redirect32.lib;%(AdditionalDependencies) + + + + + Default + false + $(OutDir)$(TargetName).dll.lib + $(OutDir)$(TargetName).dll.pdb + + + COPY /Y "$(ProjectDir)..\..\bin\mimalloc-redirect32.dll" "$(OutputPath)" + + + Copy mimalloc-redirect32.dll to the output directory + + + + + Level3 + Disabled + true + true + ../../include + MI_DEBUG=4;MI_SHARED_LIB;MI_SHARED_LIB_EXPORT;MI_MALLOC_OVERRIDE;%(PreprocessorDefinitions); + MultiThreadedDebugDLL + false + CompileAsCpp + /Zc:__cplusplus %(AdditionalOptions) + + + $(ProjectDir)\..\..\bin\mimalloc-redirect.lib;%(AdditionalDependencies) + + + + + Default + false + $(OutDir)$(TargetName).dll.lib + $(OutDir)$(TargetName).dll.pdb + + + COPY /Y "$(ProjectDir)..\..\bin\mimalloc-redirect.dll" "$(OutputPath)" + + + copy mimalloc-redirect.dll to the output directory + + + + + Level3 + Disabled + true + true + ../../include + MI_DEBUG=4;MI_SHARED_LIB;MI_SHARED_LIB_EXPORT;MI_MALLOC_OVERRIDE;%(PreprocessorDefinitions); + MultiThreadedDebugDLL + false + CompileAsCpp + /Zc:__cplusplus %(AdditionalOptions) + + + $(ProjectDir)\..\..\bin\mimalloc-redirect-arm64.lib;%(AdditionalDependencies) + + + + + Default + false + $(OutDir)$(TargetName).dll.lib + $(OutDir)$(TargetName).dll.pdb + + + COPY /Y "$(ProjectDir)..\..\bin\mimalloc-redirect-arm64.dll" "$(OutputPath)" 
+ + + copy mimalloc-redirect-arm64.dll to the output directory + + + + + Level3 + Disabled + true + true + ../../include + MI_DEBUG=4;MI_SHARED_LIB;MI_SHARED_LIB_EXPORT;MI_MALLOC_OVERRIDE;%(PreprocessorDefinitions); + MultiThreadedDebugDLL + false + CompileAsCpp + /Zc:__cplusplus %(AdditionalOptions) + + + $(ProjectDir)\..\..\bin\mimalloc-redirect-arm64ec.lib;%(AdditionalDependencies) + + + + + Default + false + $(OutDir)$(TargetName).dll.lib + $(OutDir)$(TargetName).dll.pdb + + + COPY /Y "$(ProjectDir)..\..\bin\mimalloc-redirect-arm64ec.dll" "$(OutputPath)" + + + copy mimalloc-redirect-arm64ec.dll to the output directory + + + + + Level3 + MaxSpeed + true + true + true + ../../include + MI_SHARED_LIB;MI_SHARED_LIB_EXPORT;MI_MALLOC_OVERRIDE;%(PreprocessorDefinitions);NDEBUG + AssemblyAndSourceCode + $(IntDir) + false + MultiThreadedDLL + CompileAsCpp + false + /Zc:__cplusplus %(AdditionalOptions) + + + true + true + $(ProjectDir)\..\..\bin\mimalloc-redirect32.lib;%(AdditionalDependencies) + + + Default + false + $(OutDir)$(TargetName).dll.lib + $(OutDir)$(TargetName).dll.pdb + + + COPY /Y "$(ProjectDir)..\..\bin\mimalloc-redirect32.dll" "$(OutputPath)" + + + Copy mimalloc-redirect32.dll to the output directory + + + + + Level3 + MaxSpeed + true + true + true + ../../include + MI_SHARED_LIB;MI_SHARED_LIB_EXPORT;MI_MALLOC_OVERRIDE;%(PreprocessorDefinitions);NDEBUG + AssemblyAndSourceCode + $(IntDir) + false + MultiThreadedDLL + CompileAsCpp + false + /Zc:__cplusplus %(AdditionalOptions) + + + true + true + $(ProjectDir)\..\..\bin\mimalloc-redirect.lib;%(AdditionalDependencies) + + + Default + false + $(OutDir)$(TargetName).dll.lib + $(OutDir)$(TargetName).dll.pdb + + + COPY /Y "$(ProjectDir)..\..\bin\mimalloc-redirect.dll" "$(OutputPath)" + + + copy mimalloc-redirect.dll to the output directory + + + + + Level3 + MaxSpeed + true + true + true + ../../include + MI_SHARED_LIB;MI_SHARED_LIB_EXPORT;MI_MALLOC_OVERRIDE;%(PreprocessorDefinitions);NDEBUG + 
AssemblyAndSourceCode + $(IntDir) + false + MultiThreadedDLL + CompileAsCpp + false + CPUExtensionRequirementsARMv81 + /Zc:__cplusplus %(AdditionalOptions) + + + true + true + $(ProjectDir)\..\..\bin\mimalloc-redirect-arm64.lib;%(AdditionalDependencies) + + + Default + false + $(OutDir)$(TargetName).dll.lib + $(OutDir)$(TargetName).dll.pdb + + + COPY /Y "$(ProjectDir)..\..\bin\mimalloc-redirect-arm64.dll" "$(OutputPath)" + + + copy mimalloc-redirect-arm64.dll to the output directory + + + + + Level3 + MaxSpeed + true + true + true + ../../include + MI_SHARED_LIB;MI_SHARED_LIB_EXPORT;MI_MALLOC_OVERRIDE;%(PreprocessorDefinitions);NDEBUG + AssemblyAndSourceCode + $(IntDir) + false + MultiThreadedDLL + CompileAsCpp + false + CPUExtensionRequirementsARMv81 + /Zc:__cplusplus %(AdditionalOptions) + + + true + true + $(ProjectDir)\..\..\bin\mimalloc-redirect-arm64ec.lib;%(AdditionalDependencies) + + + Default + false + $(OutDir)$(TargetName).dll.lib + $(OutDir)$(TargetName).dll.pdb + + + COPY /Y "$(ProjectDir)..\..\bin\mimalloc-redirect-arm64ec.dll" "$(OutputPath)" + + + copy mimalloc-redirect-arm64ec.dll to the output directory + + + + + + + + + + + + + + + + + + false + false + false + false + false + false + false + false + + + true + true + true + true + true + true + true + true + + + + + + + + true + true + true + true + true + true + true + true + + + + + + + + true + true + true + true + true + true + true + true + + + + + true + true + true + true + true + true + true + true + + + + + + + + + \ No newline at end of file diff --git a/depends/mimalloc/ide/vs2022/mimalloc-override-dll.vcxproj.filters b/depends/mimalloc/ide/vs2022/mimalloc-override-dll.vcxproj.filters new file mode 100644 index 000000000000..ebcf545af7e2 --- /dev/null +++ b/depends/mimalloc/ide/vs2022/mimalloc-override-dll.vcxproj.filters @@ -0,0 +1,108 @@ + + + + + Sources + + + Sources + + + Sources + + + Sources + + + Sources + + + Sources + + + Sources + + + Sources + + + Sources + + + Sources + + 
+ Sources + + + Sources + + + Sources + + + Sources + + + Sources + + + Sources + + + Sources + + + Sources + + + Sources + + + Sources + + + + + Headers + + + Headers + + + Headers + + + Headers + + + Headers + + + Headers + + + Headers + + + Headers + + + Headers + + + Headers + + + Headers + + + + + {262c6c21-e270-4ba6-bd63-4ac999307e4e} + + + {94b40bdc-a741-45dd-81aa-c05fabcd2970} + + + \ No newline at end of file diff --git a/depends/mimalloc/ide/vs2022/mimalloc-override-test-dep.vcxproj b/depends/mimalloc/ide/vs2022/mimalloc-override-test-dep.vcxproj new file mode 100644 index 000000000000..c1b89690e448 --- /dev/null +++ b/depends/mimalloc/ide/vs2022/mimalloc-override-test-dep.vcxproj @@ -0,0 +1,355 @@ + + + + + Debug + ARM64 + + + Debug + ARM64EC + + + Debug + Win32 + + + Release + ARM64 + + + Release + ARM64EC + + + Release + Win32 + + + Debug + x64 + + + Release + x64 + + + + 15.0 + {FEF7869F-750E-4C21-A04D-22707CC66879} + mimalloc-test-override-dep + 10.0 + mimalloc-test-override-dep + + + + DynamicLibrary + true + v143 + + + DynamicLibrary + false + v143 + true + + + DynamicLibrary + true + v143 + + + DynamicLibrary + true + v143 + + + DynamicLibrary + true + v143 + + + DynamicLibrary + false + v143 + true + + + DynamicLibrary + false + v143 + true + + + DynamicLibrary + false + v143 + true + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + $(ProjectDir)..\..\out\msvc-$(Platform)\$(Configuration)\ + $(ProjectDir)..\..\out\msvc-$(Platform)\$(ProjectName)\$(Configuration)\ + + + $(ProjectDir)..\..\out\msvc-$(Platform)\$(Configuration)\ + $(ProjectDir)..\..\out\msvc-$(Platform)\$(ProjectName)\$(Configuration)\ + + + $(ProjectDir)..\..\out\msvc-$(Platform)\$(Configuration)\ + $(ProjectDir)..\..\out\msvc-$(Platform)\$(ProjectName)\$(Configuration)\ + + + $(ProjectDir)..\..\out\msvc-$(Platform)\$(Configuration)\ + $(ProjectDir)..\..\out\msvc-$(Platform)\$(ProjectName)\$(Configuration)\ + + + 
$(ProjectDir)..\..\out\msvc-$(Platform)\$(Configuration)\ + $(ProjectDir)..\..\out\msvc-$(Platform)\$(ProjectName)\$(Configuration)\ + + + $(ProjectDir)..\..\out\msvc-$(Platform)\$(Configuration)\ + $(ProjectDir)..\..\out\msvc-$(Platform)\$(ProjectName)\$(Configuration)\ + + + $(ProjectDir)..\..\out\msvc-$(Platform)\$(Configuration)\ + $(ProjectDir)..\..\out\msvc-$(Platform)\$(ProjectName)\$(Configuration)\ + + + $(ProjectDir)..\..\out\msvc-$(Platform)\$(Configuration)\ + $(ProjectDir)..\..\out\msvc-$(Platform)\$(ProjectName)\$(Configuration)\ + + + false + + + + Level3 + Disabled + true + true + ..\..\include + MultiThreadedDebugDLL + Sync + Default + false + + + Console + kernel32.lib;%(AdditionalDependencies) + + + + + + + + + + Level3 + Disabled + true + true + ..\..\include + MultiThreadedDebugDLL + Sync + Default + false + + + Console + + + kernel32.lib;%(AdditionalDependencies) + + + + + + + + + + Level3 + Disabled + true + true + ..\..\include + MultiThreadedDebugDLL + Sync + Default + false + + + Console + + + kernel32.lib;%(AdditionalDependencies) + + + + + + + + + + Level3 + Disabled + true + true + ..\..\include + MultiThreadedDebugDLL + Sync + Default + false + + + Console + + + kernel32.lib;%(AdditionalDependencies) + + + + + + + + + + Level3 + MaxSpeed + true + true + true + true + ..\..\include + _MBCS;%(PreprocessorDefinitions);NDEBUG + MultiThreadedDLL + + + true + true + Console + kernel32.lib;user32.lib;gdi32.lib;winspool.lib;comdlg32.lib;advapi32.lib;shell32.lib;ole32.lib;oleaut32.lib;uuid.lib;odbc32.lib;odbccp32.lib;%(AdditionalDependencies) + + + + + + + + + Level3 + MaxSpeed + true + true + true + true + ..\..\include + _MBCS;%(PreprocessorDefinitions);NDEBUG + MultiThreadedDLL + + + true + true + Console + + + kernel32.lib;user32.lib;gdi32.lib;winspool.lib;comdlg32.lib;advapi32.lib;shell32.lib;ole32.lib;oleaut32.lib;uuid.lib;odbc32.lib;odbccp32.lib;%(AdditionalDependencies) + + + + + + + + + Level3 + MaxSpeed + true + true + true + true + 
..\..\include + _MBCS;%(PreprocessorDefinitions);NDEBUG + MultiThreadedDLL + + + true + true + Console + + + kernel32.lib;user32.lib;gdi32.lib;winspool.lib;comdlg32.lib;advapi32.lib;shell32.lib;ole32.lib;oleaut32.lib;uuid.lib;odbc32.lib;odbccp32.lib;%(AdditionalDependencies) + + + + + + + + + Level3 + MaxSpeed + true + true + true + true + ..\..\include + _MBCS;%(PreprocessorDefinitions);NDEBUG + MultiThreadedDLL + + + true + true + Console + + + kernel32.lib;user32.lib;gdi32.lib;winspool.lib;comdlg32.lib;advapi32.lib;shell32.lib;ole32.lib;oleaut32.lib;uuid.lib;odbc32.lib;odbccp32.lib;%(AdditionalDependencies) + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/depends/mimalloc/ide/vs2022/mimalloc-override-test.vcxproj b/depends/mimalloc/ide/vs2022/mimalloc-override-test.vcxproj index a3c56f7badce..1dc2cee71b60 100644 --- a/depends/mimalloc/ide/vs2022/mimalloc-override-test.vcxproj +++ b/depends/mimalloc/ide/vs2022/mimalloc-override-test.vcxproj @@ -1,10 +1,26 @@ + + Debug + ARM64 + + + Debug + ARM64EC + Debug Win32 + + Release + ARM64 + + + Release + ARM64EC + Release Win32 @@ -23,7 +39,7 @@ {FEF7868F-750E-4C21-A04D-22707CC66879} mimalloc-override-test 10.0 - mimalloc-override-test + mimalloc-test-override @@ -42,12 +58,34 @@ true v143 + + Application + true + v143 + + + Application + true + v143 + Application false v143 true + + Application + false + v143 + true + + + Application + false + v143 + true + @@ -62,9 +100,21 @@ + + + + + + + + + + + + $(ProjectDir)..\..\out\msvc-$(Platform)\$(Configuration)\ @@ -78,10 +128,29 @@ $(ProjectDir)..\..\out\msvc-$(Platform)\$(Configuration)\ $(ProjectDir)..\..\out\msvc-$(Platform)\$(ProjectName)\$(Configuration)\ + + $(ProjectDir)..\..\out\msvc-$(Platform)\$(Configuration)\ + $(ProjectDir)..\..\out\msvc-$(Platform)\$(ProjectName)\$(Configuration)\ + + + $(ProjectDir)..\..\out\msvc-$(Platform)\$(Configuration)\ + $(ProjectDir)..\..\out\msvc-$(Platform)\$(ProjectName)\$(Configuration)\ + 
$(ProjectDir)..\..\out\msvc-$(Platform)\$(Configuration)\ $(ProjectDir)..\..\out\msvc-$(Platform)\$(ProjectName)\$(Configuration)\ + + $(ProjectDir)..\..\out\msvc-$(Platform)\$(Configuration)\ + $(ProjectDir)..\..\out\msvc-$(Platform)\$(ProjectName)\$(Configuration)\ + + + $(ProjectDir)..\..\out\msvc-$(Platform)\$(Configuration)\ + $(ProjectDir)..\..\out\msvc-$(Platform)\$(ProjectName)\$(Configuration)\ + + + false + Level3 @@ -128,6 +197,54 @@ + + + Level3 + Disabled + true + true + ..\..\include + MultiThreadedDebugDLL + Sync + Default + false + + + Console + + + kernel32.lib;%(AdditionalDependencies) + + + + + + + + + + Level3 + Disabled + true + true + ..\..\include + MultiThreadedDebugDLL + Sync + Default + false + + + Console + + + kernel32.lib;%(AdditionalDependencies) + + + + + + + Level3 @@ -176,13 +293,66 @@ + + + Level3 + MaxSpeed + true + true + true + true + ..\..\include + _MBCS;%(PreprocessorDefinitions);NDEBUG + MultiThreadedDLL + + + true + true + Console + + + kernel32.lib;user32.lib;gdi32.lib;winspool.lib;comdlg32.lib;advapi32.lib;shell32.lib;ole32.lib;oleaut32.lib;uuid.lib;odbc32.lib;odbccp32.lib;%(AdditionalDependencies) + + + + + + + + + Level3 + MaxSpeed + true + true + true + true + ..\..\include + _MBCS;%(PreprocessorDefinitions);NDEBUG + MultiThreadedDLL + + + true + true + Console + + + kernel32.lib;user32.lib;gdi32.lib;winspool.lib;comdlg32.lib;advapi32.lib;shell32.lib;ole32.lib;oleaut32.lib;uuid.lib;odbc32.lib;odbccp32.lib;%(AdditionalDependencies) + + + + + + - + {abb5eae7-b3e6-432e-b636-333449892ea7} + + {fef7869f-750e-4c21-a04d-22707cc66879} + diff --git a/depends/mimalloc/ide/vs2022/mimalloc-override.vcxproj b/depends/mimalloc/ide/vs2022/mimalloc-override.vcxproj deleted file mode 100644 index f22d54a4edf4..000000000000 --- a/depends/mimalloc/ide/vs2022/mimalloc-override.vcxproj +++ /dev/null @@ -1,257 +0,0 @@ - - - - - Debug - Win32 - - - Release - Win32 - - - Debug - x64 - - - Release - x64 - - - - 15.0 - 
{ABB5EAE7-B3E6-432E-B636-333449892EA7} - mimalloc-override - 10.0 - mimalloc-override - - - - DynamicLibrary - true - v143 - - - DynamicLibrary - false - v143 - - - DynamicLibrary - true - v143 - - - DynamicLibrary - false - v143 - - - - - - - - - - - - - - - - - - - - - $(SolutionDir)..\..\out\msvc-$(Platform)\$(Configuration)\ - $(SolutionDir)..\..\out\msvc-$(Platform)\$(ProjectName)\$(Configuration)\ - .dll - mimalloc-override - - - $(SolutionDir)..\..\out\msvc-$(Platform)\$(Configuration)\ - $(SolutionDir)..\..\out\msvc-$(Platform)\$(ProjectName)\$(Configuration)\ - .dll - mimalloc-override - - - $(SolutionDir)..\..\out\msvc-$(Platform)\$(Configuration)\ - $(SolutionDir)..\..\out\msvc-$(Platform)\$(ProjectName)\$(Configuration)\ - .dll - mimalloc-override - - - $(SolutionDir)..\..\out\msvc-$(Platform)\$(Configuration)\ - $(SolutionDir)..\..\out\msvc-$(Platform)\$(ProjectName)\$(Configuration)\ - .dll - mimalloc-override - - - - Level3 - Disabled - true - true - ../../include - MI_SHARED_LIB;MI_SHARED_LIB_EXPORT;MI_MALLOC_OVERRIDE;%(PreprocessorDefinitions); - MultiThreadedDebugDLL - false - Default - - - $(ProjectDir)\..\..\bin\mimalloc-redirect32.lib;%(AdditionalDependencies) - - - - - Default - false - - - COPY /Y $(ProjectDir)..\..\bin\mimalloc-redirect32.dll $(OutputPath) - - - Copy mimalloc-redirect32.dll to the output directory - - - - - Level3 - Disabled - true - true - ../../include - MI_DEBUG=3;MI_SHARED_LIB;MI_SHARED_LIB_EXPORT;MI_MALLOC_OVERRIDE;%(PreprocessorDefinitions); - MultiThreadedDebugDLL - false - Default - - - $(ProjectDir)\..\..\bin\mimalloc-redirect.lib;%(AdditionalDependencies) - - - - - Default - false - - - COPY /Y $(ProjectDir)..\..\bin\mimalloc-redirect.dll $(OutputPath) - - - copy mimalloc-redirect.dll to the output directory - - - - - Level3 - MaxSpeed - true - true - true - ../../include - MI_SHARED_LIB;MI_SHARED_LIB_EXPORT;MI_MALLOC_OVERRIDE;%(PreprocessorDefinitions);NDEBUG - AssemblyAndSourceCode - $(IntDir) - false - 
MultiThreadedDLL - Default - false - - - true - true - $(ProjectDir)\..\..\bin\mimalloc-redirect32.lib;%(AdditionalDependencies) - - - Default - false - - - COPY /Y $(ProjectDir)..\..\bin\mimalloc-redirect32.dll $(OutputPath) - - - Copy mimalloc-redirect32.dll to the output directory - - - - - Level3 - MaxSpeed - true - true - true - ../../include - MI_SHARED_LIB;MI_SHARED_LIB_EXPORT;MI_MALLOC_OVERRIDE;%(PreprocessorDefinitions);NDEBUG - AssemblyAndSourceCode - $(IntDir) - false - MultiThreadedDLL - Default - false - - - true - true - $(ProjectDir)\..\..\bin\mimalloc-redirect.lib;%(AdditionalDependencies) - - - Default - false - - - COPY /Y $(ProjectDir)..\..\bin\mimalloc-redirect.dll $(OutputPath) - - - copy mimalloc-redirect.dll to the output directory - - - - - - - - - - - - - - - false - false - false - false - - - true - true - true - true - - - - - - - - - - - true - true - true - true - - - - - - - - - - - \ No newline at end of file diff --git a/depends/mimalloc/ide/vs2022/mimalloc-test-api.vcxproj b/depends/mimalloc/ide/vs2022/mimalloc-test-api.vcxproj index 6023c251fbe2..440693a2162e 100644 --- a/depends/mimalloc/ide/vs2022/mimalloc-test-api.vcxproj +++ b/depends/mimalloc/ide/vs2022/mimalloc-test-api.vcxproj @@ -1,10 +1,26 @@ + + Debug + ARM64 + + + Debug + ARM64EC + Debug Win32 + + Release + ARM64 + + + Release + ARM64EC + Release Win32 @@ -42,12 +58,34 @@ true v143 + + Application + true + v143 + + + Application + true + v143 + Application false v143 true + + Application + false + v143 + true + + + Application + false + v143 + true + @@ -62,9 +100,21 @@ + + + + + + + + + + + + $(ProjectDir)..\..\out\msvc-$(Platform)\$(Configuration)\ @@ -78,10 +128,29 @@ $(ProjectDir)..\..\out\msvc-$(Platform)\$(Configuration)\ $(ProjectDir)..\..\out\msvc-$(Platform)\$(ProjectName)\$(Configuration)\ + + $(ProjectDir)..\..\out\msvc-$(Platform)\$(Configuration)\ + $(ProjectDir)..\..\out\msvc-$(Platform)\$(ProjectName)\$(Configuration)\ + + + 
$(ProjectDir)..\..\out\msvc-$(Platform)\$(Configuration)\ + $(ProjectDir)..\..\out\msvc-$(Platform)\$(ProjectName)\$(Configuration)\ + $(ProjectDir)..\..\out\msvc-$(Platform)\$(Configuration)\ $(ProjectDir)..\..\out\msvc-$(Platform)\$(ProjectName)\$(Configuration)\ + + $(ProjectDir)..\..\out\msvc-$(Platform)\$(Configuration)\ + $(ProjectDir)..\..\out\msvc-$(Platform)\$(ProjectName)\$(Configuration)\ + + + $(ProjectDir)..\..\out\msvc-$(Platform)\$(Configuration)\ + $(ProjectDir)..\..\out\msvc-$(Platform)\$(ProjectName)\$(Configuration)\ + + + false + Level3 @@ -106,6 +175,30 @@ Console + + + Level3 + Disabled + true + true + ..\..\include + + + Console + + + + + Level3 + Disabled + true + true + ..\..\include + + + Console + + Level3 @@ -140,12 +233,59 @@ Console + + + Level3 + MaxSpeed + true + true + true + true + ..\..\include + %(PreprocessorDefinitions);NDEBUG + + + true + true + Console + + + + + Level3 + MaxSpeed + true + true + true + true + ..\..\include + %(PreprocessorDefinitions);NDEBUG + + + true + true + Console + + + + true + true + true + true + true + true + true + true + + false + false + false - + {abb5eae7-b3e6-432e-b636-333449892ea6} diff --git a/depends/mimalloc/ide/vs2022/mimalloc-test-stress.vcxproj b/depends/mimalloc/ide/vs2022/mimalloc-test-stress.vcxproj index c7e820dfecf2..128a4ff6cf69 100644 --- a/depends/mimalloc/ide/vs2022/mimalloc-test-stress.vcxproj +++ b/depends/mimalloc/ide/vs2022/mimalloc-test-stress.vcxproj @@ -1,10 +1,26 @@ + + Debug + ARM64 + + + Debug + ARM64EC + Debug Win32 + + Release + ARM64 + + + Release + ARM64EC + Release Win32 @@ -42,12 +58,34 @@ true v143 + + Application + true + v143 + + + Application + true + v143 + Application false v143 true + + Application + false + v143 + true + + + Application + false + v143 + true + @@ -62,9 +100,21 @@ + + + + + + + + + + + + $(ProjectDir)..\..\out\msvc-$(Platform)\$(Configuration)\ @@ -78,10 +128,29 @@ $(ProjectDir)..\..\out\msvc-$(Platform)\$(Configuration)\ 
$(ProjectDir)..\..\out\msvc-$(Platform)\$(ProjectName)\$(Configuration)\ + + $(ProjectDir)..\..\out\msvc-$(Platform)\$(Configuration)\ + $(ProjectDir)..\..\out\msvc-$(Platform)\$(ProjectName)\$(Configuration)\ + + + $(ProjectDir)..\..\out\msvc-$(Platform)\$(Configuration)\ + $(ProjectDir)..\..\out\msvc-$(Platform)\$(ProjectName)\$(Configuration)\ + $(ProjectDir)..\..\out\msvc-$(Platform)\$(Configuration)\ $(ProjectDir)..\..\out\msvc-$(Platform)\$(ProjectName)\$(Configuration)\ + + $(ProjectDir)..\..\out\msvc-$(Platform)\$(Configuration)\ + $(ProjectDir)..\..\out\msvc-$(Platform)\$(ProjectName)\$(Configuration)\ + + + $(ProjectDir)..\..\out\msvc-$(Platform)\$(Configuration)\ + $(ProjectDir)..\..\out\msvc-$(Platform)\$(ProjectName)\$(Configuration)\ + + + false + Level3 @@ -106,6 +175,30 @@ Console + + + Level3 + Disabled + true + true + ..\..\include + + + Console + + + + + Level3 + Disabled + true + true + ..\..\include + + + Console + + Level3 @@ -140,17 +233,57 @@ Console + + + Level3 + MaxSpeed + true + true + true + true + ..\..\include + %(PreprocessorDefinitions);NDEBUG + CPUExtensionRequirementsARMv81 + + + true + true + Console + + + + + Level3 + MaxSpeed + true + true + true + true + ..\..\include + %(PreprocessorDefinitions);NDEBUG + CPUExtensionRequirementsARMv81 + + + true + true + Console + + false false + false + false false false + false + false - - {abb5eae7-b3e6-432e-b636-333449892ea6} + + {abb5eae7-b3e6-432e-b636-333449892ea7} diff --git a/depends/mimalloc/ide/vs2022/mimalloc-test.vcxproj b/depends/mimalloc/ide/vs2022/mimalloc-test.vcxproj index 506dd7d457e8..1e41fca115cc 100644 --- a/depends/mimalloc/ide/vs2022/mimalloc-test.vcxproj +++ b/depends/mimalloc/ide/vs2022/mimalloc-test.vcxproj @@ -1,10 +1,26 @@ + + Debug + ARM64 + + + Debug + ARM64EC + Debug Win32 + + Release + ARM64 + + + Release + ARM64EC + Release Win32 @@ -23,7 +39,7 @@ {FEF7858F-750E-4C21-A04D-22707CC66878} mimalloctest 10.0 - mimalloc-test + mimalloc-test-static @@ -42,12 +58,34 
@@ true v143 + + Application + true + v143 + + + Application + true + v143 + Application false v143 true + + Application + false + v143 + true + + + Application + false + v143 + true + @@ -62,9 +100,21 @@ + + + + + + + + + + + + $(ProjectDir)..\..\out\msvc-$(Platform)\$(Configuration)\ @@ -78,10 +128,29 @@ $(ProjectDir)..\..\out\msvc-$(Platform)\$(Configuration)\ $(ProjectDir)..\..\out\msvc-$(Platform)\$(ProjectName)\$(Configuration)\ + + $(ProjectDir)..\..\out\msvc-$(Platform)\$(Configuration)\ + $(ProjectDir)..\..\out\msvc-$(Platform)\$(ProjectName)\$(Configuration)\ + + + $(ProjectDir)..\..\out\msvc-$(Platform)\$(Configuration)\ + $(ProjectDir)..\..\out\msvc-$(Platform)\$(ProjectName)\$(Configuration)\ + $(ProjectDir)..\..\out\msvc-$(Platform)\$(Configuration)\ $(ProjectDir)..\..\out\msvc-$(Platform)\$(ProjectName)\$(Configuration)\ + + $(ProjectDir)..\..\out\msvc-$(Platform)\$(Configuration)\ + $(ProjectDir)..\..\out\msvc-$(Platform)\$(ProjectName)\$(Configuration)\ + + + $(ProjectDir)..\..\out\msvc-$(Platform)\$(Configuration)\ + $(ProjectDir)..\..\out\msvc-$(Platform)\$(ProjectName)\$(Configuration)\ + + + false + Level3 @@ -108,6 +177,32 @@ Console + + + Level3 + Disabled + true + true + ..\..\include + stdcpp17 + + + Console + + + + + Level3 + Disabled + true + true + ..\..\include + stdcpp17 + + + Console + + Level3 @@ -144,13 +239,49 @@ Console + + + Level3 + MaxSpeed + true + true + true + true + ..\..\include + _MBCS;%(PreprocessorDefinitions);NDEBUG + stdcpp17 + + + true + true + Console + + + + + Level3 + MaxSpeed + true + true + true + true + ..\..\include + _MBCS;%(PreprocessorDefinitions);NDEBUG + stdcpp17 + + + true + true + Console + + - - {abb5eae7-b3e6-432e-b636-333449892ea6} - + - + + {abb5eae7-b3e6-432e-b636-333449892ea6} + diff --git a/depends/mimalloc/ide/vs2022/mimalloc.sln b/depends/mimalloc/ide/vs2022/mimalloc.sln index fcb938a4fe2a..212b75155517 100644 --- a/depends/mimalloc/ide/vs2022/mimalloc.sln +++ 
b/depends/mimalloc/ide/vs2022/mimalloc.sln @@ -1,15 +1,17 @@  Microsoft Visual Studio Solution File, Format Version 12.00 -# Visual Studio Version 16 -VisualStudioVersion = 16.0.29709.97 +# Visual Studio Version 17 +VisualStudioVersion = 17.12.35527.113 MinimumVisualStudioVersion = 10.0.40219.1 -Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "mimalloc", "mimalloc.vcxproj", "{ABB5EAE7-B3E6-432E-B636-333449892EA6}" +Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "mimalloc-lib", "mimalloc-lib.vcxproj", "{ABB5EAE7-B3E6-432E-B636-333449892EA6}" EndProject -Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "mimalloc-test", "mimalloc-test.vcxproj", "{FEF7858F-750E-4C21-A04D-22707CC66878}" +Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "mimalloc-test-static", "mimalloc-test.vcxproj", "{FEF7858F-750E-4C21-A04D-22707CC66878}" EndProject -Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "mimalloc-override", "mimalloc-override.vcxproj", "{ABB5EAE7-B3E6-432E-B636-333449892EA7}" +Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "mimalloc-override-dll", "mimalloc-override-dll.vcxproj", "{ABB5EAE7-B3E6-432E-B636-333449892EA7}" EndProject -Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "mimalloc-override-test", "mimalloc-override-test.vcxproj", "{FEF7868F-750E-4C21-A04D-22707CC66879}" +Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "mimalloc-test-override-dep", "mimalloc-override-test-dep.vcxproj", "{FEF7869F-750E-4C21-A04D-22707CC66879}" +EndProject +Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "mimalloc-test-override", "mimalloc-override-test.vcxproj", "{FEF7868F-750E-4C21-A04D-22707CC66879}" EndProject Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "mimalloc-test-stress", "mimalloc-test-stress.vcxproj", "{FEF7958F-750E-4C21-A04D-22707CC66878}" EndProject @@ -17,56 +19,124 @@ Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "mimalloc-test-api", "mimall EndProject Global GlobalSection(SolutionConfigurationPlatforms) = preSolution + 
Debug|ARM64 = Debug|ARM64 + Debug|ARM64EC = Debug|ARM64EC Debug|x64 = Debug|x64 Debug|x86 = Debug|x86 + Release|ARM64 = Release|ARM64 + Release|ARM64EC = Release|ARM64EC Release|x64 = Release|x64 Release|x86 = Release|x86 EndGlobalSection GlobalSection(ProjectConfigurationPlatforms) = postSolution + {ABB5EAE7-B3E6-432E-B636-333449892EA6}.Debug|ARM64.ActiveCfg = Debug|ARM64 + {ABB5EAE7-B3E6-432E-B636-333449892EA6}.Debug|ARM64.Build.0 = Debug|ARM64 + {ABB5EAE7-B3E6-432E-B636-333449892EA6}.Debug|ARM64EC.ActiveCfg = Debug|ARM64EC + {ABB5EAE7-B3E6-432E-B636-333449892EA6}.Debug|ARM64EC.Build.0 = Debug|ARM64EC {ABB5EAE7-B3E6-432E-B636-333449892EA6}.Debug|x64.ActiveCfg = Debug|x64 {ABB5EAE7-B3E6-432E-B636-333449892EA6}.Debug|x64.Build.0 = Debug|x64 {ABB5EAE7-B3E6-432E-B636-333449892EA6}.Debug|x86.ActiveCfg = Debug|Win32 {ABB5EAE7-B3E6-432E-B636-333449892EA6}.Debug|x86.Build.0 = Debug|Win32 + {ABB5EAE7-B3E6-432E-B636-333449892EA6}.Release|ARM64.ActiveCfg = Release|ARM64 + {ABB5EAE7-B3E6-432E-B636-333449892EA6}.Release|ARM64.Build.0 = Release|ARM64 + {ABB5EAE7-B3E6-432E-B636-333449892EA6}.Release|ARM64EC.ActiveCfg = Release|ARM64EC + {ABB5EAE7-B3E6-432E-B636-333449892EA6}.Release|ARM64EC.Build.0 = Release|ARM64EC {ABB5EAE7-B3E6-432E-B636-333449892EA6}.Release|x64.ActiveCfg = Release|x64 {ABB5EAE7-B3E6-432E-B636-333449892EA6}.Release|x64.Build.0 = Release|x64 {ABB5EAE7-B3E6-432E-B636-333449892EA6}.Release|x86.ActiveCfg = Release|Win32 {ABB5EAE7-B3E6-432E-B636-333449892EA6}.Release|x86.Build.0 = Release|Win32 + {FEF7858F-750E-4C21-A04D-22707CC66878}.Debug|ARM64.ActiveCfg = Debug|ARM64 + {FEF7858F-750E-4C21-A04D-22707CC66878}.Debug|ARM64.Build.0 = Debug|ARM64 + {FEF7858F-750E-4C21-A04D-22707CC66878}.Debug|ARM64EC.ActiveCfg = Debug|ARM64EC + {FEF7858F-750E-4C21-A04D-22707CC66878}.Debug|ARM64EC.Build.0 = Debug|ARM64EC {FEF7858F-750E-4C21-A04D-22707CC66878}.Debug|x64.ActiveCfg = Debug|x64 {FEF7858F-750E-4C21-A04D-22707CC66878}.Debug|x64.Build.0 = Debug|x64 
{FEF7858F-750E-4C21-A04D-22707CC66878}.Debug|x86.ActiveCfg = Debug|Win32 {FEF7858F-750E-4C21-A04D-22707CC66878}.Debug|x86.Build.0 = Debug|Win32 + {FEF7858F-750E-4C21-A04D-22707CC66878}.Release|ARM64.ActiveCfg = Release|ARM64 + {FEF7858F-750E-4C21-A04D-22707CC66878}.Release|ARM64.Build.0 = Release|ARM64 + {FEF7858F-750E-4C21-A04D-22707CC66878}.Release|ARM64EC.ActiveCfg = Release|ARM64EC + {FEF7858F-750E-4C21-A04D-22707CC66878}.Release|ARM64EC.Build.0 = Release|ARM64EC {FEF7858F-750E-4C21-A04D-22707CC66878}.Release|x64.ActiveCfg = Release|x64 {FEF7858F-750E-4C21-A04D-22707CC66878}.Release|x64.Build.0 = Release|x64 {FEF7858F-750E-4C21-A04D-22707CC66878}.Release|x86.ActiveCfg = Release|Win32 {FEF7858F-750E-4C21-A04D-22707CC66878}.Release|x86.Build.0 = Release|Win32 + {ABB5EAE7-B3E6-432E-B636-333449892EA7}.Debug|ARM64.ActiveCfg = Debug|ARM64 + {ABB5EAE7-B3E6-432E-B636-333449892EA7}.Debug|ARM64.Build.0 = Debug|ARM64 + {ABB5EAE7-B3E6-432E-B636-333449892EA7}.Debug|ARM64EC.ActiveCfg = Debug|ARM64EC + {ABB5EAE7-B3E6-432E-B636-333449892EA7}.Debug|ARM64EC.Build.0 = Debug|ARM64EC {ABB5EAE7-B3E6-432E-B636-333449892EA7}.Debug|x64.ActiveCfg = Debug|x64 {ABB5EAE7-B3E6-432E-B636-333449892EA7}.Debug|x64.Build.0 = Debug|x64 {ABB5EAE7-B3E6-432E-B636-333449892EA7}.Debug|x86.ActiveCfg = Debug|Win32 {ABB5EAE7-B3E6-432E-B636-333449892EA7}.Debug|x86.Build.0 = Debug|Win32 + {ABB5EAE7-B3E6-432E-B636-333449892EA7}.Release|ARM64.ActiveCfg = Release|ARM64 + {ABB5EAE7-B3E6-432E-B636-333449892EA7}.Release|ARM64.Build.0 = Release|ARM64 + {ABB5EAE7-B3E6-432E-B636-333449892EA7}.Release|ARM64EC.ActiveCfg = Release|ARM64EC + {ABB5EAE7-B3E6-432E-B636-333449892EA7}.Release|ARM64EC.Build.0 = Release|ARM64EC {ABB5EAE7-B3E6-432E-B636-333449892EA7}.Release|x64.ActiveCfg = Release|x64 {ABB5EAE7-B3E6-432E-B636-333449892EA7}.Release|x64.Build.0 = Release|x64 {ABB5EAE7-B3E6-432E-B636-333449892EA7}.Release|x86.ActiveCfg = Release|Win32 {ABB5EAE7-B3E6-432E-B636-333449892EA7}.Release|x86.Build.0 = Release|Win32 + 
{FEF7869F-750E-4C21-A04D-22707CC66879}.Debug|ARM64.ActiveCfg = Debug|ARM64 + {FEF7869F-750E-4C21-A04D-22707CC66879}.Debug|ARM64.Build.0 = Debug|ARM64 + {FEF7869F-750E-4C21-A04D-22707CC66879}.Debug|ARM64EC.ActiveCfg = Debug|ARM64EC + {FEF7869F-750E-4C21-A04D-22707CC66879}.Debug|ARM64EC.Build.0 = Debug|ARM64EC + {FEF7869F-750E-4C21-A04D-22707CC66879}.Debug|x64.ActiveCfg = Debug|x64 + {FEF7869F-750E-4C21-A04D-22707CC66879}.Debug|x64.Build.0 = Debug|x64 + {FEF7869F-750E-4C21-A04D-22707CC66879}.Debug|x86.ActiveCfg = Debug|Win32 + {FEF7869F-750E-4C21-A04D-22707CC66879}.Debug|x86.Build.0 = Debug|Win32 + {FEF7869F-750E-4C21-A04D-22707CC66879}.Release|ARM64.ActiveCfg = Release|ARM64 + {FEF7869F-750E-4C21-A04D-22707CC66879}.Release|ARM64.Build.0 = Release|ARM64 + {FEF7869F-750E-4C21-A04D-22707CC66879}.Release|ARM64EC.ActiveCfg = Release|ARM64EC + {FEF7869F-750E-4C21-A04D-22707CC66879}.Release|ARM64EC.Build.0 = Release|ARM64EC + {FEF7869F-750E-4C21-A04D-22707CC66879}.Release|x64.ActiveCfg = Release|x64 + {FEF7869F-750E-4C21-A04D-22707CC66879}.Release|x64.Build.0 = Release|x64 + {FEF7869F-750E-4C21-A04D-22707CC66879}.Release|x86.ActiveCfg = Release|Win32 + {FEF7869F-750E-4C21-A04D-22707CC66879}.Release|x86.Build.0 = Release|Win32 + {FEF7868F-750E-4C21-A04D-22707CC66879}.Debug|ARM64.ActiveCfg = Debug|ARM64 + {FEF7868F-750E-4C21-A04D-22707CC66879}.Debug|ARM64.Build.0 = Debug|ARM64 + {FEF7868F-750E-4C21-A04D-22707CC66879}.Debug|ARM64EC.ActiveCfg = Debug|ARM64EC + {FEF7868F-750E-4C21-A04D-22707CC66879}.Debug|ARM64EC.Build.0 = Debug|ARM64EC {FEF7868F-750E-4C21-A04D-22707CC66879}.Debug|x64.ActiveCfg = Debug|x64 {FEF7868F-750E-4C21-A04D-22707CC66879}.Debug|x64.Build.0 = Debug|x64 {FEF7868F-750E-4C21-A04D-22707CC66879}.Debug|x86.ActiveCfg = Debug|Win32 {FEF7868F-750E-4C21-A04D-22707CC66879}.Debug|x86.Build.0 = Debug|Win32 + {FEF7868F-750E-4C21-A04D-22707CC66879}.Release|ARM64.ActiveCfg = Release|ARM64 + {FEF7868F-750E-4C21-A04D-22707CC66879}.Release|ARM64.Build.0 = Release|ARM64 + 
{FEF7868F-750E-4C21-A04D-22707CC66879}.Release|ARM64EC.ActiveCfg = Release|ARM64EC + {FEF7868F-750E-4C21-A04D-22707CC66879}.Release|ARM64EC.Build.0 = Release|ARM64EC {FEF7868F-750E-4C21-A04D-22707CC66879}.Release|x64.ActiveCfg = Release|x64 {FEF7868F-750E-4C21-A04D-22707CC66879}.Release|x64.Build.0 = Release|x64 {FEF7868F-750E-4C21-A04D-22707CC66879}.Release|x86.ActiveCfg = Release|Win32 {FEF7868F-750E-4C21-A04D-22707CC66879}.Release|x86.Build.0 = Release|Win32 + {FEF7958F-750E-4C21-A04D-22707CC66878}.Debug|ARM64.ActiveCfg = Debug|ARM64 + {FEF7958F-750E-4C21-A04D-22707CC66878}.Debug|ARM64.Build.0 = Debug|ARM64 + {FEF7958F-750E-4C21-A04D-22707CC66878}.Debug|ARM64EC.ActiveCfg = Debug|ARM64EC + {FEF7958F-750E-4C21-A04D-22707CC66878}.Debug|ARM64EC.Build.0 = Debug|ARM64EC {FEF7958F-750E-4C21-A04D-22707CC66878}.Debug|x64.ActiveCfg = Debug|x64 {FEF7958F-750E-4C21-A04D-22707CC66878}.Debug|x64.Build.0 = Debug|x64 {FEF7958F-750E-4C21-A04D-22707CC66878}.Debug|x86.ActiveCfg = Debug|Win32 {FEF7958F-750E-4C21-A04D-22707CC66878}.Debug|x86.Build.0 = Debug|Win32 + {FEF7958F-750E-4C21-A04D-22707CC66878}.Release|ARM64.ActiveCfg = Release|ARM64 + {FEF7958F-750E-4C21-A04D-22707CC66878}.Release|ARM64.Build.0 = Release|ARM64 + {FEF7958F-750E-4C21-A04D-22707CC66878}.Release|ARM64EC.ActiveCfg = Release|ARM64EC + {FEF7958F-750E-4C21-A04D-22707CC66878}.Release|ARM64EC.Build.0 = Release|ARM64EC {FEF7958F-750E-4C21-A04D-22707CC66878}.Release|x64.ActiveCfg = Release|x64 {FEF7958F-750E-4C21-A04D-22707CC66878}.Release|x64.Build.0 = Release|x64 {FEF7958F-750E-4C21-A04D-22707CC66878}.Release|x86.ActiveCfg = Release|Win32 {FEF7958F-750E-4C21-A04D-22707CC66878}.Release|x86.Build.0 = Release|Win32 + {FFF7958F-750E-4C21-A04D-22707CC66878}.Debug|ARM64.ActiveCfg = Debug|ARM64 + {FFF7958F-750E-4C21-A04D-22707CC66878}.Debug|ARM64.Build.0 = Debug|ARM64 + {FFF7958F-750E-4C21-A04D-22707CC66878}.Debug|ARM64EC.ActiveCfg = Debug|ARM64EC + {FFF7958F-750E-4C21-A04D-22707CC66878}.Debug|ARM64EC.Build.0 = 
Debug|ARM64EC {FFF7958F-750E-4C21-A04D-22707CC66878}.Debug|x64.ActiveCfg = Debug|x64 {FFF7958F-750E-4C21-A04D-22707CC66878}.Debug|x64.Build.0 = Debug|x64 {FFF7958F-750E-4C21-A04D-22707CC66878}.Debug|x86.ActiveCfg = Debug|Win32 {FFF7958F-750E-4C21-A04D-22707CC66878}.Debug|x86.Build.0 = Debug|Win32 + {FFF7958F-750E-4C21-A04D-22707CC66878}.Release|ARM64.ActiveCfg = Release|ARM64 + {FFF7958F-750E-4C21-A04D-22707CC66878}.Release|ARM64.Build.0 = Release|ARM64 + {FFF7958F-750E-4C21-A04D-22707CC66878}.Release|ARM64EC.ActiveCfg = Release|ARM64EC + {FFF7958F-750E-4C21-A04D-22707CC66878}.Release|ARM64EC.Build.0 = Release|ARM64EC {FFF7958F-750E-4C21-A04D-22707CC66878}.Release|x64.ActiveCfg = Release|x64 {FFF7958F-750E-4C21-A04D-22707CC66878}.Release|x64.Build.0 = Release|x64 {FFF7958F-750E-4C21-A04D-22707CC66878}.Release|x86.ActiveCfg = Release|Win32 diff --git a/depends/mimalloc/ide/vs2022/mimalloc.vcxproj b/depends/mimalloc/ide/vs2022/mimalloc.vcxproj deleted file mode 100644 index 00aaaffabb29..000000000000 --- a/depends/mimalloc/ide/vs2022/mimalloc.vcxproj +++ /dev/null @@ -1,255 +0,0 @@ - - - - - Debug - Win32 - - - Release - Win32 - - - Debug - x64 - - - Release - x64 - - - - 15.0 - {ABB5EAE7-B3E6-432E-B636-333449892EA6} - mimalloc - 10.0 - mimalloc - - - - StaticLibrary - true - v143 - - - StaticLibrary - false - v143 - true - - - StaticLibrary - true - v143 - - - StaticLibrary - false - v143 - true - - - - - - - - - - - - - - - - - - - - - $(SolutionDir)..\..\out\msvc-$(Platform)\$(Configuration)\ - $(SolutionDir)..\..\out\msvc-$(Platform)\$(ProjectName)\$(Configuration)\ - .lib - mimalloc-static - - - $(SolutionDir)..\..\out\msvc-$(Platform)\$(Configuration)\ - $(SolutionDir)..\..\out\msvc-$(Platform)\$(ProjectName)\$(Configuration)\ - .lib - mimalloc-static - - - $(SolutionDir)..\..\out\msvc-$(Platform)\$(Configuration)\ - $(SolutionDir)..\..\out\msvc-$(Platform)\$(ProjectName)\$(Configuration)\ - .lib - mimalloc-static - - - 
$(SolutionDir)..\..\out\msvc-$(Platform)\$(Configuration)\ - $(SolutionDir)..\..\out\msvc-$(Platform)\$(ProjectName)\$(Configuration)\ - .lib - mimalloc-static - - - - Level4 - Disabled - true - true - ../../include - MI_DEBUG=3;%(PreprocessorDefinitions); - CompileAsCpp - false - Default - - - - - - - - - - - Level4 - Disabled - true - true - ../../include - MI_DEBUG=3;%(PreprocessorDefinitions); - CompileAsCpp - false - stdcpp20 - - - - - - - - - - - - - - - - - - - Level4 - MaxSpeed - true - true - ../../include - %(PreprocessorDefinitions);NDEBUG - AssemblyAndSourceCode - $(IntDir) - false - false - Default - CompileAsCpp - true - Default - - - true - true - - - - - - - - - - - Level4 - MaxSpeed - true - true - ../../include - %(PreprocessorDefinitions);NDEBUG - AssemblyAndSourceCode - $(IntDir) - false - false - Default - CompileAsCpp - true - stdcpp20 - - - true - true - - - - - - - - - - - - - - - - - false - false - false - false - - - true - true - true - true - - - true - true - true - true - - - - - - false - - - - - - true - true - true - true - - - - - - - - - - - - - - - - - - - - - - \ No newline at end of file diff --git a/depends/mimalloc/include/mimalloc-internal.h b/depends/mimalloc/include/mimalloc-internal.h deleted file mode 100644 index 550b654338ad..000000000000 --- a/depends/mimalloc/include/mimalloc-internal.h +++ /dev/null @@ -1,1093 +0,0 @@ -/* ---------------------------------------------------------------------------- -Copyright (c) 2018-2022, Microsoft Research, Daan Leijen -This is free software; you can redistribute it and/or modify it under the -terms of the MIT license. A copy of the license can be found in the file -"LICENSE" at the root of this distribution. ------------------------------------------------------------------------------*/ -#pragma once -#ifndef MIMALLOC_INTERNAL_H -#define MIMALLOC_INTERNAL_H - -#include "mimalloc-types.h" -#include "mimalloc-track.h" - -#if (MI_DEBUG>0) -#define mi_trace_message(...) 
_mi_trace_message(__VA_ARGS__) -#else -#define mi_trace_message(...) -#endif - -#define MI_CACHE_LINE 64 -#if defined(_MSC_VER) -#pragma warning(disable:4127) // suppress constant conditional warning (due to MI_SECURE paths) -#pragma warning(disable:26812) // unscoped enum warning -#define mi_decl_noinline __declspec(noinline) -#define mi_decl_thread __declspec(thread) -#define mi_decl_cache_align __declspec(align(MI_CACHE_LINE)) -#elif (defined(__GNUC__) && (__GNUC__ >= 3)) || defined(__clang__) // includes clang and icc -#define mi_decl_noinline __attribute__((noinline)) -#define mi_decl_thread __thread -#define mi_decl_cache_align __attribute__((aligned(MI_CACHE_LINE))) -#else -#define mi_decl_noinline -#define mi_decl_thread __thread // hope for the best :-) -#define mi_decl_cache_align -#endif - -#if defined(__EMSCRIPTEN__) && !defined(__wasi__) -#define __wasi__ -#endif - -#if defined(__cplusplus) -#define mi_decl_externc extern "C" -#else -#define mi_decl_externc -#endif - -#if !defined(_WIN32) && !defined(__wasi__) -#define MI_USE_PTHREADS -#include -#endif - -// "options.c" -void _mi_fputs(mi_output_fun* out, void* arg, const char* prefix, const char* message); -void _mi_fprintf(mi_output_fun* out, void* arg, const char* fmt, ...); -void _mi_warning_message(const char* fmt, ...); -void _mi_verbose_message(const char* fmt, ...); -void _mi_trace_message(const char* fmt, ...); -void _mi_options_init(void); -void _mi_error_message(int err, const char* fmt, ...); - -// random.c -void _mi_random_init(mi_random_ctx_t* ctx); -void _mi_random_split(mi_random_ctx_t* ctx, mi_random_ctx_t* new_ctx); -uintptr_t _mi_random_next(mi_random_ctx_t* ctx); -uintptr_t _mi_heap_random_next(mi_heap_t* heap); -uintptr_t _mi_os_random_weak(uintptr_t extra_seed); -static inline uintptr_t _mi_random_shuffle(uintptr_t x); - -// init.c -extern mi_decl_cache_align mi_stats_t _mi_stats_main; -extern mi_decl_cache_align const mi_page_t _mi_page_empty; -bool _mi_is_main_thread(void); 
-size_t _mi_current_thread_count(void); -bool _mi_preloading(void); // true while the C runtime is not ready - -// os.c -size_t _mi_os_page_size(void); -void _mi_os_init(void); // called from process init -void* _mi_os_alloc(size_t size, mi_stats_t* stats); // to allocate thread local data -void _mi_os_free(void* p, size_t size, mi_stats_t* stats); // to free thread local data - -bool _mi_os_protect(void* addr, size_t size); -bool _mi_os_unprotect(void* addr, size_t size); -bool _mi_os_commit(void* addr, size_t size, bool* is_zero, mi_stats_t* stats); -bool _mi_os_decommit(void* p, size_t size, mi_stats_t* stats); -bool _mi_os_reset(void* p, size_t size, mi_stats_t* stats); -// bool _mi_os_unreset(void* p, size_t size, bool* is_zero, mi_stats_t* stats); -size_t _mi_os_good_alloc_size(size_t size); -bool _mi_os_has_overcommit(void); - -// arena.c -void* _mi_arena_alloc_aligned(size_t size, size_t alignment, bool* commit, bool* large, bool* is_pinned, bool* is_zero, mi_arena_id_t req_arena_id, size_t* memid, mi_os_tld_t* tld); -void* _mi_arena_alloc(size_t size, bool* commit, bool* large, bool* is_pinned, bool* is_zero, mi_arena_id_t req_arena_id, size_t* memid, mi_os_tld_t* tld); -void _mi_arena_free(void* p, size_t size, size_t memid, bool is_committed, mi_os_tld_t* tld); -mi_arena_id_t _mi_arena_id_none(void); -bool _mi_arena_memid_is_suitable(size_t memid, mi_arena_id_t req_arena_id); - -// "segment-cache.c" -void* _mi_segment_cache_pop(size_t size, mi_commit_mask_t* commit_mask, mi_commit_mask_t* decommit_mask, bool* large, bool* is_pinned, bool* is_zero, mi_arena_id_t req_arena_id, size_t* memid, mi_os_tld_t* tld); -bool _mi_segment_cache_push(void* start, size_t size, size_t memid, const mi_commit_mask_t* commit_mask, const mi_commit_mask_t* decommit_mask, bool is_large, bool is_pinned, mi_os_tld_t* tld); -void _mi_segment_cache_collect(bool force, mi_os_tld_t* tld); -void _mi_segment_map_allocated_at(const mi_segment_t* segment); -void 
_mi_segment_map_freed_at(const mi_segment_t* segment); - -// "segment.c" -mi_page_t* _mi_segment_page_alloc(mi_heap_t* heap, size_t block_wsize, mi_segments_tld_t* tld, mi_os_tld_t* os_tld); -void _mi_segment_page_free(mi_page_t* page, bool force, mi_segments_tld_t* tld); -void _mi_segment_page_abandon(mi_page_t* page, mi_segments_tld_t* tld); -bool _mi_segment_try_reclaim_abandoned( mi_heap_t* heap, bool try_all, mi_segments_tld_t* tld); -void _mi_segment_thread_collect(mi_segments_tld_t* tld); -void _mi_segment_huge_page_free(mi_segment_t* segment, mi_page_t* page, mi_block_t* block); - -uint8_t* _mi_segment_page_start(const mi_segment_t* segment, const mi_page_t* page, size_t* page_size); // page start for any page -void _mi_abandoned_reclaim_all(mi_heap_t* heap, mi_segments_tld_t* tld); -void _mi_abandoned_await_readers(void); -void _mi_abandoned_collect(mi_heap_t* heap, bool force, mi_segments_tld_t* tld); - - - -// "page.c" -void* _mi_malloc_generic(mi_heap_t* heap, size_t size, bool zero) mi_attr_noexcept mi_attr_malloc; - -void _mi_page_retire(mi_page_t* page) mi_attr_noexcept; // free the page if there are no other pages with many free blocks -void _mi_page_unfull(mi_page_t* page); -void _mi_page_free(mi_page_t* page, mi_page_queue_t* pq, bool force); // free the page -void _mi_page_abandon(mi_page_t* page, mi_page_queue_t* pq); // abandon the page, to be picked up by another thread... 
-void _mi_heap_delayed_free_all(mi_heap_t* heap); -bool _mi_heap_delayed_free_partial(mi_heap_t* heap); -void _mi_heap_collect_retired(mi_heap_t* heap, bool force); - -void _mi_page_use_delayed_free(mi_page_t* page, mi_delayed_t delay, bool override_never); -bool _mi_page_try_use_delayed_free(mi_page_t* page, mi_delayed_t delay, bool override_never); -size_t _mi_page_queue_append(mi_heap_t* heap, mi_page_queue_t* pq, mi_page_queue_t* append); -void _mi_deferred_free(mi_heap_t* heap, bool force); - -void _mi_page_free_collect(mi_page_t* page,bool force); -void _mi_page_reclaim(mi_heap_t* heap, mi_page_t* page); // callback from segments - -size_t _mi_bin_size(uint8_t bin); // for stats -uint8_t _mi_bin(size_t size); // for stats - -// "heap.c" -void _mi_heap_destroy_pages(mi_heap_t* heap); -void _mi_heap_collect_abandon(mi_heap_t* heap); -void _mi_heap_set_default_direct(mi_heap_t* heap); -bool _mi_heap_memid_is_suitable(mi_heap_t* heap, size_t memid); - -// "stats.c" -void _mi_stats_done(mi_stats_t* stats); - -mi_msecs_t _mi_clock_now(void); -mi_msecs_t _mi_clock_end(mi_msecs_t start); -mi_msecs_t _mi_clock_start(void); - -// "alloc.c" -void* _mi_page_malloc(mi_heap_t* heap, mi_page_t* page, size_t size, bool zero) mi_attr_noexcept; // called from `_mi_malloc_generic` -void* _mi_heap_malloc_zero(mi_heap_t* heap, size_t size, bool zero) mi_attr_noexcept; -void* _mi_heap_realloc_zero(mi_heap_t* heap, void* p, size_t newsize, bool zero) mi_attr_noexcept; -mi_block_t* _mi_page_ptr_unalign(const mi_segment_t* segment, const mi_page_t* page, const void* p); -bool _mi_free_delayed_block(mi_block_t* block); - -#if MI_DEBUG>1 -bool _mi_page_is_valid(mi_page_t* page); -#endif - - -// ------------------------------------------------------ -// Branches -// ------------------------------------------------------ - -#if defined(__GNUC__) || defined(__clang__) -#define mi_unlikely(x) (__builtin_expect(!!(x),false)) -#define mi_likely(x) (__builtin_expect(!!(x),true)) -#elif 
(defined(__cplusplus) && (__cplusplus >= 202002L)) || (defined(_MSVC_LANG) && _MSVC_LANG >= 202002L) -#define mi_unlikely(x) (x) [[unlikely]] -#define mi_likely(x) (x) [[likely]] -#else -#define mi_unlikely(x) (x) -#define mi_likely(x) (x) -#endif - -#ifndef __has_builtin -#define __has_builtin(x) 0 -#endif - - -/* ----------------------------------------------------------- - Error codes passed to `_mi_fatal_error` - All are recoverable but EFAULT is a serious error and aborts by default in secure mode. - For portability define undefined error codes using common Unix codes: - ------------------------------------------------------------ */ -#include -#ifndef EAGAIN // double free -#define EAGAIN (11) -#endif -#ifndef ENOMEM // out of memory -#define ENOMEM (12) -#endif -#ifndef EFAULT // corrupted free-list or meta-data -#define EFAULT (14) -#endif -#ifndef EINVAL // trying to free an invalid pointer -#define EINVAL (22) -#endif -#ifndef EOVERFLOW // count*size overflow -#define EOVERFLOW (75) -#endif - - -/* ----------------------------------------------------------- - Inlined definitions ------------------------------------------------------------ */ -#define MI_UNUSED(x) (void)(x) -#if (MI_DEBUG>0) -#define MI_UNUSED_RELEASE(x) -#else -#define MI_UNUSED_RELEASE(x) MI_UNUSED(x) -#endif - -#define MI_INIT4(x) x(),x(),x(),x() -#define MI_INIT8(x) MI_INIT4(x),MI_INIT4(x) -#define MI_INIT16(x) MI_INIT8(x),MI_INIT8(x) -#define MI_INIT32(x) MI_INIT16(x),MI_INIT16(x) -#define MI_INIT64(x) MI_INIT32(x),MI_INIT32(x) -#define MI_INIT128(x) MI_INIT64(x),MI_INIT64(x) -#define MI_INIT256(x) MI_INIT128(x),MI_INIT128(x) - - -// Is `x` a power of two? (0 is considered a power of two) -static inline bool _mi_is_power_of_two(uintptr_t x) { - return ((x & (x - 1)) == 0); -} - -// Is a pointer aligned? 
-static inline bool _mi_is_aligned(void* p, size_t alignment) { - mi_assert_internal(alignment != 0); - return (((uintptr_t)p % alignment) == 0); -} - -// Align upwards -static inline uintptr_t _mi_align_up(uintptr_t sz, size_t alignment) { - mi_assert_internal(alignment != 0); - uintptr_t mask = alignment - 1; - if ((alignment & mask) == 0) { // power of two? - return ((sz + mask) & ~mask); - } - else { - return (((sz + mask)/alignment)*alignment); - } -} - -// Align downwards -static inline uintptr_t _mi_align_down(uintptr_t sz, size_t alignment) { - mi_assert_internal(alignment != 0); - uintptr_t mask = alignment - 1; - if ((alignment & mask) == 0) { // power of two? - return (sz & ~mask); - } - else { - return ((sz / alignment) * alignment); - } -} - -// Divide upwards: `s <= _mi_divide_up(s,d)*d < s+d`. -static inline uintptr_t _mi_divide_up(uintptr_t size, size_t divider) { - mi_assert_internal(divider != 0); - return (divider == 0 ? size : ((size + divider - 1) / divider)); -} - -// Is memory zero initialized? -static inline bool mi_mem_is_zero(void* p, size_t size) { - for (size_t i = 0; i < size; i++) { - if (((uint8_t*)p)[i] != 0) return false; - } - return true; -} - - -// Align a byte size to a size in _machine words_, -// i.e. byte size == `wsize*sizeof(void*)`. 
-static inline size_t _mi_wsize_from_size(size_t size) { - mi_assert_internal(size <= SIZE_MAX - sizeof(uintptr_t)); - return (size + sizeof(uintptr_t) - 1) / sizeof(uintptr_t); -} - -// Overflow detecting multiply -#if __has_builtin(__builtin_umul_overflow) || (defined(__GNUC__) && (__GNUC__ >= 5)) -#include // UINT_MAX, ULONG_MAX -#if defined(_CLOCK_T) // for Illumos -#undef _CLOCK_T -#endif -static inline bool mi_mul_overflow(size_t count, size_t size, size_t* total) { - #if (SIZE_MAX == ULONG_MAX) - return __builtin_umull_overflow(count, size, (unsigned long *)total); - #elif (SIZE_MAX == UINT_MAX) - return __builtin_umul_overflow(count, size, (unsigned int *)total); - #else - return __builtin_umulll_overflow(count, size, (unsigned long long *)total); - #endif -} -#else /* __builtin_umul_overflow is unavailable */ -static inline bool mi_mul_overflow(size_t count, size_t size, size_t* total) { - #define MI_MUL_NO_OVERFLOW ((size_t)1 << (4*sizeof(size_t))) // sqrt(SIZE_MAX) - *total = count * size; - // note: gcc/clang optimize this to directly check the overflow flag - return ((size >= MI_MUL_NO_OVERFLOW || count >= MI_MUL_NO_OVERFLOW) && size > 0 && (SIZE_MAX / size) < count); -} -#endif - -// Safe multiply `count*size` into `total`; return `true` on overflow. -static inline bool mi_count_size_overflow(size_t count, size_t size, size_t* total) { - if (count==1) { // quick check for the case where count is one (common for C++ allocators) - *total = size; - return false; - } - else if mi_unlikely(mi_mul_overflow(count, size, total)) { - #if MI_DEBUG > 0 - _mi_error_message(EOVERFLOW, "allocation request is too large (%zu * %zu bytes)\n", count, size); - #endif - *total = SIZE_MAX; - return true; - } - else return false; -} - - -/* ---------------------------------------------------------------------------------------- -The thread local default heap: `_mi_get_default_heap` returns the thread local heap. 
-On most platforms (Windows, Linux, FreeBSD, NetBSD, etc), this just returns a -__thread local variable (`_mi_heap_default`). With the initial-exec TLS model this ensures -that the storage will always be available (allocated on the thread stacks). -On some platforms though we cannot use that when overriding `malloc` since the underlying -TLS implementation (or the loader) will call itself `malloc` on a first access and recurse. -We try to circumvent this in an efficient way: -- macOSX : we use an unused TLS slot from the OS allocated slots (MI_TLS_SLOT). On OSX, the - loader itself calls `malloc` even before the modules are initialized. -- OpenBSD: we use an unused slot from the pthread block (MI_TLS_PTHREAD_SLOT_OFS). -- DragonFly: defaults are working but seem slow compared to freeBSD (see PR #323) -------------------------------------------------------------------------------------------- */ - -extern const mi_heap_t _mi_heap_empty; // read-only empty heap, initial value of the thread local default heap -extern bool _mi_process_is_initialized; -mi_heap_t* _mi_heap_main_get(void); // statically allocated main backing heap - -#if defined(MI_MALLOC_OVERRIDE) -#if defined(__APPLE__) // macOS -#define MI_TLS_SLOT 89 // seems unused? -// #define MI_TLS_RECURSE_GUARD 1 -// other possible unused ones are 9, 29, __PTK_FRAMEWORK_JAVASCRIPTCORE_KEY4 (94), __PTK_FRAMEWORK_GC_KEY9 (112) and __PTK_FRAMEWORK_OLDGC_KEY9 (89) -// see -#elif defined(__OpenBSD__) -// use end bytes of a name; goes wrong if anyone uses names > 23 characters (ptrhread specifies 16) -// see -#define MI_TLS_PTHREAD_SLOT_OFS (6*sizeof(int) + 4*sizeof(void*) + 24) -// #elif defined(__DragonFly__) -// #warning "mimalloc is not working correctly on DragonFly yet." -// #define MI_TLS_PTHREAD_SLOT_OFS (4 + 1*sizeof(void*)) // offset `uniqueid` (also used by gdb?) 
-#elif defined(__ANDROID__) -// See issue #381 -#define MI_TLS_PTHREAD -#endif -#endif - -#if defined(MI_TLS_SLOT) -static inline void* mi_tls_slot(size_t slot) mi_attr_noexcept; // forward declaration -#elif defined(MI_TLS_PTHREAD_SLOT_OFS) -static inline mi_heap_t** mi_tls_pthread_heap_slot(void) { - pthread_t self = pthread_self(); - #if defined(__DragonFly__) - if (self==NULL) { - mi_heap_t* pheap_main = _mi_heap_main_get(); - return &pheap_main; - } - #endif - return (mi_heap_t**)((uint8_t*)self + MI_TLS_PTHREAD_SLOT_OFS); -} -#elif defined(MI_TLS_PTHREAD) -extern pthread_key_t _mi_heap_default_key; -#endif - -// Default heap to allocate from (if not using TLS- or pthread slots). -// Do not use this directly but use through `mi_heap_get_default()` (or the unchecked `mi_get_default_heap`). -// This thread local variable is only used when neither MI_TLS_SLOT, MI_TLS_PTHREAD, or MI_TLS_PTHREAD_SLOT_OFS are defined. -// However, on the Apple M1 we do use the address of this variable as the unique thread-id (issue #356). -extern mi_decl_thread mi_heap_t* _mi_heap_default; // default heap to allocate from - -static inline mi_heap_t* mi_get_default_heap(void) { -#if defined(MI_TLS_SLOT) - mi_heap_t* heap = (mi_heap_t*)mi_tls_slot(MI_TLS_SLOT); - if mi_unlikely(heap == NULL) { - #ifdef __GNUC__ - __asm(""); // prevent conditional load of the address of _mi_heap_empty - #endif - heap = (mi_heap_t*)&_mi_heap_empty; - } - return heap; -#elif defined(MI_TLS_PTHREAD_SLOT_OFS) - mi_heap_t* heap = *mi_tls_pthread_heap_slot(); - return (mi_unlikely(heap == NULL) ? (mi_heap_t*)&_mi_heap_empty : heap); -#elif defined(MI_TLS_PTHREAD) - mi_heap_t* heap = (mi_unlikely(_mi_heap_default_key == (pthread_key_t)(-1)) ? _mi_heap_main_get() : (mi_heap_t*)pthread_getspecific(_mi_heap_default_key)); - return (mi_unlikely(heap == NULL) ? 
(mi_heap_t*)&_mi_heap_empty : heap); -#else - #if defined(MI_TLS_RECURSE_GUARD) - if (mi_unlikely(!_mi_process_is_initialized)) return _mi_heap_main_get(); - #endif - return _mi_heap_default; -#endif -} - -static inline bool mi_heap_is_default(const mi_heap_t* heap) { - return (heap == mi_get_default_heap()); -} - -static inline bool mi_heap_is_backing(const mi_heap_t* heap) { - return (heap->tld->heap_backing == heap); -} - -static inline bool mi_heap_is_initialized(mi_heap_t* heap) { - mi_assert_internal(heap != NULL); - return (heap != &_mi_heap_empty); -} - -static inline uintptr_t _mi_ptr_cookie(const void* p) { - extern mi_heap_t _mi_heap_main; - mi_assert_internal(_mi_heap_main.cookie != 0); - return ((uintptr_t)p ^ _mi_heap_main.cookie); -} - -/* ----------------------------------------------------------- - Pages ------------------------------------------------------------ */ - -static inline mi_page_t* _mi_heap_get_free_small_page(mi_heap_t* heap, size_t size) { - mi_assert_internal(size <= (MI_SMALL_SIZE_MAX + MI_PADDING_SIZE)); - const size_t idx = _mi_wsize_from_size(size); - mi_assert_internal(idx < MI_PAGES_DIRECT); - return heap->pages_free_direct[idx]; -} - -// Get the page belonging to a certain size class -static inline mi_page_t* _mi_get_free_small_page(size_t size) { - return _mi_heap_get_free_small_page(mi_get_default_heap(), size); -} - -// Segment that contains the pointer -static inline mi_segment_t* _mi_ptr_segment(const void* p) { - // mi_assert_internal(p != NULL); - return (mi_segment_t*)((uintptr_t)p & ~MI_SEGMENT_MASK); -} - -static inline mi_page_t* mi_slice_to_page(mi_slice_t* s) { - mi_assert_internal(s->slice_offset== 0 && s->slice_count > 0); - return (mi_page_t*)(s); -} - -static inline mi_slice_t* mi_page_to_slice(mi_page_t* p) { - mi_assert_internal(p->slice_offset== 0 && p->slice_count > 0); - return (mi_slice_t*)(p); -} - -// Segment belonging to a page -static inline mi_segment_t* _mi_page_segment(const mi_page_t* page) { - 
mi_segment_t* segment = _mi_ptr_segment(page); - mi_assert_internal(segment == NULL || ((mi_slice_t*)page >= segment->slices && (mi_slice_t*)page < segment->slices + segment->slice_entries)); - return segment; -} - -static inline mi_slice_t* mi_slice_first(const mi_slice_t* slice) { - mi_slice_t* start = (mi_slice_t*)((uint8_t*)slice - slice->slice_offset); - mi_assert_internal(start >= _mi_ptr_segment(slice)->slices); - mi_assert_internal(start->slice_offset == 0); - mi_assert_internal(start + start->slice_count > slice); - return start; -} - -// Get the page containing the pointer -static inline mi_page_t* _mi_segment_page_of(const mi_segment_t* segment, const void* p) { - ptrdiff_t diff = (uint8_t*)p - (uint8_t*)segment; - mi_assert_internal(diff >= 0 && diff < (ptrdiff_t)MI_SEGMENT_SIZE); - size_t idx = (size_t)diff >> MI_SEGMENT_SLICE_SHIFT; - mi_assert_internal(idx < segment->slice_entries); - mi_slice_t* slice0 = (mi_slice_t*)&segment->slices[idx]; - mi_slice_t* slice = mi_slice_first(slice0); // adjust to the block that holds the page data - mi_assert_internal(slice->slice_offset == 0); - mi_assert_internal(slice >= segment->slices && slice < segment->slices + segment->slice_entries); - return mi_slice_to_page(slice); -} - -// Quick page start for initialized pages -static inline uint8_t* _mi_page_start(const mi_segment_t* segment, const mi_page_t* page, size_t* page_size) { - return _mi_segment_page_start(segment, page, page_size); -} - -// Get the page containing the pointer -static inline mi_page_t* _mi_ptr_page(void* p) { - return _mi_segment_page_of(_mi_ptr_segment(p), p); -} - -// Get the block size of a page (special case for huge objects) -static inline size_t mi_page_block_size(const mi_page_t* page) { - const size_t bsize = page->xblock_size; - mi_assert_internal(bsize > 0); - if mi_likely(bsize < MI_HUGE_BLOCK_SIZE) { - return bsize; - } - else { - size_t psize; - _mi_segment_page_start(_mi_page_segment(page), page, &psize); - return psize; - } 
-} - -// Get the usable block size of a page without fixed padding. -// This may still include internal padding due to alignment and rounding up size classes. -static inline size_t mi_page_usable_block_size(const mi_page_t* page) { - return mi_page_block_size(page) - MI_PADDING_SIZE; -} - -// size of a segment -static inline size_t mi_segment_size(mi_segment_t* segment) { - return segment->segment_slices * MI_SEGMENT_SLICE_SIZE; -} - -static inline uint8_t* mi_segment_end(mi_segment_t* segment) { - return (uint8_t*)segment + mi_segment_size(segment); -} - -// Thread free access -static inline mi_block_t* mi_page_thread_free(const mi_page_t* page) { - return (mi_block_t*)(mi_atomic_load_relaxed(&((mi_page_t*)page)->xthread_free) & ~3); -} - -static inline mi_delayed_t mi_page_thread_free_flag(const mi_page_t* page) { - return (mi_delayed_t)(mi_atomic_load_relaxed(&((mi_page_t*)page)->xthread_free) & 3); -} - -// Heap access -static inline mi_heap_t* mi_page_heap(const mi_page_t* page) { - return (mi_heap_t*)(mi_atomic_load_relaxed(&((mi_page_t*)page)->xheap)); -} - -static inline void mi_page_set_heap(mi_page_t* page, mi_heap_t* heap) { - mi_assert_internal(mi_page_thread_free_flag(page) != MI_DELAYED_FREEING); - mi_atomic_store_release(&page->xheap,(uintptr_t)heap); -} - -// Thread free flag helpers -static inline mi_block_t* mi_tf_block(mi_thread_free_t tf) { - return (mi_block_t*)(tf & ~0x03); -} -static inline mi_delayed_t mi_tf_delayed(mi_thread_free_t tf) { - return (mi_delayed_t)(tf & 0x03); -} -static inline mi_thread_free_t mi_tf_make(mi_block_t* block, mi_delayed_t delayed) { - return (mi_thread_free_t)((uintptr_t)block | (uintptr_t)delayed); -} -static inline mi_thread_free_t mi_tf_set_delayed(mi_thread_free_t tf, mi_delayed_t delayed) { - return mi_tf_make(mi_tf_block(tf),delayed); -} -static inline mi_thread_free_t mi_tf_set_block(mi_thread_free_t tf, mi_block_t* block) { - return mi_tf_make(block, mi_tf_delayed(tf)); -} - -// are all blocks in a page 
freed? -// note: needs up-to-date used count, (as the `xthread_free` list may not be empty). see `_mi_page_collect_free`. -static inline bool mi_page_all_free(const mi_page_t* page) { - mi_assert_internal(page != NULL); - return (page->used == 0); -} - -// are there any available blocks? -static inline bool mi_page_has_any_available(const mi_page_t* page) { - mi_assert_internal(page != NULL && page->reserved > 0); - return (page->used < page->reserved || (mi_page_thread_free(page) != NULL)); -} - -// are there immediately available blocks, i.e. blocks available on the free list. -static inline bool mi_page_immediate_available(const mi_page_t* page) { - mi_assert_internal(page != NULL); - return (page->free != NULL); -} - -// is more than 7/8th of a page in use? -static inline bool mi_page_mostly_used(const mi_page_t* page) { - if (page==NULL) return true; - uint16_t frac = page->reserved / 8U; - return (page->reserved - page->used <= frac); -} - -static inline mi_page_queue_t* mi_page_queue(const mi_heap_t* heap, size_t size) { - return &((mi_heap_t*)heap)->pages[_mi_bin(size)]; -} - - - -//----------------------------------------------------------- -// Page flags -//----------------------------------------------------------- -static inline bool mi_page_is_in_full(const mi_page_t* page) { - return page->flags.x.in_full; -} - -static inline void mi_page_set_in_full(mi_page_t* page, bool in_full) { - page->flags.x.in_full = in_full; -} - -static inline bool mi_page_has_aligned(const mi_page_t* page) { - return page->flags.x.has_aligned; -} - -static inline void mi_page_set_has_aligned(mi_page_t* page, bool has_aligned) { - page->flags.x.has_aligned = has_aligned; -} - - -/* ------------------------------------------------------------------- -Encoding/Decoding the free list next pointers - -This is to protect against buffer overflow exploits where the -free list is mutated. Many hardened allocators xor the next pointer `p` -with a secret key `k1`, as `p^k1`. 
This prevents overwriting with known -values but might be still too weak: if the attacker can guess -the pointer `p` this can reveal `k1` (since `p^k1^p == k1`). -Moreover, if multiple blocks can be read as well, the attacker can -xor both as `(p1^k1) ^ (p2^k1) == p1^p2` which may reveal a lot -about the pointers (and subsequently `k1`). - -Instead mimalloc uses an extra key `k2` and encodes as `((p^k2)<<> (MI_INTPTR_BITS - shift)))); -} -static inline uintptr_t mi_rotr(uintptr_t x, uintptr_t shift) { - shift %= MI_INTPTR_BITS; - return (shift==0 ? x : ((x >> shift) | (x << (MI_INTPTR_BITS - shift)))); -} - -static inline void* mi_ptr_decode(const void* null, const mi_encoded_t x, const uintptr_t* keys) { - void* p = (void*)(mi_rotr(x - keys[0], keys[0]) ^ keys[1]); - return (p==null ? NULL : p); -} - -static inline mi_encoded_t mi_ptr_encode(const void* null, const void* p, const uintptr_t* keys) { - uintptr_t x = (uintptr_t)(p==NULL ? null : p); - return mi_rotl(x ^ keys[1], keys[0]) + keys[0]; -} - -static inline mi_block_t* mi_block_nextx( const void* null, const mi_block_t* block, const uintptr_t* keys ) { - mi_track_mem_defined(block,sizeof(mi_block_t)); - mi_block_t* next; - #ifdef MI_ENCODE_FREELIST - next = (mi_block_t*)mi_ptr_decode(null, block->next, keys); - #else - MI_UNUSED(keys); MI_UNUSED(null); - next = (mi_block_t*)block->next; - #endif - mi_track_mem_noaccess(block,sizeof(mi_block_t)); - return next; -} - -static inline void mi_block_set_nextx(const void* null, mi_block_t* block, const mi_block_t* next, const uintptr_t* keys) { - mi_track_mem_undefined(block,sizeof(mi_block_t)); - #ifdef MI_ENCODE_FREELIST - block->next = mi_ptr_encode(null, next, keys); - #else - MI_UNUSED(keys); MI_UNUSED(null); - block->next = (mi_encoded_t)next; - #endif - mi_track_mem_noaccess(block,sizeof(mi_block_t)); -} - -static inline mi_block_t* mi_block_next(const mi_page_t* page, const mi_block_t* block) { - #ifdef MI_ENCODE_FREELIST - mi_block_t* next = 
mi_block_nextx(page,block,page->keys); - // check for free list corruption: is `next` at least in the same page? - // TODO: check if `next` is `page->block_size` aligned? - if mi_unlikely(next!=NULL && !mi_is_in_same_page(block, next)) { - _mi_error_message(EFAULT, "corrupted free list entry of size %zub at %p: value 0x%zx\n", mi_page_block_size(page), block, (uintptr_t)next); - next = NULL; - } - return next; - #else - MI_UNUSED(page); - return mi_block_nextx(page,block,NULL); - #endif -} - -static inline void mi_block_set_next(const mi_page_t* page, mi_block_t* block, const mi_block_t* next) { - #ifdef MI_ENCODE_FREELIST - mi_block_set_nextx(page,block,next, page->keys); - #else - MI_UNUSED(page); - mi_block_set_nextx(page,block,next,NULL); - #endif -} - - -// ------------------------------------------------------------------- -// commit mask -// ------------------------------------------------------------------- - -static inline void mi_commit_mask_create_empty(mi_commit_mask_t* cm) { - for (size_t i = 0; i < MI_COMMIT_MASK_FIELD_COUNT; i++) { - cm->mask[i] = 0; - } -} - -static inline void mi_commit_mask_create_full(mi_commit_mask_t* cm) { - for (size_t i = 0; i < MI_COMMIT_MASK_FIELD_COUNT; i++) { - cm->mask[i] = ~((size_t)0); - } -} - -static inline bool mi_commit_mask_is_empty(const mi_commit_mask_t* cm) { - for (size_t i = 0; i < MI_COMMIT_MASK_FIELD_COUNT; i++) { - if (cm->mask[i] != 0) return false; - } - return true; -} - -static inline bool mi_commit_mask_is_full(const mi_commit_mask_t* cm) { - for (size_t i = 0; i < MI_COMMIT_MASK_FIELD_COUNT; i++) { - if (cm->mask[i] != ~((size_t)0)) return false; - } - return true; -} - -// defined in `segment.c`: -size_t _mi_commit_mask_committed_size(const mi_commit_mask_t* cm, size_t total); -size_t _mi_commit_mask_next_run(const mi_commit_mask_t* cm, size_t* idx); - -#define mi_commit_mask_foreach(cm,idx,count) \ - idx = 0; \ - while ((count = _mi_commit_mask_next_run(cm,&idx)) > 0) { - -#define 
mi_commit_mask_foreach_end() \ - idx += count; \ - } - - - - -// ------------------------------------------------------------------- -// Fast "random" shuffle -// ------------------------------------------------------------------- - -static inline uintptr_t _mi_random_shuffle(uintptr_t x) { - if (x==0) { x = 17; } // ensure we don't get stuck in generating zeros -#if (MI_INTPTR_SIZE==8) - // by Sebastiano Vigna, see: - x ^= x >> 30; - x *= 0xbf58476d1ce4e5b9UL; - x ^= x >> 27; - x *= 0x94d049bb133111ebUL; - x ^= x >> 31; -#elif (MI_INTPTR_SIZE==4) - // by Chris Wellons, see: - x ^= x >> 16; - x *= 0x7feb352dUL; - x ^= x >> 15; - x *= 0x846ca68bUL; - x ^= x >> 16; -#endif - return x; -} - -// ------------------------------------------------------------------- -// Optimize numa node access for the common case (= one node) -// ------------------------------------------------------------------- - -int _mi_os_numa_node_get(mi_os_tld_t* tld); -size_t _mi_os_numa_node_count_get(void); - -extern _Atomic(size_t) _mi_numa_node_count; -static inline int _mi_os_numa_node(mi_os_tld_t* tld) { - if mi_likely(mi_atomic_load_relaxed(&_mi_numa_node_count) == 1) { return 0; } - else return _mi_os_numa_node_get(tld); -} -static inline size_t _mi_os_numa_node_count(void) { - const size_t count = mi_atomic_load_relaxed(&_mi_numa_node_count); - if mi_likely(count > 0) { return count; } - else return _mi_os_numa_node_count_get(); -} - - -// ------------------------------------------------------------------- -// Getting the thread id should be performant as it is called in the -// fast path of `_mi_free` and we specialize for various platforms. -// We only require _mi_threadid() to return a unique id for each thread. 
-// ------------------------------------------------------------------- -#if defined(_WIN32) - -#define WIN32_LEAN_AND_MEAN -#include -static inline mi_threadid_t _mi_thread_id(void) mi_attr_noexcept { - // Windows: works on Intel and ARM in both 32- and 64-bit - return (uintptr_t)NtCurrentTeb(); -} - -// We use assembly for a fast thread id on the main platforms. The TLS layout depends on -// both the OS and libc implementation so we use specific tests for each main platform. -// If you test on another platform and it works please send a PR :-) -// see also https://akkadia.org/drepper/tls.pdf for more info on the TLS register. -#elif defined(__GNUC__) && ( \ - (defined(__GLIBC__) && (defined(__x86_64__) || defined(__i386__) || defined(__arm__) || defined(__aarch64__))) \ - || (defined(__APPLE__) && (defined(__x86_64__) || defined(__aarch64__))) \ - || (defined(__BIONIC__) && (defined(__x86_64__) || defined(__i386__) || defined(__arm__) || defined(__aarch64__))) \ - || (defined(__FreeBSD__) && (defined(__x86_64__) || defined(__i386__) || defined(__aarch64__))) \ - || (defined(__OpenBSD__) && (defined(__x86_64__) || defined(__i386__) || defined(__aarch64__))) \ - ) - -static inline void* mi_tls_slot(size_t slot) mi_attr_noexcept { - void* res; - const size_t ofs = (slot*sizeof(void*)); - #if defined(__i386__) - __asm__("movl %%gs:%1, %0" : "=r" (res) : "m" (*((void**)ofs)) : ); // x86 32-bit always uses GS - #elif defined(__APPLE__) && defined(__x86_64__) - __asm__("movq %%gs:%1, %0" : "=r" (res) : "m" (*((void**)ofs)) : ); // x86_64 macOSX uses GS - #elif defined(__x86_64__) && (MI_INTPTR_SIZE==4) - __asm__("movl %%fs:%1, %0" : "=r" (res) : "m" (*((void**)ofs)) : ); // x32 ABI - #elif defined(__x86_64__) - __asm__("movq %%fs:%1, %0" : "=r" (res) : "m" (*((void**)ofs)) : ); // x86_64 Linux, BSD uses FS - #elif defined(__arm__) - void** tcb; MI_UNUSED(ofs); - __asm__ volatile ("mrc p15, 0, %0, c13, c0, 3\nbic %0, %0, #3" : "=r" (tcb)); - res = tcb[slot]; - #elif 
defined(__aarch64__) - void** tcb; MI_UNUSED(ofs); - #if defined(__APPLE__) // M1, issue #343 - __asm__ volatile ("mrs %0, tpidrro_el0\nbic %0, %0, #7" : "=r" (tcb)); - #else - __asm__ volatile ("mrs %0, tpidr_el0" : "=r" (tcb)); - #endif - res = tcb[slot]; - #endif - return res; -} - -// setting a tls slot is only used on macOS for now -static inline void mi_tls_slot_set(size_t slot, void* value) mi_attr_noexcept { - const size_t ofs = (slot*sizeof(void*)); - #if defined(__i386__) - __asm__("movl %1,%%gs:%0" : "=m" (*((void**)ofs)) : "rn" (value) : ); // 32-bit always uses GS - #elif defined(__APPLE__) && defined(__x86_64__) - __asm__("movq %1,%%gs:%0" : "=m" (*((void**)ofs)) : "rn" (value) : ); // x86_64 macOS uses GS - #elif defined(__x86_64__) && (MI_INTPTR_SIZE==4) - __asm__("movl %1,%%fs:%0" : "=m" (*((void**)ofs)) : "rn" (value) : ); // x32 ABI - #elif defined(__x86_64__) - __asm__("movq %1,%%fs:%0" : "=m" (*((void**)ofs)) : "rn" (value) : ); // x86_64 Linux, BSD uses FS - #elif defined(__arm__) - void** tcb; MI_UNUSED(ofs); - __asm__ volatile ("mrc p15, 0, %0, c13, c0, 3\nbic %0, %0, #3" : "=r" (tcb)); - tcb[slot] = value; - #elif defined(__aarch64__) - void** tcb; MI_UNUSED(ofs); - #if defined(__APPLE__) // M1, issue #343 - __asm__ volatile ("mrs %0, tpidrro_el0\nbic %0, %0, #7" : "=r" (tcb)); - #else - __asm__ volatile ("mrs %0, tpidr_el0" : "=r" (tcb)); - #endif - tcb[slot] = value; - #endif -} - -static inline mi_threadid_t _mi_thread_id(void) mi_attr_noexcept { - #if defined(__BIONIC__) - // issue #384, #495: on the Bionic libc (Android), slot 1 is the thread id - // see: https://github.com/aosp-mirror/platform_bionic/blob/c44b1d0676ded732df4b3b21c5f798eacae93228/libc/platform/bionic/tls_defines.h#L86 - return (uintptr_t)mi_tls_slot(1); - #else - // in all our other targets, slot 0 is the thread id - // glibc: https://sourceware.org/git/?p=glibc.git;a=blob_plain;f=sysdeps/x86_64/nptl/tls.h - // apple: 
https://github.com/apple/darwin-xnu/blob/main/libsyscall/os/tsd.h#L36 - return (uintptr_t)mi_tls_slot(0); - #endif -} - -#else - -// otherwise use portable C, taking the address of a thread local variable (this is still very fast on most platforms). -static inline mi_threadid_t _mi_thread_id(void) mi_attr_noexcept { - return (uintptr_t)&_mi_heap_default; -} - -#endif - - -// ----------------------------------------------------------------------- -// Count bits: trailing or leading zeros (with MI_INTPTR_BITS on all zero) -// ----------------------------------------------------------------------- - -#if defined(__GNUC__) - -#include // LONG_MAX -#define MI_HAVE_FAST_BITSCAN -static inline size_t mi_clz(uintptr_t x) { - if (x==0) return MI_INTPTR_BITS; -#if (INTPTR_MAX == LONG_MAX) - return __builtin_clzl(x); -#else - return __builtin_clzll(x); -#endif -} -static inline size_t mi_ctz(uintptr_t x) { - if (x==0) return MI_INTPTR_BITS; -#if (INTPTR_MAX == LONG_MAX) - return __builtin_ctzl(x); -#else - return __builtin_ctzll(x); -#endif -} - -#elif defined(_MSC_VER) - -#include // LONG_MAX -#define MI_HAVE_FAST_BITSCAN -static inline size_t mi_clz(uintptr_t x) { - if (x==0) return MI_INTPTR_BITS; - unsigned long idx; -#if (INTPTR_MAX == LONG_MAX) - _BitScanReverse(&idx, x); -#else - _BitScanReverse64(&idx, x); -#endif - return ((MI_INTPTR_BITS - 1) - idx); -} -static inline size_t mi_ctz(uintptr_t x) { - if (x==0) return MI_INTPTR_BITS; - unsigned long idx; -#if (INTPTR_MAX == LONG_MAX) - _BitScanForward(&idx, x); -#else - _BitScanForward64(&idx, x); -#endif - return idx; -} - -#else -static inline size_t mi_ctz32(uint32_t x) { - // de Bruijn multiplication, see - static const unsigned char debruijn[32] = { - 0, 1, 28, 2, 29, 14, 24, 3, 30, 22, 20, 15, 25, 17, 4, 8, - 31, 27, 13, 23, 21, 19, 16, 7, 26, 12, 18, 6, 11, 5, 10, 9 - }; - if (x==0) return 32; - return debruijn[((x & -(int32_t)x) * 0x077CB531UL) >> 27]; -} -static inline size_t mi_clz32(uint32_t x) { - // de 
Bruijn multiplication, see - static const uint8_t debruijn[32] = { - 31, 22, 30, 21, 18, 10, 29, 2, 20, 17, 15, 13, 9, 6, 28, 1, - 23, 19, 11, 3, 16, 14, 7, 24, 12, 4, 8, 25, 5, 26, 27, 0 - }; - if (x==0) return 32; - x |= x >> 1; - x |= x >> 2; - x |= x >> 4; - x |= x >> 8; - x |= x >> 16; - return debruijn[(uint32_t)(x * 0x07C4ACDDUL) >> 27]; -} - -static inline size_t mi_clz(uintptr_t x) { - if (x==0) return MI_INTPTR_BITS; -#if (MI_INTPTR_BITS <= 32) - return mi_clz32((uint32_t)x); -#else - size_t count = mi_clz32((uint32_t)(x >> 32)); - if (count < 32) return count; - return (32 + mi_clz32((uint32_t)x)); -#endif -} -static inline size_t mi_ctz(uintptr_t x) { - if (x==0) return MI_INTPTR_BITS; -#if (MI_INTPTR_BITS <= 32) - return mi_ctz32((uint32_t)x); -#else - size_t count = mi_ctz32((uint32_t)x); - if (count < 32) return count; - return (32 + mi_ctz32((uint32_t)(x>>32))); -#endif -} - -#endif - -// "bit scan reverse": Return index of the highest bit (or MI_INTPTR_BITS if `x` is zero) -static inline size_t mi_bsr(uintptr_t x) { - return (x==0 ? MI_INTPTR_BITS : MI_INTPTR_BITS - 1 - mi_clz(x)); -} - - -// --------------------------------------------------------------------------------- -// Provide our own `_mi_memcpy` for potential performance optimizations. -// -// For now, only on Windows with msvc/clang-cl we optimize to `rep movsb` if -// we happen to run on x86/x64 cpu's that have "fast short rep movsb" (FSRM) support -// (AMD Zen3+ (~2020) or Intel Ice Lake+ (~2017). See also issue #201 and pr #253. 
-// --------------------------------------------------------------------------------- - -#if !MI_TRACK_ENABLED && defined(_WIN32) && (defined(_M_IX86) || defined(_M_X64)) -#include -#include -extern bool _mi_cpu_has_fsrm; -static inline void _mi_memcpy(void* dst, const void* src, size_t n) { - if (_mi_cpu_has_fsrm) { - __movsb((unsigned char*)dst, (const unsigned char*)src, n); - } - else { - memcpy(dst, src, n); - } -} -static inline void _mi_memzero(void* dst, size_t n) { - if (_mi_cpu_has_fsrm) { - __stosb((unsigned char*)dst, 0, n); - } - else { - memset(dst, 0, n); - } -} -#else -#include -static inline void _mi_memcpy(void* dst, const void* src, size_t n) { - memcpy(dst, src, n); -} -static inline void _mi_memzero(void* dst, size_t n) { - memset(dst, 0, n); -} -#endif - - -// ------------------------------------------------------------------------------- -// The `_mi_memcpy_aligned` can be used if the pointers are machine-word aligned -// This is used for example in `mi_realloc`. -// ------------------------------------------------------------------------------- - -#if (defined(__GNUC__) && (__GNUC__ >= 4)) || defined(__clang__) -// On GCC/CLang we provide a hint that the pointers are word aligned. 
-#include -static inline void _mi_memcpy_aligned(void* dst, const void* src, size_t n) { - mi_assert_internal(((uintptr_t)dst % MI_INTPTR_SIZE == 0) && ((uintptr_t)src % MI_INTPTR_SIZE == 0)); - void* adst = __builtin_assume_aligned(dst, MI_INTPTR_SIZE); - const void* asrc = __builtin_assume_aligned(src, MI_INTPTR_SIZE); - _mi_memcpy(adst, asrc, n); -} - -static inline void _mi_memzero_aligned(void* dst, size_t n) { - mi_assert_internal((uintptr_t)dst % MI_INTPTR_SIZE == 0); - void* adst = __builtin_assume_aligned(dst, MI_INTPTR_SIZE); - _mi_memzero(adst, n); -} -#else -// Default fallback on `_mi_memcpy` -static inline void _mi_memcpy_aligned(void* dst, const void* src, size_t n) { - mi_assert_internal(((uintptr_t)dst % MI_INTPTR_SIZE == 0) && ((uintptr_t)src % MI_INTPTR_SIZE == 0)); - _mi_memcpy(dst, src, n); -} - -static inline void _mi_memzero_aligned(void* dst, size_t n) { - mi_assert_internal((uintptr_t)dst % MI_INTPTR_SIZE == 0); - _mi_memzero(dst, n); -} -#endif - - -#endif diff --git a/depends/mimalloc/include/mimalloc-new-delete.h b/depends/mimalloc/include/mimalloc-new-delete.h index 1c12fad2fc23..c16f4a6653d9 100644 --- a/depends/mimalloc/include/mimalloc-new-delete.h +++ b/depends/mimalloc/include/mimalloc-new-delete.h @@ -22,17 +22,26 @@ terms of the MIT license. 
A copy of the license can be found in the file #include #include + #if defined(_MSC_VER) && defined(_Ret_notnull_) && defined(_Post_writable_byte_size_) + // stay consistent with VCRT definitions + #define mi_decl_new(n) mi_decl_nodiscard mi_decl_restrict _Ret_notnull_ _Post_writable_byte_size_(n) + #define mi_decl_new_nothrow(n) mi_decl_nodiscard mi_decl_restrict _Ret_maybenull_ _Success_(return != NULL) _Post_writable_byte_size_(n) + #else + #define mi_decl_new(n) mi_decl_nodiscard mi_decl_restrict + #define mi_decl_new_nothrow(n) mi_decl_nodiscard mi_decl_restrict + #endif + void operator delete(void* p) noexcept { mi_free(p); }; void operator delete[](void* p) noexcept { mi_free(p); }; void operator delete (void* p, const std::nothrow_t&) noexcept { mi_free(p); } void operator delete[](void* p, const std::nothrow_t&) noexcept { mi_free(p); } - void* operator new(std::size_t n) noexcept(false) { return mi_new(n); } - void* operator new[](std::size_t n) noexcept(false) { return mi_new(n); } + mi_decl_new(n) void* operator new(std::size_t n) noexcept(false) { return mi_new(n); } + mi_decl_new(n) void* operator new[](std::size_t n) noexcept(false) { return mi_new(n); } - void* operator new (std::size_t n, const std::nothrow_t& tag) noexcept { (void)(tag); return mi_new_nothrow(n); } - void* operator new[](std::size_t n, const std::nothrow_t& tag) noexcept { (void)(tag); return mi_new_nothrow(n); } + mi_decl_new_nothrow(n) void* operator new (std::size_t n, const std::nothrow_t& tag) noexcept { (void)(tag); return mi_new_nothrow(n); } + mi_decl_new_nothrow(n) void* operator new[](std::size_t n, const std::nothrow_t& tag) noexcept { (void)(tag); return mi_new_nothrow(n); } #if (__cplusplus >= 201402L || _MSC_VER >= 1916) void operator delete (void* p, std::size_t n) noexcept { mi_free_size(p,n); }; @@ -46,7 +55,7 @@ terms of the MIT license. 
A copy of the license can be found in the file void operator delete[](void* p, std::size_t n, std::align_val_t al) noexcept { mi_free_size_aligned(p, n, static_cast(al)); }; void operator delete (void* p, std::align_val_t al, const std::nothrow_t&) noexcept { mi_free_aligned(p, static_cast(al)); } void operator delete[](void* p, std::align_val_t al, const std::nothrow_t&) noexcept { mi_free_aligned(p, static_cast(al)); } - + void* operator new (std::size_t n, std::align_val_t al) noexcept(false) { return mi_new_aligned(n, static_cast(al)); } void* operator new[](std::size_t n, std::align_val_t al) noexcept(false) { return mi_new_aligned(n, static_cast(al)); } void* operator new (std::size_t n, std::align_val_t al, const std::nothrow_t&) noexcept { return mi_new_aligned_nothrow(n, static_cast(al)); } diff --git a/depends/mimalloc/include/mimalloc-override.h b/depends/mimalloc/include/mimalloc-override.h index c63b0b91a7c2..48a8a6226a05 100644 --- a/depends/mimalloc/include/mimalloc-override.h +++ b/depends/mimalloc/include/mimalloc-override.h @@ -24,7 +24,7 @@ not accidentally mix pointers from different allocators). #define free(p) mi_free(p) #define strdup(s) mi_strdup(s) -#define strndup(s,n) mi_strndup(s,n) +#define strndup(s,n) mi_strndup(s,n) #define realpath(f,n) mi_realpath(f,n) // Microsoft extensions @@ -43,6 +43,7 @@ not accidentally mix pointers from different allocators). 
#define reallocf(p,n) mi_reallocf(p,n) #define malloc_size(p) mi_usable_size(p) #define malloc_usable_size(p) mi_usable_size(p) +#define malloc_good_size(sz) mi_malloc_good_size(sz) #define cfree(p) mi_free(p) #define valloc(n) mi_valloc(n) diff --git a/depends/mimalloc/include/mimalloc-stats.h b/depends/mimalloc/include/mimalloc-stats.h new file mode 100644 index 000000000000..631f43bbec88 --- /dev/null +++ b/depends/mimalloc/include/mimalloc-stats.h @@ -0,0 +1,117 @@ +/* ---------------------------------------------------------------------------- +Copyright (c) 2018-2025, Microsoft Research, Daan Leijen +This is free software; you can redistribute it and/or modify it under the +terms of the MIT license. A copy of the license can be found in the file +"LICENSE" at the root of this distribution. +-----------------------------------------------------------------------------*/ +#pragma once +#ifndef MIMALLOC_STATS_H +#define MIMALLOC_STATS_H + +#include +#include + +#define MI_STAT_VERSION 2 // increased on every backward incompatible change + +// count allocation over time +typedef struct mi_stat_count_s { + int64_t total; // total allocated + int64_t peak; // peak allocation + int64_t current; // current allocation +} mi_stat_count_t; + +// counters only increase +typedef struct mi_stat_counter_s { + int64_t total; // total count +} mi_stat_counter_t; + +#define MI_STAT_FIELDS() \ + MI_STAT_COUNT(pages) /* count of mimalloc pages */ \ + MI_STAT_COUNT(reserved) /* reserved memory bytes */ \ + MI_STAT_COUNT(committed) /* committed bytes */ \ + MI_STAT_COUNT(reset) /* reset bytes */ \ + MI_STAT_COUNT(purged) /* purged bytes */ \ + MI_STAT_COUNT(page_committed) /* committed memory inside pages */ \ + MI_STAT_COUNT(pages_abandoned) /* abandonded pages count */ \ + MI_STAT_COUNT(threads) /* number of threads */ \ + MI_STAT_COUNT(malloc_normal) /* allocated bytes <= MI_LARGE_OBJ_SIZE_MAX */ \ + MI_STAT_COUNT(malloc_huge) /* allocated bytes in huge pages */ \ + 
MI_STAT_COUNT(malloc_requested) /* malloc requested bytes */ \ + \ + MI_STAT_COUNTER(mmap_calls) \ + MI_STAT_COUNTER(commit_calls) \ + MI_STAT_COUNTER(reset_calls) \ + MI_STAT_COUNTER(purge_calls) \ + MI_STAT_COUNTER(arena_count) /* number of memory arena's */ \ + MI_STAT_COUNTER(malloc_normal_count) /* number of blocks <= MI_LARGE_OBJ_SIZE_MAX */ \ + MI_STAT_COUNTER(malloc_huge_count) /* number of huge bloks */ \ + MI_STAT_COUNTER(malloc_guarded_count) /* number of allocations with guard pages */ \ + \ + /* internal statistics */ \ + MI_STAT_COUNTER(arena_rollback_count) \ + MI_STAT_COUNTER(arena_purges) \ + MI_STAT_COUNTER(pages_extended) /* number of page extensions */ \ + MI_STAT_COUNTER(pages_retire) /* number of pages that are retired */ \ + MI_STAT_COUNTER(page_searches) /* searches for a fresh page */ \ + /* only on v1 and v2 */ \ + MI_STAT_COUNT(segments) \ + MI_STAT_COUNT(segments_abandoned) \ + MI_STAT_COUNT(segments_cache) \ + MI_STAT_COUNT(_segments_reserved) \ + /* only on v3 */ \ + MI_STAT_COUNTER(pages_reclaim_on_alloc) \ + MI_STAT_COUNTER(pages_reclaim_on_free) \ + MI_STAT_COUNTER(pages_reabandon_full) \ + MI_STAT_COUNTER(pages_unabandon_busy_wait) \ + + +// Size bins for chunks +typedef enum mi_chunkbin_e { + MI_CBIN_SMALL, // slice_count == 1 + MI_CBIN_OTHER, // slice_count: any other from the other bins, and 1 <= slice_count <= MI_BCHUNK_BITS + MI_CBIN_MEDIUM, // slice_count == 8 + MI_CBIN_LARGE, // slice_count == MI_SIZE_BITS (only used if MI_ENABLE_LARGE_PAGES is 1) + MI_CBIN_NONE, // no bin assigned yet (the chunk is completely free) + MI_CBIN_COUNT +} mi_chunkbin_t; + + +// Define the statistics structure +#define MI_BIN_HUGE (73U) // see types.h +#define MI_STAT_COUNT(stat) mi_stat_count_t stat; +#define MI_STAT_COUNTER(stat) mi_stat_counter_t stat; + +typedef struct mi_stats_s +{ + int version; + + MI_STAT_FIELDS() + + // future extension + mi_stat_count_t _stat_reserved[4]; + mi_stat_counter_t _stat_counter_reserved[4]; + + // size 
segregated statistics + mi_stat_count_t malloc_bins[MI_BIN_HUGE+1]; // allocation per size bin + mi_stat_count_t page_bins[MI_BIN_HUGE+1]; // pages allocated per size bin + mi_stat_count_t chunk_bins[MI_CBIN_COUNT]; // chunks per page sizes +} mi_stats_t; + +#undef MI_STAT_COUNT +#undef MI_STAT_COUNTER + + +// Exported definitions +#ifdef __cplusplus +extern "C" { +#endif + +mi_decl_export void mi_stats_get( size_t stats_size, mi_stats_t* stats ) mi_attr_noexcept; +mi_decl_export char* mi_stats_get_json( size_t buf_size, char* buf ) mi_attr_noexcept; // use mi_free to free the result if the input buf == NULL +mi_decl_export size_t mi_stats_get_bin_size(size_t bin) mi_attr_noexcept; + +#ifdef __cplusplus +} +#endif + +#endif // MIMALLOC_STATS_H diff --git a/depends/mimalloc/include/mimalloc-track.h b/depends/mimalloc/include/mimalloc-track.h deleted file mode 100644 index bb9df4fa35f7..000000000000 --- a/depends/mimalloc/include/mimalloc-track.h +++ /dev/null @@ -1,43 +0,0 @@ -/* ---------------------------------------------------------------------------- -Copyright (c) 2018-2021, Microsoft Research, Daan Leijen -This is free software; you can redistribute it and/or modify it under the -terms of the MIT license. A copy of the license can be found in the file -"LICENSE" at the root of this distribution. ------------------------------------------------------------------------------*/ -#pragma once -#ifndef MIMALLOC_TRACK_H -#define MIMALLOC_TRACK_H - -// ------------------------------------------------------ -// Track memory ranges with macros for tools like Valgrind -// or other memory checkers. 
-// ------------------------------------------------------ - -#if MI_VALGRIND - -#define MI_TRACK_ENABLED 1 - -#include -#include - -#define mi_track_malloc(p,size,zero) VALGRIND_MALLOCLIKE_BLOCK(p,size,MI_PADDING_SIZE /*red zone*/,zero) -#define mi_track_resize(p,oldsize,newsize) VALGRIND_RESIZEINPLACE_BLOCK(p,oldsize,newsize,MI_PADDING_SIZE /*red zone*/) -#define mi_track_free(p) VALGRIND_FREELIKE_BLOCK(p,MI_PADDING_SIZE /*red zone*/) -#define mi_track_mem_defined(p,size) VALGRIND_MAKE_MEM_DEFINED(p,size) -#define mi_track_mem_undefined(p,size) VALGRIND_MAKE_MEM_UNDEFINED(p,size) -#define mi_track_mem_noaccess(p,size) VALGRIND_MAKE_MEM_NOACCESS(p,size) - -#else - -#define MI_TRACK_ENABLED 0 - -#define mi_track_malloc(p,size,zero) -#define mi_track_resize(p,oldsize,newsize) -#define mi_track_free(p) -#define mi_track_mem_defined(p,size) -#define mi_track_mem_undefined(p,size) -#define mi_track_mem_noaccess(p,size) - -#endif - -#endif diff --git a/depends/mimalloc/include/mimalloc-types.h b/depends/mimalloc/include/mimalloc-types.h deleted file mode 100644 index 800d94136cc7..000000000000 --- a/depends/mimalloc/include/mimalloc-types.h +++ /dev/null @@ -1,602 +0,0 @@ -/* ---------------------------------------------------------------------------- -Copyright (c) 2018-2021, Microsoft Research, Daan Leijen -This is free software; you can redistribute it and/or modify it under the -terms of the MIT license. A copy of the license can be found in the file -"LICENSE" at the root of this distribution. ------------------------------------------------------------------------------*/ -#pragma once -#ifndef MIMALLOC_TYPES_H -#define MIMALLOC_TYPES_H - -#include // ptrdiff_t -#include // uintptr_t, uint16_t, etc -#include "mimalloc-atomic.h" // _Atomic - -#ifdef _MSC_VER -#pragma warning(disable:4214) // bitfield is not int -#endif - -// Minimal alignment necessary. On most platforms 16 bytes are needed -// due to SSE registers for example. 
This must be at least `sizeof(void*)` -#ifndef MI_MAX_ALIGN_SIZE -#define MI_MAX_ALIGN_SIZE 16 // sizeof(max_align_t) -#endif - -// ------------------------------------------------------ -// Variants -// ------------------------------------------------------ - -// Define NDEBUG in the release version to disable assertions. -// #define NDEBUG - -// Define MI_VALGRIND to enable valgrind support -// #define MI_VALGRIND 1 - -// Define MI_STAT as 1 to maintain statistics; set it to 2 to have detailed statistics (but costs some performance). -// #define MI_STAT 1 - -// Define MI_SECURE to enable security mitigations -// #define MI_SECURE 1 // guard page around metadata -// #define MI_SECURE 2 // guard page around each mimalloc page -// #define MI_SECURE 3 // encode free lists (detect corrupted free list (buffer overflow), and invalid pointer free) -// #define MI_SECURE 4 // checks for double free. (may be more expensive) - -#if !defined(MI_SECURE) -#define MI_SECURE 0 -#endif - -// Define MI_DEBUG for debug mode -// #define MI_DEBUG 1 // basic assertion checks and statistics, check double free, corrupted free list, and invalid pointer free. -// #define MI_DEBUG 2 // + internal assertion checks -// #define MI_DEBUG 3 // + extensive internal invariant checking (cmake -DMI_DEBUG_FULL=ON) -#if !defined(MI_DEBUG) -#if !defined(NDEBUG) || defined(_DEBUG) -#define MI_DEBUG 2 -#else -#define MI_DEBUG 0 -#endif -#endif - -// Reserve extra padding at the end of each block to be more resilient against heap block overflows. -// The padding can detect byte-precise buffer overflow on free. -#if !defined(MI_PADDING) && (MI_DEBUG>=1 || MI_VALGRIND) -#define MI_PADDING 1 -#endif - - -// Encoded free lists allow detection of corrupted free lists -// and can detect buffer overflows, modify after free, and double `free`s. 
-#if (MI_SECURE>=3 || MI_DEBUG>=1) -#define MI_ENCODE_FREELIST 1 -#endif - - -// ------------------------------------------------------ -// Platform specific values -// ------------------------------------------------------ - -// ------------------------------------------------------ -// Size of a pointer. -// We assume that `sizeof(void*)==sizeof(intptr_t)` -// and it holds for all platforms we know of. -// -// However, the C standard only requires that: -// p == (void*)((intptr_t)p)) -// but we also need: -// i == (intptr_t)((void*)i) -// or otherwise one might define an intptr_t type that is larger than a pointer... -// ------------------------------------------------------ - -#if INTPTR_MAX > INT64_MAX -# define MI_INTPTR_SHIFT (4) // assume 128-bit (as on arm CHERI for example) -#elif INTPTR_MAX == INT64_MAX -# define MI_INTPTR_SHIFT (3) -#elif INTPTR_MAX == INT32_MAX -# define MI_INTPTR_SHIFT (2) -#else -#error platform pointers must be 32, 64, or 128 bits -#endif - -#if SIZE_MAX == UINT64_MAX -# define MI_SIZE_SHIFT (3) -typedef int64_t mi_ssize_t; -#elif SIZE_MAX == UINT32_MAX -# define MI_SIZE_SHIFT (2) -typedef int32_t mi_ssize_t; -#else -#error platform objects must be 32 or 64 bits -#endif - -#if (SIZE_MAX/2) > LONG_MAX -# define MI_ZU(x) x##ULL -# define MI_ZI(x) x##LL -#else -# define MI_ZU(x) x##UL -# define MI_ZI(x) x##L -#endif - -#define MI_INTPTR_SIZE (1< 4 -#define MI_SEGMENT_SHIFT (10 + MI_SEGMENT_SLICE_SHIFT) // 64MiB -#else -#define MI_SEGMENT_SHIFT ( 7 + MI_SEGMENT_SLICE_SHIFT) // 4MiB on 32-bit -#endif - -#define MI_SMALL_PAGE_SHIFT (MI_SEGMENT_SLICE_SHIFT) // 64KiB -#define MI_MEDIUM_PAGE_SHIFT ( 3 + MI_SMALL_PAGE_SHIFT) // 512KiB - - -// Derived constants -#define MI_SEGMENT_SIZE (MI_ZU(1)<= 655360) -#error "mimalloc internal: define more bins" -#endif -#if (MI_ALIGNMENT_MAX > MI_SEGMENT_SIZE/2) -#error "mimalloc internal: the max aligned boundary is too large for the segment size" -#endif -#if (MI_ALIGNED_MAX % MI_SEGMENT_SLICE_SIZE != 
0) -#error "mimalloc internal: the max aligned boundary must be an integral multiple of the segment slice size" -#endif - -// Maximum slice offset (15) -#define MI_MAX_SLICE_OFFSET ((MI_ALIGNMENT_MAX / MI_SEGMENT_SLICE_SIZE) - 1) - -// Used as a special value to encode block sizes in 32 bits. -#define MI_HUGE_BLOCK_SIZE ((uint32_t)(2*MI_GiB)) - -// blocks up to this size are always allocated aligned -#define MI_MAX_ALIGN_GUARANTEE (8*MI_MAX_ALIGN_SIZE) - - - - -// ------------------------------------------------------ -// Mimalloc pages contain allocated blocks -// ------------------------------------------------------ - -// The free lists use encoded next fields -// (Only actually encodes when MI_ENCODED_FREELIST is defined.) -typedef uintptr_t mi_encoded_t; - -// thread id's -typedef size_t mi_threadid_t; - -// free lists contain blocks -typedef struct mi_block_s { - mi_encoded_t next; -} mi_block_t; - - -// The delayed flags are used for efficient multi-threaded free-ing -typedef enum mi_delayed_e { - MI_USE_DELAYED_FREE = 0, // push on the owning heap thread delayed list - MI_DELAYED_FREEING = 1, // temporary: another thread is accessing the owning heap - MI_NO_DELAYED_FREE = 2, // optimize: push on page local thread free queue if another block is already in the heap thread delayed free list - MI_NEVER_DELAYED_FREE = 3 // sticky, only resets on page reclaim -} mi_delayed_t; - - -// The `in_full` and `has_aligned` page flags are put in a union to efficiently -// test if both are false (`full_aligned == 0`) in the `mi_free` routine. -#if !MI_TSAN -typedef union mi_page_flags_s { - uint8_t full_aligned; - struct { - uint8_t in_full : 1; - uint8_t has_aligned : 1; - } x; -} mi_page_flags_t; -#else -// under thread sanitizer, use a byte for each flag to suppress warning, issue #130 -typedef union mi_page_flags_s { - uint16_t full_aligned; - struct { - uint8_t in_full; - uint8_t has_aligned; - } x; -} mi_page_flags_t; -#endif - -// Thread free list. 
-// We use the bottom 2 bits of the pointer for mi_delayed_t flags -typedef uintptr_t mi_thread_free_t; - -// A page contains blocks of one specific size (`block_size`). -// Each page has three list of free blocks: -// `free` for blocks that can be allocated, -// `local_free` for freed blocks that are not yet available to `mi_malloc` -// `thread_free` for freed blocks by other threads -// The `local_free` and `thread_free` lists are migrated to the `free` list -// when it is exhausted. The separate `local_free` list is necessary to -// implement a monotonic heartbeat. The `thread_free` list is needed for -// avoiding atomic operations in the common case. -// -// -// `used - |thread_free|` == actual blocks that are in use (alive) -// `used - |thread_free| + |free| + |local_free| == capacity` -// -// We don't count `freed` (as |free|) but use `used` to reduce -// the number of memory accesses in the `mi_page_all_free` function(s). -// -// Notes: -// - Access is optimized for `mi_free` and `mi_page_alloc` (in `alloc.c`) -// - Using `uint16_t` does not seem to slow things down -// - The size is 8 words on 64-bit which helps the page index calculations -// (and 10 words on 32-bit, and encoded free lists add 2 words. Sizes 10 -// and 12 are still good for address calculation) -// - To limit the structure size, the `xblock_size` is 32-bits only; for -// blocks > MI_HUGE_BLOCK_SIZE the size is determined from the segment page size -// - `thread_free` uses the bottom bits as a delayed-free flags to optimize -// concurrent frees where only the first concurrent free adds to the owning -// heap `thread_delayed_free` list (see `alloc.c:mi_free_block_mt`). -// The invariant is that no-delayed-free is only set if there is -// at least one block that will be added, or as already been added, to -// the owning heap `thread_delayed_free` list. This guarantees that pages -// will be freed correctly even if only other threads free blocks. 
-typedef struct mi_page_s { - // "owned" by the segment - uint32_t slice_count; // slices in this page (0 if not a page) - uint32_t slice_offset; // distance from the actual page data slice (0 if a page) - uint8_t is_reset : 1; // `true` if the page memory was reset - uint8_t is_committed : 1; // `true` if the page virtual memory is committed - uint8_t is_zero_init : 1; // `true` if the page was zero initialized - - // layout like this to optimize access in `mi_malloc` and `mi_free` - uint16_t capacity; // number of blocks committed, must be the first field, see `segment.c:page_clear` - uint16_t reserved; // number of blocks reserved in memory - mi_page_flags_t flags; // `in_full` and `has_aligned` flags (8 bits) - uint8_t is_zero : 1; // `true` if the blocks in the free list are zero initialized - uint8_t retire_expire : 7; // expiration count for retired blocks - - mi_block_t* free; // list of available free blocks (`malloc` allocates from this list) - #ifdef MI_ENCODE_FREELIST - uintptr_t keys[2]; // two random keys to encode the free lists (see `_mi_block_next`) - #endif - uint32_t used; // number of blocks in use (including blocks in `local_free` and `thread_free`) - uint32_t xblock_size; // size available in each block (always `>0`) - - mi_block_t* local_free; // list of deferred free blocks by this thread (migrates to `free`) - _Atomic(mi_thread_free_t) xthread_free; // list of deferred free blocks freed by other threads - _Atomic(uintptr_t) xheap; - - struct mi_page_s* next; // next page owned by this thread with the same `block_size` - struct mi_page_s* prev; // previous page owned by this thread with the same `block_size` - - // 64-bit 9 words, 32-bit 12 words, (+2 for secure) - #if MI_INTPTR_SIZE==8 - uintptr_t padding[1]; - #endif -} mi_page_t; - - - -typedef enum mi_page_kind_e { - MI_PAGE_SMALL, // small blocks go into 64KiB pages inside a segment - MI_PAGE_MEDIUM, // medium blocks go into medium pages inside a segment - MI_PAGE_LARGE, // larger 
blocks go into a page of just one block - MI_PAGE_HUGE, // huge blocks (> 16 MiB) are put into a single page in a single segment. -} mi_page_kind_t; - -typedef enum mi_segment_kind_e { - MI_SEGMENT_NORMAL, // MI_SEGMENT_SIZE size with pages inside. - MI_SEGMENT_HUGE, // > MI_LARGE_SIZE_MAX segment with just one huge page inside. -} mi_segment_kind_t; - -// ------------------------------------------------------ -// A segment holds a commit mask where a bit is set if -// the corresponding MI_COMMIT_SIZE area is committed. -// The MI_COMMIT_SIZE must be a multiple of the slice -// size. If it is equal we have the most fine grained -// decommit (but setting it higher can be more efficient). -// The MI_MINIMAL_COMMIT_SIZE is the minimal amount that will -// be committed in one go which can be set higher than -// MI_COMMIT_SIZE for efficiency (while the decommit mask -// is still tracked in fine-grained MI_COMMIT_SIZE chunks) -// ------------------------------------------------------ - -#define MI_MINIMAL_COMMIT_SIZE (2*MI_MiB) -#define MI_COMMIT_SIZE (MI_SEGMENT_SLICE_SIZE) // 64KiB -#define MI_COMMIT_MASK_BITS (MI_SEGMENT_SIZE / MI_COMMIT_SIZE) -#define MI_COMMIT_MASK_FIELD_BITS MI_SIZE_BITS -#define MI_COMMIT_MASK_FIELD_COUNT (MI_COMMIT_MASK_BITS / MI_COMMIT_MASK_FIELD_BITS) - -#if (MI_COMMIT_MASK_BITS != (MI_COMMIT_MASK_FIELD_COUNT * MI_COMMIT_MASK_FIELD_BITS)) -#error "the segment size must be exactly divisible by the (commit size * size_t bits)" -#endif - -typedef struct mi_commit_mask_s { - size_t mask[MI_COMMIT_MASK_FIELD_COUNT]; -} mi_commit_mask_t; - -typedef mi_page_t mi_slice_t; -typedef int64_t mi_msecs_t; - - -// Segments are large allocated memory blocks (8mb on 64 bit) from -// the OS. Inside segments we allocated fixed size _pages_ that -// contain blocks. -typedef struct mi_segment_s { - size_t memid; // memory id for arena allocation - bool mem_is_pinned; // `true` if we cannot decommit/reset/protect in this memory (i.e. 
when allocated using large OS pages) - bool mem_is_large; // in large/huge os pages? - bool mem_is_committed; // `true` if the whole segment is eagerly committed - - bool allow_decommit; - mi_msecs_t decommit_expire; - mi_commit_mask_t decommit_mask; - mi_commit_mask_t commit_mask; - - _Atomic(struct mi_segment_s*) abandoned_next; - - // from here is zero initialized - struct mi_segment_s* next; // the list of freed segments in the cache (must be first field, see `segment.c:mi_segment_init`) - - size_t abandoned; // abandoned pages (i.e. the original owning thread stopped) (`abandoned <= used`) - size_t abandoned_visits; // count how often this segment is visited in the abandoned list (to force reclaim it it is too long) - size_t used; // count of pages in use - uintptr_t cookie; // verify addresses in debug mode: `mi_ptr_cookie(segment) == segment->cookie` - - size_t segment_slices; // for huge segments this may be different from `MI_SLICES_PER_SEGMENT` - size_t segment_info_slices; // initial slices we are using segment info and possible guard pages. - - // layout like this to optimize access in `mi_free` - mi_segment_kind_t kind; - _Atomic(mi_threadid_t) thread_id; // unique id of the thread owning this segment - size_t slice_entries; // entries in the `slices` array, at most `MI_SLICES_PER_SEGMENT` - mi_slice_t slices[MI_SLICES_PER_SEGMENT]; -} mi_segment_t; - - -// ------------------------------------------------------ -// Heaps -// Provide first-class heaps to allocate from. -// A heap just owns a set of pages for allocation and -// can only be allocate/reallocate from the thread that created it. -// Freeing blocks can be done from any thread though. -// Per thread, the segments are shared among its heaps. -// Per thread, there is always a default heap that is -// used for allocation; it is initialized to statically -// point to an empty heap to avoid initialization checks -// in the fast path. 
-// ------------------------------------------------------ - -// Thread local data -typedef struct mi_tld_s mi_tld_t; - -// Pages of a certain block size are held in a queue. -typedef struct mi_page_queue_s { - mi_page_t* first; - mi_page_t* last; - size_t block_size; -} mi_page_queue_t; - -#define MI_BIN_FULL (MI_BIN_HUGE+1) - -// Random context -typedef struct mi_random_cxt_s { - uint32_t input[16]; - uint32_t output[16]; - int output_available; -} mi_random_ctx_t; - - -// In debug mode there is a padding structure at the end of the blocks to check for buffer overflows -#if (MI_PADDING) -typedef struct mi_padding_s { - uint32_t canary; // encoded block value to check validity of the padding (in case of overflow) - uint32_t delta; // padding bytes before the block. (mi_usable_size(p) - delta == exact allocated bytes) -} mi_padding_t; -#define MI_PADDING_SIZE (sizeof(mi_padding_t)) -#define MI_PADDING_WSIZE ((MI_PADDING_SIZE + MI_INTPTR_SIZE - 1) / MI_INTPTR_SIZE) -#else -#define MI_PADDING_SIZE 0 -#define MI_PADDING_WSIZE 0 -#endif - -#define MI_PAGES_DIRECT (MI_SMALL_WSIZE_MAX + MI_PADDING_WSIZE + 1) - - -// A heap owns a set of pages. -struct mi_heap_s { - mi_tld_t* tld; - mi_page_t* pages_free_direct[MI_PAGES_DIRECT]; // optimize: array where every entry points a page with possibly free blocks in the corresponding queue for that size. - mi_page_queue_t pages[MI_BIN_FULL + 1]; // queue of pages for each size class (or "bin") - _Atomic(mi_block_t*) thread_delayed_free; - mi_threadid_t thread_id; // thread this heap belongs too - mi_arena_id_t arena_id; // arena id if the heap belongs to a specific arena (or 0) - uintptr_t cookie; // random cookie to verify pointers (see `_mi_ptr_cookie`) - uintptr_t keys[2]; // two random keys used to encode the `thread_delayed_free` list - mi_random_ctx_t random; // random number context used for secure allocation - size_t page_count; // total number of pages in the `pages` queues. 
- size_t page_retired_min; // smallest retired index (retired pages are fully free, but still in the page queues) - size_t page_retired_max; // largest retired index into the `pages` array. - mi_heap_t* next; // list of heaps per thread - bool no_reclaim; // `true` if this heap should not reclaim abandoned pages -}; - - - -// ------------------------------------------------------ -// Debug -// ------------------------------------------------------ - -#if !defined(MI_DEBUG_UNINIT) -#define MI_DEBUG_UNINIT (0xD0) -#endif -#if !defined(MI_DEBUG_FREED) -#define MI_DEBUG_FREED (0xDF) -#endif -#if !defined(MI_DEBUG_PADDING) -#define MI_DEBUG_PADDING (0xDE) -#endif - -#if (MI_DEBUG) -// use our own assertion to print without memory allocation -void _mi_assert_fail(const char* assertion, const char* fname, unsigned int line, const char* func ); -#define mi_assert(expr) ((expr) ? (void)0 : _mi_assert_fail(#expr,__FILE__,__LINE__,__func__)) -#else -#define mi_assert(x) -#endif - -#if (MI_DEBUG>1) -#define mi_assert_internal mi_assert -#else -#define mi_assert_internal(x) -#endif - -#if (MI_DEBUG>2) -#define mi_assert_expensive mi_assert -#else -#define mi_assert_expensive(x) -#endif - -// ------------------------------------------------------ -// Statistics -// ------------------------------------------------------ - -#ifndef MI_STAT -#if (MI_DEBUG>0) -#define MI_STAT 2 -#else -#define MI_STAT 0 -#endif -#endif - -typedef struct mi_stat_count_s { - int64_t allocated; - int64_t freed; - int64_t peak; - int64_t current; -} mi_stat_count_t; - -typedef struct mi_stat_counter_s { - int64_t total; - int64_t count; -} mi_stat_counter_t; - -typedef struct mi_stats_s { - mi_stat_count_t segments; - mi_stat_count_t pages; - mi_stat_count_t reserved; - mi_stat_count_t committed; - mi_stat_count_t reset; - mi_stat_count_t page_committed; - mi_stat_count_t segments_abandoned; - mi_stat_count_t pages_abandoned; - mi_stat_count_t threads; - mi_stat_count_t normal; - mi_stat_count_t huge; - 
mi_stat_count_t large; - mi_stat_count_t malloc; - mi_stat_count_t segments_cache; - mi_stat_counter_t pages_extended; - mi_stat_counter_t mmap_calls; - mi_stat_counter_t commit_calls; - mi_stat_counter_t page_no_retire; - mi_stat_counter_t searches; - mi_stat_counter_t normal_count; - mi_stat_counter_t huge_count; - mi_stat_counter_t large_count; -#if MI_STAT>1 - mi_stat_count_t normal_bins[MI_BIN_HUGE+1]; -#endif -} mi_stats_t; - - -void _mi_stat_increase(mi_stat_count_t* stat, size_t amount); -void _mi_stat_decrease(mi_stat_count_t* stat, size_t amount); -void _mi_stat_counter_increase(mi_stat_counter_t* stat, size_t amount); - -#if (MI_STAT) -#define mi_stat_increase(stat,amount) _mi_stat_increase( &(stat), amount) -#define mi_stat_decrease(stat,amount) _mi_stat_decrease( &(stat), amount) -#define mi_stat_counter_increase(stat,amount) _mi_stat_counter_increase( &(stat), amount) -#else -#define mi_stat_increase(stat,amount) (void)0 -#define mi_stat_decrease(stat,amount) (void)0 -#define mi_stat_counter_increase(stat,amount) (void)0 -#endif - -#define mi_heap_stat_counter_increase(heap,stat,amount) mi_stat_counter_increase( (heap)->tld->stats.stat, amount) -#define mi_heap_stat_increase(heap,stat,amount) mi_stat_increase( (heap)->tld->stats.stat, amount) -#define mi_heap_stat_decrease(heap,stat,amount) mi_stat_decrease( (heap)->tld->stats.stat, amount) - -// ------------------------------------------------------ -// Thread Local data -// ------------------------------------------------------ - -// A "span" is is an available range of slices. The span queues keep -// track of slice spans of at most the given `slice_count` (but more than the previous size class). 
-typedef struct mi_span_queue_s { - mi_slice_t* first; - mi_slice_t* last; - size_t slice_count; -} mi_span_queue_t; - -#define MI_SEGMENT_BIN_MAX (35) // 35 == mi_segment_bin(MI_SLICES_PER_SEGMENT) - -// OS thread local data -typedef struct mi_os_tld_s { - size_t region_idx; // start point for next allocation - mi_stats_t* stats; // points to tld stats -} mi_os_tld_t; - - -// Segments thread local data -typedef struct mi_segments_tld_s { - mi_span_queue_t spans[MI_SEGMENT_BIN_MAX+1]; // free slice spans inside segments - size_t count; // current number of segments; - size_t peak_count; // peak number of segments - size_t current_size; // current size of all segments - size_t peak_size; // peak size of all segments - mi_stats_t* stats; // points to tld stats - mi_os_tld_t* os; // points to os stats -} mi_segments_tld_t; - -// Thread local data -struct mi_tld_s { - unsigned long long heartbeat; // monotonic heartbeat count - bool recurse; // true if deferred was called; used to prevent infinite recursion. - mi_heap_t* heap_backing; // backing heap of this thread (cannot be deleted) - mi_heap_t* heaps; // list of heaps in this thread (so we can abandon all when the thread terminates) - mi_segments_tld_t segments; // segment tld - mi_os_tld_t os; // os tld - mi_stats_t stats; // statistics -}; - -#endif diff --git a/depends/mimalloc/include/mimalloc.h b/depends/mimalloc/include/mimalloc.h index 32eab19ea5a5..aa4222f69f5b 100644 --- a/depends/mimalloc/include/mimalloc.h +++ b/depends/mimalloc/include/mimalloc.h @@ -1,5 +1,5 @@ /* ---------------------------------------------------------------------------- -Copyright (c) 2018-2022, Microsoft Research, Daan Leijen +Copyright (c) 2018-2023, Microsoft Research, Daan Leijen This is free software; you can redistribute it and/or modify it under the terms of the MIT license. A copy of the license can be found in the file "LICENSE" at the root of this distribution. @@ -8,7 +8,7 @@ terms of the MIT license. 
A copy of the license can be found in the file #ifndef MIMALLOC_H #define MIMALLOC_H -#define MI_MALLOC_VERSION 207 // major + 2 digits minor +#define MI_MALLOC_VERSION 315 // major + 2 digits minor // ------------------------------------------------------ // Compiler specific attributes @@ -28,6 +28,8 @@ terms of the MIT license. A copy of the license can be found in the file #define mi_decl_nodiscard [[nodiscard]] #elif (defined(__GNUC__) && (__GNUC__ >= 4)) || defined(__clang__) // includes clang, icc, and clang-cl #define mi_decl_nodiscard __attribute__((warn_unused_result)) +#elif defined(_HAS_NODISCARD) + #define mi_decl_nodiscard _NODISCARD #elif (_MSC_VER >= 1700) #define mi_decl_nodiscard _Check_return_ #else @@ -95,7 +97,6 @@ terms of the MIT license. A copy of the license can be found in the file #include // size_t #include // bool -#include // INTPTR_MAX #ifdef __cplusplus extern "C" { @@ -152,26 +153,26 @@ mi_decl_export void mi_stats_reset(void) mi_attr_noexcept; mi_decl_export void mi_stats_merge(void) mi_attr_noexcept; mi_decl_export void mi_stats_print(void* out) mi_attr_noexcept; // backward compatibility: `out` is ignored and should be NULL mi_decl_export void mi_stats_print_out(mi_output_fun* out, void* arg) mi_attr_noexcept; +mi_decl_export void mi_thread_stats_print_out(mi_output_fun* out, void* arg) mi_attr_noexcept; +mi_decl_export void mi_options_print(void) mi_attr_noexcept; + +mi_decl_export void mi_process_info(size_t* elapsed_msecs, size_t* user_msecs, size_t* system_msecs, + size_t* current_rss, size_t* peak_rss, + size_t* current_commit, size_t* peak_commit, size_t* page_faults) mi_attr_noexcept; + +// Generally do not use the following as these are usually called automatically mi_decl_export void mi_process_init(void) mi_attr_noexcept; +mi_decl_export void mi_cdecl mi_process_done(void) mi_attr_noexcept; mi_decl_export void mi_thread_init(void) mi_attr_noexcept; mi_decl_export void mi_thread_done(void) mi_attr_noexcept; 
-mi_decl_export void mi_thread_stats_print_out(mi_output_fun* out, void* arg) mi_attr_noexcept; -mi_decl_export void mi_process_info(size_t* elapsed_msecs, size_t* user_msecs, size_t* system_msecs, - size_t* current_rss, size_t* peak_rss, - size_t* current_commit, size_t* peak_commit, size_t* page_faults) mi_attr_noexcept; // ------------------------------------------------------------------------------------- // Aligned allocation // Note that `alignment` always follows `size` for consistency with unaligned // allocation, but unfortunately this differs from `posix_memalign` and `aligned_alloc`. // ------------------------------------------------------------------------------------- -#if (INTPTR_MAX > INT32_MAX) -#define MI_ALIGNMENT_MAX (16*1024*1024UL) // maximum supported alignment is 16MiB -#else -#define MI_ALIGNMENT_MAX (1024*1024UL) // maximum supported alignment for 32-bit systems is 1MiB -#endif mi_decl_nodiscard mi_decl_export mi_decl_restrict void* mi_malloc_aligned(size_t size, size_t alignment) mi_attr_noexcept mi_attr_malloc mi_attr_alloc_size(1) mi_attr_alloc_align(2); mi_decl_nodiscard mi_decl_export mi_decl_restrict void* mi_malloc_aligned_at(size_t size, size_t alignment, size_t offset) mi_attr_noexcept mi_attr_malloc mi_attr_alloc_size(1); @@ -262,37 +263,89 @@ typedef struct mi_heap_area_s { size_t used; // number of allocated blocks size_t block_size; // size in bytes of each block size_t full_block_size; // size in bytes of a full block including padding and metadata. 
+ int heap_tag; // heap tag associated with this area } mi_heap_area_t; typedef bool (mi_cdecl mi_block_visit_fun)(const mi_heap_t* heap, const mi_heap_area_t* area, void* block, size_t block_size, void* arg); -mi_decl_export bool mi_heap_visit_blocks(const mi_heap_t* heap, bool visit_all_blocks, mi_block_visit_fun* visitor, void* arg); +mi_decl_export bool mi_heap_visit_blocks(const mi_heap_t* heap, bool visit_blocks, mi_block_visit_fun* visitor, void* arg); -// Experimental +// Advanced mi_decl_nodiscard mi_decl_export bool mi_is_in_heap_region(const void* p) mi_attr_noexcept; mi_decl_nodiscard mi_decl_export bool mi_is_redirected(void) mi_attr_noexcept; -mi_decl_export int mi_reserve_huge_os_pages_interleave(size_t pages, size_t numa_nodes, size_t timeout_msecs) mi_attr_noexcept; -mi_decl_export int mi_reserve_huge_os_pages_at(size_t pages, int numa_node, size_t timeout_msecs) mi_attr_noexcept; +mi_decl_export int mi_reserve_huge_os_pages_interleave(size_t pages, size_t numa_nodes, size_t timeout_msecs) mi_attr_noexcept; +mi_decl_export int mi_reserve_huge_os_pages_at(size_t pages, int numa_node, size_t timeout_msecs) mi_attr_noexcept; -mi_decl_export int mi_reserve_os_memory(size_t size, bool commit, bool allow_large) mi_attr_noexcept; -mi_decl_export bool mi_manage_os_memory(void* start, size_t size, bool is_committed, bool is_large, bool is_zero, int numa_node) mi_attr_noexcept; +mi_decl_export int mi_reserve_os_memory(size_t size, bool commit, bool allow_large) mi_attr_noexcept; +mi_decl_export bool mi_manage_os_memory(void* start, size_t size, bool is_committed, bool is_pinned /* cannot decommit/reset? 
*/, bool is_zero, int numa_node) mi_attr_noexcept; -mi_decl_export void mi_debug_show_arenas(void) mi_attr_noexcept; +mi_decl_export void mi_debug_show_arenas(void) mi_attr_noexcept; +mi_decl_export void mi_arenas_print(void) mi_attr_noexcept; +mi_decl_export size_t mi_arena_min_alignment(void); -// Experimental: heaps associated with specific memory arena's -typedef int mi_arena_id_t; -mi_decl_export void* mi_arena_area(mi_arena_id_t arena_id, size_t* size); -mi_decl_export int mi_reserve_huge_os_pages_at_ex(size_t pages, int numa_node, size_t timeout_msecs, bool exclusive, mi_arena_id_t* arena_id) mi_attr_noexcept; -mi_decl_export int mi_reserve_os_memory_ex(size_t size, bool commit, bool allow_large, bool exclusive, mi_arena_id_t* arena_id) mi_attr_noexcept; -mi_decl_export bool mi_manage_os_memory_ex(void* start, size_t size, bool is_committed, bool is_large, bool is_zero, int numa_node, bool exclusive, mi_arena_id_t* arena_id) mi_attr_noexcept; +// Advanced: heaps associated with specific memory arena's +typedef void* mi_arena_id_t; +mi_decl_export void* mi_arena_area(mi_arena_id_t arena_id, size_t* size); +mi_decl_export int mi_reserve_huge_os_pages_at_ex(size_t pages, int numa_node, size_t timeout_msecs, bool exclusive, mi_arena_id_t* arena_id) mi_attr_noexcept; +mi_decl_export int mi_reserve_os_memory_ex(size_t size, bool commit, bool allow_large, bool exclusive, mi_arena_id_t* arena_id) mi_attr_noexcept; +mi_decl_export bool mi_manage_os_memory_ex(void* start, size_t size, bool is_committed, bool is_pinned, bool is_zero, int numa_node, bool exclusive, mi_arena_id_t* arena_id) mi_attr_noexcept; -#if MI_MALLOC_VERSION >= 200 +#if MI_MALLOC_VERSION >= 182 +// Create a heap that only allocates in the specified arena mi_decl_nodiscard mi_decl_export mi_heap_t* mi_heap_new_in_arena(mi_arena_id_t arena_id); #endif + +// Advanced: allow sub-processes whose memory areas stay separated (and no reclamation between them) +// Used for example for separate interpreters 
in one process. +typedef void* mi_subproc_id_t; +mi_decl_export mi_subproc_id_t mi_subproc_main(void); +mi_decl_export mi_subproc_id_t mi_subproc_new(void); +mi_decl_export void mi_subproc_delete(mi_subproc_id_t subproc); +mi_decl_export void mi_subproc_add_current_thread(mi_subproc_id_t subproc); // this should be called right after a thread is created (and no allocation has taken place yet) + +// Advanced: visit abandoned heap areas (that are not owned by a specific heap) +mi_decl_export bool mi_abandoned_visit_blocks(mi_subproc_id_t subproc_id, int heap_tag, bool visit_blocks, mi_block_visit_fun* visitor, void* arg); + +// Experimental: set numa-affinity of a heap +mi_decl_export void mi_heap_set_numa_affinity(mi_heap_t* heap, int numa_node); + +// Experimental: objects followed by a guard page. +// Setting the sample rate on a specific heap can be used to test parts of the program more +// specifically (in combination with `mi_heap_set_default`). +// A sample rate of 0 disables guarded objects, while 1 uses a guard page for every object. +// A seed of 0 uses a random start point. Only objects within the size bound are eligible for guard pages. +mi_decl_export void mi_heap_guarded_set_sample_rate(mi_heap_t* heap, size_t sample_rate, size_t seed); +mi_decl_export void mi_heap_guarded_set_size_bound(mi_heap_t* heap, size_t min, size_t max); + +// Experimental: communicate that the thread is part of a threadpool +mi_decl_export void mi_thread_set_in_threadpool(void) mi_attr_noexcept; + +// Experimental: create a new heap with a specified heap tag. Set `allow_destroy` to false to allow the thread +// to reclaim abandoned memory (with a compatible heap_tag and arena_id) but in that case `mi_heap_destroy` will +// fall back to `mi_heap_delete`. 
+mi_decl_nodiscard mi_decl_export mi_heap_t* mi_heap_new_ex(int heap_tag, bool allow_destroy, mi_arena_id_t arena_id); + // deprecated -mi_decl_export int mi_reserve_huge_os_pages(size_t pages, double max_secs, size_t* pages_reserved) mi_attr_noexcept; +mi_decl_export int mi_reserve_huge_os_pages(size_t pages, double max_secs, size_t* pages_reserved) mi_attr_noexcept; +mi_decl_export void mi_collect_reduce(size_t target_thread_owned) mi_attr_noexcept; + + + +// experimental +typedef bool (mi_cdecl mi_commit_fun_t)(bool commit, void* start, size_t size, bool* is_zero, void* user_arg); +mi_decl_export bool mi_manage_memory(void* start, size_t size, bool is_committed, bool is_pinned, bool is_zero, int numa_node, bool exclusive, + mi_commit_fun_t* commit_fun, void* commit_fun_arg, mi_arena_id_t* arena_id) mi_attr_noexcept; + +mi_decl_export bool mi_arena_unload(mi_arena_id_t arena_id, void** base, size_t* accessed_size, size_t* size); +mi_decl_export bool mi_arena_reload(void* start, size_t size, mi_commit_fun_t* commit_fun, void* commit_fun_arg, mi_arena_id_t* arena_id); +mi_decl_export bool mi_heap_reload(mi_heap_t* heap, mi_arena_id_t arena); +mi_decl_export void mi_heap_unload(mi_heap_t* heap); + + +// Is a pointer contained in the given arena area? 
+mi_decl_export bool mi_arena_contains(mi_arena_id_t arena_id, const void* p); // ------------------------------------------------------ @@ -320,34 +373,58 @@ mi_decl_export int mi_reserve_huge_os_pages(size_t pages, double max_secs, size typedef enum mi_option_e { // stable options - mi_option_show_errors, - mi_option_show_stats, - mi_option_verbose, - // some of the following options are experimental - // (deprecated options are kept for binary backward compatibility with v1.x versions) - mi_option_eager_commit, - mi_option_deprecated_eager_region_commit, - mi_option_deprecated_reset_decommits, - mi_option_large_os_pages, // use large (2MiB) OS pages, implies eager commit - mi_option_reserve_huge_os_pages, // reserve N huge OS pages (1GiB) at startup - mi_option_reserve_huge_os_pages_at, // reserve huge OS pages at a specific NUMA node - mi_option_reserve_os_memory, // reserve specified amount of OS memory at startup + mi_option_show_errors, // print error messages + mi_option_show_stats, // print statistics on termination + mi_option_verbose, // print verbose messages + // advanced options + mi_option_eager_commit, // eager commit segments? (after `eager_commit_delay` segments) (=1) + mi_option_arena_eager_commit, // eager commit arenas? Use 2 to enable just on overcommit systems (=2) + mi_option_purge_decommits, // should a memory purge decommit? (=1). Set to 0 to use memory reset on a purge (instead of decommit) + mi_option_allow_large_os_pages, // allow large (2 or 4 MiB) OS pages, implies eager commit. If false, also disables THP for the process. 
+ mi_option_reserve_huge_os_pages, // reserve N huge OS pages (1GiB pages) at startup + mi_option_reserve_huge_os_pages_at, // reserve huge OS pages at a specific NUMA node + mi_option_reserve_os_memory, // reserve specified amount of OS memory in an arena at startup (internally, this value is in KiB; use `mi_option_get_size`) mi_option_deprecated_segment_cache, - mi_option_page_reset, - mi_option_abandoned_page_decommit, + mi_option_deprecated_page_reset, + mi_option_abandoned_page_purge, // immediately purge delayed purges on thread termination mi_option_deprecated_segment_reset, - mi_option_eager_commit_delay, - mi_option_decommit_delay, - mi_option_use_numa_nodes, // 0 = use available numa nodes, otherwise use at most N nodes. - mi_option_limit_os_alloc, // 1 = do not use OS memory for allocation (but only reserved arenas) - mi_option_os_tag, - mi_option_max_errors, - mi_option_max_warnings, - mi_option_max_segment_reclaim, - mi_option_allow_decommit, - mi_option_segment_decommit_delay, - mi_option_decommit_extend_delay, - _mi_option_last + mi_option_eager_commit_delay, // the first N segments per thread are not eagerly committed (but per page in the segment on demand) + mi_option_purge_delay, // memory purging is delayed by N milli seconds; use 0 for immediate purging or -1 for no purging at all. (=10) + mi_option_use_numa_nodes, // 0 = use all available numa nodes, otherwise use at most N nodes. + mi_option_disallow_os_alloc, // 1 = do not use OS memory for allocation (but only programmatically reserved arenas) + mi_option_os_tag, // tag used for OS logging (macOS only for now) (=100) + mi_option_max_errors, // issue at most N error messages + mi_option_max_warnings, // issue at most N warning messages + mi_option_deprecated_max_segment_reclaim, // max. 
percentage of the abandoned segments can be reclaimed per try (=10%) + mi_option_destroy_on_exit, // if set, release all memory on exit; sometimes used for dynamic unloading but can be unsafe + mi_option_arena_reserve, // initial memory size for arena reservation (= 1 GiB on 64-bit) (internally, this value is in KiB; use `mi_option_get_size`) + mi_option_arena_purge_mult, // multiplier for `purge_delay` for the purging delay for arenas (=10) + mi_option_deprecated_purge_extend_delay, + mi_option_disallow_arena_alloc, // 1 = do not use arena's for allocation (except if using specific arena id's) + mi_option_retry_on_oom, // retry on out-of-memory for N milli seconds (=400), set to 0 to disable retries. (only on windows) + mi_option_visit_abandoned, // allow visiting heap blocks from abandoned threads (=0) + mi_option_guarded_min, // only used when building with MI_GUARDED: minimal rounded object size for guarded objects (=0) + mi_option_guarded_max, // only used when building with MI_GUARDED: maximal rounded object size for guarded objects (=0) + mi_option_guarded_precise, // disregard minimal alignment requirement to always place guarded blocks exactly in front of a guard page (=0) + mi_option_guarded_sample_rate, // 1 out of N allocations in the min/max range will be guarded (=1000) + mi_option_guarded_sample_seed, // can be set to allow for a (more) deterministic re-execution when a guard page is triggered (=0) + mi_option_generic_collect, // collect heaps every N (=10000) generic allocation calls + mi_option_page_reclaim_on_free, // reclaim abandoned pages on a free (=0). 
-1 disallows always, 0 allows if the page originated from the current heap, 1 allows always + mi_option_page_full_retain, // retain N full (small) pages per size class (=2) + mi_option_page_max_candidates, // max candidate pages to consider for allocation (=4) + mi_option_max_vabits, // max user space virtual address bits to consider (=48) + mi_option_pagemap_commit, // commit the full pagemap (to always catch invalid pointer uses) (=0) + mi_option_page_commit_on_demand, // commit page memory on-demand + mi_option_page_max_reclaim, // don't reclaim pages of the same originating heap if we already own N pages (in that size class) (=-1 (unlimited)) + mi_option_page_cross_thread_max_reclaim, // don't reclaim pages across threads if we already own N pages (in that size class) (=16) + _mi_option_last, + // legacy option names + mi_option_large_os_pages = mi_option_allow_large_os_pages, + mi_option_eager_region_commit = mi_option_arena_eager_commit, + mi_option_reset_decommits = mi_option_purge_decommits, + mi_option_reset_delay = mi_option_purge_delay, + mi_option_abandoned_page_reset = mi_option_abandoned_page_purge, + mi_option_limit_os_alloc = mi_option_disallow_os_alloc } mi_option_t; @@ -357,8 +434,9 @@ mi_decl_export void mi_option_disable(mi_option_t option); mi_decl_export void mi_option_set_enabled(mi_option_t option, bool enable); mi_decl_export void mi_option_set_enabled_default(mi_option_t option, bool enable); -mi_decl_nodiscard mi_decl_export long mi_option_get(mi_option_t option); -mi_decl_nodiscard mi_decl_export long mi_option_get_clamp(mi_option_t option, long min, long max); +mi_decl_nodiscard mi_decl_export long mi_option_get(mi_option_t option); +mi_decl_nodiscard mi_decl_export long mi_option_get_clamp(mi_option_t option, long min, long max); +mi_decl_nodiscard mi_decl_export size_t mi_option_get_size(mi_option_t option); mi_decl_export void mi_option_set(mi_option_t option, long value); mi_decl_export void mi_option_set_default(mi_option_t option, 
long value); @@ -405,6 +483,9 @@ mi_decl_nodiscard mi_decl_export mi_decl_restrict void* mi_new_n(size_t count, s mi_decl_nodiscard mi_decl_export void* mi_new_realloc(void* p, size_t newsize) mi_attr_alloc_size(2); mi_decl_nodiscard mi_decl_export void* mi_new_reallocn(void* p, size_t newcount, size_t size) mi_attr_alloc_size2(2, 3); +mi_decl_nodiscard mi_decl_export mi_decl_restrict void* mi_heap_alloc_new(mi_heap_t* heap, size_t size) mi_attr_malloc mi_attr_alloc_size(2); +mi_decl_nodiscard mi_decl_export mi_decl_restrict void* mi_heap_alloc_new_n(mi_heap_t* heap, size_t count, size_t size) mi_attr_malloc mi_attr_alloc_size2(2, 3); + #ifdef __cplusplus } #endif @@ -422,7 +503,7 @@ mi_decl_nodiscard mi_decl_export void* mi_new_reallocn(void* p, size_t newcount, #include // std::forward #endif -template struct mi_stl_allocator { +template struct _mi_stl_allocator_common { typedef T value_type; typedef std::size_t size_type; typedef std::ptrdiff_t difference_type; @@ -430,6 +511,27 @@ template struct mi_stl_allocator { typedef value_type const& const_reference; typedef value_type* pointer; typedef value_type const* const_pointer; + + #if ((__cplusplus >= 201103L) || (_MSC_VER > 1900)) // C++11 + using propagate_on_container_copy_assignment = std::true_type; + using propagate_on_container_move_assignment = std::true_type; + using propagate_on_container_swap = std::true_type; + template void construct(U* p, Args&& ...args) { ::new(p) U(std::forward(args)...); } + template void destroy(U* p) mi_attr_noexcept { p->~U(); } + #else + void construct(pointer p, value_type const& val) { ::new(p) value_type(val); } + void destroy(pointer p) { p->~value_type(); } + #endif + + size_type max_size() const mi_attr_noexcept { return (PTRDIFF_MAX/sizeof(value_type)); } + pointer address(reference x) const { return &x; } + const_pointer address(const_reference x) const { return &x; } +}; + +template struct mi_stl_allocator : public _mi_stl_allocator_common { + using typename 
_mi_stl_allocator_common::size_type; + using typename _mi_stl_allocator_common::value_type; + using typename _mi_stl_allocator_common::pointer; template struct rebind { typedef mi_stl_allocator other; }; mi_stl_allocator() mi_attr_noexcept = default; @@ -446,24 +548,91 @@ template struct mi_stl_allocator { #endif #if ((__cplusplus >= 201103L) || (_MSC_VER > 1900)) // C++11 - using propagate_on_container_copy_assignment = std::true_type; - using propagate_on_container_move_assignment = std::true_type; - using propagate_on_container_swap = std::true_type; - using is_always_equal = std::true_type; - template void construct(U* p, Args&& ...args) { ::new(p) U(std::forward(args)...); } - template void destroy(U* p) mi_attr_noexcept { p->~U(); } - #else - void construct(pointer p, value_type const& val) { ::new(p) value_type(val); } - void destroy(pointer p) { p->~value_type(); } + using is_always_equal = std::true_type; #endif - - size_type max_size() const mi_attr_noexcept { return (PTRDIFF_MAX/sizeof(value_type)); } - pointer address(reference x) const { return &x; } - const_pointer address(const_reference x) const { return &x; } }; template bool operator==(const mi_stl_allocator& , const mi_stl_allocator& ) mi_attr_noexcept { return true; } template bool operator!=(const mi_stl_allocator& , const mi_stl_allocator& ) mi_attr_noexcept { return false; } + + +#if (__cplusplus >= 201103L) || (_MSC_VER >= 1900) // C++11 +#define MI_HAS_HEAP_STL_ALLOCATOR 1 + +#include // std::shared_ptr + +// Common base class for STL allocators in a specific heap +template struct _mi_heap_stl_allocator_common : public _mi_stl_allocator_common { + using typename _mi_stl_allocator_common::size_type; + using typename _mi_stl_allocator_common::value_type; + using typename _mi_stl_allocator_common::pointer; + + _mi_heap_stl_allocator_common(mi_heap_t* hp) : heap(hp, [](mi_heap_t*) {}) {} /* will not delete nor destroy the passed in heap */ + + #if (__cplusplus >= 201703L) // C++17 + 
mi_decl_nodiscard T* allocate(size_type count) { return static_cast(mi_heap_alloc_new_n(this->heap.get(), count, sizeof(T))); } + mi_decl_nodiscard T* allocate(size_type count, const void*) { return allocate(count); } + #else + mi_decl_nodiscard pointer allocate(size_type count, const void* = 0) { return static_cast(mi_heap_alloc_new_n(this->heap.get(), count, sizeof(value_type))); } + #endif + + #if ((__cplusplus >= 201103L) || (_MSC_VER > 1900)) // C++11 + using is_always_equal = std::false_type; + #endif + + void collect(bool force) { mi_heap_collect(this->heap.get(), force); } + template bool is_equal(const _mi_heap_stl_allocator_common& x) const { return (this->heap == x.heap); } + +protected: + std::shared_ptr heap; + template friend struct _mi_heap_stl_allocator_common; + + _mi_heap_stl_allocator_common() { + mi_heap_t* hp = mi_heap_new(); + this->heap.reset(hp, (_mi_destroy ? &heap_destroy : &heap_delete)); /* calls heap_delete/destroy when the refcount drops to zero */ + } + _mi_heap_stl_allocator_common(const _mi_heap_stl_allocator_common& x) mi_attr_noexcept : heap(x.heap) { } + template _mi_heap_stl_allocator_common(const _mi_heap_stl_allocator_common& x) mi_attr_noexcept : heap(x.heap) { } + +private: + static void heap_delete(mi_heap_t* hp) { if (hp != NULL) { mi_heap_delete(hp); } } + static void heap_destroy(mi_heap_t* hp) { if (hp != NULL) { mi_heap_destroy(hp); } } +}; + +// STL allocator allocation in a specific heap +template struct mi_heap_stl_allocator : public _mi_heap_stl_allocator_common { + using typename _mi_heap_stl_allocator_common::size_type; + mi_heap_stl_allocator() : _mi_heap_stl_allocator_common() { } // creates fresh heap that is deleted when the destructor is called + mi_heap_stl_allocator(mi_heap_t* hp) : _mi_heap_stl_allocator_common(hp) { } // no delete nor destroy on the passed in heap + template mi_heap_stl_allocator(const mi_heap_stl_allocator& x) mi_attr_noexcept : _mi_heap_stl_allocator_common(x) { } + + 
mi_heap_stl_allocator select_on_container_copy_construction() const { return *this; } + void deallocate(T* p, size_type) { mi_free(p); } + template struct rebind { typedef mi_heap_stl_allocator other; }; +}; + +template bool operator==(const mi_heap_stl_allocator& x, const mi_heap_stl_allocator& y) mi_attr_noexcept { return (x.is_equal(y)); } +template bool operator!=(const mi_heap_stl_allocator& x, const mi_heap_stl_allocator& y) mi_attr_noexcept { return (!x.is_equal(y)); } + + +// STL allocator allocation in a specific heap, where `free` does nothing and +// the heap is destroyed in one go on destruction -- use with care! +template struct mi_heap_destroy_stl_allocator : public _mi_heap_stl_allocator_common { + using typename _mi_heap_stl_allocator_common::size_type; + mi_heap_destroy_stl_allocator() : _mi_heap_stl_allocator_common() { } // creates fresh heap that is destroyed when the destructor is called + mi_heap_destroy_stl_allocator(mi_heap_t* hp) : _mi_heap_stl_allocator_common(hp) { } // no delete nor destroy on the passed in heap + template mi_heap_destroy_stl_allocator(const mi_heap_destroy_stl_allocator& x) mi_attr_noexcept : _mi_heap_stl_allocator_common(x) { } + + mi_heap_destroy_stl_allocator select_on_container_copy_construction() const { return *this; } + void deallocate(T*, size_type) { /* do nothing as we destroy the heap on destruct. 
*/ } + template struct rebind { typedef mi_heap_destroy_stl_allocator other; }; +}; + +template bool operator==(const mi_heap_destroy_stl_allocator& x, const mi_heap_destroy_stl_allocator& y) mi_attr_noexcept { return (x.is_equal(y)); } +template bool operator!=(const mi_heap_destroy_stl_allocator& x, const mi_heap_destroy_stl_allocator& y) mi_attr_noexcept { return (!x.is_equal(y)); } + +#endif // C++11 + #endif // __cplusplus #endif diff --git a/depends/mimalloc/include/mimalloc-atomic.h b/depends/mimalloc/include/mimalloc/atomic.h similarity index 59% rename from depends/mimalloc/include/mimalloc-atomic.h rename to depends/mimalloc/include/mimalloc/atomic.h index 7ad5da585133..592afb16e97f 100644 --- a/depends/mimalloc/include/mimalloc-atomic.h +++ b/depends/mimalloc/include/mimalloc/atomic.h @@ -1,45 +1,64 @@ /* ---------------------------------------------------------------------------- -Copyright (c) 2018-2021 Microsoft Research, Daan Leijen +Copyright (c) 2018-2024 Microsoft Research, Daan Leijen This is free software; you can redistribute it and/or modify it under the terms of the MIT license. A copy of the license can be found in the file "LICENSE" at the root of this distribution. -----------------------------------------------------------------------------*/ #pragma once -#ifndef MIMALLOC_ATOMIC_H -#define MIMALLOC_ATOMIC_H +#ifndef MI_ATOMIC_H +#define MI_ATOMIC_H + +// include windows.h or pthreads.h +#if defined(_WIN32) +#ifndef WIN32_LEAN_AND_MEAN +#define WIN32_LEAN_AND_MEAN +#endif +#include +#elif !defined(__wasi__) && (!defined(__EMSCRIPTEN__) || defined(__EMSCRIPTEN_PTHREADS__)) +#define MI_USE_PTHREADS +#include +#endif // -------------------------------------------------------------------------------------------- // Atomics // We need to be portable between C, C++, and MSVC. -// We base the primitives on the C/C++ atomics and create a mimimal wrapper for MSVC in C compilation mode. 
-// This is why we try to use only `uintptr_t` and `*` as atomic types. -// To gain better insight in the range of used atomics, we use explicitly named memory order operations +// We base the primitives on the C/C++ atomics and create a minimal wrapper for MSVC in C compilation mode. +// This is why we try to use only `uintptr_t` and `*` as atomic types. +// To gain better insight in the range of used atomics, we use explicitly named memory order operations // instead of passing the memory order as a parameter. // ----------------------------------------------------------------------------------------------- #if defined(__cplusplus) // Use C++ atomics #include -#define _Atomic(tp) std::atomic -#define mi_atomic(name) std::atomic_##name -#define mi_memory_order(name) std::memory_order_##name -#if !defined(ATOMIC_VAR_INIT) || (__cplusplus >= 202002L) // c++20, see issue #571 - #define MI_ATOMIC_VAR_INIT(x) x +#define _Atomic(tp) std::atomic +#define mi_atomic(name) std::atomic_##name +#define mi_memory_order(name) std::memory_order_##name +#if (__cplusplus >= 202002L) // c++20, see issue #571 + #define MI_ATOMIC_VAR_INIT(x) x +#elif !defined(ATOMIC_VAR_INIT) + #define MI_ATOMIC_VAR_INIT(x) x #else - #define MI_ATOMIC_VAR_INIT(x) ATOMIC_VAR_INIT(x) + #define MI_ATOMIC_VAR_INIT(x) ATOMIC_VAR_INIT(x) #endif #elif defined(_MSC_VER) // Use MSVC C wrapper for C11 atomics -#define _Atomic(tp) tp -#define MI_ATOMIC_VAR_INIT(x) x -#define mi_atomic(name) mi_atomic_##name -#define mi_memory_order(name) mi_memory_order_##name +#define _Atomic(tp) tp +#define MI_ATOMIC_VAR_INIT(x) x +#define mi_atomic(name) mi_atomic_##name +#define mi_memory_order(name) mi_memory_order_##name #else // Use C11 atomics #include -#define mi_atomic(name) atomic_##name -#define mi_memory_order(name) memory_order_##name -#define MI_ATOMIC_VAR_INIT(x) ATOMIC_VAR_INIT(x) +#define mi_atomic(name) atomic_##name +#define mi_memory_order(name) memory_order_##name +#if (__STDC_VERSION__ >= 201710L) // c17, 
see issue #735 + #define MI_ATOMIC_VAR_INIT(x) x +#elif !defined(ATOMIC_VAR_INIT) + #define MI_ATOMIC_VAR_INIT(x) x +#else + #define MI_ATOMIC_VAR_INIT(x) ATOMIC_VAR_INIT(x) +#endif #endif // Various defines for all used memory orders in mimalloc @@ -53,18 +72,24 @@ terms of the MIT license. A copy of the license can be found in the file #define mi_atomic_load_relaxed(p) mi_atomic(load_explicit)(p,mi_memory_order(relaxed)) #define mi_atomic_store_release(p,x) mi_atomic(store_explicit)(p,x,mi_memory_order(release)) #define mi_atomic_store_relaxed(p,x) mi_atomic(store_explicit)(p,x,mi_memory_order(relaxed)) +#define mi_atomic_exchange_relaxed(p,x) mi_atomic(exchange_explicit)(p,x,mi_memory_order(relaxed)) #define mi_atomic_exchange_release(p,x) mi_atomic(exchange_explicit)(p,x,mi_memory_order(release)) #define mi_atomic_exchange_acq_rel(p,x) mi_atomic(exchange_explicit)(p,x,mi_memory_order(acq_rel)) + +#define mi_atomic_cas_weak_relaxed(p,exp,des) mi_atomic_cas_weak(p,exp,des,mi_memory_order(relaxed),mi_memory_order(relaxed)) #define mi_atomic_cas_weak_release(p,exp,des) mi_atomic_cas_weak(p,exp,des,mi_memory_order(release),mi_memory_order(relaxed)) #define mi_atomic_cas_weak_acq_rel(p,exp,des) mi_atomic_cas_weak(p,exp,des,mi_memory_order(acq_rel),mi_memory_order(acquire)) +#define mi_atomic_cas_strong_relaxed(p,exp,des) mi_atomic_cas_strong(p,exp,des,mi_memory_order(relaxed),mi_memory_order(relaxed)) #define mi_atomic_cas_strong_release(p,exp,des) mi_atomic_cas_strong(p,exp,des,mi_memory_order(release),mi_memory_order(relaxed)) #define mi_atomic_cas_strong_acq_rel(p,exp,des) mi_atomic_cas_strong(p,exp,des,mi_memory_order(acq_rel),mi_memory_order(acquire)) #define mi_atomic_add_relaxed(p,x) mi_atomic(fetch_add_explicit)(p,x,mi_memory_order(relaxed)) -#define mi_atomic_sub_relaxed(p,x) mi_atomic(fetch_sub_explicit)(p,x,mi_memory_order(relaxed)) #define mi_atomic_add_acq_rel(p,x) mi_atomic(fetch_add_explicit)(p,x,mi_memory_order(acq_rel)) +#define 
mi_atomic_sub_relaxed(p,x) mi_atomic(fetch_sub_explicit)(p,x,mi_memory_order(relaxed)) #define mi_atomic_sub_acq_rel(p,x) mi_atomic(fetch_sub_explicit)(p,x,mi_memory_order(acq_rel)) +#define mi_atomic_and_relaxed(p,x) mi_atomic(fetch_and_explicit)(p,x,mi_memory_order(relaxed)) #define mi_atomic_and_acq_rel(p,x) mi_atomic(fetch_and_explicit)(p,x,mi_memory_order(acq_rel)) +#define mi_atomic_or_relaxed(p,x) mi_atomic(fetch_or_explicit)(p,x,mi_memory_order(relaxed)) #define mi_atomic_or_acq_rel(p,x) mi_atomic(fetch_or_explicit)(p,x,mi_memory_order(acq_rel)) #define mi_atomic_increment_relaxed(p) mi_atomic_add_relaxed(p,(uintptr_t)1) @@ -91,6 +116,8 @@ static inline intptr_t mi_atomic_subi(_Atomic(intptr_t)*p, intptr_t sub); #define mi_atomic_cas_ptr_weak_release(tp,p,exp,des) mi_atomic_cas_weak_release(p,exp,(tp*)des) #define mi_atomic_cas_ptr_weak_acq_rel(tp,p,exp,des) mi_atomic_cas_weak_acq_rel(p,exp,(tp*)des) #define mi_atomic_cas_ptr_strong_release(tp,p,exp,des) mi_atomic_cas_strong_release(p,exp,(tp*)des) +#define mi_atomic_cas_ptr_strong_acq_rel(tp,p,exp,des) mi_atomic_cas_strong_acq_rel(p,exp,(tp*)des) +#define mi_atomic_exchange_ptr_relaxed(tp,p,x) mi_atomic_exchange_relaxed(p,(tp*)x) #define mi_atomic_exchange_ptr_release(tp,p,x) mi_atomic_exchange_release(p,(tp*)x) #define mi_atomic_exchange_ptr_acq_rel(tp,p,x) mi_atomic_exchange_acq_rel(p,(tp*)x) #else @@ -99,6 +126,8 @@ static inline intptr_t mi_atomic_subi(_Atomic(intptr_t)*p, intptr_t sub); #define mi_atomic_cas_ptr_weak_release(tp,p,exp,des) mi_atomic_cas_weak_release(p,exp,des) #define mi_atomic_cas_ptr_weak_acq_rel(tp,p,exp,des) mi_atomic_cas_weak_acq_rel(p,exp,des) #define mi_atomic_cas_ptr_strong_release(tp,p,exp,des) mi_atomic_cas_strong_release(p,exp,des) +#define mi_atomic_cas_ptr_strong_acq_rel(tp,p,exp,des) mi_atomic_cas_strong_acq_rel(p,exp,des) +#define mi_atomic_exchange_ptr_relaxed(tp,p,x) mi_atomic_exchange_relaxed(p,x) #define mi_atomic_exchange_ptr_release(tp,p,x) 
mi_atomic_exchange_release(p,x) #define mi_atomic_exchange_ptr_acq_rel(tp,p,x) mi_atomic_exchange_acq_rel(p,x) #endif @@ -107,24 +136,30 @@ static inline intptr_t mi_atomic_subi(_Atomic(intptr_t)*p, intptr_t sub); static inline int64_t mi_atomic_addi64_relaxed(volatile int64_t* p, int64_t add) { return mi_atomic(fetch_add_explicit)((_Atomic(int64_t)*)p, add, mi_memory_order(relaxed)); } +static inline void mi_atomic_void_addi64_relaxed(volatile int64_t* p, const volatile int64_t* padd) { + const int64_t add = mi_atomic_load_relaxed((_Atomic(int64_t)*)padd); + if (add != 0) { + mi_atomic(fetch_add_explicit)((_Atomic(int64_t)*)p, add, mi_memory_order(relaxed)); + } +} static inline void mi_atomic_maxi64_relaxed(volatile int64_t* p, int64_t x) { int64_t current = mi_atomic_load_relaxed((_Atomic(int64_t)*)p); while (current < x && !mi_atomic_cas_weak_release((_Atomic(int64_t)*)p, ¤t, x)) { /* nothing */ }; } // Used by timers -#define mi_atomic_loadi64_acquire(p) mi_atomic(load_explicit)(p,mi_memory_order(acquire)) -#define mi_atomic_loadi64_relaxed(p) mi_atomic(load_explicit)(p,mi_memory_order(relaxed)) -#define mi_atomic_storei64_release(p,x) mi_atomic(store_explicit)(p,x,mi_memory_order(release)) -#define mi_atomic_storei64_relaxed(p,x) mi_atomic(store_explicit)(p,x,mi_memory_order(relaxed)) +#define mi_atomic_loadi64_acquire(p) mi_atomic(load_explicit)(p,mi_memory_order(acquire)) +#define mi_atomic_loadi64_relaxed(p) mi_atomic(load_explicit)(p,mi_memory_order(relaxed)) +#define mi_atomic_storei64_release(p,x) mi_atomic(store_explicit)(p,x,mi_memory_order(release)) +#define mi_atomic_storei64_relaxed(p,x) mi_atomic(store_explicit)(p,x,mi_memory_order(relaxed)) +#define mi_atomic_casi64_strong_acq_rel(p,e,d) mi_atomic_cas_strong_acq_rel(p,e,d) +#define mi_atomic_addi64_acq_rel(p,i) mi_atomic_add_acq_rel(p,i) #elif defined(_MSC_VER) -// MSVC C compilation wrapper that uses Interlocked operations to model C11 atomics. 
-#define WIN32_LEAN_AND_MEAN -#include +// Legacy MSVC plain C compilation wrapper that uses Interlocked operations to model C11 atomics. #include #ifdef _WIN64 typedef LONG64 msc_intptr_t; @@ -189,7 +224,7 @@ static inline uintptr_t mi_atomic_load_explicit(_Atomic(uintptr_t) const* p, mi_ #else uintptr_t x = *p; if (mo > mi_memory_order_relaxed) { - while (!mi_atomic_compare_exchange_weak_explicit(p, &x, x, mo, mi_memory_order_relaxed)) { /* nothing */ }; + while (!mi_atomic_compare_exchange_weak_explicit((_Atomic(uintptr_t)*)p, &x, x, mo, mi_memory_order_relaxed)) { /* nothing */ }; } return x; #endif @@ -238,6 +273,14 @@ static inline int64_t mi_atomic_addi64_relaxed(volatile _Atomic(int64_t)*p, int6 return current; #endif } + +static inline void mi_atomic_void_addi64_relaxed(volatile int64_t* p, const volatile int64_t* padd) { + const int64_t add = *padd; + if (add != 0) { + mi_atomic_addi64_relaxed((volatile _Atomic(int64_t)*)p, add); + } +} + static inline void mi_atomic_maxi64_relaxed(volatile _Atomic(int64_t)*p, int64_t x) { int64_t current; do { @@ -245,6 +288,21 @@ static inline void mi_atomic_maxi64_relaxed(volatile _Atomic(int64_t)*p, int64_t } while (current < x && _InterlockedCompareExchange64(p, x, current) != current); } +static inline void mi_atomic_addi64_acq_rel(volatile _Atomic(int64_t*)p, int64_t i) { + mi_atomic_addi64_relaxed(p, i); +} + +static inline bool mi_atomic_casi64_strong_acq_rel(volatile _Atomic(int64_t*)p, int64_t* exp, int64_t des) { + int64_t read = _InterlockedCompareExchange64(p, des, *exp); + if (read == *exp) { + return true; + } + else { + *exp = read; + return false; + } +} + // The pointer macros cast to `uintptr_t`. 
#define mi_atomic_load_ptr_acquire(tp,p) (tp*)mi_atomic_load_acquire((_Atomic(uintptr_t)*)(p)) #define mi_atomic_load_ptr_relaxed(tp,p) (tp*)mi_atomic_load_relaxed((_Atomic(uintptr_t)*)(p)) @@ -253,6 +311,8 @@ static inline void mi_atomic_maxi64_relaxed(volatile _Atomic(int64_t)*p, int64_t #define mi_atomic_cas_ptr_weak_release(tp,p,exp,des) mi_atomic_cas_weak_release((_Atomic(uintptr_t)*)(p),(uintptr_t*)exp,(uintptr_t)des) #define mi_atomic_cas_ptr_weak_acq_rel(tp,p,exp,des) mi_atomic_cas_weak_acq_rel((_Atomic(uintptr_t)*)(p),(uintptr_t*)exp,(uintptr_t)des) #define mi_atomic_cas_ptr_strong_release(tp,p,exp,des) mi_atomic_cas_strong_release((_Atomic(uintptr_t)*)(p),(uintptr_t*)exp,(uintptr_t)des) +#define mi_atomic_cas_ptr_strong_acq_rel(tp,p,exp,des) mi_atomic_cas_strong_acq_rel((_Atomic(uintptr_t)*)(p),(uintptr_t*)exp,(uintptr_t)des) +#define mi_atomic_exchange_ptr_relaxed(tp,p,x) (tp*)mi_atomic_exchange_relaxed((_Atomic(uintptr_t)*)(p),(uintptr_t)x) #define mi_atomic_exchange_ptr_release(tp,p,x) (tp*)mi_atomic_exchange_release((_Atomic(uintptr_t)*)(p),(uintptr_t)x) #define mi_atomic_exchange_ptr_acq_rel(tp,p,x) (tp*)mi_atomic_exchange_acq_rel((_Atomic(uintptr_t)*)(p),(uintptr_t)x) @@ -275,15 +335,41 @@ static inline intptr_t mi_atomic_subi(_Atomic(intptr_t)*p, intptr_t sub) { return (intptr_t)mi_atomic_addi(p, -sub); } -// Yield + +// ---------------------------------------------------------------------- +// Once and Guard +// ---------------------------------------------------------------------- + +typedef _Atomic(uintptr_t) mi_atomic_once_t; + +// Returns true only on the first invocation +static inline bool mi_atomic_once( mi_atomic_once_t* once ) { + if (mi_atomic_load_relaxed(once) != 0) return false; // quick test + uintptr_t expected = 0; + return mi_atomic_cas_strong_acq_rel(once, &expected, (uintptr_t)1); // try to set to 1 +} + +typedef _Atomic(uintptr_t) mi_atomic_guard_t; + +// Allows only one thread to execute at a time +#define 
mi_atomic_guard(guard) \ + uintptr_t _mi_guard_expected = 0; \ + for(bool _mi_guard_once = true; \ + _mi_guard_once && mi_atomic_cas_strong_acq_rel(guard,&_mi_guard_expected,(uintptr_t)1); \ + (mi_atomic_store_release(guard,(uintptr_t)0), _mi_guard_once = false) ) + + + +// ---------------------------------------------------------------------- +// Yield +// ---------------------------------------------------------------------- + #if defined(__cplusplus) #include static inline void mi_atomic_yield(void) { std::this_thread::yield(); } #elif defined(_WIN32) -#define WIN32_LEAN_AND_MEAN -#include static inline void mi_atomic_yield(void) { YieldProcessor(); } @@ -293,8 +379,9 @@ static inline void mi_atomic_yield(void) { _mm_pause(); } #elif (defined(__GNUC__) || defined(__clang__)) && \ - (defined(__x86_64__) || defined(__i386__) || defined(__arm__) || defined(__armel__) || defined(__ARMEL__) || \ - defined(__aarch64__) || defined(__powerpc__) || defined(__ppc__) || defined(__PPC__)) + (defined(__x86_64__) || defined(__i386__) || \ + defined(__aarch64__) || defined(__arm__) || \ + defined(__powerpc__) || defined(__ppc__) || defined(__PPC__) || defined(__POWERPC__)) #if defined(__x86_64__) || defined(__i386__) static inline void mi_atomic_yield(void) { __asm__ volatile ("pause" ::: "memory"); @@ -303,19 +390,27 @@ static inline void mi_atomic_yield(void) { static inline void mi_atomic_yield(void) { __asm__ volatile("wfe"); } -#elif (defined(__arm__) && __ARM_ARCH__ >= 7) +#elif defined(__arm__) +#if __ARM_ARCH >= 7 static inline void mi_atomic_yield(void) { __asm__ volatile("yield" ::: "memory"); } -#elif defined(__powerpc__) || defined(__ppc__) || defined(__PPC__) +#else static inline void mi_atomic_yield(void) { - __asm__ __volatile__ ("or 27,27,27" ::: "memory"); + __asm__ volatile ("nop" ::: "memory"); } -#elif defined(__armel__) || defined(__ARMEL__) +#endif +#elif defined(__powerpc__) || defined(__ppc__) || defined(__PPC__) || defined(__POWERPC__) +#ifdef 
__APPLE__ static inline void mi_atomic_yield(void) { - __asm__ volatile ("nop" ::: "memory"); + __asm__ volatile ("or r27,r27,r27" ::: "memory"); +} +#else +static inline void mi_atomic_yield(void) { + __asm__ __volatile__ ("or 27,27,27" ::: "memory"); } #endif +#endif #elif defined(__sun) // Fallback for other archs #include @@ -335,4 +430,133 @@ static inline void mi_atomic_yield(void) { #endif -#endif // __MIMALLOC_ATOMIC_H +// ---------------------------------------------------------------------- +// Locks +// These should be light-weight in-process only locks. +// Only used for reserving arena's and to maintain the abandoned list. +// ---------------------------------------------------------------------- +#if _MSC_VER +#pragma warning(disable:26110) // unlock with holding lock +#endif + +#define mi_lock(lock) for(bool _go = (mi_lock_acquire(lock),true); _go; (mi_lock_release(lock), _go=false) ) + +#if defined(_WIN32) + +#if 1 +#define mi_lock_t SRWLOCK // slim reader-writer lock + +static inline bool mi_lock_try_acquire(mi_lock_t* lock) { + return TryAcquireSRWLockExclusive(lock); +} +static inline void mi_lock_acquire(mi_lock_t* lock) { + AcquireSRWLockExclusive(lock); +} +static inline void mi_lock_release(mi_lock_t* lock) { + ReleaseSRWLockExclusive(lock); +} +static inline void mi_lock_init(mi_lock_t* lock) { + InitializeSRWLock(lock); +} +static inline void mi_lock_done(mi_lock_t* lock) { + (void)(lock); +} + +#else +#define mi_lock_t CRITICAL_SECTION + +static inline bool mi_lock_try_acquire(mi_lock_t* lock) { + return TryEnterCriticalSection(lock); +} +static inline void mi_lock_acquire(mi_lock_t* lock) { + EnterCriticalSection(lock); +} +static inline void mi_lock_release(mi_lock_t* lock) { + LeaveCriticalSection(lock); +} +static inline void mi_lock_init(mi_lock_t* lock) { + InitializeCriticalSection(lock); +} +static inline void mi_lock_done(mi_lock_t* lock) { + DeleteCriticalSection(lock); +} + +#endif + +#elif defined(MI_USE_PTHREADS) + +void 
_mi_error_message(int err, const char* fmt, ...); + +#define mi_lock_t pthread_mutex_t + +static inline bool mi_lock_try_acquire(mi_lock_t* lock) { + return (pthread_mutex_trylock(lock) == 0); +} +static inline void mi_lock_acquire(mi_lock_t* lock) { + const int err = pthread_mutex_lock(lock); + if (err != 0) { + _mi_error_message(err, "internal error: lock cannot be acquired\n"); + } +} +static inline void mi_lock_release(mi_lock_t* lock) { + pthread_mutex_unlock(lock); +} +static inline void mi_lock_init(mi_lock_t* lock) { + pthread_mutex_init(lock, NULL); +} +static inline void mi_lock_done(mi_lock_t* lock) { + pthread_mutex_destroy(lock); +} + +#elif defined(__cplusplus) + +#include +#define mi_lock_t std::mutex + +static inline bool mi_lock_try_acquire(mi_lock_t* lock) { + return lock->try_lock(); +} +static inline void mi_lock_acquire(mi_lock_t* lock) { + lock->lock(); +} +static inline void mi_lock_release(mi_lock_t* lock) { + lock->unlock(); +} +static inline void mi_lock_init(mi_lock_t* lock) { + (void)(lock); +} +static inline void mi_lock_done(mi_lock_t* lock) { + (void)(lock); +} + +#else + +// fall back to poor man's locks. +// this should only be the case in a single-threaded environment (like __wasi__) + +#define mi_lock_t _Atomic(uintptr_t) + +static inline bool mi_lock_try_acquire(mi_lock_t* lock) { + uintptr_t expected = 0; + return mi_atomic_cas_strong_acq_rel(lock, &expected, (uintptr_t)1); +} +static inline void mi_lock_acquire(mi_lock_t* lock) { + for (int i = 0; i < 1000; i++) { // for at most 1000 tries? 
+ if (mi_lock_try_acquire(lock)) return; + mi_atomic_yield(); + } +} +static inline void mi_lock_release(mi_lock_t* lock) { + mi_atomic_store_release(lock, (uintptr_t)0); +} +static inline void mi_lock_init(mi_lock_t* lock) { + mi_lock_release(lock); +} +static inline void mi_lock_done(mi_lock_t* lock) { + (void)(lock); +} + +#endif + + +#endif // MI_ATOMIC_H diff --git a/depends/mimalloc/include/mimalloc/bits.h b/depends/mimalloc/include/mimalloc/bits.h new file mode 100644 index 000000000000..1d4063bb5994 --- /dev/null +++ b/depends/mimalloc/include/mimalloc/bits.h @@ -0,0 +1,377 @@ +/* ---------------------------------------------------------------------------- +Copyright (c) 2019-2024 Microsoft Research, Daan Leijen +This is free software; you can redistribute it and/or modify it under the +terms of the MIT license. A copy of the license can be found in the file +"LICENSE" at the root of this distribution. +-----------------------------------------------------------------------------*/ + +/* ---------------------------------------------------------------------------- + Bit operation, and platform dependent definition (MI_INTPTR_SIZE etc) +---------------------------------------------------------------------------- */ + +#pragma once +#ifndef MI_BITS_H +#define MI_BITS_H + + +// ------------------------------------------------------ +// Size of a pointer. +// We assume that `sizeof(void*)==sizeof(intptr_t)` +// and it holds for all platforms we know of. +// +// However, the C standard only requires that: +// p == (void*)((intptr_t)p)) +// but we also need: +// i == (intptr_t)((void*)i) +// or otherwise one might define an intptr_t type that is larger than a pointer... 
+// ------------------------------------------------------ + +#if INTPTR_MAX > INT64_MAX +# define MI_INTPTR_SHIFT (4) // assume 128-bit (as on arm CHERI for example) +#elif INTPTR_MAX == INT64_MAX +# define MI_INTPTR_SHIFT (3) +#elif INTPTR_MAX == INT32_MAX +# define MI_INTPTR_SHIFT (2) +#else +#error platform pointers must be 32, 64, or 128 bits +#endif + +#if (INTPTR_MAX) > LONG_MAX +# define MI_PU(x) x##ULL +#else +# define MI_PU(x) x##UL +#endif + +#if SIZE_MAX == UINT64_MAX +# define MI_SIZE_SHIFT (3) +typedef int64_t mi_ssize_t; +#elif SIZE_MAX == UINT32_MAX +# define MI_SIZE_SHIFT (2) +typedef int32_t mi_ssize_t; +#else +#error platform objects must be 32 or 64 bits in size +#endif + +#if (SIZE_MAX/2) > LONG_MAX +# define MI_ZU(x) x##ULL +#else +# define MI_ZU(x) x##UL +#endif + +#define MI_INTPTR_SIZE (1< +#elif MI_ARCH_ARM64 && MI_OPT_SIMD +#include +#endif +#if defined(_MSC_VER) && (MI_ARCH_X64 || MI_ARCH_X86 || MI_ARCH_ARM64 || MI_ARCH_ARM32) +#include +#endif + +#if MI_ARCH_X64 && defined(__AVX2__) && !defined(__BMI2__) // msvc +#define __BMI2__ 1 +#endif +#if MI_ARCH_X64 && (defined(__AVX2__) || defined(__BMI2__)) && !defined(__BMI1__) // msvc +#define __BMI1__ 1 +#endif + +// Define big endian if needed +// #define MI_BIG_ENDIAN 1 + +// maximum virtual address bits in a user-space pointer +#if MI_DEFAULT_VIRTUAL_ADDRESS_BITS > 0 +#define MI_MAX_VABITS MI_DEFAULT_VIRTUAL_ADDRESS_BITS +#elif MI_ARCH_X64 +#define MI_MAX_VABITS (47) +#elif MI_INTPTR_SIZE > 4 +#define MI_MAX_VABITS (48) +#else +#define MI_MAX_VABITS (32) +#endif + + +// use a flat page-map (or a 2-level one) +#ifndef MI_PAGE_MAP_FLAT +#if MI_MAX_VABITS <= 40 && !MI_SECURE && !defined(__APPLE__) +#define MI_PAGE_MAP_FLAT 1 +#else +#define MI_PAGE_MAP_FLAT 0 +#endif +#endif + +#if MI_PAGE_MAP_FLAT && MI_SECURE +#error should not use MI_PAGE_MAP_FLAT with a secure build +#endif + + +/* -------------------------------------------------------------------------------- + Builtin's 
+-------------------------------------------------------------------------------- */ + +#if defined(__GNUC__) || defined(__clang__) +#define mi_unlikely(x) (__builtin_expect(!!(x),false)) +#define mi_likely(x) (__builtin_expect(!!(x),true)) +#elif (defined(__cplusplus) && (__cplusplus >= 202002L)) || (defined(_MSVC_LANG) && _MSVC_LANG >= 202002L) +#define mi_unlikely(x) (x) [[unlikely]] +#define mi_likely(x) (x) [[likely]] +#else +#define mi_unlikely(x) (x) +#define mi_likely(x) (x) +#endif + + +#ifndef __has_builtin +#define __has_builtin(x) 0 +#endif + +#define mi_builtin(name) __builtin_##name +#define mi_has_builtin(name) __has_builtin(__builtin_##name) + +#if (LONG_MAX == INT32_MAX) +#define mi_builtin32(name) mi_builtin(name##l) +#define mi_has_builtin32(name) mi_has_builtin(name##l) +#else +#define mi_builtin32(name) mi_builtin(name) +#define mi_has_builtin32(name) mi_has_builtin(name) +#endif +#if (LONG_MAX == INT64_MAX) +#define mi_builtin64(name) mi_builtin(name##l) +#define mi_has_builtin64(name) mi_has_builtin(name##l) +#else +#define mi_builtin64(name) mi_builtin(name##ll) +#define mi_has_builtin64(name) mi_has_builtin(name##ll) +#endif + +#if (MI_SIZE_BITS == 32) +#define mi_builtinz(name) mi_builtin32(name) +#define mi_has_builtinz(name) mi_has_builtin32(name) +#define mi_msc_builtinz(name) name +#elif (MI_SIZE_BITS == 64) +#define mi_builtinz(name) mi_builtin64(name) +#define mi_has_builtinz(name) mi_has_builtin64(name) +#define mi_msc_builtinz(name) name##64 +#endif + +/* -------------------------------------------------------------------------------- + Popcount and count trailing/leading zero's +-------------------------------------------------------------------------------- */ + +size_t _mi_popcount_generic(size_t x); +extern bool _mi_cpu_has_popcnt; + +static inline size_t mi_popcount(size_t x) { + #if defined(__GNUC__) && (MI_ARCH_X64 || MI_ARCH_X86) + #if !defined(__BMI1__) + if mi_unlikely(!_mi_cpu_has_popcnt) { return 
_mi_popcount_generic(x); } + #endif + size_t r; + __asm ("popcnt\t%1,%0" : "=r"(r) : "r"(x) : "cc"); + return r; + #elif defined(_MSC_VER) && (MI_ARCH_X64 || MI_ARCH_X86) + #if !defined(__BMI1__) + if mi_unlikely(!_mi_cpu_has_popcnt) { return _mi_popcount_generic(x); } + #endif + return (size_t)mi_msc_builtinz(__popcnt)(x); + #elif defined(_MSC_VER) && MI_ARCH_ARM64 + return (size_t)mi_msc_builtinz(__popcnt)(x); + #elif mi_has_builtinz(popcount) + return mi_builtinz(popcount)(x); + #else + #define MI_HAS_FAST_POPCOUNT 0 + return _mi_popcount_generic(x); + #endif +} + +#ifndef MI_HAS_FAST_POPCOUNT +#define MI_HAS_FAST_POPCOUNT 1 +#endif + + + +size_t _mi_clz_generic(size_t x); +size_t _mi_ctz_generic(size_t x); + +static inline size_t mi_ctz(size_t x) { + #if defined(__GNUC__) && MI_ARCH_X64 && defined(__BMI1__) + size_t r; + __asm ("tzcnt\t%1, %0" : "=r"(r) : "r"(x) : "cc"); + return r; + #elif defined(__GNUC__) && MI_ARCH_X64 + // tzcnt is interpreted as bsf if BMI1 is not supported (pre-haswell) + // if the argument is zero: + // - tzcnt: sets carry-flag, and returns MI_SIZE_BITS + // - bsf : sets zero-flag, and leaves the destination _unmodified_ (on both AMD and Intel now, see ) + // so we always initialize r to MI_SIZE_BITS to work correctly on all cpu's without branching + size_t r = MI_SIZE_BITS; + __asm ("tzcnt\t%1, %0" : "+r"(r) : "r"(x) : "cc"); // use '+r' to keep the assignment to r in case this becomes bsf on older cpu's + return r; + #elif mi_has_builtinz(ctz) + return (x!=0 ? (size_t)mi_builtinz(ctz)(x) : MI_SIZE_BITS); + #elif defined(_MSC_VER) && MI_ARCH_X64 && defined(__BMI1__) + return (x!=0 ? 
_tzcnt_u64(x) : MI_SIZE_BITS); // ensure it still works on non-BMI1 cpu's as well + #elif defined(_MSC_VER) && (MI_ARCH_X64 || MI_ARCH_X86 || MI_ARCH_ARM64 || MI_ARCH_ARM32) + if (x==0) return MI_SIZE_BITS; // test explicitly for `x==0` to avoid codegen bug (issue #1071) + unsigned long idx; mi_msc_builtinz(_BitScanForward)(&idx, x); + return (size_t)idx; + #elif defined(__GNUC__) && MI_ARCH_X86 + size_t r = MI_SIZE_BITS; + __asm ("bsf\t%1, %0" : "+r"(r) : "r"(x) : "cc"); + return r; + #elif MI_HAS_FAST_POPCOUNT + return (x!=0 ? (mi_popcount(x^(x-1))-1) : MI_SIZE_BITS); + #else + #define MI_HAS_FAST_BITSCAN 0 + return (x!=0 ? _mi_ctz_generic(x) : MI_SIZE_BITS); + #endif +} + +static inline size_t mi_clz(size_t x) { + // we don't optimize anymore to lzcnt as there are still non BMI1 cpu's around (like Intel Celeron, see issue #1016) + // on pre-haswell cpu's lzcnt gets executed as bsr which is not equivalent (at it returns the bit position) + #if defined(__GNUC__) && MI_ARCH_X64 && defined(__BMI1__) // on x64 lzcnt is defined for 0 + size_t r; + __asm ("lzcnt\t%1, %0" : "=r"(r) : "r"(x) : "cc"); + return r; + #elif mi_has_builtinz(clz) + return (x!=0 ? (size_t)mi_builtinz(clz)(x) : MI_SIZE_BITS); + #elif defined(_MSC_VER) && (MI_ARCH_X64 || MI_ARCH_X86 || MI_ARCH_ARM64 || MI_ARCH_ARM32) + if (x==0) return MI_SIZE_BITS; // test explicitly for `x==0` to avoid codegen bug (issue #1071) + unsigned long idx; mi_msc_builtinz(_BitScanReverse)(&idx, x); + return (MI_SIZE_BITS - 1 - (size_t)idx); + #elif defined(__GNUC__) && (MI_ARCH_X64 || MI_ARCH_X86) + if (x==0) return MI_SIZE_BITS; + size_t r; + __asm ("bsr\t%1, %0" : "=r"(r) : "r"(x) : "cc"); + return (MI_SIZE_BITS - 1 - r); + #else + #define MI_HAS_FAST_BITSCAN 0 + return (x!=0 ? 
_mi_clz_generic(x) : MI_SIZE_BITS); + #endif +} + +#ifndef MI_HAS_FAST_BITSCAN +#define MI_HAS_FAST_BITSCAN 1 +#endif + +/* -------------------------------------------------------------------------------- + find trailing/leading zero (bit scan forward/reverse) +-------------------------------------------------------------------------------- */ + +// Bit scan forward: find the least significant bit that is set (i.e. count trailing zero's) +// return false if `x==0` (with `*idx` undefined) and true otherwise, +// with the `idx` is set to the bit index (`0 <= *idx < MI_BFIELD_BITS`). +static inline bool mi_bsf(size_t x, size_t* idx) { + #if defined(__GNUC__) && MI_ARCH_X64 && defined(__BMI1__) && (!defined(__clang_major__) || __clang_major__ >= 9) + // on x64 the carry flag is set on zero which gives better codegen + bool is_zero; + __asm ( "tzcnt\t%2, %1" : "=@ccc"(is_zero), "=r"(*idx) : "r"(x) : "cc" ); + return !is_zero; + #elif defined(_MSC_VER) && (MI_ARCH_X64 || MI_ARCH_X86 || MI_ARCH_ARM64 || MI_ARCH_ARM32) + if (x==0) return false; // test explicitly for `x==0` to avoid codegen bug (issue #1071) + unsigned long i; mi_msc_builtinz(_BitScanForward)(&i, x); + *idx = (size_t)i; + return true; + #else + return (x!=0 ? (*idx = mi_ctz(x), true) : false); + #endif +} + +// Bit scan reverse: find the most significant bit that is set +// return false if `x==0` (with `*idx` undefined) and true otherwise, +// with the `idx` is set to the bit index (`0 <= *idx < MI_BFIELD_BITS`). +static inline bool mi_bsr(size_t x, size_t* idx) { + #if defined(_MSC_VER) && (MI_ARCH_X64 || MI_ARCH_X86 || MI_ARCH_ARM64 || MI_ARCH_ARM32) + if (x==0) return false; // test explicitly for `x==0` to avoid codegen bug (issue #1071) + unsigned long i; mi_msc_builtinz(_BitScanReverse)(&i, x); + *idx = (size_t)i; + return true; + #else + return (x!=0 ? 
(*idx = MI_SIZE_BITS - 1 - mi_clz(x), true) : false); + #endif +} + + +/* -------------------------------------------------------------------------------- + rotate +-------------------------------------------------------------------------------- */ + +static inline size_t mi_rotr(size_t x, size_t r) { + #if (mi_has_builtin(rotateright64) && MI_SIZE_BITS==64) + return mi_builtin(rotateright64)(x,r); + #elif (mi_has_builtin(rotateright32) && MI_SIZE_BITS==32) + return mi_builtin(rotateright32)(x,r); + #elif defined(_MSC_VER) && (MI_ARCH_X64 || MI_ARCH_ARM64) + return _rotr64(x, (int)r); + #elif defined(_MSC_VER) && (MI_ARCH_X86 || MI_ARCH_ARM32) + return _lrotr(x,(int)r); + #else + // The term `(-rshift)&(BITS-1)` is written instead of `BITS - rshift` to + // avoid UB when `rshift==0`. See + const unsigned int rshift = (unsigned int)(r) & (MI_SIZE_BITS-1); + return ((x >> rshift) | (x << ((-rshift) & (MI_SIZE_BITS-1)))); + #endif +} + +static inline size_t mi_rotl(size_t x, size_t r) { + #if (mi_has_builtin(rotateleft64) && MI_SIZE_BITS==64) + return mi_builtin(rotateleft64)(x,r); + #elif (mi_has_builtin(rotateleft32) && MI_SIZE_BITS==32) + return mi_builtin(rotateleft32)(x,r); + #elif defined(_MSC_VER) && (MI_ARCH_X64 || MI_ARCH_ARM64) + return _rotl64(x, (int)r); + #elif defined(_MSC_VER) && (MI_ARCH_X86 || MI_ARCH_ARM32) + return _lrotl(x, (int)r); + #else + // The term `(-rshift)&(BITS-1)` is written instead of `BITS - rshift` to + // avoid UB when `rshift==0`. 
See + const unsigned int rshift = (unsigned int)(r) & (MI_SIZE_BITS-1); + return ((x << rshift) | (x >> ((-rshift) & (MI_SIZE_BITS-1)))); + #endif +} + +static inline uint32_t mi_rotl32(uint32_t x, uint32_t r) { + #if mi_has_builtin(rotateleft32) + return mi_builtin(rotateleft32)(x,r); + #elif defined(_MSC_VER) && (MI_ARCH_X64 || MI_ARCH_X86 || MI_ARCH_ARM64 || MI_ARCH_ARM32) + return _lrotl(x, (int)r); + #else + // The term `(-rshift)&(BITS-1)` is written instead of `BITS - rshift` to + // avoid UB when `rshift==0`. See + const unsigned int rshift = (unsigned int)(r) & 31; + return ((x << rshift) | (x >> ((-rshift) & 31))); + #endif +} + + +#endif // MI_BITS_H diff --git a/depends/mimalloc/include/mimalloc/internal.h b/depends/mimalloc/include/mimalloc/internal.h new file mode 100644 index 000000000000..62387dc348d0 --- /dev/null +++ b/depends/mimalloc/include/mimalloc/internal.h @@ -0,0 +1,1203 @@ +/* ---------------------------------------------------------------------------- +Copyright (c) 2018-2023, Microsoft Research, Daan Leijen +This is free software; you can redistribute it and/or modify it under the +terms of the MIT license. A copy of the license can be found in the file +"LICENSE" at the root of this distribution. +-----------------------------------------------------------------------------*/ +#pragma once +#ifndef MI_INTERNAL_H +#define MI_INTERNAL_H + +// -------------------------------------------------------------------------- +// This file contains the internal API's of mimalloc and various utility +// functions and macros. +// -------------------------------------------------------------------------- + +#include "types.h" +#include "track.h" +#include "bits.h" + + +// -------------------------------------------------------------------------- +// Compiler defines +// -------------------------------------------------------------------------- + +#if (MI_DEBUG>0) +#define mi_trace_message(...) 
_mi_trace_message(__VA_ARGS__) +#else +#define mi_trace_message(...) +#endif + +#define mi_decl_cache_align mi_decl_align(64) + +#if defined(_MSC_VER) +#pragma warning(disable:4127) // suppress constant conditional warning (due to MI_SECURE paths) +#pragma warning(disable:26812) // unscoped enum warning +#define mi_decl_noinline __declspec(noinline) +#define mi_decl_thread __declspec(thread) +#define mi_decl_align(a) __declspec(align(a)) +#define mi_decl_noreturn __declspec(noreturn) +#define mi_decl_weak +#define mi_decl_hidden +#define mi_decl_cold +#elif (defined(__GNUC__) && (__GNUC__ >= 3)) || defined(__clang__) // includes clang and icc +#define mi_decl_noinline __attribute__((noinline)) +#define mi_decl_thread __thread +#define mi_decl_align(a) __attribute__((aligned(a))) +#define mi_decl_noreturn __attribute__((noreturn)) +#define mi_decl_weak __attribute__((weak)) +#define mi_decl_hidden __attribute__((visibility("hidden"))) +#if (__GNUC__ >= 4) || defined(__clang__) +#define mi_decl_cold __attribute__((cold)) +#else +#define mi_decl_cold +#endif +#elif __cplusplus >= 201103L // c++11 +#define mi_decl_noinline +#define mi_decl_thread thread_local +#define mi_decl_align(a) alignas(a) +#define mi_decl_noreturn [[noreturn]] +#define mi_decl_weak +#define mi_decl_hidden +#define mi_decl_cold +#else +#define mi_decl_noinline +#define mi_decl_thread __thread // hope for the best :-) +#define mi_decl_align(a) +#define mi_decl_noreturn +#define mi_decl_weak +#define mi_decl_hidden +#define mi_decl_cold +#endif + +#if defined(__GNUC__) || defined(__clang__) +#define mi_unlikely(x) (__builtin_expect(!!(x),false)) +#define mi_likely(x) (__builtin_expect(!!(x),true)) +#elif (defined(__cplusplus) && (__cplusplus >= 202002L)) || (defined(_MSVC_LANG) && _MSVC_LANG >= 202002L) +#define mi_unlikely(x) (x) [[unlikely]] +#define mi_likely(x) (x) [[likely]] +#else +#define mi_unlikely(x) (x) +#define mi_likely(x) (x) +#endif + +#ifndef __has_builtin +#define __has_builtin(x) 
0 +#endif + +#if defined(__cplusplus) +#define mi_decl_externc extern "C" +#else +#define mi_decl_externc +#endif + +#if (defined(__GNUC__) && (__GNUC__ >= 7)) || defined(__clang__) // includes clang and icc +#define mi_decl_maybe_unused __attribute__((unused)) +#elif __cplusplus >= 201703L // c++17 +#define mi_decl_maybe_unused [[maybe_unused]] +#else +#define mi_decl_maybe_unused +#endif + +#if defined(__cplusplus) +#define mi_decl_externc extern "C" +#else +#define mi_decl_externc +#endif + + +#if defined(__EMSCRIPTEN__) && !defined(__wasi__) +#define __wasi__ +#endif + + +// -------------------------------------------------------------------------- +// Internal functions +// -------------------------------------------------------------------------- + + +// "libc.c" +#include +int _mi_vsnprintf(char* buf, size_t bufsize, const char* fmt, va_list args); +int _mi_snprintf(char* buf, size_t buflen, const char* fmt, ...); +char _mi_toupper(char c); +int _mi_strnicmp(const char* s, const char* t, size_t n); +void _mi_strlcpy(char* dest, const char* src, size_t dest_size); +void _mi_strlcat(char* dest, const char* src, size_t dest_size); +size_t _mi_strlen(const char* s); +size_t _mi_strnlen(const char* s, size_t max_len); +bool _mi_getenv(const char* name, char* result, size_t result_size); + +// "options.c" +void _mi_fputs(mi_output_fun* out, void* arg, const char* prefix, const char* message); +void _mi_fprintf(mi_output_fun* out, void* arg, const char* fmt, ...); +void _mi_raw_message(const char* fmt, ...); +void _mi_message(const char* fmt, ...); +void _mi_warning_message(const char* fmt, ...); +void _mi_verbose_message(const char* fmt, ...); +void _mi_trace_message(const char* fmt, ...); +void _mi_options_init(void); +long _mi_option_get_fast(mi_option_t option); +void _mi_error_message(int err, const char* fmt, ...); + +// random.c +void _mi_random_init(mi_random_ctx_t* ctx); +void _mi_random_init_weak(mi_random_ctx_t* ctx); +void 
_mi_random_reinit_if_weak(mi_random_ctx_t * ctx); +void _mi_random_split(mi_random_ctx_t* ctx, mi_random_ctx_t* new_ctx); +uintptr_t _mi_random_next(mi_random_ctx_t* ctx); +uintptr_t _mi_heap_random_next(mi_heap_t* heap); +uintptr_t _mi_os_random_weak(uintptr_t extra_seed); +static inline uintptr_t _mi_random_shuffle(uintptr_t x); + +// init.c +extern mi_decl_hidden mi_decl_cache_align const mi_page_t _mi_page_empty; +void _mi_auto_process_init(void); +void mi_cdecl _mi_auto_process_done(void) mi_attr_noexcept; +bool _mi_is_redirected(void); +bool _mi_allocator_init(const char** message); +void _mi_allocator_done(void); +bool _mi_is_main_thread(void); +size_t _mi_current_thread_count(void); +bool _mi_preloading(void); // true while the C runtime is not initialized yet +void _mi_thread_done(mi_heap_t* heap); + +mi_subproc_t* _mi_subproc(void); +mi_subproc_t* _mi_subproc_main(void); +mi_subproc_t* _mi_subproc_from_id(mi_subproc_id_t subproc_id); +mi_threadid_t _mi_thread_id(void) mi_attr_noexcept; +size_t _mi_thread_seq_id(void) mi_attr_noexcept; +mi_tld_t* _mi_thread_tld(void) mi_attr_noexcept; +void _mi_heap_guarded_init(mi_heap_t* heap); +mi_heap_t* _mi_heap_main_get(void); + +// os.c +void _mi_os_init(void); // called from process init +void* _mi_os_alloc(size_t size, mi_memid_t* memid); +void* _mi_os_zalloc(size_t size, mi_memid_t* memid); +void _mi_os_free(void* p, size_t size, mi_memid_t memid); +void _mi_os_free_ex(void* p, size_t size, bool still_committed, mi_memid_t memid, mi_subproc_t* subproc ); + +size_t _mi_os_page_size(void); +size_t _mi_os_guard_page_size(void); +size_t _mi_os_good_alloc_size(size_t size); +bool _mi_os_has_overcommit(void); +bool _mi_os_has_virtual_reserve(void); +size_t _mi_os_virtual_address_bits(void); + +bool _mi_os_reset(void* addr, size_t size); +bool _mi_os_decommit(void* addr, size_t size); +void _mi_os_reuse(void* p, size_t size); +mi_decl_nodiscard bool _mi_os_commit(void* p, size_t size, bool* is_zero); +mi_decl_nodiscard 
bool _mi_os_commit_ex(void* addr, size_t size, bool* is_zero, size_t stat_size); +mi_decl_nodiscard bool _mi_os_protect(void* addr, size_t size); +bool _mi_os_unprotect(void* addr, size_t size); +bool _mi_os_purge(void* p, size_t size); +bool _mi_os_purge_ex(void* p, size_t size, bool allow_reset, size_t stats_size, mi_commit_fun_t* commit_fun, void* commit_fun_arg); + +size_t _mi_os_secure_guard_page_size(void); +bool _mi_os_secure_guard_page_set_at(void* addr, mi_memid_t memid); +bool _mi_os_secure_guard_page_set_before(void* addr, mi_memid_t memid); +bool _mi_os_secure_guard_page_reset_at(void* addr, mi_memid_t memid); +bool _mi_os_secure_guard_page_reset_before(void* addr, mi_memid_t memid); + +int _mi_os_numa_node(void); +int _mi_os_numa_node_count(void); + +void* _mi_os_alloc_aligned(size_t size, size_t alignment, bool commit, bool allow_large, mi_memid_t* memid); +void* _mi_os_alloc_aligned_at_offset(size_t size, size_t alignment, size_t align_offset, bool commit, bool allow_large, mi_memid_t* memid); + +void* _mi_os_get_aligned_hint(size_t try_alignment, size_t size); +bool _mi_os_use_large_page(size_t size, size_t alignment); +size_t _mi_os_large_page_size(void); +void* _mi_os_alloc_huge_os_pages(size_t pages, int numa_node, mi_msecs_t max_secs, size_t* pages_reserved, size_t* psize, mi_memid_t* memid); + + +// arena.c +mi_arena_id_t _mi_arena_id_none(void); +mi_arena_t* _mi_arena_from_id(mi_arena_id_t id); +bool _mi_arena_memid_is_suitable(mi_memid_t memid, mi_arena_t* request_arena); + +void* _mi_arenas_alloc(mi_subproc_t* subproc, size_t size, bool commit, bool allow_pinned, mi_arena_t* req_arena, size_t tseq, int numa_node, mi_memid_t* memid); +void* _mi_arenas_alloc_aligned(mi_subproc_t* subproc, size_t size, size_t alignment, size_t align_offset, bool commit, bool allow_pinned, mi_arena_t* req_arena, size_t tseq, int numa_node, mi_memid_t* memid); +void _mi_arenas_free(void* p, size_t size, mi_memid_t memid); +bool _mi_arenas_contain(const void* p); 
+void _mi_arenas_collect(bool force_purge, bool visit_all, mi_tld_t* tld); +void _mi_arenas_unsafe_destroy_all(mi_tld_t* tld); + +mi_page_t* _mi_arenas_page_alloc(mi_heap_t* heap, size_t block_size, size_t page_alignment); +void _mi_arenas_page_free(mi_page_t* page, mi_tld_t* tld); +void _mi_arenas_page_abandon(mi_page_t* page, mi_tld_t* tld); +void _mi_arenas_page_unabandon(mi_page_t* page); +bool _mi_arenas_page_try_reabandon_to_mapped(mi_page_t* page); + +// arena-meta.c +void* _mi_meta_zalloc( size_t size, mi_memid_t* memid ); +void _mi_meta_free(void* p, size_t size, mi_memid_t memid); +bool _mi_meta_is_meta_page(void* p); + +// "page-map.c" +bool _mi_page_map_init(void); +void _mi_page_map_register(mi_page_t* page); +void _mi_page_map_unregister(mi_page_t* page); +void _mi_page_map_unregister_range(void* start, size_t size); +mi_page_t* _mi_safe_ptr_page(const void* p); +void _mi_page_map_unsafe_destroy(mi_subproc_t* subproc); + +// "page.c" +void* _mi_malloc_generic(mi_heap_t* heap, size_t size, bool zero, size_t huge_alignment) mi_attr_noexcept mi_attr_malloc; + +void _mi_page_retire(mi_page_t* page) mi_attr_noexcept; // free the page if there are no other pages with many free blocks +void _mi_page_unfull(mi_page_t* page); +void _mi_page_free(mi_page_t* page, mi_page_queue_t* pq); // free the page +void _mi_page_abandon(mi_page_t* page, mi_page_queue_t* pq); // abandon the page, to be picked up by another thread... 
+void _mi_heap_collect_retired(mi_heap_t* heap, bool force); + +size_t _mi_page_queue_append(mi_heap_t* heap, mi_page_queue_t* pq, mi_page_queue_t* append); +void _mi_deferred_free(mi_heap_t* heap, bool force); + +void _mi_page_free_collect(mi_page_t* page, bool force); +void _mi_page_free_collect_partly(mi_page_t* page, mi_block_t* head); +mi_decl_nodiscard bool _mi_page_init(mi_heap_t* heap, mi_page_t* page); +bool _mi_page_queue_is_valid(mi_heap_t* heap, const mi_page_queue_t* pq); + +size_t _mi_page_bin(const mi_page_t* page); // for stats +size_t _mi_bin_size(size_t bin); // for stats +size_t _mi_bin(size_t size); // for stats + +// "heap.c" +mi_heap_t* _mi_heap_create(int heap_tag, bool allow_destroy, mi_arena_id_t arena_id, mi_tld_t* tld); +void _mi_heap_init(mi_heap_t* heap, mi_arena_id_t arena_id, bool noreclaim, uint8_t tag, mi_tld_t* tld); +void _mi_heap_destroy_pages(mi_heap_t* heap); +void _mi_heap_collect_abandon(mi_heap_t* heap); +void _mi_heap_set_default_direct(mi_heap_t* heap); +bool _mi_heap_memid_is_suitable(mi_heap_t* heap, mi_memid_t memid); +void _mi_heap_unsafe_destroy_all(mi_heap_t* heap); +mi_heap_t* _mi_heap_by_tag(mi_heap_t* heap, uint8_t tag); +void _mi_heap_area_init(mi_heap_area_t* area, mi_page_t* page); +bool _mi_heap_area_visit_blocks(const mi_heap_area_t* area, mi_page_t* page, mi_block_visit_fun* visitor, void* arg); +void _mi_heap_page_reclaim(mi_heap_t* heap, mi_page_t* page); + +// "stats.c" +void _mi_stats_init(void); +void _mi_stats_done(mi_stats_t* stats); +void _mi_stats_print(mi_stats_t* stats, mi_output_fun* out, void* arg) mi_attr_noexcept; +void _mi_stats_merge_thread(mi_tld_t* tld); +void _mi_stats_merge_from(mi_stats_t* to, mi_stats_t* from); +mi_msecs_t _mi_clock_now(void); +mi_msecs_t _mi_clock_end(mi_msecs_t start); +mi_msecs_t _mi_clock_start(void); + +// "alloc.c" +void* _mi_page_malloc_zero(mi_heap_t* heap, mi_page_t* page, size_t size, bool zero) mi_attr_noexcept; // called from `_mi_malloc_generic` +void* 
_mi_page_malloc(mi_heap_t* heap, mi_page_t* page, size_t size) mi_attr_noexcept; // called from `_mi_heap_malloc_aligned` +void* _mi_page_malloc_zeroed(mi_heap_t* heap, mi_page_t* page, size_t size) mi_attr_noexcept; // called from `_mi_heap_malloc_aligned` +void* _mi_heap_malloc_zero(mi_heap_t* heap, size_t size, bool zero) mi_attr_noexcept; +void* _mi_heap_malloc_zero_ex(mi_heap_t* heap, size_t size, bool zero, size_t huge_alignment) mi_attr_noexcept; // called from `_mi_heap_malloc_aligned` +void* _mi_heap_realloc_zero(mi_heap_t* heap, void* p, size_t newsize, bool zero) mi_attr_noexcept; +mi_block_t* _mi_page_ptr_unalign(const mi_page_t* page, const void* p); +void _mi_padding_shrink(const mi_page_t* page, const mi_block_t* block, const size_t min_size); + +#if MI_DEBUG>1 +bool _mi_page_is_valid(mi_page_t* page); +#endif + + +// ------------------------------------------------------ +// Assertions +// ------------------------------------------------------ + +#if (MI_DEBUG) +// use our own assertion to print without memory allocation +mi_decl_noreturn mi_decl_cold void _mi_assert_fail(const char* assertion, const char* fname, unsigned int line, const char* func) mi_attr_noexcept; +#define mi_assert(expr) ((expr) ? 
(void)0 : _mi_assert_fail(#expr,__FILE__,__LINE__,__func__)) +#else +#define mi_assert(x) +#endif + +#if (MI_DEBUG>1) +#define mi_assert_internal mi_assert +#else +#define mi_assert_internal(x) +#endif + +#if (MI_DEBUG>2) +#define mi_assert_expensive mi_assert +#else +#define mi_assert_expensive(x) +#endif + + +/* ----------------------------------------------------------- + Statistics (in `stats.c`) +----------------------------------------------------------- */ + +// add to stat keeping track of the peak +void __mi_stat_increase(mi_stat_count_t* stat, size_t amount); +void __mi_stat_decrease(mi_stat_count_t* stat, size_t amount); +void __mi_stat_increase_mt(mi_stat_count_t* stat, size_t amount); +void __mi_stat_decrease_mt(mi_stat_count_t* stat, size_t amount); + +// adjust stat in special cases to compensate for double counting (and does not adjust peak values and can decrease the total) +void __mi_stat_adjust_increase(mi_stat_count_t* stat, size_t amount); +void __mi_stat_adjust_decrease(mi_stat_count_t* stat, size_t amount); +void __mi_stat_adjust_increase_mt(mi_stat_count_t* stat, size_t amount); +void __mi_stat_adjust_decrease_mt(mi_stat_count_t* stat, size_t amount); + +// counters can just be increased +void __mi_stat_counter_increase(mi_stat_counter_t* stat, size_t amount); +void __mi_stat_counter_increase_mt(mi_stat_counter_t* stat, size_t amount); + +#define mi_subproc_stat_counter_increase(subproc,stat,amount) __mi_stat_counter_increase_mt( &(subproc)->stats.stat, amount) +#define mi_subproc_stat_increase(subproc,stat,amount) __mi_stat_increase_mt( &(subproc)->stats.stat, amount) +#define mi_subproc_stat_decrease(subproc,stat,amount) __mi_stat_decrease_mt( &(subproc)->stats.stat, amount) +#define mi_subproc_stat_adjust_increase(subproc,stat,amnt) __mi_stat_adjust_increase_mt( &(subproc)->stats.stat, amnt) +#define mi_subproc_stat_adjust_decrease(subproc,stat,amnt) __mi_stat_adjust_decrease_mt( &(subproc)->stats.stat, amnt) + +#define 
mi_tld_stat_counter_increase(tld,stat,amount) __mi_stat_counter_increase( &(tld)->stats.stat, amount) +#define mi_tld_stat_increase(tld,stat,amount) __mi_stat_increase( &(tld)->stats.stat, amount) +#define mi_tld_stat_decrease(tld,stat,amount) __mi_stat_decrease( &(tld)->stats.stat, amount) +#define mi_tld_stat_adjust_increase(tld,stat,amnt) __mi_stat_adjust_increase( &(tld)->stats.stat, amnt) +#define mi_tld_stat_adjust_decrease(tld,stat,amnt) __mi_stat_adjust_decrease( &(tld)->stats.stat, amnt) + +#define mi_os_stat_counter_increase(stat,amount) mi_subproc_stat_counter_increase(_mi_subproc(),stat,amount) +#define mi_os_stat_increase(stat,amount) mi_subproc_stat_increase(_mi_subproc(),stat,amount) +#define mi_os_stat_decrease(stat,amount) mi_subproc_stat_decrease(_mi_subproc(),stat,amount) + +#define mi_heap_stat_counter_increase(heap,stat,amount) mi_tld_stat_counter_increase(heap->tld, stat, amount) +#define mi_heap_stat_increase(heap,stat,amount) mi_tld_stat_increase( heap->tld, stat, amount) +#define mi_heap_stat_decrease(heap,stat,amount) mi_tld_stat_decrease( heap->tld, stat, amount) +#define mi_heap_stat_adjust_decrease(heap,stat,amount) mi_tld_stat_adjust_decrease( heap->tld, stat, amount) + +/* ----------------------------------------------------------- + Options (exposed for the debugger) +----------------------------------------------------------- */ +typedef enum mi_option_init_e { + MI_OPTION_UNINIT, // not yet initialized + MI_OPTION_DEFAULTED, // not found in the environment, use default value + MI_OPTION_INITIALIZED // found in environment or set explicitly +} mi_option_init_t; + +typedef struct mi_option_desc_s { + long value; // the value + mi_option_init_t init; // is it initialized yet? 
(from the environment) + mi_option_t option; // for debugging: the option index should match the option + const char* name; // option name without `mimalloc_` prefix + const char* legacy_name; // potential legacy option name +} mi_option_desc_t; + + + +/* ----------------------------------------------------------- + Inlined definitions +----------------------------------------------------------- */ +#define MI_UNUSED(x) (void)(x) +#if (MI_DEBUG>0) +#define MI_UNUSED_RELEASE(x) +#else +#define MI_UNUSED_RELEASE(x) MI_UNUSED(x) +#endif + +#define MI_INIT4(x) x(),x(),x(),x() +#define MI_INIT8(x) MI_INIT4(x),MI_INIT4(x) +#define MI_INIT16(x) MI_INIT8(x),MI_INIT8(x) +#define MI_INIT32(x) MI_INIT16(x),MI_INIT16(x) +#define MI_INIT64(x) MI_INIT32(x),MI_INIT32(x) +#define MI_INIT128(x) MI_INIT64(x),MI_INIT64(x) +#define MI_INIT256(x) MI_INIT128(x),MI_INIT128(x) + +#define MI_INIT74(x) MI_INIT64(x),MI_INIT8(x),x(),x() +#define MI_INIT5(x) MI_INIT4(x),x() + +#include <string.h> // memset +// initialize a local variable to zero; use memset as compilers optimize constant sized memset's +#define _mi_memzero_var(x) memset(&x,0,sizeof(x)) + +// Is `x` a power of two? (0 is considered a power of two) +static inline bool _mi_is_power_of_two(uintptr_t x) { + return ((x & (x - 1)) == 0); +} + +// Is a pointer aligned? +static inline bool _mi_is_aligned(void* p, size_t alignment) { + mi_assert_internal(alignment != 0); + return (((uintptr_t)p % alignment) == 0); +} + +// Align upwards +static inline uintptr_t _mi_align_up(uintptr_t sz, size_t alignment) { + mi_assert_internal(alignment != 0); + uintptr_t mask = alignment - 1; + if ((alignment & mask) == 0) { // power of two?
+ return ((sz + mask) & ~mask); + } + else { + return (((sz + mask)/alignment)*alignment); + } +} + + +// Align a pointer upwards +static inline uint8_t* _mi_align_up_ptr(void* p, size_t alignment) { + return (uint8_t*)_mi_align_up((uintptr_t)p, alignment); +} + + +static inline uintptr_t _mi_align_down(uintptr_t sz, size_t alignment) { + mi_assert_internal(alignment != 0); + uintptr_t mask = alignment - 1; + if ((alignment & mask) == 0) { // power of two? + return (sz & ~mask); + } + else { + return ((sz / alignment) * alignment); + } +} + +static inline void* mi_align_down_ptr(void* p, size_t alignment) { + return (void*)_mi_align_down((uintptr_t)p, alignment); +} + +// Divide upwards: `s <= _mi_divide_up(s,d)*d < s+d`. +static inline uintptr_t _mi_divide_up(uintptr_t size, size_t divider) { + mi_assert_internal(divider != 0); + return (divider == 0 ? size : ((size + divider - 1) / divider)); +} + + +// clamp an integer +static inline size_t _mi_clamp(size_t sz, size_t min, size_t max) { + if (sz < min) return min; + else if (sz > max) return max; + else return sz; +} + +// Is memory zero initialized? +static inline bool mi_mem_is_zero(const void* p, size_t size) { + for (size_t i = 0; i < size; i++) { + if (((uint8_t*)p)[i] != 0) return false; + } + return true; +} + +// Align a byte size to a size in _machine words_, +// i.e. byte size == `wsize*sizeof(void*)`. 
+static inline size_t _mi_wsize_from_size(size_t size) { + mi_assert_internal(size <= SIZE_MAX - sizeof(uintptr_t)); + return (size + sizeof(uintptr_t) - 1) / sizeof(uintptr_t); +} + +// Overflow detecting multiply +#if __has_builtin(__builtin_umul_overflow) || (defined(__GNUC__) && (__GNUC__ >= 5)) +#include <limits.h> // UINT_MAX, ULONG_MAX +#if defined(_CLOCK_T) // for Illumos +#undef _CLOCK_T +#endif +static inline bool mi_mul_overflow(size_t count, size_t size, size_t* total) { + #if (SIZE_MAX == ULONG_MAX) + return __builtin_umull_overflow(count, size, (unsigned long *)total); + #elif (SIZE_MAX == UINT_MAX) + return __builtin_umul_overflow(count, size, (unsigned int *)total); + #else + return __builtin_umulll_overflow(count, size, (unsigned long long *)total); + #endif +} +#else /* __builtin_umul_overflow is unavailable */ +static inline bool mi_mul_overflow(size_t count, size_t size, size_t* total) { + #define MI_MUL_COULD_OVERFLOW ((size_t)1 << (4*sizeof(size_t))) // sqrt(SIZE_MAX) + *total = count * size; + // note: gcc/clang optimize this to directly check the overflow flag + return ((size >= MI_MUL_COULD_OVERFLOW || count >= MI_MUL_COULD_OVERFLOW) && size > 0 && (SIZE_MAX / size) < count); +} +#endif + +// Safe multiply `count*size` into `total`; return `true` on overflow.
+static inline bool mi_count_size_overflow(size_t count, size_t size, size_t* total) { + if (count==1) { // quick check for the case where count is one (common for C++ allocators) + *total = size; + return false; + } + else if mi_unlikely(mi_mul_overflow(count, size, total)) { + #if MI_DEBUG > 0 + _mi_error_message(EOVERFLOW, "allocation request is too large (%zu * %zu bytes)\n", count, size); + #endif + *total = SIZE_MAX; + return true; + } + else return false; +} + + +/*---------------------------------------------------------------------------------------- + Heap functions +------------------------------------------------------------------------------------------- */ + +extern mi_decl_hidden const mi_heap_t _mi_heap_empty; // read-only empty heap, initial value of the thread local default heap + +static inline bool mi_heap_is_backing(const mi_heap_t* heap) { + return (heap->tld->heap_backing == heap); +} + +static inline bool mi_heap_is_initialized(const mi_heap_t* heap) { + mi_assert_internal(heap != NULL); + return (heap != NULL && heap != &_mi_heap_empty); +} + +static inline mi_page_t* _mi_heap_get_free_small_page(mi_heap_t* heap, size_t size) { + mi_assert_internal(size <= (MI_SMALL_SIZE_MAX + MI_PADDING_SIZE)); + const size_t idx = _mi_wsize_from_size(size); + mi_assert_internal(idx < MI_PAGES_DIRECT); + return heap->pages_free_direct[idx]; +} + + +//static inline uintptr_t _mi_ptr_cookie(const void* p) { +// extern mi_heap_t _mi_heap_main; +// mi_assert_internal(_mi_heap_main.cookie != 0); +// return ((uintptr_t)p ^ _mi_heap_main.cookie); +//} + + +/* ----------------------------------------------------------- + The page map maps addresses to `mi_page_t` pointers +----------------------------------------------------------- */ + +#if MI_PAGE_MAP_FLAT + +// flat page-map committed on demand, using one byte per slice (64 KiB). 
+// single indirection and low commit, but large initial virtual reserve (4 GiB with 48 bit virtual addresses) +// used by default on <= 40 bit virtual address spaces. +extern mi_decl_hidden uint8_t* _mi_page_map; + +static inline size_t _mi_page_map_index(const void* p) { + return (size_t)((uintptr_t)p >> MI_ARENA_SLICE_SHIFT); +} + +static inline mi_page_t* _mi_ptr_page_ex(const void* p, bool* valid) { + const size_t idx = _mi_page_map_index(p); + const size_t ofs = _mi_page_map[idx]; + if (valid != NULL) { *valid = (ofs != 0); } + return (mi_page_t*)((((uintptr_t)p >> MI_ARENA_SLICE_SHIFT) + 1 - ofs) << MI_ARENA_SLICE_SHIFT); +} + +static inline mi_page_t* _mi_checked_ptr_page(const void* p) { + bool valid; + mi_page_t* const page = _mi_ptr_page_ex(p, &valid); + return (valid ? page : NULL); +} + +static inline mi_page_t* _mi_unchecked_ptr_page(const void* p) { + return _mi_ptr_page_ex(p, NULL); +} + +#else + +// 2-level page map: +// double indirection, but low commit and low virtual reserve. +// +// the page-map is usually 4 MiB (for 48 bit virtual addresses) and points to sub maps of 64 KiB. +// the page-map is committed on-demand (in 64 KiB parts) (and sub-maps are committed on-demand as well) +// one sub page-map = 64 KiB => covers 2^(16-3) * 2^16 = 2^29 = 512 MiB address space +// the page-map needs 48-(16+13) = 19 bits => 2^19 sub map pointers = 2^22 bytes = 4 MiB reserved size. 
+#define MI_PAGE_MAP_SUB_SHIFT (13) +#define MI_PAGE_MAP_SUB_COUNT (MI_ZU(1) << MI_PAGE_MAP_SUB_SHIFT) +#define MI_PAGE_MAP_SHIFT (MI_MAX_VABITS - MI_PAGE_MAP_SUB_SHIFT - MI_ARENA_SLICE_SHIFT) +#define MI_PAGE_MAP_COUNT (MI_ZU(1) << MI_PAGE_MAP_SHIFT) + +extern mi_decl_hidden _Atomic(mi_page_t**)* _mi_page_map; + +static inline size_t _mi_page_map_index(const void* p, size_t* sub_idx) { + const size_t u = (size_t)((uintptr_t)p / MI_ARENA_SLICE_SIZE); + if (sub_idx != NULL) { *sub_idx = u % MI_PAGE_MAP_SUB_COUNT; } + return (u / MI_PAGE_MAP_SUB_COUNT); +} + +static inline mi_page_t** _mi_page_map_at(size_t idx) { + return mi_atomic_load_ptr_relaxed(mi_page_t*, &_mi_page_map[idx]); +} + +static inline mi_page_t* _mi_unchecked_ptr_page(const void* p) { + size_t sub_idx; + const size_t idx = _mi_page_map_index(p, &sub_idx); + return (_mi_page_map_at(idx))[sub_idx]; // NULL if p==NULL +} + +static inline mi_page_t* _mi_checked_ptr_page(const void* p) { + size_t sub_idx; + const size_t idx = _mi_page_map_index(p, &sub_idx); + mi_page_t** const sub = _mi_page_map_at(idx); + if mi_unlikely(sub == NULL) return NULL; + return sub[sub_idx]; +} + +#endif + + +static inline mi_page_t* _mi_ptr_page(const void* p) { + mi_assert_internal(p==NULL || mi_is_in_heap_region(p)); + #if MI_DEBUG || MI_SECURE || defined(__APPLE__) + return _mi_checked_ptr_page(p); + #else + return _mi_unchecked_ptr_page(p); + #endif +} + + +// Get the block size of a page +static inline size_t mi_page_block_size(const mi_page_t* page) { + mi_assert_internal(page->block_size > 0); + return page->block_size; +} + +// Page start +static inline uint8_t* mi_page_start(const mi_page_t* page) { + return page->page_start; +} + +static inline size_t mi_page_size(const mi_page_t* page) { + return mi_page_block_size(page) * page->reserved; +} + +static inline uint8_t* mi_page_area(const mi_page_t* page, size_t* size) { + if (size) { *size = mi_page_size(page); } + return mi_page_start(page); +} + +static inline 
size_t mi_page_info_size(void) { + return _mi_align_up(sizeof(mi_page_t), MI_MAX_ALIGN_SIZE); +} + +static inline bool mi_page_contains_address(const mi_page_t* page, const void* p) { + size_t psize; + uint8_t* start = mi_page_area(page, &psize); + return (start <= (uint8_t*)p && (uint8_t*)p < start + psize); +} + +static inline bool mi_page_is_in_arena(const mi_page_t* page) { + return (page->memid.memkind == MI_MEM_ARENA); +} + +static inline bool mi_page_is_singleton(const mi_page_t* page) { + return (page->reserved == 1); +} + +// Get the usable block size of a page without fixed padding. +// This may still include internal padding due to alignment and rounding up size classes. +static inline size_t mi_page_usable_block_size(const mi_page_t* page) { + return mi_page_block_size(page) - MI_PADDING_SIZE; +} + +// This may change if we locate page info outside the page data slices +static inline uint8_t* mi_page_slice_start(const mi_page_t* page) { + return (uint8_t*)page; +} + +// This gives the offset relative to the start slice of a page. This may change if we ever +// locate page info outside the page-data itself. +static inline size_t mi_page_slice_offset_of(const mi_page_t* page, size_t offset_relative_to_page_start) { + return (page->page_start - mi_page_slice_start(page)) + offset_relative_to_page_start; +} + +static inline size_t mi_page_committed(const mi_page_t* page) { + return (page->slice_committed == 0 ? mi_page_size(page) : page->slice_committed - (page->page_start - mi_page_slice_start(page))); +} + +static inline mi_heap_t* mi_page_heap(const mi_page_t* page) { + return page->heap; +} + + +// are all blocks in a page freed? +// note: needs up-to-date used count, (as the `xthread_free` list may not be empty). see `_mi_page_collect_free`. +static inline bool mi_page_all_free(const mi_page_t* page) { + mi_assert_internal(page != NULL); + return (page->used == 0); +} + +// are there immediately available blocks, i.e. blocks available on the free list. 
+static inline bool mi_page_immediate_available(const mi_page_t* page) { + mi_assert_internal(page != NULL); + return (page->free != NULL); +} + + +// is the page not yet used up to its reserved space? +static inline bool mi_page_is_expandable(const mi_page_t* page) { + mi_assert_internal(page != NULL); + mi_assert_internal(page->capacity <= page->reserved); + return (page->capacity < page->reserved); +} + + +static inline bool mi_page_is_full(mi_page_t* page) { + bool full = (page->reserved == page->used); + mi_assert_internal(!full || page->free == NULL); + return full; +} + +// is more than 7/8th of a page in use? +static inline bool mi_page_is_mostly_used(const mi_page_t* page) { + if (page==NULL) return true; + uint16_t frac = page->reserved / 8U; + return (page->reserved - page->used <= frac); +} + +// is more than (n-1)/n'th of a page in use? +static inline bool mi_page_is_used_at_frac(const mi_page_t* page, uint16_t n) { + if (page==NULL) return true; + uint16_t frac = page->reserved / n; + return (page->reserved - page->used <= frac); +} + + +static inline bool mi_page_is_huge(const mi_page_t* page) { + return (mi_page_is_singleton(page) && + (page->block_size > MI_LARGE_MAX_OBJ_SIZE || + (mi_memkind_is_os(page->memid.memkind) && page->memid.mem.os.base < (void*)page))); +} + +static inline mi_page_queue_t* mi_page_queue(const mi_heap_t* heap, size_t size) { + mi_page_queue_t* const pq = &((mi_heap_t*)heap)->pages[_mi_bin(size)]; + if (size <= MI_LARGE_MAX_OBJ_SIZE) { mi_assert_internal(pq->block_size <= MI_LARGE_MAX_OBJ_SIZE); } + return pq; +} + + +//----------------------------------------------------------- +// Page thread id and flags +//----------------------------------------------------------- + +// Thread id of thread that owns this page (with flags in the bottom 2 bits) +static inline mi_threadid_t mi_page_xthread_id(const mi_page_t* page) { + return mi_atomic_load_relaxed(&((mi_page_t*)page)->xthread_id); +} + +// Plain thread id of the thread 
that owns this page +static inline mi_threadid_t mi_page_thread_id(const mi_page_t* page) { + return (mi_page_xthread_id(page) & ~MI_PAGE_FLAG_MASK); +} + +static inline mi_page_flags_t mi_page_flags(const mi_page_t* page) { + return (mi_page_xthread_id(page) & MI_PAGE_FLAG_MASK); +} + +static inline void mi_page_flags_set(mi_page_t* page, bool set, mi_page_flags_t newflag) { + if (set) { mi_atomic_or_relaxed(&page->xthread_id, newflag); } + else { mi_atomic_and_relaxed(&page->xthread_id, ~newflag); } +} + +static inline bool mi_page_is_in_full(const mi_page_t* page) { + return ((mi_page_flags(page) & MI_PAGE_IN_FULL_QUEUE) != 0); +} + +static inline void mi_page_set_in_full(mi_page_t* page, bool in_full) { + mi_page_flags_set(page, in_full, MI_PAGE_IN_FULL_QUEUE); +} + +static inline bool mi_page_has_aligned(const mi_page_t* page) { + return ((mi_page_flags(page) & MI_PAGE_HAS_ALIGNED) != 0); +} + +static inline void mi_page_set_has_aligned(mi_page_t* page, bool has_aligned) { + mi_page_flags_set(page, has_aligned, MI_PAGE_HAS_ALIGNED); +} + +static inline void mi_page_set_heap(mi_page_t* page, mi_heap_t* heap) { + // mi_assert_internal(!mi_page_is_in_full(page)); // can happen when destroying pages on heap_destroy + const mi_threadid_t tid = (heap == NULL ? 
MI_THREADID_ABANDONED : heap->tld->thread_id) | mi_page_flags(page); + if (heap != NULL) { + page->heap = heap; + page->heap_tag = heap->tag; + } + else { + page->heap = NULL; + } + mi_atomic_store_release(&page->xthread_id, tid); +} + +static inline bool mi_page_is_abandoned(const mi_page_t* page) { + // note: the xheap field of an abandoned heap is set to the subproc (for fast reclaim-on-free) + return (mi_page_thread_id(page) <= MI_THREADID_ABANDONED_MAPPED); +} + +static inline bool mi_page_is_abandoned_mapped(const mi_page_t* page) { + return (mi_page_thread_id(page) == MI_THREADID_ABANDONED_MAPPED); +} + +static inline void mi_page_set_abandoned_mapped(mi_page_t* page) { + mi_assert_internal(mi_page_is_abandoned(page)); + mi_atomic_or_relaxed(&page->xthread_id, MI_THREADID_ABANDONED_MAPPED); +} + +static inline void mi_page_clear_abandoned_mapped(mi_page_t* page) { + mi_assert_internal(mi_page_is_abandoned_mapped(page)); + mi_atomic_and_relaxed(&page->xthread_id, MI_PAGE_FLAG_MASK); +} + +//----------------------------------------------------------- +// Thread free list and ownership +//----------------------------------------------------------- + +// Thread free flag helpers +static inline mi_block_t* mi_tf_block(mi_thread_free_t tf) { + return (mi_block_t*)(tf & ~1); +} +static inline bool mi_tf_is_owned(mi_thread_free_t tf) { + return ((tf & 1) == 1); +} +static inline mi_thread_free_t mi_tf_create(mi_block_t* block, bool owned) { + return (mi_thread_free_t)((uintptr_t)block | (owned ? 1 : 0)); +} + +// Thread free access +static inline mi_block_t* mi_page_thread_free(const mi_page_t* page) { + return mi_tf_block(mi_atomic_load_relaxed(&((mi_page_t*)page)->xthread_free)); +} + +// are there any available blocks? +static inline bool mi_page_has_any_available(const mi_page_t* page) { + mi_assert_internal(page != NULL && page->reserved > 0); + return (page->used < page->reserved || (mi_page_thread_free(page) != NULL)); +} + + +// Owned? 
+static inline bool mi_page_is_owned(const mi_page_t* page) { + return mi_tf_is_owned(mi_atomic_load_relaxed(&((mi_page_t*)page)->xthread_free)); +} + +// Unown a page that is currently owned +static inline void _mi_page_unown_unconditional(mi_page_t* page) { + mi_assert_internal(mi_page_is_owned(page)); + mi_assert_internal(mi_page_thread_id(page)==0); + const uintptr_t old = mi_atomic_and_acq_rel(&page->xthread_free, ~((uintptr_t)1)); + mi_assert_internal((old&1)==1); MI_UNUSED(old); +} + +// get ownership if it is not yet owned +static inline bool mi_page_try_claim_ownership(mi_page_t* page) { + const uintptr_t old = mi_atomic_or_acq_rel(&page->xthread_free, 1); + return ((old&1)==0); +} + +// release ownership of a page. This may free the page if all blocks were concurrently +// freed in the meantime. Returns true if the page was freed. +static inline bool _mi_page_unown(mi_page_t* page) { + mi_assert_internal(mi_page_is_owned(page)); + mi_assert_internal(mi_page_is_abandoned(page)); + mi_thread_free_t tf_new; + mi_thread_free_t tf_old = mi_atomic_load_relaxed(&page->xthread_free); + do { + mi_assert_internal(mi_tf_is_owned(tf_old)); + while mi_unlikely(mi_tf_block(tf_old) != NULL) { + _mi_page_free_collect(page, false); // update used + if (mi_page_all_free(page)) { // it may become free just before unowning it + _mi_arenas_page_unabandon(page); + _mi_arenas_page_free(page,NULL); + return true; + } + tf_old = mi_atomic_load_relaxed(&page->xthread_free); + } + mi_assert_internal(mi_tf_block(tf_old)==NULL); + tf_new = mi_tf_create(NULL, false); + } while (!mi_atomic_cas_weak_acq_rel(&page->xthread_free, &tf_old, tf_new)); + return false; +} + + +/* ------------------------------------------------------------------- + Guarded objects +------------------------------------------------------------------- */ +#if MI_GUARDED + +// we always align guarded pointers in a block at an offset +// the block `next` field is then used as a tag to distinguish regular offset 
aligned blocks from guarded ones +#define MI_BLOCK_TAG_ALIGNED ((mi_encoded_t)(0)) +#define MI_BLOCK_TAG_GUARDED (~MI_BLOCK_TAG_ALIGNED) + +static inline bool mi_block_ptr_is_guarded(const mi_block_t* block, const void* p) { + const ptrdiff_t offset = (uint8_t*)p - (uint8_t*)block; + return (offset >= (ptrdiff_t)(sizeof(mi_block_t)) && block->next == MI_BLOCK_TAG_GUARDED); +} + +static inline bool mi_heap_malloc_use_guarded(mi_heap_t* heap, size_t size) { + // this code is written to result in fast assembly as it is on the hot path for allocation + const size_t count = heap->guarded_sample_count - 1; // if the rate was 0, this will underflow and count for a long time.. + if mi_likely(count != 0) { + // no sample + heap->guarded_sample_count = count; + return false; + } + else if (size >= heap->guarded_size_min && size <= heap->guarded_size_max) { + // use guarded allocation + heap->guarded_sample_count = heap->guarded_sample_rate; // reset + return (heap->guarded_sample_rate != 0); + } + else { + // failed size criteria, rewind count (but don't write to an empty heap) + if (heap->guarded_sample_rate != 0) { heap->guarded_sample_count = 1; } + return false; + } +} + +mi_decl_restrict void* _mi_heap_malloc_guarded(mi_heap_t* heap, size_t size, bool zero) mi_attr_noexcept; + +#endif + + +/* ------------------------------------------------------------------- +Encoding/Decoding the free list next pointers + +This is to protect against buffer overflow exploits where the +free list is mutated. Many hardened allocators xor the next pointer `p` +with a secret key `k1`, as `p^k1`. This prevents overwriting with known +values but might be still too weak: if the attacker can guess +the pointer `p` this can reveal `k1` (since `p^k1^p == k1`). +Moreover, if multiple blocks can be read as well, the attacker can +xor both as `(p1^k1) ^ (p2^k1) == p1^p2` which may reveal a lot +about the pointers (and subsequently `k1`). 
+ +Instead mimalloc uses an extra key `k2` and encodes as `((p^k2)<<next, keys); + #else + MI_UNUSED(keys); MI_UNUSED(null); + next = (mi_block_t*)block->next; + #endif + mi_track_mem_noaccess(block,sizeof(mi_block_t)); + return next; +} + +static inline void mi_block_set_nextx(const void* null, mi_block_t* block, const mi_block_t* next, const uintptr_t* keys) { + mi_track_mem_undefined(block,sizeof(mi_block_t)); + #ifdef MI_ENCODE_FREELIST + block->next = mi_ptr_encode(null, next, keys); + #else + MI_UNUSED(keys); MI_UNUSED(null); + block->next = (mi_encoded_t)next; + #endif + mi_track_mem_noaccess(block,sizeof(mi_block_t)); +} + +static inline mi_block_t* mi_block_next(const mi_page_t* page, const mi_block_t* block) { + #ifdef MI_ENCODE_FREELIST + mi_block_t* next = mi_block_nextx(page,block,page->keys); + // check for free list corruption: is `next` at least in the same page? + // TODO: check if `next` is `page->block_size` aligned? + if mi_unlikely(next!=NULL && !mi_is_in_same_page(block, next)) { + _mi_error_message(EFAULT, "corrupted free list entry of size %zub at %p: value 0x%zx\n", mi_page_block_size(page), block, (uintptr_t)next); + next = NULL; + } + return next; + #else + MI_UNUSED(page); + return mi_block_nextx(page,block,NULL); + #endif +} + +static inline void mi_block_set_next(const mi_page_t* page, mi_block_t* block, const mi_block_t* next) { + #ifdef MI_ENCODE_FREELIST + mi_block_set_nextx(page,block,next, page->keys); + #else + MI_UNUSED(page); + mi_block_set_nextx(page,block,next,NULL); + #endif +} + +/* ----------------------------------------------------------- + arena blocks +----------------------------------------------------------- */ + +// Blocks needed for a given byte size +static inline size_t mi_slice_count_of_size(size_t size) { + return _mi_divide_up(size, MI_ARENA_SLICE_SIZE); +} + +// Byte size of a number of blocks +static inline size_t mi_size_of_slices(size_t bcount) { + return (bcount * MI_ARENA_SLICE_SIZE); +} + + +/* 
----------------------------------------------------------- + memory id's +----------------------------------------------------------- */ + +static inline mi_memid_t _mi_memid_create(mi_memkind_t memkind) { + mi_memid_t memid; + _mi_memzero_var(memid); + memid.memkind = memkind; + return memid; +} + +static inline mi_memid_t _mi_memid_none(void) { + return _mi_memid_create(MI_MEM_NONE); +} + +static inline mi_memid_t _mi_memid_create_os(void* base, size_t size, bool committed, bool is_zero, bool is_large) { + mi_memid_t memid = _mi_memid_create(MI_MEM_OS); + memid.mem.os.base = base; + memid.mem.os.size = size; + memid.initially_committed = committed; + memid.initially_zero = is_zero; + memid.is_pinned = is_large; + return memid; +} + +static inline mi_memid_t _mi_memid_create_meta(void* mpage, size_t block_idx, size_t block_count) { + mi_memid_t memid = _mi_memid_create(MI_MEM_META); + memid.mem.meta.meta_page = mpage; + memid.mem.meta.block_index = (uint32_t)block_idx; + memid.mem.meta.block_count = (uint32_t)block_count; + memid.initially_committed = true; + memid.initially_zero = true; + memid.is_pinned = true; + return memid; +} + + +// ------------------------------------------------------------------- +// Fast "random" shuffle +// ------------------------------------------------------------------- + +static inline uintptr_t _mi_random_shuffle(uintptr_t x) { + if (x==0) { x = 17; } // ensure we don't get stuck in generating zeros +#if (MI_INTPTR_SIZE>=8) + // by Sebastiano Vigna, see: + x ^= x >> 30; + x *= 0xbf58476d1ce4e5b9UL; + x ^= x >> 27; + x *= 0x94d049bb133111ebUL; + x ^= x >> 31; +#elif (MI_INTPTR_SIZE==4) + // by Chris Wellons, see: + x ^= x >> 16; + x *= 0x7feb352dUL; + x ^= x >> 15; + x *= 0x846ca68bUL; + x ^= x >> 16; +#endif + return x; +} + + +// --------------------------------------------------------------------------------- +// Provide our own `_mi_memcpy` for potential performance optimizations. 
+// +// For now, only on Windows with msvc/clang-cl we optimize to `rep movsb` if +// we happen to run on x86/x64 cpu's that have "fast short rep movsb" (FSRM) support +// (AMD Zen3+ (~2020) or Intel Ice Lake+ (~2017). See also issue #201 and pr #253. +// --------------------------------------------------------------------------------- + +#if !MI_TRACK_ENABLED && defined(_WIN32) && (MI_ARCH_X64 || MI_ARCH_X86) +extern mi_decl_hidden bool _mi_cpu_has_fsrm; +extern mi_decl_hidden bool _mi_cpu_has_erms; + +static inline void _mi_memcpy(void* dst, const void* src, size_t n) { + if ((_mi_cpu_has_fsrm && n <= 128) || (_mi_cpu_has_erms && n > 128)) { + __movsb((unsigned char*)dst, (const unsigned char*)src, n); + } + else { + memcpy(dst, src, n); + } +} +static inline void _mi_memset(void* dst, int val, size_t n) { + if ((_mi_cpu_has_fsrm && n <= 128) || (_mi_cpu_has_erms && n > 128)) { + __stosb((unsigned char*)dst, (uint8_t)val, n); + } + else { + memset(dst, val, n); + } +} +#else +static inline void _mi_memcpy(void* dst, const void* src, size_t n) { + memcpy(dst, src, n); +} +static inline void _mi_memset(void* dst, int val, size_t n) { + memset(dst, val, n); +} +#endif + +// ------------------------------------------------------------------------------- +// The `_mi_memcpy_aligned` can be used if the pointers are machine-word aligned +// This is used for example in `mi_realloc`. +// ------------------------------------------------------------------------------- + +#if (defined(__GNUC__) && (__GNUC__ >= 4)) || defined(__clang__) +// On GCC/CLang we provide a hint that the pointers are word aligned. 
+static inline void _mi_memcpy_aligned(void* dst, const void* src, size_t n) { + mi_assert_internal(((uintptr_t)dst % MI_INTPTR_SIZE == 0) && ((uintptr_t)src % MI_INTPTR_SIZE == 0)); + void* adst = __builtin_assume_aligned(dst, MI_INTPTR_SIZE); + const void* asrc = __builtin_assume_aligned(src, MI_INTPTR_SIZE); + _mi_memcpy(adst, asrc, n); +} + +static inline void _mi_memset_aligned(void* dst, int val, size_t n) { + mi_assert_internal((uintptr_t)dst % MI_INTPTR_SIZE == 0); + void* adst = __builtin_assume_aligned(dst, MI_INTPTR_SIZE); + _mi_memset(adst, val, n); +} +#else +// Default fallback on `_mi_memcpy` +static inline void _mi_memcpy_aligned(void* dst, const void* src, size_t n) { + mi_assert_internal(((uintptr_t)dst % MI_INTPTR_SIZE == 0) && ((uintptr_t)src % MI_INTPTR_SIZE == 0)); + _mi_memcpy(dst, src, n); +} + +static inline void _mi_memset_aligned(void* dst, int val, size_t n) { + mi_assert_internal((uintptr_t)dst % MI_INTPTR_SIZE == 0); + _mi_memset(dst, val, n); +} +#endif + +static inline void _mi_memzero(void* dst, size_t n) { + _mi_memset(dst, 0, n); +} + +static inline void _mi_memzero_aligned(void* dst, size_t n) { + _mi_memset_aligned(dst, 0, n); +} + + +#endif // MI_INTERNAL_H diff --git a/depends/mimalloc/include/mimalloc/prim.h b/depends/mimalloc/include/mimalloc/prim.h new file mode 100644 index 000000000000..daddaf8b1cd5 --- /dev/null +++ b/depends/mimalloc/include/mimalloc/prim.h @@ -0,0 +1,431 @@ +/* ---------------------------------------------------------------------------- +Copyright (c) 2018-2024, Microsoft Research, Daan Leijen +This is free software; you can redistribute it and/or modify it under the +terms of the MIT license. A copy of the license can be found in the file +"LICENSE" at the root of this distribution. 
+-----------------------------------------------------------------------------*/ +#pragma once +#ifndef MI_PRIM_H +#define MI_PRIM_H + + +// -------------------------------------------------------------------------- +// This file specifies the primitive portability API. +// Each OS/host needs to implement these primitives, see `src/prim` +// for implementations on Window, macOS, WASI, and Linux/Unix. +// +// note: on all primitive functions, we always have result parameters != NULL, and: +// addr != NULL and page aligned +// size > 0 and page aligned +// the return value is an error code as an `int` where 0 is success +// -------------------------------------------------------------------------- + +// OS memory configuration +typedef struct mi_os_mem_config_s { + size_t page_size; // default to 4KiB + size_t large_page_size; // 0 if not supported, usually 2MiB (4MiB on Windows) + size_t alloc_granularity; // smallest allocation size (usually 4KiB, on Windows 64KiB) + size_t physical_memory_in_kib; // physical memory size in KiB + size_t virtual_address_bits; // usually 48 or 56 bits on 64-bit systems. (used to determine secure randomization) + bool has_overcommit; // can we reserve more memory than can be actually committed? + bool has_partial_free; // can allocated blocks be freed partially? (true for mmap, false for VirtualAlloc) + bool has_virtual_reserve; // supports virtual address space reservation? (if true we can reserve virtual address space without using commit or physical memory) +} mi_os_mem_config_t; + +// Initialize +void _mi_prim_mem_init( mi_os_mem_config_t* config ); + +// Free OS memory +int _mi_prim_free(void* addr, size_t size ); + +// Allocate OS memory. Return NULL on error. +// The `try_alignment` is just a hint and the returned pointer does not have to be aligned. +// If `commit` is false, the virtual memory range only needs to be reserved (with no access) +// which will later be committed explicitly using `_mi_prim_commit`. 
+// `is_zero` is set to true if the memory was zero initialized (as on most OS's) +// The `hint_addr` address is either `NULL` or a preferred allocation address but can be ignored. +// pre: !commit => !allow_large +// try_alignment >= _mi_os_page_size() and a power of 2 +int _mi_prim_alloc(void* hint_addr, size_t size, size_t try_alignment, bool commit, bool allow_large, bool* is_large, bool* is_zero, void** addr); + +// Commit memory. Returns error code or 0 on success. +// For example, on Linux this would make the memory PROT_READ|PROT_WRITE. +// `is_zero` is set to true if the memory was zero initialized (e.g. on Windows) +int _mi_prim_commit(void* addr, size_t size, bool* is_zero); + +// Decommit memory. Returns error code or 0 on success. The `needs_recommit` result is true +// if the memory would need to be re-committed. For example, on Windows this is always true, +// but on Linux we could use MADV_DONTNEED to decommit which does not need a recommit. +// pre: needs_recommit != NULL +int _mi_prim_decommit(void* addr, size_t size, bool* needs_recommit); + +// Reset memory. The range keeps being accessible but the content might be reset to zero at any moment. +// Returns error code or 0 on success. +int _mi_prim_reset(void* addr, size_t size); + +// Reuse memory. This is called for memory that is already committed but +// may have been reset (`_mi_prim_reset`) or decommitted (`_mi_prim_decommit`) where `needs_recommit` was false. +// Returns error code or 0 on success. On most platforms this is a no-op. +int _mi_prim_reuse(void* addr, size_t size); + +// Protect memory. Returns error code or 0 on success. +int _mi_prim_protect(void* addr, size_t size, bool protect); + +// Allocate huge (1GiB) pages possibly associated with a NUMA node. +// `is_zero` is set to true if the memory was zero initialized (as on most OS's) +// pre: size > 0 and a multiple of 1GiB. +// numa_node is either negative (don't care), or a numa node number. 
+int _mi_prim_alloc_huge_os_pages(void* hint_addr, size_t size, int numa_node, bool* is_zero, void** addr); + +// Return the current NUMA node +size_t _mi_prim_numa_node(void); + +// Return the number of logical NUMA nodes +size_t _mi_prim_numa_node_count(void); + +// Clock ticks +mi_msecs_t _mi_prim_clock_now(void); + +// Return process information (only for statistics) +typedef struct mi_process_info_s { + mi_msecs_t elapsed; + mi_msecs_t utime; + mi_msecs_t stime; + size_t current_rss; + size_t peak_rss; + size_t current_commit; + size_t peak_commit; + size_t page_faults; +} mi_process_info_t; + +void _mi_prim_process_info(mi_process_info_t* pinfo); + +// Default stderr output. (only for warnings etc. with verbose enabled) +// msg != NULL && _mi_strlen(msg) > 0 +void _mi_prim_out_stderr( const char* msg ); + +// Get an environment variable. (only for options) +// name != NULL, result != NULL, result_size >= 64 +bool _mi_prim_getenv(const char* name, char* result, size_t result_size); + + +// Fill a buffer with strong randomness; return `false` on error or if +// there is no strong randomization available. +bool _mi_prim_random_buf(void* buf, size_t buf_len); + +// Called on the first thread start, and should ensure `_mi_thread_done` is called on thread termination. +void _mi_prim_thread_init_auto_done(void); + +// Called on process exit and may take action to clean up resources associated with the thread auto done. +void _mi_prim_thread_done_auto_done(void); + +// Called when the default heap for a thread changes +void _mi_prim_thread_associate_default_heap(mi_heap_t* heap); + +// Is this thread part of a thread pool? +bool _mi_prim_thread_is_in_threadpool(void); + + +//------------------------------------------------------------------- +// Access to TLS (thread local storage) slots. +// We need fast access to both a unique thread id (in `free.c:mi_free`) and +// to a thread-local heap pointer (in `alloc.c:mi_malloc`). 
+// To achieve this we use specialized code for various platforms. +//------------------------------------------------------------------- + +// On some libc + platform combinations we can directly access a thread-local storage (TLS) slot. +// The TLS layout depends on both the OS and libc implementation so we use specific tests for each main platform. +// If you test on another platform and it works please send a PR :-) +// see also https://akkadia.org/drepper/tls.pdf for more info on the TLS register. +// +// Note: we would like to prefer `__builtin_thread_pointer()` nowadays instead of using assembly, +// but unfortunately we can not detect support reliably (see issue #883) +// We also use it on Apple OS as we use a TLS slot for the default heap there. +#if defined(__GNUC__) && ( \ + (defined(__GLIBC__) && (defined(__x86_64__) || defined(__i386__) || (defined(__arm__) && __ARM_ARCH >= 7) || defined(__aarch64__))) \ + || (defined(__APPLE__) && (defined(__x86_64__) || defined(__aarch64__) || defined(__POWERPC__))) \ + || (defined(__BIONIC__) && (defined(__x86_64__) || defined(__i386__) || (defined(__arm__) && __ARM_ARCH >= 7) || defined(__aarch64__))) \ + || (defined(__FreeBSD__) && (defined(__x86_64__) || defined(__i386__) || defined(__aarch64__))) \ + || (defined(__OpenBSD__) && (defined(__x86_64__) || defined(__i386__) || defined(__aarch64__))) \ + ) + +#define MI_HAS_TLS_SLOT 1 + +static inline void* mi_prim_tls_slot(size_t slot) mi_attr_noexcept { + void* res; + const size_t ofs = (slot*sizeof(void*)); + #if defined(__i386__) + __asm__("movl %%gs:%1, %0" : "=r" (res) : "m" (*((void**)ofs)) : ); // x86 32-bit always uses GS + #elif defined(__APPLE__) && defined(__x86_64__) + __asm__("movq %%gs:%1, %0" : "=r" (res) : "m" (*((void**)ofs)) : ); // x86_64 macOSX uses GS + #elif defined(__x86_64__) && (MI_INTPTR_SIZE==4) + __asm__("movl %%fs:%1, %0" : "=r" (res) : "m" (*((void**)ofs)) : ); // x32 ABI + #elif defined(__x86_64__) + __asm__("movq %%fs:%1, %0" : "=r" 
(res) : "m" (*((void**)ofs)) : ); // x86_64 Linux, BSD uses FS + #elif defined(__arm__) + void** tcb; MI_UNUSED(ofs); + __asm__ volatile ("mrc p15, 0, %0, c13, c0, 3\nbic %0, %0, #3" : "=r" (tcb)); + res = tcb[slot]; + #elif defined(__aarch64__) + void** tcb; MI_UNUSED(ofs); + #if defined(__APPLE__) // M1, issue #343 + __asm__ volatile ("mrs %0, tpidrro_el0\nbic %0, %0, #7" : "=r" (tcb)); + #else + __asm__ volatile ("mrs %0, tpidr_el0" : "=r" (tcb)); + #endif + res = tcb[slot]; + #elif defined(__APPLE__) && defined(__POWERPC__) // ppc, issue #781 + MI_UNUSED(ofs); + res = pthread_getspecific(slot); + #endif + return res; +} + +// setting a tls slot is only used on macOS for now +static inline void mi_prim_tls_slot_set(size_t slot, void* value) mi_attr_noexcept { + const size_t ofs = (slot*sizeof(void*)); + #if defined(__i386__) + __asm__("movl %1,%%gs:%0" : "=m" (*((void**)ofs)) : "rn" (value) : ); // 32-bit always uses GS + #elif defined(__APPLE__) && defined(__x86_64__) + __asm__("movq %1,%%gs:%0" : "=m" (*((void**)ofs)) : "rn" (value) : ); // x86_64 macOS uses GS + #elif defined(__x86_64__) && (MI_INTPTR_SIZE==4) + __asm__("movl %1,%%fs:%0" : "=m" (*((void**)ofs)) : "rn" (value) : ); // x32 ABI + #elif defined(__x86_64__) + __asm__("movq %1,%%fs:%0" : "=m" (*((void**)ofs)) : "rn" (value) : ); // x86_64 Linux, BSD uses FS + #elif defined(__arm__) + void** tcb; MI_UNUSED(ofs); + __asm__ volatile ("mrc p15, 0, %0, c13, c0, 3\nbic %0, %0, #3" : "=r" (tcb)); + tcb[slot] = value; + #elif defined(__aarch64__) + void** tcb; MI_UNUSED(ofs); + #if defined(__APPLE__) // M1, issue #343 + __asm__ volatile ("mrs %0, tpidrro_el0\nbic %0, %0, #7" : "=r" (tcb)); + #else + __asm__ volatile ("mrs %0, tpidr_el0" : "=r" (tcb)); + #endif + tcb[slot] = value; + #elif defined(__APPLE__) && defined(__POWERPC__) // ppc, issue #781 + MI_UNUSED(ofs); + pthread_setspecific(slot, value); + #endif +} + +#elif _WIN32 && MI_WIN_USE_FIXED_TLS && !defined(MI_WIN_USE_FLS) + +// On windows we can 
store the thread-local heap at a fixed TLS slot to avoid +// thread-local initialization checks in the fast path. +// We allocate a user TLS slot at process initialization (see `windows/prim.c`) +// and store the offset `_mi_win_tls_offset`. +#define MI_HAS_TLS_SLOT 1 // 2 = we can reliably initialize the slot (saving a test on each malloc) + +extern mi_decl_hidden size_t _mi_win_tls_offset; + +#if MI_WIN_USE_FIXED_TLS > 1 +#define MI_TLS_SLOT (MI_WIN_USE_FIXED_TLS) +#elif MI_SIZE_SIZE == 4 +#define MI_TLS_SLOT (0x0E10 + _mi_win_tls_offset) // User TLS slots +#else +#define MI_TLS_SLOT (0x1480 + _mi_win_tls_offset) // User TLS slots +#endif + +static inline void* mi_prim_tls_slot(size_t slot) mi_attr_noexcept { + #if (_M_X64 || _M_AMD64) && !defined(_M_ARM64EC) + return (void*)__readgsqword((unsigned long)slot); // direct load at offset from gs + #elif _M_IX86 && !defined(_M_ARM64EC) + return (void*)__readfsdword((unsigned long)slot); // direct load at offset from fs + #else + return ((void**)NtCurrentTeb())[slot / sizeof(void*)]; + #endif +} +static inline void mi_prim_tls_slot_set(size_t slot, void* value) mi_attr_noexcept { + ((void**)NtCurrentTeb())[slot / sizeof(void*)] = value; +} + +#endif + + + +//------------------------------------------------------------------- +// Get a fast unique thread id. +// +// Getting the thread id should be performant as it is called in the +// fast path of `_mi_free` and we specialize for various platforms as +// inlined definitions. Regular code should call `init.c:_mi_thread_id()`. +// We only require _mi_prim_thread_id() to return a unique id +// for each thread (unequal to zero). +//------------------------------------------------------------------- + + +// Do we have __builtin_thread_pointer? This would be the preferred way to get a unique thread id +// but unfortunately, it seems we cannot test for this reliably at this time (see issue #883) +// Nevertheless, it seems needed on older graviton platforms (see issue #851). 
+// For now, we only enable this for specific platforms. +#if !defined(__APPLE__) /* on apple (M1) the wrong register is read (tpidr_el0 instead of tpidrro_el0) so fall back to TLS slot assembly ()*/ \ + && !defined(__CYGWIN__) \ + && !defined(MI_LIBC_MUSL) \ + && (!defined(__clang_major__) || __clang_major__ >= 14) /* older clang versions emit bad code; fall back to using the TLS slot () */ + #if (defined(__GNUC__) && (__GNUC__ >= 7) && defined(__aarch64__)) /* aarch64 for older gcc versions (issue #851) */ \ + || (defined(__GNUC__) && (__GNUC__ >= 11) && defined(__x86_64__)) \ + || (defined(__clang_major__) && (__clang_major__ >= 14) && (defined(__aarch64__) || defined(__x86_64__))) + #define MI_USE_BUILTIN_THREAD_POINTER 1 + #endif +#endif + + + +// defined in `init.c`; do not use these directly +extern mi_decl_hidden mi_decl_thread mi_heap_t* _mi_heap_default; // default heap to allocate from +extern mi_decl_hidden bool _mi_process_is_initialized; // has mi_process_init been called? + +static inline mi_threadid_t __mi_prim_thread_id(void) mi_attr_noexcept; + +static inline mi_threadid_t _mi_prim_thread_id(void) mi_attr_noexcept { + const mi_threadid_t tid = __mi_prim_thread_id(); + mi_assert_internal(tid > 1); + mi_assert_internal((tid & MI_PAGE_FLAG_MASK) == 0); // bottom 2 bits are clear? + return tid; +} + +// Get a unique id for the current thread. 
+#if defined(MI_PRIM_THREAD_ID) + +static inline mi_threadid_t __mi_prim_thread_id(void) mi_attr_noexcept { + return MI_PRIM_THREAD_ID(); // used for example by CPython for a free threaded build (see python/cpython#115488) +} + +#elif defined(_WIN32) + +static inline mi_threadid_t __mi_prim_thread_id(void) mi_attr_noexcept { + // Windows: works on Intel and ARM in both 32- and 64-bit + return (uintptr_t)NtCurrentTeb(); +} + +#elif MI_USE_BUILTIN_THREAD_POINTER + +static inline mi_threadid_t __mi_prim_thread_id(void) mi_attr_noexcept { + // Works on most Unix based platforms with recent compilers + return (uintptr_t)__builtin_thread_pointer(); +} + +#elif MI_HAS_TLS_SLOT + +static inline mi_threadid_t __mi_prim_thread_id(void) mi_attr_noexcept { + #if defined(__BIONIC__) + // issue #384, #495: on the Bionic libc (Android), slot 1 is the thread id + // see: https://github.com/aosp-mirror/platform_bionic/blob/c44b1d0676ded732df4b3b21c5f798eacae93228/libc/platform/bionic/tls_defines.h#L86 + return (uintptr_t)mi_prim_tls_slot(1); + #else + // in all our other targets, slot 0 is the thread id + // glibc: https://sourceware.org/git/?p=glibc.git;a=blob_plain;f=sysdeps/x86_64/nptl/tls.h + // apple: https://github.com/apple/darwin-xnu/blob/main/libsyscall/os/tsd.h#L36 + return (uintptr_t)mi_prim_tls_slot(0); + #endif +} + +#else + +// otherwise use portable C, taking the address of a thread local variable (this is still very fast on most platforms). +static inline mi_threadid_t __mi_prim_thread_id(void) mi_attr_noexcept { + return (uintptr_t)&_mi_heap_default; +} + +#endif + + + +/* ---------------------------------------------------------------------------------------- +Get the thread local default heap: `_mi_prim_get_default_heap()` + +This is inlined here as it is on the fast path for allocation functions. + +On most platforms (Windows, Linux, FreeBSD, NetBSD, etc), this just returns a +__thread local variable (`_mi_heap_default`). 
With the initial-exec TLS model this ensures +that the storage will always be available (allocated on the thread stacks). + +On some platforms though we cannot use that when overriding `malloc` since the underlying +TLS implementation (or the loader) will call itself `malloc` on a first access and recurse. +We try to circumvent this in an efficient way: +- macOSX : we use an unused TLS slot from the OS allocated slots (MI_TLS_SLOT). On OSX, the + loader itself calls `malloc` even before the modules are initialized. +- OpenBSD: we use an unused slot from the pthread block (MI_TLS_PTHREAD_SLOT_OFS). +- DragonFly: defaults are working but seem slow compared to freeBSD (see PR #323) +------------------------------------------------------------------------------------------- */ + +static inline mi_heap_t* mi_prim_get_default_heap(void); + +#if defined(MI_MALLOC_OVERRIDE) +#if defined(__APPLE__) // macOS + #define MI_TLS_SLOT 89 // seems unused? + // other possible unused ones are 9, 29, __PTK_FRAMEWORK_JAVASCRIPTCORE_KEY4 (94), __PTK_FRAMEWORK_GC_KEY9 (112) and __PTK_FRAMEWORK_OLDGC_KEY9 (89) + // see +#elif defined(__OpenBSD__) + // use end bytes of a name; goes wrong if anyone uses names > 23 characters (ptrhread specifies 16) + // see + #define MI_TLS_PTHREAD_SLOT_OFS (6*sizeof(int) + 4*sizeof(void*) + 24) + // #elif defined(__DragonFly__) + // #warning "mimalloc is not working correctly on DragonFly yet." + // #define MI_TLS_PTHREAD_SLOT_OFS (4 + 1*sizeof(void*)) // offset `uniqueid` (also used by gdb?) 
+#elif defined(__ANDROID__) + // See issue #381 + #define MI_TLS_PTHREAD +#endif +#endif + + +#if MI_TLS_SLOT +# if !defined(MI_HAS_TLS_SLOT) +# error "trying to use a TLS slot for the default heap, but the mi_prim_tls_slot primitives are not defined" +# endif + +static inline mi_heap_t* mi_prim_get_default_heap(void) { + mi_heap_t* heap = (mi_heap_t*)mi_prim_tls_slot(MI_TLS_SLOT); + #if MI_HAS_TLS_SLOT == 1 // check if the TLS slot is initialized + if mi_unlikely(heap == NULL) { + #ifdef __GNUC__ + __asm(""); // prevent conditional load of the address of _mi_heap_empty + #endif + heap = (mi_heap_t*)&_mi_heap_empty; + } + #endif + return heap; +} + +#elif defined(MI_TLS_PTHREAD_SLOT_OFS) + +static inline mi_heap_t** mi_prim_tls_pthread_heap_slot(void) { + pthread_t self = pthread_self(); + #if defined(__DragonFly__) + if (self==NULL) return NULL; + #endif + return (mi_heap_t**)((uint8_t*)self + MI_TLS_PTHREAD_SLOT_OFS); +} + +static inline mi_heap_t* mi_prim_get_default_heap(void) { + mi_heap_t** pheap = mi_prim_tls_pthread_heap_slot(); + if mi_unlikely(pheap == NULL) return _mi_heap_main_get(); + mi_heap_t* heap = *pheap; + if mi_unlikely(heap == NULL) return (mi_heap_t*)&_mi_heap_empty; + return heap; +} + +#elif defined(MI_TLS_PTHREAD) + +extern mi_decl_hidden pthread_key_t _mi_heap_default_key; +static inline mi_heap_t* mi_prim_get_default_heap(void) { + mi_heap_t* heap = (mi_unlikely(_mi_heap_default_key == (pthread_key_t)(-1)) ? _mi_heap_main_get() : (mi_heap_t*)pthread_getspecific(_mi_heap_default_key)); + return (mi_unlikely(heap == NULL) ? (mi_heap_t*)&_mi_heap_empty : heap); +} + +#else // default using a thread local variable; used on most platforms. 
+ +static inline mi_heap_t* mi_prim_get_default_heap(void) { + #if defined(MI_TLS_RECURSE_GUARD) + if (mi_unlikely(!_mi_process_is_initialized)) return _mi_heap_main_get(); + #endif + return _mi_heap_default; +} + +#endif // mi_prim_get_default_heap() + + +#endif // MI_PRIM_H diff --git a/depends/mimalloc/include/mimalloc/track.h b/depends/mimalloc/include/mimalloc/track.h new file mode 100644 index 000000000000..199308a64a86 --- /dev/null +++ b/depends/mimalloc/include/mimalloc/track.h @@ -0,0 +1,145 @@ +/* ---------------------------------------------------------------------------- +Copyright (c) 2018-2023, Microsoft Research, Daan Leijen +This is free software; you can redistribute it and/or modify it under the +terms of the MIT license. A copy of the license can be found in the file +"LICENSE" at the root of this distribution. +-----------------------------------------------------------------------------*/ +#pragma once +#ifndef MI_TRACK_H +#define MI_TRACK_H + +/* ------------------------------------------------------------------------------------------------------ +Track memory ranges with macros for tools like Valgrind address sanitizer, or other memory checkers. +These can be defined for tracking allocation: + + #define mi_track_malloc_size(p,reqsize,size,zero) + #define mi_track_free_size(p,_size) + +The macros are set up such that the size passed to `mi_track_free_size` +always matches the size of `mi_track_malloc_size`. (currently, `size == mi_usable_size(p)`). +The `reqsize` is what the user requested, and `size >= reqsize`. +The `size` is either byte precise (and `size==reqsize`) if `MI_PADDING` is enabled, +or otherwise it is the usable block size which may be larger than the original request. +Use `_mi_block_size_of(void* p)` to get the full block size that was allocated (including padding etc). +The `zero` parameter is `true` if the allocated block is zero initialized. 
+ +Optional: + + #define mi_track_align(p,alignedp,offset,size) + #define mi_track_resize(p,oldsize,newsize) + #define mi_track_init() + +The `mi_track_align` is called right after a `mi_track_malloc` for aligned pointers in a block. +The corresponding `mi_track_free` still uses the block start pointer and original size (corresponding to the `mi_track_malloc`). +The `mi_track_resize` is currently unused but could be called on reallocations within a block. +`mi_track_init` is called at program start. + +The following macros are for tools like asan and valgrind to track whether memory is +defined, undefined, or not accessible at all: + + #define mi_track_mem_defined(p,size) + #define mi_track_mem_undefined(p,size) + #define mi_track_mem_noaccess(p,size) + +-------------------------------------------------------------------------------------------------------*/ + +#if MI_TRACK_VALGRIND +// valgrind tool + +#define MI_TRACK_ENABLED 1 +#define MI_TRACK_HEAP_DESTROY 1 // track free of individual blocks on heap_destroy +#define MI_TRACK_TOOL "valgrind" + +#include +#include + +#define mi_track_malloc_size(p,reqsize,size,zero) VALGRIND_MALLOCLIKE_BLOCK(p,size,MI_PADDING_SIZE /*red zone*/,zero) +#define mi_track_free_size(p,_size) VALGRIND_FREELIKE_BLOCK(p,MI_PADDING_SIZE /*red zone*/) +#define mi_track_resize(p,oldsize,newsize) VALGRIND_RESIZEINPLACE_BLOCK(p,oldsize,newsize,MI_PADDING_SIZE /*red zone*/) +#define mi_track_mem_defined(p,size) VALGRIND_MAKE_MEM_DEFINED(p,size) +#define mi_track_mem_undefined(p,size) VALGRIND_MAKE_MEM_UNDEFINED(p,size) +#define mi_track_mem_noaccess(p,size) VALGRIND_MAKE_MEM_NOACCESS(p,size) + +#elif MI_TRACK_ASAN +// address sanitizer + +#define MI_TRACK_ENABLED 1 +#define MI_TRACK_HEAP_DESTROY 0 +#define MI_TRACK_TOOL "asan" + +#include + +#define mi_track_malloc_size(p,reqsize,size,zero) ASAN_UNPOISON_MEMORY_REGION(p,size) +#define mi_track_free_size(p,size) ASAN_POISON_MEMORY_REGION(p,size) +#define mi_track_mem_defined(p,size) 
ASAN_UNPOISON_MEMORY_REGION(p,size) +#define mi_track_mem_undefined(p,size) ASAN_UNPOISON_MEMORY_REGION(p,size) +#define mi_track_mem_noaccess(p,size) ASAN_POISON_MEMORY_REGION(p,size) + +#elif MI_TRACK_ETW +// windows event tracing + +#define MI_TRACK_ENABLED 1 +#define MI_TRACK_HEAP_DESTROY 1 +#define MI_TRACK_TOOL "ETW" + +#include "../src/prim/windows/etw.h" + +#define mi_track_init() EventRegistermicrosoft_windows_mimalloc(); +#define mi_track_malloc_size(p,reqsize,size,zero) EventWriteETW_MI_ALLOC((UINT64)(p), size) +#define mi_track_free_size(p,size) EventWriteETW_MI_FREE((UINT64)(p), size) + +#else +// no tracking + +#define MI_TRACK_ENABLED 0 +#define MI_TRACK_HEAP_DESTROY 0 +#define MI_TRACK_TOOL "none" + +#define mi_track_malloc_size(p,reqsize,size,zero) +#define mi_track_free_size(p,_size) + +#endif + +// ------------------- +// Utility definitions + +#ifndef mi_track_resize +#define mi_track_resize(p,oldsize,newsize) mi_track_free_size(p,oldsize); mi_track_malloc(p,newsize,false) +#endif + +#ifndef mi_track_align +#define mi_track_align(p,alignedp,offset,size) mi_track_mem_noaccess(p,offset) +#endif + +#ifndef mi_track_init +#define mi_track_init() +#endif + +#ifndef mi_track_mem_defined +#define mi_track_mem_defined(p,size) +#endif + +#ifndef mi_track_mem_undefined +#define mi_track_mem_undefined(p,size) +#endif + +#ifndef mi_track_mem_noaccess +#define mi_track_mem_noaccess(p,size) +#endif + + +#if MI_PADDING +#define mi_track_malloc(p,reqsize,zero) \ + if ((p)!=NULL) { \ + mi_assert_internal(mi_usable_size(p)==(reqsize)); \ + mi_track_malloc_size(p,reqsize,reqsize,zero); \ + } +#else +#define mi_track_malloc(p,reqsize,zero) \ + if ((p)!=NULL) { \ + mi_assert_internal(mi_usable_size(p)>=(reqsize)); \ + mi_track_malloc_size(p,reqsize,mi_usable_size(p),zero); \ + } +#endif + +#endif // MI_TRACK_H diff --git a/depends/mimalloc/include/mimalloc/types.h b/depends/mimalloc/include/mimalloc/types.h new file mode 100644 index 000000000000..00b05e1d4ce8 --- 
/dev/null +++ b/depends/mimalloc/include/mimalloc/types.h @@ -0,0 +1,614 @@ +/* ---------------------------------------------------------------------------- +Copyright (c) 2018-2024, Microsoft Research, Daan Leijen +This is free software; you can redistribute it and/or modify it under the +terms of the MIT license. A copy of the license can be found in the file +"LICENSE" at the root of this distribution. +-----------------------------------------------------------------------------*/ +#pragma once +#ifndef MI_TYPES_H +#define MI_TYPES_H + +// -------------------------------------------------------------------------- +// This file contains the main type definitions for mimalloc: +// mi_heap_t : all data for a thread-local heap, contains +// lists of all managed heap pages. +// mi_page_t : a mimalloc page (usually 64KiB or 512KiB) from +// where objects of a single size are allocated. +// Note: we write "OS page" for OS memory pages while +// using plain "page" for mimalloc pages (`mi_page_t`). +// -------------------------------------------------------------------------- + + +#include +#include // ptrdiff_t +#include // uintptr_t, uint16_t, etc +#include // SIZE_MAX etc. +#include // error codes +#include "bits.h" // size defines (MI_INTPTR_SIZE etc), bit operations +#include "atomic.h" // _Atomic primitives + +// Minimal alignment necessary. On most platforms 16 bytes are needed +// due to SSE registers for example. This must be at least `sizeof(void*)` +#ifndef MI_MAX_ALIGN_SIZE +#define MI_MAX_ALIGN_SIZE 16 // sizeof(max_align_t) +#endif + +// ------------------------------------------------------ +// Variants +// ------------------------------------------------------ + +// Define NDEBUG in the release version to disable assertions. 
// #define NDEBUG

// Define MI_TRACK_<tool> to enable tracking support
// #define MI_TRACK_VALGRIND 1
// #define MI_TRACK_ASAN 1
// #define MI_TRACK_ETW 1

// Define MI_STAT as 1 to maintain statistics; set it to 2 to have detailed statistics (but costs some performance).
// #define MI_STAT 1

// Define MI_SECURE to enable security mitigations. Level 1 has minimal performance impact,
// but protects most metadata with guard pages:
// #define MI_SECURE 1 // guard page around metadata; check pointer validity on free
//
// Level 2 has more performance impact but protect well against various buffer overflows
// by surrounding all mimalloc pages with guard pages:
// #define MI_SECURE 2 // guard page around each mimalloc page (can fragment VMA's with large heaps..)
//
// The next two levels can have more performance cost:
// #define MI_SECURE 3 // randomize allocations, encode free lists (detect corrupted free list (buffer overflow), and invalid pointer free)
// #define MI_SECURE 4 // checks for double free. (may be more expensive)

// Default: no security mitigations.
#if !defined(MI_SECURE)
#define MI_SECURE 0
#endif

// Define MI_DEBUG for debug mode
// #define MI_DEBUG 1 // basic assertion checks and statistics, check double free, corrupted free list, and invalid pointer free.
// #define MI_DEBUG 2 // + internal assertion checks
// #define MI_DEBUG 3 // + extensive internal invariant checking (cmake -DMI_DEBUG_FULL=ON)
#if !defined(MI_DEBUG)
#if defined(MI_BUILD_RELEASE) || defined(NDEBUG)
#define MI_DEBUG 0
#else
#define MI_DEBUG 2
#endif
#endif

// Statistics (0=only essential, 1=normal, 2=more fine-grained (expensive) tracking)
// Detailed statistics are enabled automatically in any debug build.
#ifndef MI_STAT
#if (MI_DEBUG>0)
#define MI_STAT 2
#else
#define MI_STAT 0
#endif
#endif

// Use guard pages behind objects of a certain size (set by the MIMALLOC_DEBUG_GUARDED_MIN/MAX options)
// Padding should be disabled when using guard pages
// #define MI_GUARDED 1
#if defined(MI_GUARDED)
#define MI_PADDING 0
#endif

// Reserve extra padding at the end of each block to be more resilient against heap block overflows.
// The padding can detect buffer overflow on free.
#if !defined(MI_PADDING) && (MI_SECURE>=3 || MI_DEBUG>=1 || (MI_TRACK_VALGRIND || MI_TRACK_ASAN || MI_TRACK_ETW))
#define MI_PADDING 1
#endif

// Check padding bytes; allows byte-precise buffer overflow detection
#if !defined(MI_PADDING_CHECK) && MI_PADDING && (MI_SECURE>=3 || MI_DEBUG>=1)
#define MI_PADDING_CHECK 1
#endif


// Encoded free lists allow detection of corrupted free lists
// and can detect buffer overflows, modify after free, and double `free`s.
#if (MI_SECURE>=3 || MI_DEBUG>=1)
#define MI_ENCODE_FREELIST 1
#endif

// Enable large pages for objects between 64KiB and 512KiB.
// Disabled by default as for many workloads the block sizes above 64 KiB are quite random which can lead to too many partially used large pages.
#ifndef MI_ENABLE_LARGE_PAGES
#define MI_ENABLE_LARGE_PAGES 0
#endif

// --------------------------------------------------------------
// Sizes of internal data-structures
// (comments specify sizes on 64-bit, usually 32-bit is halved)
// --------------------------------------------------------------

// Main size parameter; determines max arena sizes and max arena object sizes etc.
#ifndef MI_ARENA_SLICE_SHIFT
  #ifdef MI_SMALL_PAGE_SHIFT // backward compatibility
  #define MI_ARENA_SLICE_SHIFT MI_SMALL_PAGE_SHIFT
  #else
  #define MI_ARENA_SLICE_SHIFT (14 + MI_SIZE_SHIFT) // 64 KiB (32 KiB on 32-bit)
  #endif
#endif
#if MI_ARENA_SLICE_SHIFT < 12
#error Arena slices should be at least 4KiB
#endif

#ifndef MI_BCHUNK_BITS_SHIFT
  #if MI_ARENA_SLICE_SHIFT <= 13 // <= 8KiB
  #define MI_BCHUNK_BITS_SHIFT (7) // 128 bits
  #elif MI_ARENA_SLICE_SHIFT < 16 // <= 32KiB
  #define MI_BCHUNK_BITS_SHIFT (8) // 256 bits
  #else
  #define MI_BCHUNK_BITS_SHIFT (6 + MI_SIZE_SHIFT) // 512 bits (or 256 on 32-bit)
  #endif
#endif

#define MI_BCHUNK_BITS (1 << MI_BCHUNK_BITS_SHIFT) // sub-bitmaps are "bchunks" of 512 bits
#define MI_ARENA_SLICE_SIZE (MI_ZU(1) << MI_ARENA_SLICE_SHIFT) // arena's allocate in slices of 64 KiB
#define MI_ARENA_SLICE_ALIGN (MI_ARENA_SLICE_SIZE)

#define MI_ARENA_MIN_OBJ_SLICES (1)
#define MI_ARENA_MAX_OBJ_SLICES (MI_BCHUNK_BITS) // 32 MiB (for now, cannot cross chunk boundaries)

#define MI_ARENA_MIN_OBJ_SIZE (MI_ARENA_MIN_OBJ_SLICES * MI_ARENA_SLICE_SIZE)
#define MI_ARENA_MAX_OBJ_SIZE (MI_ARENA_MAX_OBJ_SLICES * MI_ARENA_SLICE_SIZE)

#if MI_ARENA_MAX_OBJ_SIZE < MI_SIZE_SIZE*1024
#error maximum object size may be too small to hold local thread data
#endif

#define MI_SMALL_PAGE_SIZE MI_ARENA_MIN_OBJ_SIZE // 64 KiB
#define MI_MEDIUM_PAGE_SIZE (8*MI_SMALL_PAGE_SIZE) // 512 KiB (=byte in the bchunk bitmap)
#define MI_LARGE_PAGE_SIZE (MI_SIZE_SIZE*MI_MEDIUM_PAGE_SIZE) // 4 MiB (=word in the bchunk bitmap)


// Maximum number of size classes. (spaced exponentially in 12.5% increments)
#if MI_BIN_HUGE != 73U
#error "mimalloc internal: expecting 73 bins"
#endif
#define MI_BIN_FULL (MI_BIN_HUGE+1)
#define MI_BIN_COUNT (MI_BIN_FULL+1)

// We never allocate more than PTRDIFF_MAX (see also <https://sourceware.org/ml/libc-announce/2019/msg00001.html>)
#define MI_MAX_ALLOC_SIZE PTRDIFF_MAX

// Minimal commit for a page on-demand commit (should be >= OS page size)
#define MI_PAGE_MIN_COMMIT_SIZE MI_ARENA_SLICE_SIZE // (4*MI_KiB)


// ------------------------------------------------------
// Arena's are large reserved areas of memory allocated from
// the OS that are managed by mimalloc to efficiently
// allocate MI_ARENA_SLICE_SIZE slices of memory for the
// mimalloc pages.
// ------------------------------------------------------

// A large memory arena where pages are allocated in.
typedef struct mi_arena_s mi_arena_t; // defined below


// ---------------------------------------------------------------
// a memory id tracks the provenance of arena/OS allocated memory
// ---------------------------------------------------------------

// Memory can reside in arena's, direct OS allocated, meta-data pages, or statically allocated.
// The memid keeps track of this.
typedef enum mi_memkind_e {
  MI_MEM_NONE,      // not allocated
  MI_MEM_EXTERNAL,  // not owned by mimalloc but provided externally (via `mi_manage_os_memory` for example)
  MI_MEM_STATIC,    // allocated in a static area and should not be freed (the initial main heap data for example (`init.c`))
  MI_MEM_META,      // allocated with the meta data allocator (`arena-meta.c`)
  MI_MEM_OS,        // allocated from the OS
  MI_MEM_OS_HUGE,   // allocated as huge OS pages (usually 1GiB, pinned to physical memory)
  MI_MEM_OS_REMAP,  // allocated in a remapable area (i.e. using `mremap`)
  MI_MEM_ARENA      // allocated from an arena (the usual case) (`arena.c`)
} mi_memkind_t;

// True for the three OS-backed kinds (plain, huge, remapable);
// relies on their consecutive ordering in `mi_memkind_e`.
static inline bool mi_memkind_is_os(mi_memkind_t memkind) {
  return (memkind >= MI_MEM_OS && memkind <= MI_MEM_OS_REMAP);
}

// True for kinds that must never be freed by mimalloc
// (none, external, and static memory).
static inline bool mi_memkind_needs_no_free(mi_memkind_t memkind) {
  return (memkind <= MI_MEM_STATIC);
}


typedef struct mi_memid_os_info {
  void*  base;              // actual base address of the block (used for offset aligned allocations)
  size_t size;              // allocated full size
  // size_t alignment;      // alignment at allocation
} mi_memid_os_info_t;

typedef struct mi_memid_arena_info {
  mi_arena_t* arena;        // arena that contains this memory
  uint32_t    slice_index;  // slice index in the arena
  uint32_t    slice_count;  // allocated slices
} mi_memid_arena_info_t;

typedef struct mi_memid_meta_info {
  void*    meta_page;       // meta-page that contains the block
  uint32_t block_index;     // block index in the meta-data page
  uint32_t block_count;     // allocated blocks
} mi_memid_meta_info_t;

typedef struct mi_memid_s {
  union {
    mi_memid_os_info_t    os;    // only used for MI_MEM_OS
    mi_memid_arena_info_t arena; // only used for MI_MEM_ARENA
    mi_memid_meta_info_t  meta;  // only used for MI_MEM_META
  } mem;
  mi_memkind_t memkind;
  bool is_pinned;           // `true` if we cannot decommit/reset/protect in this memory (e.g. when allocated using large (2Mib) or huge (1GiB) OS pages)
  bool initially_committed; // `true` if the memory was originally allocated as committed
  bool initially_zero;      // `true` if the memory was originally zero initialized
} mi_memid_t;


// Is this memid OS-backed memory?
static inline bool mi_memid_is_os(mi_memid_t memid) {
  return mi_memkind_is_os(memid.memkind);
}

// Must this memid's memory be left unfreed?
static inline bool mi_memid_needs_no_free(mi_memid_t memid) {
  return mi_memkind_needs_no_free(memid.memkind);
}

// The owning arena, or NULL when the memory is not arena-allocated.
static inline mi_arena_t* mi_memid_arena(mi_memid_t memid) {
  return (memid.memkind == MI_MEM_ARENA ? memid.mem.arena.arena : NULL);
}


// ------------------------------------------------------
// Mimalloc pages contain allocated blocks
// ------------------------------------------------------

// The free lists use encoded next fields
// (Only actually encodes when MI_ENCODED_FREELIST is defined.)
typedef uintptr_t mi_encoded_t;

// thread id's
typedef size_t mi_threadid_t;

// free lists contain blocks
typedef struct mi_block_s {
  mi_encoded_t next;
} mi_block_t;


// The page flags are put in the bottom 2 bits of the thread_id (for a fast test in `mi_free`)
// `has_aligned` is true if the page has pointers at an offset in a block (so we unalign before free-ing)
// `in_full_queue` is true if the page is full and resides in the full queue (so we move it to a regular queue on free-ing)
#define MI_PAGE_IN_FULL_QUEUE  MI_ZU(0x01)
#define MI_PAGE_HAS_ALIGNED    MI_ZU(0x02)
#define MI_PAGE_FLAG_MASK      MI_ZU(0x03)
typedef size_t mi_page_flags_t;

// There are two special threadid's: 0 for abandoned threads, and 4 for abandoned & mapped threads --
// abandoned-mapped pages are abandoned but also mapped in an arena so can be quickly found for reuse.
#define MI_THREADID_ABANDONED         MI_ZU(0)
#define MI_THREADID_ABANDONED_MAPPED  (MI_PAGE_FLAG_MASK + 1)

// Thread free list.
// Points to a list of blocks that are freed by other threads.
// The least-bit is set if the page is owned by the current thread. (`mi_page_is_owned`).
// Ownership is required before we can read any non-atomic fields in the page.
// This way we can push a block on the thread free list and try to claim ownership atomically in `free.c:mi_free_block_mt`.
typedef uintptr_t mi_thread_free_t;

// A heap can serve only specific objects signified by its heap tag (e.g. various object types in CPython)
typedef uint8_t mi_heaptag_t;

// A page contains blocks of one specific size (`block_size`).
+// Each page has three list of free blocks: +// `free` for blocks that can be allocated, +// `local_free` for freed blocks that are not yet available to `mi_malloc` +// `thread_free` for freed blocks by other threads +// The `local_free` and `thread_free` lists are migrated to the `free` list +// when it is exhausted. The separate `local_free` list is necessary to +// implement a monotonic heartbeat. The `thread_free` list is needed for +// avoiding atomic operations when allocating from the owning thread. +// +// `used - |thread_free|` == actual blocks that are in use (alive) +// `used - |thread_free| + |free| + |local_free| == capacity` +// +// We don't count "freed" (as |free|) but use only the `used` field to reduce +// the number of memory accesses in the `mi_page_all_free` function(s). +// Use `_mi_page_free_collect` to collect the thread_free list and update the `used` count. +// +// Notes: +// - Non-atomic fields can only be accessed if having _ownership_ (low bit of `xthread_free` is 1). +// Combining the `thread_free` list with an ownership bit allows a concurrent `free` to atomically +// free an object and (re)claim ownership if the page was abandoned. +// - If a page is not part of a heap it is called "abandoned" (`heap==NULL`) -- in +// that case the `xthreadid` is 0 or 4 (4 is for abandoned pages that +// are in the abandoned page lists of an arena, these are called "mapped" abandoned pages). +// - page flags are in the bottom 3 bits of `xthread_id` for the fast path in `mi_free`. +// - The layout is optimized for `free.c:mi_free` and `alloc.c:mi_page_alloc` +// - Using `uint16_t` does not seem to slow things down + +typedef struct mi_page_s { + _Atomic(mi_threadid_t) xthread_id; // thread this page belongs to. 
(= `heap->thread_id (or 0 or 4 if abandoned) | page_flags`) + + mi_block_t* free; // list of available free blocks (`malloc` allocates from this list) + uint16_t used; // number of blocks in use (including blocks in `thread_free`) + uint16_t capacity; // number of blocks committed + uint16_t reserved; // number of blocks reserved in memory + uint8_t block_size_shift; // if not zero, then `(1 << block_size_shift) == block_size` (only used for fast path in `free.c:_mi_page_ptr_unalign`) + uint8_t retire_expire; // expiration count for retired blocks + + mi_block_t* local_free; // list of deferred free blocks by this thread (migrates to `free`) + _Atomic(mi_thread_free_t) xthread_free; // list of deferred free blocks freed by other threads (= `mi_block_t* | (1 if owned)`) + + size_t block_size; // size available in each block (always `>0`) + uint8_t* page_start; // start of the blocks + mi_heaptag_t heap_tag; // tag of the owning heap, used to separate heaps by object type + bool free_is_zero; // `true` if the blocks in the free list are zero initialized + // padding + #if (MI_ENCODE_FREELIST || MI_PADDING) + uintptr_t keys[2]; // two random keys to encode the free lists (see `_mi_block_next`) or padding canary + #endif + + mi_heap_t* heap; // the heap owning this page (or NULL for abandoned pages) + struct mi_page_s* next; // next page owned by the heap with the same `block_size` + struct mi_page_s* prev; // previous page owned by the heap with the same `block_size` + size_t slice_committed; // committed size relative to the first arena slice of the page data (or 0 if the page is fully committed already) + mi_memid_t memid; // provenance of the page memory +} mi_page_t; + + +// ------------------------------------------------------ +// Object sizes +// ------------------------------------------------------ + +#define MI_PAGE_ALIGN MI_ARENA_SLICE_ALIGN // pages must be aligned on this for the page map. 
+#define MI_PAGE_MIN_START_BLOCK_ALIGN MI_MAX_ALIGN_SIZE // minimal block alignment for the first block in a page (16b) +#define MI_PAGE_MAX_START_BLOCK_ALIGN2 MI_KiB // maximal block alignment for "power of 2"-sized blocks (such that we guarantee natural alignment) +#define MI_PAGE_MAX_OVERALLOC_ALIGN MI_ARENA_SLICE_SIZE // (64 KiB) limit for which we overallocate in arena pages, beyond this use OS allocation + +#if (MI_ENCODE_FREELIST || MI_PADDING) && MI_SIZE_SIZE == 8 +#define MI_PAGE_INFO_SIZE ((MI_INTPTR_SHIFT+2)*32) // 160 >= sizeof(mi_page_t) +#else +#define MI_PAGE_INFO_SIZE ((MI_INTPTR_SHIFT+1)*32) // 128/96 >= sizeof(mi_page_t) +#endif + +// The max object size are checked to not waste more than 12.5% internally over the page sizes. +#define MI_SMALL_MAX_OBJ_SIZE ((MI_SMALL_PAGE_SIZE-MI_PAGE_INFO_SIZE)/8) // < ~8 KiB +#if MI_ENABLE_LARGE_PAGES +#define MI_MEDIUM_MAX_OBJ_SIZE ((MI_MEDIUM_PAGE_SIZE-MI_PAGE_INFO_SIZE)/8) // < ~64 KiB +#define MI_LARGE_MAX_OBJ_SIZE (MI_LARGE_PAGE_SIZE/8) // <= 512KiB // note: this must be a nice power of 2 or we get rounding issues with `_mi_bin` +#else +#define MI_MEDIUM_MAX_OBJ_SIZE (MI_MEDIUM_PAGE_SIZE/8) // <= 64 KiB +#define MI_LARGE_MAX_OBJ_SIZE MI_MEDIUM_MAX_OBJ_SIZE // note: this must be a nice power of 2 or we get rounding issues with `_mi_bin` +#endif +#define MI_LARGE_MAX_OBJ_WSIZE (MI_LARGE_MAX_OBJ_SIZE/MI_SIZE_SIZE) + +#if (MI_LARGE_MAX_OBJ_WSIZE >= 655360) +#error "mimalloc internal: define more bins" +#endif + + +// ------------------------------------------------------ +// Page kinds +// ------------------------------------------------------ + +typedef enum mi_page_kind_e { + MI_PAGE_SMALL, // small blocks go into 64KiB pages + MI_PAGE_MEDIUM, // medium blocks go into 512KiB pages + MI_PAGE_LARGE, // larger blocks go into 4MiB pages (if `MI_ENABLE_LARGE_PAGES==1`) + MI_PAGE_SINGLETON // page containing a single block. 
+ // used for blocks `> MI_LARGE_MAX_OBJ_SIZE` or an alignment `> MI_PAGE_MAX_OVERALLOC_ALIGN`. +} mi_page_kind_t; + + + +// ------------------------------------------------------ +// Heaps +// +// Provide first-class heaps to allocate from. +// A heap just owns a set of pages for allocation and +// can only allocate/reallocate from the thread that created it. +// Freeing blocks can be done from any thread though. +// +// Per thread, there is always a default heap that is +// used for allocation; it is initialized to statically +// point to an empty heap to avoid initialization checks +// in the fast path. +// ------------------------------------------------------ + +// Thread local data +typedef struct mi_tld_s mi_tld_t; // defined below + +// Pages of a certain block size are held in a queue. +typedef struct mi_page_queue_s { + mi_page_t* first; + mi_page_t* last; + size_t count; + size_t block_size; +} mi_page_queue_t; + +// Random context +typedef struct mi_random_cxt_s { + uint32_t input[16]; + uint32_t output[16]; + int output_available; + bool weak; +} mi_random_ctx_t; + + +// In debug mode there is a padding structure at the end of the blocks to check for buffer overflows +#if MI_PADDING +typedef struct mi_padding_s { + uint32_t canary; // encoded block value to check validity of the padding (in case of overflow) + uint32_t delta; // padding bytes before the block. (mi_usable_size(p) - delta == exact allocated bytes) +} mi_padding_t; +#define MI_PADDING_SIZE (sizeof(mi_padding_t)) +#define MI_PADDING_WSIZE ((MI_PADDING_SIZE + MI_INTPTR_SIZE - 1) / MI_INTPTR_SIZE) +#else +#define MI_PADDING_SIZE 0 +#define MI_PADDING_WSIZE 0 +#endif + +#define MI_PAGES_DIRECT (MI_SMALL_WSIZE_MAX + MI_PADDING_WSIZE + 1) + + +// A heap owns a set of pages. 
+struct mi_heap_s { + mi_tld_t* tld; // thread-local data + mi_arena_t* exclusive_arena; // if the heap should only allocate from a specific arena (or NULL) + int numa_node; // preferred numa node (or -1 for no preference) + uintptr_t cookie; // random cookie to verify pointers (see `_mi_ptr_cookie`) + mi_random_ctx_t random; // random number context used for secure allocation + size_t page_count; // total number of pages in the `pages` queues. + size_t page_retired_min; // smallest retired index (retired pages are fully free, but still in the page queues) + size_t page_retired_max; // largest retired index into the `pages` array. + long generic_count; // how often is `_mi_malloc_generic` called? + long generic_collect_count; // how often is `_mi_malloc_generic` called without collecting? + mi_heap_t* next; // list of heaps per thread + long page_full_retain; // how many full pages can be retained per queue (before abandoning them) + bool allow_page_reclaim; // `true` if this heap should not reclaim abandoned pages + bool allow_page_abandon; // `true` if this heap can abandon pages to reduce memory footprint + uint8_t tag; // custom tag, can be used for separating heaps based on the object types + #if MI_GUARDED + size_t guarded_size_min; // minimal size for guarded objects + size_t guarded_size_max; // maximal size for guarded objects + size_t guarded_sample_rate; // sample rate (set to 0 to disable guarded pages) + size_t guarded_sample_count; // current sample count (counting down to 0) + #endif + mi_page_t* pages_free_direct[MI_PAGES_DIRECT]; // optimize: array where every entry points a page with possibly free blocks in the corresponding queue for that size. + mi_page_queue_t pages[MI_BIN_COUNT]; // queue of pages for each size class (or "bin") + mi_memid_t memid; // provenance of the heap struct itself (meta or os) +}; + + +// ------------------------------------------------------ +// Sub processes do not reclaim or visit pages from other sub processes. 
+// These are essentially the static variables of a process, and +// usually there is only one subprocess. This can be used for example +// by CPython to have separate interpreters within one process. +// Each thread can only belong to one subprocess. +// ------------------------------------------------------ + +#define MI_MAX_ARENAS (160) // Limited for now (and takes up .bss).. but arena's scale up exponentially (see `mi_arena_reserve`) + // 160 arenas is enough for ~2 TiB memory + +typedef struct mi_subproc_s { + _Atomic(size_t) arena_count; // current count of arena's + _Atomic(mi_arena_t*) arenas[MI_MAX_ARENAS]; // arena's of this sub-process + mi_lock_t arena_reserve_lock; // lock to ensure arena's get reserved one at a time + _Atomic(int64_t) purge_expire; // expiration is set if any arenas can be purged + + _Atomic(size_t) abandoned_count[MI_BIN_COUNT]; // total count of abandoned pages for this sub-process + mi_page_t* os_abandoned_pages; // list of pages that OS allocated and not in an arena (only used if `mi_option_visit_abandoned` is on) + mi_lock_t os_abandoned_pages_lock; // lock for the os abandoned pages list (this lock protects list operations) + + mi_memid_t memid; // provenance of this memory block (meta or OS) + mi_stats_t stats; // sub-process statistics (tld stats are merged in on thread termination) +} mi_subproc_t; + + + +// ------------------------------------------------------ +// Thread Local data +// ------------------------------------------------------ + +// Milliseconds as in `int64_t` to avoid overflows +typedef int64_t mi_msecs_t; + +// Thread local data +struct mi_tld_s { + mi_threadid_t thread_id; // thread id of this thread + size_t thread_seq; // thread sequence id (linear count of created threads) + int numa_node; // thread preferred numa node + mi_subproc_t* subproc; // sub-process this thread belongs to. 
+ mi_heap_t* heap_backing; // backing heap of this thread (cannot be deleted) + mi_heap_t* heaps; // list of heaps in this thread (so we can abandon all when the thread terminates) + unsigned long long heartbeat; // monotonic heartbeat count + bool recurse; // true if deferred was called; used to prevent infinite recursion. + bool is_in_threadpool; // true if this thread is part of a threadpool (and can run arbitrary tasks) + mi_stats_t stats; // statistics + mi_memid_t memid; // provenance of the tld memory itself (meta or OS) +}; + + +/* ---------------------------------------------------------------------------- + Arenas are fixed areas of OS memory from which we can allocate + large blocks (>= MI_ARENA_MIN_BLOCK_SIZE). + In contrast to the rest of mimalloc, the arenas are shared between + threads and need to be accessed using atomic operations (using atomic `mi_bitmap_t`'s). + + Arenas are also used for huge OS page (1GiB) reservations or for reserving + OS memory upfront which can improve performance or is sometimes needed + on embedded devices. We can also employ this with WASI or `sbrk` systems + to reserve large arenas upfront and be able to reuse the memory more effectively. 
+-----------------------------------------------------------------------------*/ + +#define MI_ARENA_BIN_COUNT (MI_BIN_COUNT) +#define MI_ARENA_MIN_SIZE (MI_BCHUNK_BITS * MI_ARENA_SLICE_SIZE) // 32 MiB (or 8 MiB on 32-bit) +#define MI_ARENA_MAX_SIZE (MI_BITMAP_MAX_BIT_COUNT * MI_ARENA_SLICE_SIZE) + +typedef struct mi_bitmap_s mi_bitmap_t; // atomic bitmap (defined in `src/bitmap.h`) +typedef struct mi_bbitmap_s mi_bbitmap_t; // atomic binned bitmap (defined in `src/bitmap.h`) + +// A memory arena +typedef struct mi_arena_s { + mi_memid_t memid; // provenance of the memory area + mi_subproc_t* subproc; // subprocess this arena belongs to (`this 'element-of' this->subproc->arenas`) + + size_t slice_count; // total size of the area in arena slices (of `MI_ARENA_SLICE_SIZE`) + size_t info_slices; // initial slices reserved for the arena bitmaps + int numa_node; // associated NUMA node + bool is_exclusive; // only allow allocations if specifically for this arena + _Atomic(mi_msecs_t) purge_expire; // expiration time when slices can be purged from `slices_purge`. + mi_commit_fun_t* commit_fun; // custom commit/decommit memory + void* commit_fun_arg; // user argument for a custom commit function + + mi_bbitmap_t* slices_free; // is the slice free? (a binned bitmap with size classes) + mi_bitmap_t* slices_committed; // is the slice committed? (i.e. accessible) + mi_bitmap_t* slices_dirty; // is the slice potentially non-zero? 
+ mi_bitmap_t* slices_purge; // slices that can be purged + mi_bitmap_t* pages; // all registered pages (abandoned and owned) + mi_bitmap_t* pages_abandoned[MI_ARENA_BIN_COUNT]; // abandoned pages per size bin (a set bit means the start of the page) + // the full queue contains abandoned full pages + // followed by the bitmaps (whose sizes depend on the arena size) + // note: when adding bitmaps revise `mi_arena_info_slices_needed` +} mi_arena_t; + + +/* ----------------------------------------------------------- + Error codes passed to `_mi_fatal_error` + All are recoverable but EFAULT is a serious error and aborts by default in secure mode. + For portability define undefined error codes using common Unix codes: + +----------------------------------------------------------- */ + +#ifndef EAGAIN // double free +#define EAGAIN (11) +#endif +#ifndef ENOMEM // out of memory +#define ENOMEM (12) +#endif +#ifndef EFAULT // corrupted free-list or meta-data +#define EFAULT (14) +#endif +#ifndef EINVAL // trying to free an invalid pointer +#define EINVAL (22) +#endif +#ifndef EOVERFLOW // count*size overflow +#define EOVERFLOW (75) +#endif + +/* ----------------------------------------------------------- + Debug constants +----------------------------------------------------------- */ + +#if !defined(MI_DEBUG_UNINIT) +#define MI_DEBUG_UNINIT (0xD0) +#endif +#if !defined(MI_DEBUG_FREED) +#define MI_DEBUG_FREED (0xDF) +#endif +#if !defined(MI_DEBUG_PADDING) +#define MI_DEBUG_PADDING (0xDE) +#endif + + +#endif // MI_TYPES_H diff --git a/depends/mimalloc/mimalloc.pc.in b/depends/mimalloc/mimalloc.pc.in index 36da2038845c..80922256ae0f 100644 --- a/depends/mimalloc/mimalloc.pc.in +++ b/depends/mimalloc/mimalloc.pc.in @@ -1,11 +1,11 @@ prefix=@CMAKE_INSTALL_PREFIX@ -libdir=@libdir_for_pc_file@ -includedir=@includedir_for_pc_file@ +libdir=@mi_pc_libdir@ +includedir=@mi_pc_includedir@ Name: @PROJECT_NAME@ Description: A compact general purpose allocator with excellent performance 
Version: @PACKAGE_VERSION@ URL: https://github.com/microsoft/mimalloc/ -Libs: -L${libdir} -lmimalloc -Libs.private: @pc_libraries@ +Libs: -L${libdir} -l@mi_libname@ +Libs.private: @mi_pc_libraries@ Cflags: -I${includedir} diff --git a/depends/mimalloc/readme.md b/depends/mimalloc/readme.md index fe2ead6905b9..71aaf7a24e83 100644 --- a/depends/mimalloc/readme.md +++ b/depends/mimalloc/readme.md @@ -9,24 +9,30 @@ mimalloc (pronounced "me-malloc") is a general purpose allocator with excellent [performance](#performance) characteristics. -Initially developed by Daan Leijen for the run-time systems of the +Initially developed by Daan Leijen for the runtime systems of the [Koka](https://koka-lang.github.io) and [Lean](https://github.com/leanprover/lean) languages. -Latest release tag: `v2.0.7` (2022-11-03). -Latest stable tag: `v1.7.7` (2022-11-03). +Latest release : `v3.1.4` (beta) (2025-06-09). +Latest v2 release: `v2.2.4` (2025-06-09). +Latest v1 release: `v1.9.4` (2024-06-09). mimalloc is a drop-in replacement for `malloc` and can be used in other programs without code changes, for example, on dynamically linked ELF-based systems (Linux, BSD, etc.) you can use it as: ``` > LD_PRELOAD=/usr/lib/libmimalloc.so myprogram ``` -It also has an easy way to override the default allocator in [Windows](#override_on_windows). Notable aspects of the design include: +It also includes a way to dynamically override the default allocator in [Windows](#override_on_windows). +Notable aspects of the design include: -- __small and consistent__: the library is about 8k LOC using simple and +- __small and consistent__: the library is about 10k LOC using simple and consistent data structures. This makes it very suitable to integrate and adapt in other projects. For runtime systems it provides hooks for a monotonic _heartbeat_ and deferred freeing (for bounded worst-case times with reference counting). 
+ Partly due to its simplicity, mimalloc has been ported to many systems (Windows, macOS, + Linux, WASM, various BSD's, Haiku, MUSL, etc) and has excellent support for dynamic overriding. + At the same time, it is an industrial strength allocator that runs (very) large scale + distributed services on thousands of machines with excellent worst case latencies. - __free list sharding__: instead of one big free list (per size class) we have many smaller lists per "mimalloc page" which reduces fragmentation and increases locality -- @@ -36,13 +42,13 @@ It also has an easy way to override the default allocator in [Windows](#override per mimalloc page, but for each page we have multiple free lists. In particular, there is one list for thread-local `free` operations, and another one for concurrent `free` operations. Free-ing from another thread can now be a single CAS without needing - sophisticated coordination between threads. Since there will be + sophisticated coordination between threads. Since there will be thousands of separate free lists, contention is naturally distributed over the heap, and the chance of contending on a single location will be low -- this is quite similar to randomized algorithms like skip lists where adding a random oracle removes the need for a more complex algorithm. -- __eager page reset__: when a "page" becomes empty (with increased chance - due to free list sharding) the memory is marked to the OS as unused ("reset" or "purged") +- __eager page purging__: when a "page" becomes empty (with increased chance + due to free list sharding) the memory is marked to the OS as unused (reset or decommitted) reducing (real) memory pressure and fragmentation, especially in long running programs. - __secure__: _mimalloc_ can be built in secure mode, adding guard pages, @@ -50,35 +56,83 @@ It also has an easy way to override the default allocator in [Windows](#override heap vulnerabilities. 
The performance penalty is usually around 10% on average over our benchmarks. - __first-class heaps__: efficiently create and use multiple heaps to allocate across different regions. - A heap can be destroyed at once instead of deallocating each object separately. + A heap can be destroyed at once instead of deallocating each object separately. - __bounded__: it does not suffer from _blowup_ \[1\], has bounded worst-case allocation - times (_wcat_), bounded space overhead (~0.2% meta-data, with low internal fragmentation), - and has no internal points of contention using only atomic operations. + times (_wcat_) (upto OS primitives), bounded space overhead (~0.2% meta-data, with low + internal fragmentation), and has no internal points of contention using only atomic operations. - __fast__: In our benchmarks (see [below](#performance)), _mimalloc_ outperforms other leading allocators (_jemalloc_, _tcmalloc_, _Hoard_, etc), - and often uses less memory. A nice property - is that it does consistently well over a wide range of benchmarks. There is also good huge OS page - support for larger server programs. + and often uses less memory. A nice property is that it does consistently well over a wide range + of benchmarks. There is also good huge OS page support for larger server programs. The [documentation](https://microsoft.github.io/mimalloc) gives a full overview of the API. -You can read more on the design of _mimalloc_ in the [technical report](https://www.microsoft.com/en-us/research/publication/mimalloc-free-list-sharding-in-action) which also has detailed benchmark results. +You can read more on the design of _mimalloc_ in the [technical report](https://www.microsoft.com/en-us/research/publication/mimalloc-free-list-sharding-in-action) which also has detailed benchmark results. -Enjoy! +Enjoy! ### Branches -* `master`: latest stable release (based on `dev-slice`). -* `dev`: development branch for mimalloc v1. Use this branch for submitting PR's. 
-* `dev-slice`: development branch for mimalloc v2. This branch is downstream of `dev`. +* `main`: latest stable release (still based on `dev2`). +* `dev`: development branch for mimalloc v1. Use this branch for submitting PR's. +* `dev2`: development branch for mimalloc v2. This branch is downstream of `dev` + (and is essentially equal to `dev` except for `src/segment.c`). Uses larger sliced segments to manage + mimalloc pages that can reduce fragmentation. +* `dev3`: development branch for mimalloc v3 beta. This branch is downstream of `dev`. This version + simplifies the lock-free ownership of previous versions, and improves sharing of memory between + threads. On certain large workloads this version may use (much) less memory. ### Releases -Note: the `v2.x` version has a new algorithm for managing internal mimalloc pages that tends to use reduce memory usage - and fragmentation compared to mimalloc `v1.x` (especially for large workloads). Should otherwise have similar performance - (see [below](#performance)); please report if you observe any significant performance regression. - -* 2022-11-03, `v1.7.7`, `v2.0.7`: Initial support for [Valgrind] for leak testing and heap block overflow detection. Initial - support for attaching heaps to a speficic memory area (only in v2). Fix `realloc` behavior for zero size blocks, remove restriction to integral multiple of the alignment in `alloc_align`, improved aligned allocation performance, reduced contention with many threads on few processors (thank you @dposluns!), vs2022 support, support `pkg-config`, . +* 2025-06-09, `v1.9.4`, `v2.2.4`, `v3.1.4` (beta) : Some important bug fixes, including a case where OS memory + was not always fully released. Improved v3 performance, build on XBox, fix build on Android, support interpose + for older macOS versions, use MADV_FREE_REUSABLE on macOS, always check commit success, better support for Windows + fixed TLS offset, etc. 
+* 2025-03-28, `v1.9.3`, `v2.2.3`, `v3.0.3` (beta) : Various small bug and build fixes, including: + fix arm32 pre v7 builds, fix mingw build, get runtime statistics, improve statistic commit counts, + fix execution on non BMI1 x64 systems. +* 2025-03-06, `v1.9.2`, `v2.2.2`, `v3.0.2-beta`: Various small bug and build fixes. + Add `mi_options_print`, `mi_arenas_print`, and the experimental `mi_stat_get` and `mi_stat_get_json`. + Add `mi_thread_set_in_threadpool` and `mi_heap_set_numa_affinity` (v3 only). Add vcpkg portfile. + Upgrade mimalloc-redirect to v1.3.2. `MI_OPT_ARCH` is off by default now but still assumes armv8.1-a on arm64 + for fast atomic operations. Add QNX support. +* 2025-01-03, `v1.8.9`, `v2.1.9`, `v3.0.1-alpha`: Interim release. Support Windows arm64. New [guarded](#guarded) build that can place OS + guard pages behind objects to catch buffer overflows as they occur. + Many small fixes: build on Windows arm64, cygwin, riscV, and dragonfly; fix Windows static library initialization to account for + thread local destructors (in Rust/C++); macOS tag change; macOS TLS slot fix; improve stats; + consistent `mimalloc.dll` on Windows (instead of `mimalloc-override.dll`); fix mimalloc-redirect on Win11 H2; + add 0-byte to canary; upstream CPython fixes; reduce .bss size; allow fixed TLS slot on Windows for improved performance. +* 2024-05-21, `v1.8.7`, `v2.1.7`: Fix build issues on less common platforms. Started upstreaming patches + from the CPython [integration](https://github.com/python/cpython/issues/113141#issuecomment-2119255217). Upstream `vcpkg` patches. +* 2024-05-13, `v1.8.6`, `v2.1.6`: Fix build errors on various (older) platforms. Refactored aligned allocation. +* 2024-04-22, `v1.8.4`, `v2.1.4`: Fixes various bugs and build issues. Add `MI_LIBC_MUSL` cmake flag for musl builds. + Free-ing code is refactored into a separate module (`free.c`). 
Mimalloc page info is simplified with the block size + directly available (and new `block_size_shift` to improve aligned block free-ing). + New approach to collection of abandoned segments: When + a thread terminates the segments it owns are abandoned (containing still live objects) and these can be + reclaimed by other threads. We no longer use a list of abandoned segments but this is now done using bitmaps in arena's + which is more concurrent (and more aggressive). Abandoned memory can now also be reclaimed if a thread frees an object in + an abandoned page (which can be disabled using `mi_option_abandoned_reclaim_on_free`). The option `mi_option_max_segment_reclaim` + gives a maximum percentage of abandoned segments that can be reclaimed per try (=10%). + +* 2023-04-24, `v1.8.2`, `v2.1.2`: Fixes build issues on freeBSD, musl, and C17 (UE 5.1.1). Reduce code size/complexity + by removing regions and segment-cache's and only use arenas with improved memory purging -- this may improve memory + usage as well for larger services. Renamed options for consistency. Improved Valgrind and ASAN checking. + +* 2023-04-03, `v1.8.1`, `v2.1.1`: Fixes build issues on some platforms. + +* 2023-03-29, `v1.8.0`, `v2.1.0`: Improved support dynamic overriding on Windows 11. Improved tracing precision + with [asan](#asan) and [Valgrind](#valgrind), and added Windows event tracing [ETW](#ETW) (contributed by Xinglong He). Created an OS + abstraction layer to make it easier to port and separate platform dependent code (in `src/prim`). Fixed C++ STL compilation on older Microsoft C++ compilers, and various small bug fixes. + +* 2022-12-23, `v1.7.9`, `v2.0.9`: Supports building with [asan](#asan) and improved [Valgrind](#valgrind) support. + Support arbitrary large alignments (in particular for `std::pmr` pools). + Added C++ STL allocators attached to a specific heap (thanks @vmarkovtsev). + Heap walks now visit all object (including huge objects). 
Support Windows nano server containers (by Johannes Schindelin,@dscho). + Various small bug fixes. + +* 2022-11-03, `v1.7.7`, `v2.0.7`: Initial support for [Valgrind](#valgrind) for leak testing and heap block overflow + detection. Initial + support for attaching heaps to a specific memory area (only in v2). Fix `realloc` behavior for zero size blocks, remove restriction to integral multiple of the alignment in `alloc_align`, improved aligned allocation performance, reduced contention with many threads on few processors (thank you @dposluns!), vs2022 support, support `pkg-config`, . * 2022-04-14, `v1.7.6`, `v2.0.6`: fix fallback path for aligned OS allocation on Windows, improve Windows aligned allocation even when compiling with older SDK's, fix dynamic overriding on macOS Monterey, fix MSVC C++ dynamic overriding, fix @@ -87,24 +141,10 @@ Note: the `v2.x` version has a new algorithm for managing internal mimalloc page * 2022-02-14, `v1.7.5`, `v2.0.5` (alpha): fix malloc override on Windows 11, fix compilation with musl, potentially reduced - committed memory, add `bin/minject` for Windows, + committed memory, add `bin/minject` for Windows, improved wasm support, faster aligned allocation, various small fixes. -* 2021-11-14, `v1.7.3`, `v2.0.3` (beta): improved WASM support, improved macOS support and performance (including - M1), improved performance for v2 for large objects, Python integration improvements, more standard - installation directories, various small fixes. - -* 2021-06-17, `v1.7.2`, `v2.0.2` (beta): support M1, better installation layout on Linux, fix - thread_id on Android, prefer 2-6TiB area for aligned allocation to work better on pre-windows 8, various small fixes. - -* 2021-04-06, `v1.7.1`, `v2.0.1` (beta): fix bug in arena allocation for huge pages, improved aslr on large allocations, initial M1 support (still experimental). - -* 2021-01-31, `v2.0.0`: beta release 2.0: new slice algorithm for managing internal mimalloc pages. 
- -* 2021-01-31, `v1.7.0`: stable release 1.7: support explicit user provided memory regions, more precise statistics, - improve macOS overriding, initial support for Apple M1, improved DragonFly support, faster memcpy on Windows, various small fixes. - * [Older release notes](#older-release-notes) Special thanks to: @@ -115,9 +155,9 @@ Special thanks to: memory model bugs using the [genMC] model checker. * Weipeng Liu (@pongba), Zhuowei Li, Junhua Wang, and Jakub Szymanski, for their early support of mimalloc and deployment at large scale services, leading to many improvements in the mimalloc algorithms for large workloads. -* Jason Gibson (@jasongibson) for exhaustive testing on large scale workloads and server environments, and finding complex bugs +* Jason Gibson (@jasongibson) for exhaustive testing on large scale workloads and server environments, and finding complex bugs in (early versions of) `mimalloc`. -* Manuel Pöter (@mpoeter) and Sam Gross(@colesbury) for finding an ABA concurrency issue in abandoned segment reclamation. Sam also created the [no GIL](https://github.com/colesbury/nogil) Python fork which +* Manuel Pöter (@mpoeter) and Sam Gross(@colesbury) for finding an ABA concurrency issue in abandoned segment reclamation. Sam also created the [no GIL](https://github.com/colesbury/nogil) Python fork which uses mimalloc internally. @@ -138,14 +178,14 @@ mimalloc is used in various large scale low-latency services and programs, for e ## Windows -Open `ide/vs2019/mimalloc.sln` in Visual Studio 2019 and build. -The `mimalloc` project builds a static library (in `out/msvc-x64`), while the -`mimalloc-override` project builds a DLL for overriding malloc +Open `ide/vs2022/mimalloc.sln` in Visual Studio 2022 and build. +The `mimalloc-lib` project builds a static library (in `out/msvc-x64`), while the +`mimalloc-override-dll` project builds a DLL for overriding malloc in the entire program. -## macOS, Linux, BSD, etc. +## Linux, macOS, BSD, etc. 
-We use [`cmake`](https://cmake.org)1 as the build system: +We use [`cmake`](https://cmake.org) as the build system: ``` > mkdir -p out/release @@ -168,32 +208,58 @@ maintains detailed statistics as: > cmake -DCMAKE_BUILD_TYPE=Debug ../.. > make ``` + This will name the shared library as `libmimalloc-debug.so`. -Finally, you can build a _secure_ version that uses guard pages, encrypted -free lists, etc., as: +Finally, you can build a _secure_ version that uses guard pages, encrypted free lists, etc., as: + ``` > mkdir -p out/secure > cd out/secure > cmake -DMI_SECURE=ON ../.. > make ``` + This will name the shared library as `libmimalloc-secure.so`. -Use `ccmake`2 instead of `cmake` -to see and customize all the available build options. +Use `cmake ../.. -LH` to see all the available build options. -Notes: -1. Install CMake: `sudo apt-get install cmake` -2. Install CCMake: `sudo apt-get install cmake-curses-gui` +The examples use the default compiler. If you like to use another, use: + +``` +> CC=clang CXX=clang++ cmake ../.. +``` + +## Cmake with Visual Studio + +You can also use cmake on Windows. Open a Visual Studio 2022 development prompt +and invoke `cmake` with the right [generator](https://cmake.org/cmake/help/latest/generator/Visual%20Studio%2017%202022.html) +and architecture, like: + +``` +> cmake ..\.. -G "Visual Studio 17 2022" -A x64 -DMI_OVERRIDE=ON +``` + +The cmake build type is specified when actually building, for example: + +``` +> cmake --build . --config=Release +``` + +You can also install the [LLVM toolset](https://learn.microsoft.com/en-us/cpp/build/clang-support-msbuild?view=msvc-170#install-1) +on Windows to build with the `clang-cl` compiler directly: + +``` +> cmake ../.. -G "Visual Studio 17 2022" -T ClangCl +``` -## Single source +## Single Source You can also directly build the single `src/static.c` file as part of your project without needing `cmake` at all. Make sure to also add the mimalloc `include` directory to the include path. 
-# Using the library +# Using the Library The preferred usage is including ``, linking with the shared- or static library, and using the `mi_malloc` API exclusively for allocation. For example, @@ -205,7 +271,7 @@ mimalloc uses only safe OS calls (`mmap` and `VirtualAlloc`) and can co-exist with other allocators linked to the same program. If you use `cmake`, you can simply use: ``` -find_package(mimalloc 1.4 REQUIRED) +find_package(mimalloc 1.8 REQUIRED) ``` in your `CMakeLists.txt` to find a locally installed mimalloc. Then use either: ``` @@ -218,8 +284,8 @@ target_link_libraries(myapp PUBLIC mimalloc-static) to link with the static library. See `test\CMakeLists.txt` for an example. For best performance in C++ programs, it is also recommended to override the -global `new` and `delete` operators. For convience, mimalloc provides -[`mimalloc-new-delete.h`](https://github.com/microsoft/mimalloc/blob/master/include/mimalloc-new-delete.h) which does this for you -- just include it in a single(!) source file in your project. +global `new` and `delete` operators. For convenience, mimalloc provides +[`mimalloc-new-delete.h`](include/mimalloc-new-delete.h) which does this for you -- just include it in a single(!) source file in your project. In C++, mimalloc also provides the `mi_stl_allocator` struct which implements the `std::allocator` interface. @@ -266,46 +332,60 @@ completely and redirect all calls to the _mimalloc_ library instead . ## Environment Options -You can set further options either programmatically (using [`mi_option_set`](https://microsoft.github.io/mimalloc/group__options.html)), -or via environment variables: +You can set further options either programmatically (using [`mi_option_set`](https://microsoft.github.io/mimalloc/group__options.html)), or via environment variables: - `MIMALLOC_SHOW_STATS=1`: show statistics when the program terminates. - `MIMALLOC_VERBOSE=1`: show verbose messages. - `MIMALLOC_SHOW_ERRORS=1`: show error and warning messages. 
-- `MIMALLOC_PAGE_RESET=0`: by default, mimalloc will reset (or purge) OS pages that are not in use, to signal to the OS - that the underlying physical memory can be reused. This can reduce memory fragmentation in long running (server) - programs. By setting it to `0` this will no longer be done which can improve performance for batch-like programs. - As an alternative, the `MIMALLOC_RESET_DELAY=` can be set higher (100ms by default) to make the page - reset occur less frequently instead of turning it off completely. + +Advanced options: + +- `MIMALLOC_ARENA_EAGER_COMMIT=2`: turns on eager commit for the large arenas (usually 1GiB) from which mimalloc + allocates segments and pages. Set this to 2 (default) to + only enable this on overcommit systems (e.g. Linux). Set this to 1 to enable explicitly on other systems + as well (like Windows or macOS) which may improve performance (as the whole arena is committed at once). + Note that eager commit only increases the commit but not the actual the peak resident set + (rss) so it is generally ok to enable this. +- `MIMALLOC_PURGE_DELAY=N`: the delay in `N` milli-seconds (by default `10`) after which mimalloc will purge + OS pages that are not in use. This signals to the OS that the underlying physical memory can be reused which + can reduce memory fragmentation especially in long running (server) programs. Setting `N` to `0` purges immediately when + a page becomes unused which can improve memory usage but also decreases performance. Setting `N` to a higher + value like `100` can improve performance (sometimes by a lot) at the cost of potentially using more memory at times. + Setting it to `-1` disables purging completely. +- `MIMALLOC_PURGE_DECOMMITS=1`: By default "purging" memory means unused memory is decommitted (`MEM_DECOMMIT` on Windows, + `MADV_DONTNEED` (which decresease rss immediately) on `mmap` systems). 
Set this to 0 to instead "reset" unused + memory on a purge (`MEM_RESET` on Windows, generally `MADV_FREE` (which does not decrease rss immediately) on `mmap` systems). + Mimalloc generally does not "free" OS memory but only "purges" OS memory, in other words, it tries to keep virtual + address ranges and decommits within those ranges (to make the underlying physical memory available to other processes). + +Further options for large workloads and services: + - `MIMALLOC_USE_NUMA_NODES=N`: pretend there are at most `N` NUMA nodes. If not set, the actual NUMA nodes are detected at runtime. Setting `N` to 1 may avoid problems in some virtual environments. Also, setting it to a lower number than the actual NUMA nodes is fine and will only cause threads to potentially allocate more memory across actual NUMA nodes (but this can happen in any case as NUMA local allocation is always a best effort but not guaranteed). -- `MIMALLOC_LARGE_OS_PAGES=1`: use large OS pages (2MiB) when available; for some workloads this can significantly - improve performance. Use `MIMALLOC_VERBOSE` to check if the large OS pages are enabled -- usually one needs - to explicitly allow large OS pages (as on [Windows][windows-huge] and [Linux][linux-huge]). However, sometimes +- `MIMALLOC_ALLOW_LARGE_OS_PAGES=0`: Set to 1 to use large OS pages (2 or 4MiB) when available; for some workloads this can significantly + improve performance. When this option is disabled (default), it also disables transparent huge pages (THP) for the process + (on Linux and Android). On Linux the default setting is 2 -- this enables the use of large pages through THP only. + Use `MIMALLOC_VERBOSE` to check if the large OS pages are enabled -- usually one needs + to explicitly give permissions for large OS pages (as on [Windows][windows-huge] and [Linux][linux-huge]). 
However, sometimes the OS is very slow to reserve contiguous physical memory for large OS pages so use with care on systems that can have fragmented memory (for that reason, we generally recommend to use `MIMALLOC_RESERVE_HUGE_OS_PAGES` instead whenever possible). - -- `MIMALLOC_RESERVE_HUGE_OS_PAGES=N`: where N is the number of 1GiB _huge_ OS pages. This reserves the huge pages at +- `MIMALLOC_RESERVE_HUGE_OS_PAGES=N`: where `N` is the number of 1GiB _huge_ OS pages. This reserves the huge pages at startup and sometimes this can give a large (latency) performance improvement on big workloads. - Usually it is better to not use - `MIMALLOC_LARGE_OS_PAGES` in combination with this setting. Just like large OS pages, use with care as reserving + Usually it is better to not use `MIMALLOC_ALLOW_LARGE_OS_PAGES=1` in combination with this setting. Just like large + OS pages, use with care as reserving contiguous physical memory can take a long time when memory is fragmented (but reserving the huge pages is done at startup only once). - Note that we usually need to explicitly enable huge OS pages (as on [Windows][windows-huge] and [Linux][linux-huge])). + Note that we usually need to explicitly give permission for huge OS pages (as on [Windows][windows-huge] and [Linux][linux-huge])). With huge OS pages, it may be beneficial to set the setting `MIMALLOC_EAGER_COMMIT_DELAY=N` (`N` is 1 by default) to delay the initial `N` segments (of 4MiB) of a thread to not allocate in the huge OS pages; this prevents threads that are short lived - and allocate just a little to take up space in the huge OS page area (which cannot be reset). + and allocate just a little to take up space in the huge OS page area (which cannot be purged as huge OS pages are pinned + to physical memory). The huge pages are usually allocated evenly among NUMA nodes. 
- We can use `MIMALLOC_RESERVE_HUGE_OS_PAGES_AT=N` where `N` is the numa node (starting at 0) to allocate all - the huge pages at a specific numa node instead. + We can use `MIMALLOC_RESERVE_HUGE_OS_PAGES_AT=N` where `N` is the numa node (starting at 0) to allocate all + the huge pages at a specific numa node instead. Use caution when using `fork` in combination with either large or huge OS pages: on a fork, the OS uses copy-on-write for all pages in the original process including the huge OS pages. When any memory is now written in that area, the @@ -333,50 +413,38 @@ As always, evaluate with care as part of an overall security strategy as all of ## Debug Mode -When _mimalloc_ is built using debug mode, various checks are done at runtime to catch development errors. +When _mimalloc_ is built using debug mode, (`-DCMAKE_BUILD_TYPE=Debug`), +various checks are done at runtime to catch development errors. - Statistics are maintained in detail for each object size. They can be shown using `MIMALLOC_SHOW_STATS=1` at runtime. - All objects have padding at the end to detect (byte precise) heap block overflows. - Double free's, and freeing invalid heap pointers are detected. - Corrupted free-lists and some forms of use-after-free are detected. -## Valgrind +## Guarded Mode -Generally, we recommend using the standard allocator with the amazing [Valgrind] tool (and -also for other address sanitizers). -However, it is possible to build mimalloc with Valgrind support. This has a small performance -overhead but does allow detecting memory leaks and byte-precise buffer overflows directly on final -executables. To build with valgrind support, use the `MI_VALGRIND=ON` cmake option: +_mimalloc_ can be build in guarded mode using the `-DMI_GUARDED=ON` flags in `cmake`. +This enables placing OS guard pages behind certain object allocations to catch buffer overflows as they occur. +This can be invaluable to catch buffer-overflow bugs in large programs. 
However, it also means that any object +allocated with a guard page takes at least 8 KiB memory for the guard page and its alignment. As such, allocating +a guard page for every allocation may be too expensive both in terms of memory, and in terms of performance with +many system calls. Therefore, there are various environment variables (and options) to tune this: -``` -> cmake ../.. -DMI_VALGRIND=ON -``` +- `MIMALLOC_GUARDED_SAMPLE_RATE=N`: Set the sample rate to `N` (by default 4000). This mode places a guard page + behind every `N` suitable object allocations (per thread). Since the performance in guarded mode without placing + guard pages is close to release mode, this can be used to enable guard pages even in production to catch latent + buffer overflow bugs. Set the sample rate to `1` to guard every object, and to `0` to place no guard pages at all. -This can also be combined with secure mode or debug mode. -You can then run your programs directly under valgrind: +- `MIMALLOC_GUARDED_SAMPLE_SEED=N`: Start sampling at `N` (by default random). Can be used to reproduce a buffer + overflow if needed. -``` -> valgrind -``` +- `MIMALLOC_GUARDED_MIN=N`, `MIMALLOC_GUARDED_MAX=N`: Minimal and maximal _rounded_ object sizes for which a guard + page is considered (`0` and `1GiB` respectively). If you suspect a buffer overflow occurs with an object of size + 141, set the minimum and maximum to `148` and the sample rate to `1` to have all of those guarded. -If you rely on overriding `malloc`/`free` by mimalloc (instead of using the `mi_malloc`/`mi_free` API directly), -you also need to tell `valgrind` to not intercept those calls itself, and use: - -``` -> MIMALLOC_SHOW_STATS=1 valgrind --soname-synonyms=somalloc=*mimalloc* -- -``` - -By setting the `MIMALLOC_SHOW_STATS` environment variable you can check that mimalloc is indeed -used and not the standard allocator. 
Even though the [Valgrind option][valgrind-soname] -is called `--soname-synonyms`, this also -works when overriding with a static library or object file. Unfortunately, it is not possible to -dynamically override mimalloc using `LD_PRELOAD` together with `valgrind`. -See also the `test/test-wrong.c` file to test with `valgrind`. - -Valgrind support is in its initial development -- please report any issues. - -[Valgrind]: https://valgrind.org/ -[valgrind-soname]: https://valgrind.org/docs/manual/manual-core.html#opt.soname-synonyms +- `MIMALLOC_GUARDED_PRECISE=1`: If we have an object of size 13, we would usually place it an aligned 16 bytes in + front of the guard page. Using `MIMALLOC_GUARDED_PRECISE` places it exactly 13 bytes before a page so that even + a 1 byte overflow is detected. This violates the C/C++ minimal alignment guarantees though so use with care. # Overriding Standard Malloc @@ -387,7 +455,7 @@ Overriding the standard `malloc` (and `new`) can be done either _dynamically_ or This is the recommended way to override the standard malloc interface. -### Override on Linux, BSD +### Dynamic Override on Linux, BSD On these ELF-based systems we preload the mimalloc shared library so all calls to the standard `malloc` interface are @@ -406,7 +474,7 @@ or run with the debug version to get detailed statistics: > env MIMALLOC_SHOW_STATS=1 LD_PRELOAD=/usr/lib/libmimalloc-debug.so myprogram ``` -### Override on MacOS +### Dynamic Override on MacOS On macOS we can also preload the mimalloc shared library so all calls to the standard `malloc` interface are @@ -419,55 +487,163 @@ Note that certain security restrictions may apply when doing this from the [shell](https://stackoverflow.com/questions/43941322/dyld-insert-libraries-ignored-when-calling-application-through-bash). 
-### Override on Windows +### Dynamic Override on Windows + +We use a separate redirection DLL to override mimalloc on Windows +such that we redirect all malloc/free calls that go through the (dynamic) C runtime allocator, +including those from other DLL's or libraries. As it intercepts all allocation calls on a low level, +it can be used on large programs that include other 3rd party components. +There are four requirements to make the overriding work well: -Overriding on Windows is robust and has the -particular advantage to be able to redirect all malloc/free calls that go through -the (dynamic) C runtime allocator, including those from other DLL's or libraries. +1. Use the C-runtime library as a DLL (using the `/MD` or `/MDd` switch). -The overriding on Windows requires that you link your program explicitly with -the mimalloc DLL and use the C-runtime library as a DLL (using the `/MD` or `/MDd` switch). -Also, the `mimalloc-redirect.dll` (or `mimalloc-redirect32.dll`) must be put -in the same folder as the main `mimalloc-override.dll` at runtime (as it is a dependency). -The redirection DLL ensures that all calls to the C runtime malloc API get redirected to -mimalloc (in `mimalloc-override.dll`). +2. Link your program explicitly with the `mimalloc.dll.lib` export library for the `mimalloc.dll`. + (which must be compiled with `-DMI_OVERRIDE=ON`, which is the default though). + To ensure the `mimalloc.dll` is actually loaded at run-time it is easiest + to insert some call to the mimalloc API in the `main` function, like `mi_version()` + (or use the `/include:mi_version` switch on the linker command, or + similarly, `#pragma comment(linker, "/include:mi_version")` in some source file). + See the `mimalloc-test-override` project for an example on how to use this. 
-To ensure the mimalloc DLL is loaded at run-time it is easiest to insert some -call to the mimalloc API in the `main` function, like `mi_version()` -(or use the `/INCLUDE:mi_version` switch on the linker). See the `mimalloc-override-test` project -for an example on how to use this. For best performance on Windows with C++, it -is also recommended to also override the `new`/`delete` operations (by including -[`mimalloc-new-delete.h`](https://github.com/microsoft/mimalloc/blob/master/include/mimalloc-new-delete.h) a single(!) source file in your project). +3. The `mimalloc-redirect.dll` must be put in the same directory as the main + `mimalloc.dll` at runtime (as it is a dependency of that DLL). + The redirection DLL ensures that all calls to the C runtime malloc API get + redirected to mimalloc functions (which reside in `mimalloc.dll`). + +4. Ensure the `mimalloc.dll` comes as early as possible in the import + list of the final executable (so it can intercept all potential allocations). + You can use `minject -l ` to check this if needed. + +For best performance on Windows with C++, it is also recommended to also override +the `new`/`delete` operations (by including [`mimalloc-new-delete.h`](include/mimalloc-new-delete.h) +a single(!) source file in your project). The environment variable `MIMALLOC_DISABLE_REDIRECT=1` can be used to disable dynamic -overriding at run-time. Use `MIMALLOC_VERBOSE=1` to check if mimalloc was successfully redirected. +overriding at run-time. Use `MIMALLOC_VERBOSE=1` to check if mimalloc was successfully +redirected. -(Note: in principle, it is possible to even patch existing executables without any recompilation -if they are linked with the dynamic C runtime (`ucrtbase.dll`) -- just put the `mimalloc-override.dll` -into the import table (and put `mimalloc-redirect.dll` in the same folder) -Such patching can be done for example with [CFF Explorer](https://ntcore.com/?page_id=388)). 
+For different platforms than x64, you may need a specific [redirection dll](bin). +Furthermore, we cannot always re-link an executable or ensure `mimalloc.dll` comes +first in the import table. In such cases the [`minject`](bin) tool can be used +to patch the executable's import tables. ## Static override On Unix-like systems, you can also statically link with _mimalloc_ to override the standard malloc interface. The recommended way is to link the final program with the -_mimalloc_ single object file (`mimalloc-override.o`). We use +_mimalloc_ single object file (`mimalloc.o`). We use an object file instead of a library file as linkers give preference to that over archives to resolve symbols. To ensure that the standard malloc interface resolves to the _mimalloc_ library, link it as the first object file. For example: + ``` -> gcc -o myprogram mimalloc-override.o myfile1.c ... +> gcc -o myprogram mimalloc.o myfile1.c ... ``` Another way to override statically that works on all platforms, is to link statically to mimalloc (as shown in the introduction) and include a header file in each source file that re-defines `malloc` etc. to `mi_malloc`. -This is provided by [`mimalloc-override.h`](https://github.com/microsoft/mimalloc/blob/master/include/mimalloc-override.h). This only works reliably though if all sources are +This is provided by [`mimalloc-override.h`](include/mimalloc-override.h). This only works +reliably though if all sources are under your control or otherwise mixing of pointers from different heaps may occur! +# Tools + +Generally, we recommend using the standard allocator with memory tracking tools, but mimalloc +can also be build to support the [address sanitizer][asan] or the excellent [Valgrind] tool. +Moreover, it can be build to support Windows event tracing ([ETW]). +This has a small performance overhead but does allow detecting memory leaks and byte-precise +buffer overflows directly on final executables. 
See also the `test/test-wrong.c` file to test with various tools. + +## Valgrind + +To build with [valgrind] support, use the `MI_TRACK_VALGRIND=ON` cmake option: + +``` +> cmake ../.. -DMI_TRACK_VALGRIND=ON +``` + +This can also be combined with secure mode or debug mode. +You can then run your programs directly under valgrind: + +``` +> valgrind +``` + +If you rely on overriding `malloc`/`free` by mimalloc (instead of using the `mi_malloc`/`mi_free` API directly), +you also need to tell `valgrind` to not intercept those calls itself, and use: + +``` +> MIMALLOC_SHOW_STATS=1 valgrind --soname-synonyms=somalloc=*mimalloc* -- +``` + +By setting the `MIMALLOC_SHOW_STATS` environment variable you can check that mimalloc is indeed +used and not the standard allocator. Even though the [Valgrind option][valgrind-soname] +is called `--soname-synonyms`, this also works when overriding with a static library or object file. +To dynamically override mimalloc using `LD_PRELOAD` together with `valgrind`, use: + +``` +> valgrind --trace-children=yes --soname-synonyms=somalloc=*mimalloc* /usr/bin/env LD_PRELOAD=/usr/lib/libmimalloc.so -- +``` + +See also the `test/test-wrong.c` file to test with `valgrind`. + +Valgrind support is in its initial development -- please report any issues. + +[Valgrind]: https://valgrind.org/ +[valgrind-soname]: https://valgrind.org/docs/manual/manual-core.html#opt.soname-synonyms + +## ASAN + +To build with the address sanitizer, use the `-DMI_TRACK_ASAN=ON` cmake option: + +``` +> cmake ../.. -DMI_TRACK_ASAN=ON +``` + +This can also be combined with secure mode or debug mode. +You can then run your programs as:' + +``` +> ASAN_OPTIONS=verbosity=1 +``` + +When you link a program with an address sanitizer build of mimalloc, you should +generally compile that program too with the address sanitizer enabled. 
+For example, assuming you build mimalloc in `out/debug`: + +``` +clang -g -o test-wrong -Iinclude test/test-wrong.c out/debug/libmimalloc-asan-debug.a -lpthread -fsanitize=address -fsanitize-recover=address +``` + +Since the address sanitizer redirects the standard allocation functions, on some platforms (macOSX for example) +it is required to compile mimalloc with `-DMI_OVERRIDE=OFF`. +Address sanitizer support is in its initial development -- please report any issues. + +[asan]: https://github.com/google/sanitizers/wiki/AddressSanitizer + +## ETW + +Event tracing for Windows ([ETW]) provides a high performance way to capture all allocations though +mimalloc and analyze them later. To build with ETW support, use the `-DMI_TRACK_ETW=ON` cmake option. + +You can then capture an allocation trace using the Windows performance recorder (WPR), using the +`src/prim/windows/etw-mimalloc.wprp` profile. In an admin prompt, you can use: +``` +> wpr -start src\prim\windows\etw-mimalloc.wprp -filemode +> +> wpr -stop .etl +``` +and then open `.etl` in the Windows Performance Analyzer (WPA), or +use a tool like [TraceControl] that is specialized for analyzing mimalloc traces. + +[ETW]: https://learn.microsoft.com/en-us/windows-hardware/test/wpt/event-tracing-for-windows +[TraceControl]: https://github.com/xinglonghe/TraceControl + + # Performance Last update: 2021-01-30 @@ -573,7 +749,7 @@ The _alloc-test_, by [OLogN Technologies AG](http://ithare.com/testing-memory-allocators-ptmalloc2-tcmalloc-hoard-jemalloc-while-trying-to-simulate-real-world-loads/), is a very allocation intensive benchmark doing millions of allocations in various size classes. The test is scaled such that when an allocator performs almost identically on _alloc-test1_ as _alloc-testN_ it -means that it scales linearly. +means that it scales linearly. The _sh6bench_ and _sh8bench_ benchmarks are developed by [MicroQuill](http://www.microquill.com/) as part of SmartHeap. 
@@ -728,6 +904,16 @@ provided by the bot. You will only need to do this once across all repos using o # Older Release Notes +* 2021-11-14, `v1.7.3`, `v2.0.3` (beta): improved WASM support, improved macOS support and performance (including + M1), improved performance for v2 for large objects, Python integration improvements, more standard + installation directories, various small fixes. +* 2021-06-17, `v1.7.2`, `v2.0.2` (beta): support M1, better installation layout on Linux, fix + thread_id on Android, prefer 2-6TiB area for aligned allocation to work better on pre-windows 8, various small fixes. +* 2021-04-06, `v1.7.1`, `v2.0.1` (beta): fix bug in arena allocation for huge pages, improved aslr on large allocations, initial M1 support (still experimental). +* 2021-01-31, `v2.0.0`: beta release 2.0: new slice algorithm for managing internal mimalloc pages. +* 2021-01-31, `v1.7.0`: stable release 1.7: support explicit user provided memory regions, more precise statistics, + improve macOS overriding, initial support for Apple M1, improved DragonFly support, faster memcpy on Windows, various small fixes. + * 2020-09-24, `v1.6.7`: stable release 1.6: using standard C atomics, passing tsan testing, improved handling of failing to commit on Windows, add [`mi_process_info`](https://github.com/microsoft/mimalloc/blob/master/include/mimalloc.h#L156) api call. * 2020-08-06, `v1.6.4`: stable release 1.6: improved error recovery in low-memory situations, @@ -749,9 +935,9 @@ provided by the bot. You will only need to do this once across all repos using o more eager concurrent free, addition of STL allocator, fixed potential memory leak. * 2020-01-15, `v1.3.0`: stable release 1.3: bug fixes, improved randomness and [stronger free list encoding](https://github.com/microsoft/mimalloc/blob/783e3377f79ee82af43a0793910a9f2d01ac7863/include/mimalloc-internal.h#L396) in secure mode. + * 2019-12-22, `v1.2.2`: stable release 1.2: minor updates. 
* 2019-11-22, `v1.2.0`: stable release 1.2: bug fixes, improved secure mode (free list corruption checks, double free mitigation). Improved dynamic overriding on Windows. * 2019-10-07, `v1.1.0`: stable release 1.1. * 2019-09-01, `v1.0.8`: pre-release 8: more robust windows dynamic overriding, initial huge page support. * 2019-08-10, `v1.0.6`: pre-release 6: various performance improvements. - diff --git a/depends/mimalloc/src/alloc-aligned.c b/depends/mimalloc/src/alloc-aligned.c index 9614aa092ee0..6b8bc7620f5c 100644 --- a/depends/mimalloc/src/alloc-aligned.c +++ b/depends/mimalloc/src/alloc-aligned.c @@ -6,99 +6,204 @@ terms of the MIT license. A copy of the license can be found in the file -----------------------------------------------------------------------------*/ #include "mimalloc.h" -#include "mimalloc-internal.h" +#include "mimalloc/internal.h" +#include "mimalloc/prim.h" // mi_prim_get_default_heap -#include // memset +#include // memset // ------------------------------------------------------ // Aligned Allocation // ------------------------------------------------------ -// Fallback primitive aligned allocation -- split out for better codegen -static mi_decl_noinline void* mi_heap_malloc_zero_aligned_at_fallback(mi_heap_t* const heap, const size_t size, const size_t alignment, const size_t offset, const bool zero) mi_attr_noexcept -{ - mi_assert_internal(size <= PTRDIFF_MAX); - mi_assert_internal(alignment!=0 && _mi_is_power_of_two(alignment) && alignment <= MI_ALIGNMENT_MAX); +static bool mi_malloc_is_naturally_aligned( size_t size, size_t alignment ) { + // objects up to `MI_PAGE_MIN_BLOCK_ALIGN` are always allocated aligned to their size + mi_assert_internal(_mi_is_power_of_two(alignment) && (alignment > 0)); + if (alignment > size) return false; + const size_t bsize = mi_good_size(size); + const bool ok = (bsize <= MI_PAGE_MAX_START_BLOCK_ALIGN2 && _mi_is_power_of_two(bsize)); + if (ok) { mi_assert_internal((bsize & (alignment-1)) == 0); } // 
since both power of 2 and alignment <= size + return ok; +} - const uintptr_t align_mask = alignment-1; // for any x, `(x & align_mask) == (x % alignment)` - const size_t padsize = size + MI_PADDING_SIZE; +#if MI_GUARDED +static mi_decl_restrict void* mi_heap_malloc_guarded_aligned(mi_heap_t* heap, size_t size, size_t alignment, bool zero) mi_attr_noexcept { + // use over allocation for guarded blocksl + mi_assert_internal(alignment > 0 && alignment < MI_PAGE_MAX_OVERALLOC_ALIGN); + const size_t oversize = size + alignment - 1; + void* base = _mi_heap_malloc_guarded(heap, oversize, zero); + void* p = _mi_align_up_ptr(base, alignment); + mi_track_align(base, p, (uint8_t*)p - (uint8_t*)base, size); + mi_assert_internal(mi_usable_size(p) >= size); + mi_assert_internal(_mi_is_aligned(p, alignment)); + return p; +} - // use regular allocation if it is guaranteed to fit the alignment constraints - if (offset==0 && alignment<=padsize && padsize<=MI_MAX_ALIGN_GUARANTEE && (padsize&align_mask)==0) { - void* p = _mi_heap_malloc_zero(heap, size, zero); - mi_assert_internal(p == NULL || ((uintptr_t)p % alignment) == 0); - return p; - } +static void* mi_heap_malloc_zero_no_guarded(mi_heap_t* heap, size_t size, bool zero) { + const size_t rate = heap->guarded_sample_rate; + // only write if `rate!=0` so we don't write to the constant `_mi_heap_empty` + if (rate != 0) { heap->guarded_sample_rate = 0; } + void* p = _mi_heap_malloc_zero(heap, size, zero); + if (rate != 0) { heap->guarded_sample_rate = rate; } + return p; +} +#else +static void* mi_heap_malloc_zero_no_guarded(mi_heap_t* heap, size_t size, bool zero) { + return _mi_heap_malloc_zero(heap, size, zero); +} +#endif - // otherwise over-allocate - const size_t oversize = size + alignment - 1; - void* p = _mi_heap_malloc_zero(heap, oversize, zero); - if (p == NULL) return NULL; +// Fallback aligned allocation that over-allocates -- split out for better codegen +static mi_decl_noinline void* 
mi_heap_malloc_zero_aligned_at_overalloc(mi_heap_t* const heap, const size_t size, const size_t alignment, const size_t offset, const bool zero) mi_attr_noexcept +{ + mi_assert_internal(size <= (MI_MAX_ALLOC_SIZE - MI_PADDING_SIZE)); + mi_assert_internal(alignment != 0 && _mi_is_power_of_two(alignment)); + + void* p; + size_t oversize; + if mi_unlikely(alignment > MI_PAGE_MAX_OVERALLOC_ALIGN) { + // use OS allocation for large alignments and allocate inside a singleton page (not in an arena) + // This can support alignments >= MI_PAGE_ALIGN by ensuring the object can be aligned + // in the first (and single) page such that the page info is `MI_PAGE_ALIGN` bytes before it (and can be found in the _mi_page_map). + if mi_unlikely(offset != 0) { + // todo: cannot support offset alignment for very large alignments yet + #if MI_DEBUG > 0 + _mi_error_message(EOVERFLOW, "aligned allocation with a large alignment cannot be used with an alignment offset (size %zu, alignment %zu, offset %zu)\n", size, alignment, offset); + #endif + return NULL; + } + oversize = (size <= MI_SMALL_SIZE_MAX ? MI_SMALL_SIZE_MAX + 1 /* ensure we use generic malloc path */ : size); + // note: no guarded as alignment > 0 + p = _mi_heap_malloc_zero_ex(heap, oversize, zero, alignment); // the page block size should be large enough to align in the single huge page block + if (p == NULL) return NULL; + } + else { + // otherwise over-allocate + oversize = (size < MI_MAX_ALIGN_SIZE ? MI_MAX_ALIGN_SIZE : size) + alignment - 1; // adjust for size <= 16; with size 0 and aligment 64k, we would allocate a 64k block and pointing just beyond that. + p = mi_heap_malloc_zero_no_guarded(heap, oversize, zero); + if (p == NULL) return NULL; + } + mi_page_t* page = _mi_ptr_page(p); // .. and align within the allocation - uintptr_t adjust = alignment - (((uintptr_t)p + offset) & align_mask); - mi_assert_internal(adjust <= alignment); - void* aligned_p = (adjust == alignment ? 
p : (void*)((uintptr_t)p + adjust)); - if (aligned_p != p) mi_page_set_has_aligned(_mi_ptr_page(p), true); + const uintptr_t align_mask = alignment - 1; // for any x, `(x & align_mask) == (x % alignment)` + const uintptr_t poffset = ((uintptr_t)p + offset) & align_mask; + const uintptr_t adjust = (poffset == 0 ? 0 : alignment - poffset); + mi_assert_internal(adjust < alignment); + void* aligned_p = (void*)((uintptr_t)p + adjust); + if (aligned_p != p) { + mi_page_set_has_aligned(page, true); + #if MI_GUARDED + // set tag to aligned so mi_usable_size works with guard pages + if (adjust >= sizeof(mi_block_t)) { + mi_block_t* const block = (mi_block_t*)p; + block->next = MI_BLOCK_TAG_ALIGNED; + } + #endif + _mi_padding_shrink(page, (mi_block_t*)p, adjust + size); + } + // todo: expand padding if overallocated ? + + mi_assert_internal(mi_page_usable_block_size(page) >= adjust + size); mi_assert_internal(((uintptr_t)aligned_p + offset) % alignment == 0); - mi_assert_internal(p == _mi_page_ptr_unalign(_mi_ptr_segment(aligned_p), _mi_ptr_page(aligned_p), aligned_p)); + mi_assert_internal(mi_usable_size(aligned_p)>=size); + mi_assert_internal(mi_usable_size(p) == mi_usable_size(aligned_p)+adjust); + #if MI_DEBUG > 1 + mi_page_t* const apage = _mi_ptr_page(aligned_p); + void* unalign_p = _mi_page_ptr_unalign(apage, aligned_p); + mi_assert_internal(p == unalign_p); + #endif + + // now zero the block if needed + //if (alignment > MI_PAGE_MAX_OVERALLOC_ALIGN) { + // // for the tracker, on huge aligned allocations only from the start of the large block is defined + // mi_track_mem_undefined(aligned_p, size); + // if (zero) { + // _mi_memzero_aligned(aligned_p, mi_usable_size(aligned_p)); + // } + //} - #if MI_TRACK_ENABLED if (p != aligned_p) { - mi_track_free(p); - mi_track_malloc(aligned_p,size,zero); - } - else { - mi_track_resize(aligned_p,oversize,size); + mi_track_align(p,aligned_p,adjust,mi_usable_size(aligned_p)); + #if MI_GUARDED + mi_track_mem_defined(p, 
sizeof(mi_block_t)); + #endif } - #endif return aligned_p; } -// Primitive aligned allocation -static void* mi_heap_malloc_zero_aligned_at(mi_heap_t* const heap, const size_t size, const size_t alignment, const size_t offset, const bool zero) mi_attr_noexcept +// Generic primitive aligned allocation -- split out for better codegen +static mi_decl_noinline void* mi_heap_malloc_zero_aligned_at_generic(mi_heap_t* const heap, const size_t size, const size_t alignment, const size_t offset, const bool zero) mi_attr_noexcept { - // note: we don't require `size > offset`, we just guarantee that the address at offset is aligned regardless of the allocated size. - mi_assert(alignment > 0); - if mi_unlikely(alignment==0 || !_mi_is_power_of_two(alignment)) { // require power-of-two (see ) + mi_assert_internal(alignment != 0 && _mi_is_power_of_two(alignment)); + // we don't allocate more than MI_MAX_ALLOC_SIZE (see ) + if mi_unlikely(size > (MI_MAX_ALLOC_SIZE - MI_PADDING_SIZE)) { #if MI_DEBUG > 0 - _mi_error_message(EOVERFLOW, "aligned allocation requires the alignment to be a power-of-two (size %zu, alignment %zu)\n", size, alignment); + _mi_error_message(EOVERFLOW, "aligned allocation request is too large (size %zu, alignment %zu)\n", size, alignment); #endif return NULL; } - if mi_unlikely(alignment > MI_ALIGNMENT_MAX) { // we cannot align at a boundary larger than this (or otherwise we cannot find segment headers) - #if MI_DEBUG > 0 - _mi_error_message(EOVERFLOW, "aligned allocation has a maximum alignment of %zu (size %zu, alignment %zu)\n", MI_ALIGNMENT_MAX, size, alignment); - #endif - return NULL; + + // use regular allocation if it is guaranteed to fit the alignment constraints. + // this is important to try as the fast path in `mi_heap_malloc_zero_aligned` only works when there exist + // a page with the right block size, and if we always use the over-alloc fallback that would never happen. 
+ if (offset == 0 && mi_malloc_is_naturally_aligned(size,alignment)) { + void* p = mi_heap_malloc_zero_no_guarded(heap, size, zero); + mi_assert_internal(p == NULL || ((uintptr_t)p % alignment) == 0); + const bool is_aligned_or_null = (((uintptr_t)p) & (alignment-1))==0; + if mi_likely(is_aligned_or_null) { + return p; + } + else { + // this should never happen if the `mi_malloc_is_naturally_aligned` check is correct.. + mi_assert(false); + mi_free(p); + } } - if mi_unlikely(size > PTRDIFF_MAX) { // we don't allocate more than PTRDIFF_MAX (see ) + + // fall back to over-allocation + return mi_heap_malloc_zero_aligned_at_overalloc(heap,size,alignment,offset,zero); +} + + +// Primitive aligned allocation +static void* mi_heap_malloc_zero_aligned_at(mi_heap_t* const heap, const size_t size, const size_t alignment, const size_t offset, const bool zero) mi_attr_noexcept +{ + // note: we don't require `size > offset`, we just guarantee that the address at offset is aligned regardless of the allocated size. 
+ if mi_unlikely(alignment == 0 || !_mi_is_power_of_two(alignment)) { // require power-of-two (see ) #if MI_DEBUG > 0 - _mi_error_message(EOVERFLOW, "aligned allocation request is too large (size %zu, alignment %zu)\n", size, alignment); + _mi_error_message(EOVERFLOW, "aligned allocation requires the alignment to be a power-of-two (size %zu, alignment %zu)\n", size, alignment); #endif return NULL; } - const uintptr_t align_mask = alignment-1; // for any x, `(x & align_mask) == (x % alignment)` - const size_t padsize = size + MI_PADDING_SIZE; // note: cannot overflow due to earlier size > PTRDIFF_MAX check + + #if MI_GUARDED + if (offset==0 && alignment < MI_PAGE_MAX_OVERALLOC_ALIGN && mi_heap_malloc_use_guarded(heap,size)) { + return mi_heap_malloc_guarded_aligned(heap, size, alignment, zero); + } + #endif // try first if there happens to be a small block available with just the right alignment - if mi_likely(padsize <= MI_SMALL_SIZE_MAX) { + // since most small power-of-2 blocks (under MI_PAGE_MAX_BLOCK_START_ALIGN2) are already + // naturally aligned this can be often the case. + if mi_likely(size <= MI_SMALL_SIZE_MAX && alignment <= size) { + const uintptr_t align_mask = alignment-1; // for any x, `(x & align_mask) == (x % alignment)` + const size_t padsize = size + MI_PADDING_SIZE; mi_page_t* page = _mi_heap_get_free_small_page(heap, padsize); - const bool is_aligned = (((uintptr_t)page->free+offset) & align_mask)==0; - if mi_likely(page->free != NULL && is_aligned) - { - #if MI_STAT>1 - mi_heap_stat_increase(heap, malloc, size); - #endif - void* p = _mi_page_malloc(heap, page, padsize, zero); // TODO: inline _mi_page_malloc - mi_assert_internal(p != NULL); - mi_assert_internal(((uintptr_t)p + offset) % alignment == 0); - mi_track_malloc(p,size,zero); - return p; + if mi_likely(page->free != NULL) { + const bool is_aligned = (((uintptr_t)page->free + offset) & align_mask)==0; + if mi_likely(is_aligned) + { + void* p = (zero ? 
_mi_page_malloc_zeroed(heap,page,padsize) : _mi_page_malloc(heap,page,padsize)); // call specific page malloc for better codegen + mi_assert_internal(p != NULL); + mi_assert_internal(((uintptr_t)p + offset) % alignment == 0); + mi_track_malloc(p,size,zero); + return p; + } } } - // fallback - return mi_heap_malloc_zero_aligned_at_fallback(heap, size, alignment, offset, zero); + + // fallback to generic aligned allocation + return mi_heap_malloc_zero_aligned_at_generic(heap, size, alignment, offset, zero); } @@ -111,22 +216,7 @@ mi_decl_nodiscard mi_decl_restrict void* mi_heap_malloc_aligned_at(mi_heap_t* he } mi_decl_nodiscard mi_decl_restrict void* mi_heap_malloc_aligned(mi_heap_t* heap, size_t size, size_t alignment) mi_attr_noexcept { - #if !MI_PADDING - // without padding, any small sized allocation is naturally aligned (see also `_mi_segment_page_start`) - if (!_mi_is_power_of_two(alignment)) return NULL; - if mi_likely(_mi_is_power_of_two(size) && size >= alignment && size <= MI_SMALL_SIZE_MAX) - #else - // with padding, we can only guarantee this for fixed alignments - if mi_likely((alignment == sizeof(void*) || (alignment == MI_MAX_ALIGN_SIZE && size > (MI_MAX_ALIGN_SIZE/2))) - && size <= MI_SMALL_SIZE_MAX) - #endif - { - // fast path for common alignment and size - return mi_heap_malloc_small(heap, size); - } - else { - return mi_heap_malloc_aligned_at(heap, size, alignment, 0); - } + return mi_heap_malloc_aligned_at(heap, size, alignment, 0); } // ------------------------------------------------------ @@ -152,27 +242,27 @@ mi_decl_nodiscard mi_decl_restrict void* mi_heap_calloc_aligned(mi_heap_t* heap, } mi_decl_nodiscard mi_decl_restrict void* mi_malloc_aligned_at(size_t size, size_t alignment, size_t offset) mi_attr_noexcept { - return mi_heap_malloc_aligned_at(mi_get_default_heap(), size, alignment, offset); + return mi_heap_malloc_aligned_at(mi_prim_get_default_heap(), size, alignment, offset); } mi_decl_nodiscard mi_decl_restrict void* 
mi_malloc_aligned(size_t size, size_t alignment) mi_attr_noexcept { - return mi_heap_malloc_aligned(mi_get_default_heap(), size, alignment); + return mi_heap_malloc_aligned(mi_prim_get_default_heap(), size, alignment); } mi_decl_nodiscard mi_decl_restrict void* mi_zalloc_aligned_at(size_t size, size_t alignment, size_t offset) mi_attr_noexcept { - return mi_heap_zalloc_aligned_at(mi_get_default_heap(), size, alignment, offset); + return mi_heap_zalloc_aligned_at(mi_prim_get_default_heap(), size, alignment, offset); } mi_decl_nodiscard mi_decl_restrict void* mi_zalloc_aligned(size_t size, size_t alignment) mi_attr_noexcept { - return mi_heap_zalloc_aligned(mi_get_default_heap(), size, alignment); + return mi_heap_zalloc_aligned(mi_prim_get_default_heap(), size, alignment); } mi_decl_nodiscard mi_decl_restrict void* mi_calloc_aligned_at(size_t count, size_t size, size_t alignment, size_t offset) mi_attr_noexcept { - return mi_heap_calloc_aligned_at(mi_get_default_heap(), count, size, alignment, offset); + return mi_heap_calloc_aligned_at(mi_prim_get_default_heap(), count, size, alignment, offset); } mi_decl_nodiscard mi_decl_restrict void* mi_calloc_aligned(size_t count, size_t size, size_t alignment) mi_attr_noexcept { - return mi_heap_calloc_aligned(mi_get_default_heap(), count, size, alignment); + return mi_heap_calloc_aligned(mi_prim_get_default_heap(), count, size, alignment); } @@ -190,19 +280,13 @@ static void* mi_heap_realloc_zero_aligned_at(mi_heap_t* heap, void* p, size_t ne return p; // reallocation still fits, is aligned and not more than 50% waste } else { + // note: we don't zero allocate upfront so we only zero initialize the expanded part void* newp = mi_heap_malloc_aligned_at(heap,newsize,alignment,offset); if (newp != NULL) { if (zero && newsize > size) { - const mi_page_t* page = _mi_ptr_page(newp); - if (page->is_zero) { - // already zero initialized - mi_assert_expensive(mi_mem_is_zero(newp,newsize)); - } - else { - // also set last word in the 
previous allocation to zero to ensure any padding is zero-initialized - size_t start = (size >= sizeof(intptr_t) ? size - sizeof(intptr_t) : 0); - memset((uint8_t*)newp + start, 0, newsize - start); - } + // also set last word in the previous allocation to zero to ensure any padding is zero-initialized + size_t start = (size >= sizeof(intptr_t) ? size - sizeof(intptr_t) : 0); + _mi_memzero((uint8_t*)newp + start, newsize - start); } _mi_memcpy_aligned(newp, p, (newsize > size ? size : newsize)); mi_free(p); // only free if successful @@ -247,26 +331,27 @@ mi_decl_nodiscard void* mi_heap_recalloc_aligned(mi_heap_t* heap, void* p, size_ } mi_decl_nodiscard void* mi_realloc_aligned_at(void* p, size_t newsize, size_t alignment, size_t offset) mi_attr_noexcept { - return mi_heap_realloc_aligned_at(mi_get_default_heap(), p, newsize, alignment, offset); + return mi_heap_realloc_aligned_at(mi_prim_get_default_heap(), p, newsize, alignment, offset); } mi_decl_nodiscard void* mi_realloc_aligned(void* p, size_t newsize, size_t alignment) mi_attr_noexcept { - return mi_heap_realloc_aligned(mi_get_default_heap(), p, newsize, alignment); + return mi_heap_realloc_aligned(mi_prim_get_default_heap(), p, newsize, alignment); } mi_decl_nodiscard void* mi_rezalloc_aligned_at(void* p, size_t newsize, size_t alignment, size_t offset) mi_attr_noexcept { - return mi_heap_rezalloc_aligned_at(mi_get_default_heap(), p, newsize, alignment, offset); + return mi_heap_rezalloc_aligned_at(mi_prim_get_default_heap(), p, newsize, alignment, offset); } mi_decl_nodiscard void* mi_rezalloc_aligned(void* p, size_t newsize, size_t alignment) mi_attr_noexcept { - return mi_heap_rezalloc_aligned(mi_get_default_heap(), p, newsize, alignment); + return mi_heap_rezalloc_aligned(mi_prim_get_default_heap(), p, newsize, alignment); } mi_decl_nodiscard void* mi_recalloc_aligned_at(void* p, size_t newcount, size_t size, size_t alignment, size_t offset) mi_attr_noexcept { - return 
mi_heap_recalloc_aligned_at(mi_get_default_heap(), p, newcount, size, alignment, offset); + return mi_heap_recalloc_aligned_at(mi_prim_get_default_heap(), p, newcount, size, alignment, offset); } mi_decl_nodiscard void* mi_recalloc_aligned(void* p, size_t newcount, size_t size, size_t alignment) mi_attr_noexcept { - return mi_heap_recalloc_aligned(mi_get_default_heap(), p, newcount, size, alignment); + return mi_heap_recalloc_aligned(mi_prim_get_default_heap(), p, newcount, size, alignment); } + diff --git a/depends/mimalloc/src/alloc-override.c b/depends/mimalloc/src/alloc-override.c index 9534e9d57b1f..52ab69c5389c 100644 --- a/depends/mimalloc/src/alloc-override.c +++ b/depends/mimalloc/src/alloc-override.c @@ -13,7 +13,7 @@ terms of the MIT license. A copy of the license can be found in the file #error "It is only possible to override "malloc" on Windows when building as a DLL (and linking the C runtime as a DLL)" #endif -#if defined(MI_MALLOC_OVERRIDE) && !(defined(_WIN32)) +#if defined(MI_MALLOC_OVERRIDE) && !(defined(_WIN32)) #if defined(__APPLE__) #include @@ -23,7 +23,7 @@ mi_decl_externc size_t malloc_good_size(size_t size); #endif // helper definition for C override of C++ new -typedef struct mi_nothrow_s { int _tag; } mi_nothrow_t; +typedef void* mi_nothrow_t; // ------------------------------------------------------ // Override system malloc @@ -43,19 +43,25 @@ typedef struct mi_nothrow_s { int _tag; } mi_nothrow_t; #define MI_FORWARD0(fun,x) MI_FORWARD(fun) #define MI_FORWARD02(fun,x,y) MI_FORWARD(fun) #else - // otherwise use forwarding by calling our `mi_` function - #define MI_FORWARD1(fun,x) { return fun(x); } + // otherwise use forwarding by calling our `mi_` function + #define MI_FORWARD1(fun,x) { return fun(x); } #define MI_FORWARD2(fun,x,y) { return fun(x,y); } #define MI_FORWARD3(fun,x,y,z) { return fun(x,y,z); } #define MI_FORWARD0(fun,x) { fun(x); } #define MI_FORWARD02(fun,x,y) { fun(x,y); } #endif -#if defined(__APPLE__) && 
defined(MI_SHARED_LIB_EXPORT) && defined(MI_OSX_INTERPOSE) - // define MI_OSX_IS_INTERPOSED as we should not provide forwarding definitions for + +#if defined(__APPLE__) && defined(MI_SHARED_LIB_EXPORT) && defined(MI_OSX_INTERPOSE) + // define MI_OSX_IS_INTERPOSED as we should not provide forwarding definitions for // functions that are interposed (or the interposing does not work) #define MI_OSX_IS_INTERPOSED + mi_decl_externc size_t mi_malloc_size_checked(void *p) { + if (!mi_is_in_heap_region(p)) return 0; + return mi_usable_size(p); + } + // use interposing so `DYLD_INSERT_LIBRARIES` works without `DYLD_FORCE_FLAT_NAMESPACE=1` // See: struct mi_interpose_s { @@ -64,23 +70,21 @@ typedef struct mi_nothrow_s { int _tag; } mi_nothrow_t; }; #define MI_INTERPOSE_FUN(oldfun,newfun) { (const void*)&newfun, (const void*)&oldfun } #define MI_INTERPOSE_MI(fun) MI_INTERPOSE_FUN(fun,mi_##fun) - - __attribute__((used)) static struct mi_interpose_s _mi_interposes[] __attribute__((section("__DATA, __interpose"))) = + + #define MI_INTERPOSE_DECLS(name) __attribute__((used)) static struct mi_interpose_s name[] __attribute__((section("__DATA, __interpose"))) + + MI_INTERPOSE_DECLS(_mi_interposes) = { MI_INTERPOSE_MI(malloc), MI_INTERPOSE_MI(calloc), MI_INTERPOSE_MI(realloc), MI_INTERPOSE_MI(strdup), - MI_INTERPOSE_MI(strndup), MI_INTERPOSE_MI(realpath), MI_INTERPOSE_MI(posix_memalign), MI_INTERPOSE_MI(reallocf), MI_INTERPOSE_MI(valloc), - MI_INTERPOSE_MI(malloc_size), + MI_INTERPOSE_FUN(malloc_size,mi_malloc_size_checked), MI_INTERPOSE_MI(malloc_good_size), - #if defined(MAC_OS_X_VERSION_10_15) && MAC_OS_X_VERSION_MAX_ALLOWED >= MAC_OS_X_VERSION_10_15 - MI_INTERPOSE_MI(aligned_alloc), - #endif #ifdef MI_OSX_ZONE // we interpose malloc_default_zone in alloc-override-osx.c so we can use mi_free safely MI_INTERPOSE_MI(free), @@ -91,6 +95,12 @@ typedef struct mi_nothrow_s { int _tag; } mi_nothrow_t; MI_INTERPOSE_FUN(vfree,mi_cfree), #endif }; + MI_INTERPOSE_DECLS(_mi_interposes_10_7) 
__OSX_AVAILABLE(10.7) = { + MI_INTERPOSE_MI(strndup), + }; + MI_INTERPOSE_DECLS(_mi_interposes_10_15) __OSX_AVAILABLE(10.15) = { + MI_INTERPOSE_MI(aligned_alloc), + }; #ifdef __cplusplus extern "C" { @@ -122,11 +132,19 @@ typedef struct mi_nothrow_s { int _tag; } mi_nothrow_t; // cannot override malloc unless using a dll. // we just override new/delete which does work in a static library. #else - // On all other systems forward to our API + // On all other systems forward allocation primitives to our API mi_decl_export void* malloc(size_t size) MI_FORWARD1(mi_malloc, size) mi_decl_export void* calloc(size_t size, size_t n) MI_FORWARD2(mi_calloc, size, n) mi_decl_export void* realloc(void* p, size_t newsize) MI_FORWARD2(mi_realloc, p, newsize) mi_decl_export void free(void* p) MI_FORWARD0(mi_free, p) + // In principle we do not need to forward `strdup`/`strndup` but on some systems these do not use `malloc` internally (but a more primitive call) + // We only override if `strdup` is not a macro (as on some older libc's, see issue #885) + #if !defined(strdup) + mi_decl_export char* strdup(const char* str) MI_FORWARD1(mi_strdup, str) + #endif + #if !defined(strndup) && (!defined(__APPLE__) || (defined(MAC_OS_X_VERSION_10_7) && MAC_OS_X_VERSION_MAX_ALLOWED >= MAC_OS_X_VERSION_10_7)) + mi_decl_export char* strndup(const char* str, size_t n) MI_FORWARD2(mi_strndup, str, n) + #endif #endif #if (defined(__GNUC__) || defined(__clang__)) && !defined(__APPLE__) @@ -168,34 +186,40 @@ typedef struct mi_nothrow_s { int _tag; } mi_nothrow_t; void operator delete[](void* p, std::size_t n, std::align_val_t al) noexcept { mi_free_size_aligned(p, n, static_cast(al)); }; void operator delete (void* p, std::align_val_t al, const std::nothrow_t&) noexcept { mi_free_aligned(p, static_cast(al)); } void operator delete[](void* p, std::align_val_t al, const std::nothrow_t&) noexcept { mi_free_aligned(p, static_cast(al)); } - + void* operator new( std::size_t n, std::align_val_t al) 
noexcept(false) { return mi_new_aligned(n, static_cast(al)); } void* operator new[]( std::size_t n, std::align_val_t al) noexcept(false) { return mi_new_aligned(n, static_cast(al)); } void* operator new (std::size_t n, std::align_val_t al, const std::nothrow_t&) noexcept { return mi_new_aligned_nothrow(n, static_cast(al)); } void* operator new[](std::size_t n, std::align_val_t al, const std::nothrow_t&) noexcept { return mi_new_aligned_nothrow(n, static_cast(al)); } #endif -#elif (defined(__GNUC__) || defined(__clang__)) +#elif (defined(__GNUC__) || defined(__clang__)) // ------------------------------------------------------ // Override by defining the mangled C++ names of the operators (as // used by GCC and CLang). // See // ------------------------------------------------------ - + void _ZdlPv(void* p) MI_FORWARD0(mi_free,p) // delete void _ZdaPv(void* p) MI_FORWARD0(mi_free,p) // delete[] void _ZdlPvm(void* p, size_t n) MI_FORWARD02(mi_free_size,p,n) void _ZdaPvm(void* p, size_t n) MI_FORWARD02(mi_free_size,p,n) + void _ZdlPvSt11align_val_t(void* p, size_t al) { mi_free_aligned(p,al); } void _ZdaPvSt11align_val_t(void* p, size_t al) { mi_free_aligned(p,al); } void _ZdlPvmSt11align_val_t(void* p, size_t n, size_t al) { mi_free_size_aligned(p,n,al); } void _ZdaPvmSt11align_val_t(void* p, size_t n, size_t al) { mi_free_size_aligned(p,n,al); } + + void _ZdlPvRKSt9nothrow_t(void* p, mi_nothrow_t tag) { MI_UNUSED(tag); mi_free(p); } // operator delete(void*, std::nothrow_t const&) + void _ZdaPvRKSt9nothrow_t(void* p, mi_nothrow_t tag) { MI_UNUSED(tag); mi_free(p); } // operator delete[](void*, std::nothrow_t const&) + void _ZdlPvSt11align_val_tRKSt9nothrow_t(void* p, size_t al, mi_nothrow_t tag) { MI_UNUSED(tag); mi_free_aligned(p,al); } // operator delete(void*, std::align_val_t, std::nothrow_t const&) + void _ZdaPvSt11align_val_tRKSt9nothrow_t(void* p, size_t al, mi_nothrow_t tag) { MI_UNUSED(tag); mi_free_aligned(p,al); } // operator delete[](void*, 
std::align_val_t, std::nothrow_t const&) #if (MI_INTPTR_SIZE==8) void* _Znwm(size_t n) MI_FORWARD1(mi_new,n) // new 64-bit void* _Znam(size_t n) MI_FORWARD1(mi_new,n) // new[] 64-bit void* _ZnwmRKSt9nothrow_t(size_t n, mi_nothrow_t tag) { MI_UNUSED(tag); return mi_new_nothrow(n); } - void* _ZnamRKSt9nothrow_t(size_t n, mi_nothrow_t tag) { MI_UNUSED(tag); return mi_new_nothrow(n); } + void* _ZnamRKSt9nothrow_t(size_t n, mi_nothrow_t tag) { MI_UNUSED(tag); return mi_new_nothrow(n); } void* _ZnwmSt11align_val_t(size_t n, size_t al) MI_FORWARD2(mi_new_aligned, n, al) void* _ZnamSt11align_val_t(size_t n, size_t al) MI_FORWARD2(mi_new_aligned, n, al) void* _ZnwmSt11align_val_tRKSt9nothrow_t(size_t n, size_t al, mi_nothrow_t tag) { MI_UNUSED(tag); return mi_new_aligned_nothrow(n,al); } @@ -204,7 +228,7 @@ typedef struct mi_nothrow_s { int _tag; } mi_nothrow_t; void* _Znwj(size_t n) MI_FORWARD1(mi_new,n) // new 64-bit void* _Znaj(size_t n) MI_FORWARD1(mi_new,n) // new[] 64-bit void* _ZnwjRKSt9nothrow_t(size_t n, mi_nothrow_t tag) { MI_UNUSED(tag); return mi_new_nothrow(n); } - void* _ZnajRKSt9nothrow_t(size_t n, mi_nothrow_t tag) { MI_UNUSED(tag); return mi_new_nothrow(n); } + void* _ZnajRKSt9nothrow_t(size_t n, mi_nothrow_t tag) { MI_UNUSED(tag); return mi_new_nothrow(n); } void* _ZnwjSt11align_val_t(size_t n, size_t al) MI_FORWARD2(mi_new_aligned, n, al) void* _ZnajSt11align_val_t(size_t n, size_t al) MI_FORWARD2(mi_new_aligned, n, al) void* _ZnwjSt11align_val_tRKSt9nothrow_t(size_t n, size_t al, mi_nothrow_t tag) { MI_UNUSED(tag); return mi_new_aligned_nothrow(n,al); } @@ -226,7 +250,7 @@ extern "C" { // Forward Posix/Unix calls as well void* reallocf(void* p, size_t newsize) MI_FORWARD2(mi_reallocf,p,newsize) size_t malloc_size(const void* p) MI_FORWARD1(mi_usable_size,p) - #if !defined(__ANDROID__) && !defined(__FreeBSD__) + #if !defined(__ANDROID__) && !defined(__FreeBSD__) && !defined(__DragonFly__) size_t malloc_usable_size(void *p) MI_FORWARD1(mi_usable_size,p) 
#else size_t malloc_usable_size(const void *p) MI_FORWARD1(mi_usable_size,p) @@ -234,30 +258,41 @@ extern "C" { // No forwarding here due to aliasing/name mangling issues void* valloc(size_t size) { return mi_valloc(size); } - void vfree(void* p) { mi_free(p); } + void vfree(void* p) { mi_free(p); } size_t malloc_good_size(size_t size) { return mi_malloc_good_size(size); } int posix_memalign(void** p, size_t alignment, size_t size) { return mi_posix_memalign(p, alignment, size); } - + // `aligned_alloc` is only available when __USE_ISOC11 is defined. + // Note: it seems __USE_ISOC11 is not defined in musl (and perhaps other libc's) so we only check + // for it if using glibc. // Note: Conda has a custom glibc where `aligned_alloc` is declared `static inline` and we cannot // override it, but both _ISOC11_SOURCE and __USE_ISOC11 are undefined in Conda GCC7 or GCC9. // Fortunately, in the case where `aligned_alloc` is declared as `static inline` it // uses internally `memalign`, `posix_memalign`, or `_aligned_malloc` so we can avoid overriding it ourselves. 
- #if __USE_ISOC11 + #if !defined(__GLIBC__) || __USE_ISOC11 void* aligned_alloc(size_t alignment, size_t size) { return mi_aligned_alloc(alignment, size); } #endif #endif // no forwarding here due to aliasing/name mangling issues -void cfree(void* p) { mi_free(p); } +void cfree(void* p) { mi_free(p); } void* pvalloc(size_t size) { return mi_pvalloc(size); } -void* reallocarray(void* p, size_t count, size_t size) { return mi_reallocarray(p, count, size); } -int reallocarr(void* p, size_t count, size_t size) { return mi_reallocarr(p, count, size); } void* memalign(size_t alignment, size_t size) { return mi_memalign(alignment, size); } void* _aligned_malloc(size_t alignment, size_t size) { return mi_aligned_alloc(alignment, size); } +void* reallocarray(void* p, size_t count, size_t size) { return mi_reallocarray(p, count, size); } +// some systems define reallocarr so mark it as a weak symbol (#751) +mi_decl_weak int reallocarr(void* p, size_t count, size_t size) { return mi_reallocarr(p, count, size); } + +#if defined(__wasi__) + // forward __libc interface (see PR #667) + void* __libc_malloc(size_t size) MI_FORWARD1(mi_malloc, size) + void* __libc_calloc(size_t count, size_t size) MI_FORWARD2(mi_calloc, count, size) + void* __libc_realloc(void* p, size_t size) MI_FORWARD2(mi_realloc, p, size) + void __libc_free(void* p) MI_FORWARD0(mi_free, p) + void* __libc_memalign(size_t alignment, size_t size) { return mi_memalign(alignment, size); } -#if defined(__GLIBC__) && defined(__linux__) - // forward __libc interface (needed for glibc-based Linux distributions) +#elif defined(__linux__) + // forward __libc interface (needed for glibc-based and musl-based Linux distributions) void* __libc_malloc(size_t size) MI_FORWARD1(mi_malloc,size) void* __libc_calloc(size_t count, size_t size) MI_FORWARD2(mi_calloc,count,size) void* __libc_realloc(void* p, size_t size) MI_FORWARD2(mi_realloc,p,size) diff --git a/depends/mimalloc/src/alloc-posix.c b/depends/mimalloc/src/alloc-posix.c 
index 57e15d05d8db..225752fd8707 100644 --- a/depends/mimalloc/src/alloc-posix.c +++ b/depends/mimalloc/src/alloc-posix.c @@ -10,7 +10,7 @@ terms of the MIT license. A copy of the license can be found in the file // for convenience and used when overriding these functions. // ------------------------------------------------------------------------ #include "mimalloc.h" -#include "mimalloc-internal.h" +#include "mimalloc/internal.h" // ------------------------------------------------------ // Posix & Unix functions definitions @@ -33,12 +33,12 @@ terms of the MIT license. A copy of the license can be found in the file mi_decl_nodiscard size_t mi_malloc_size(const void* p) mi_attr_noexcept { - //if (!mi_is_in_heap_region(p)) return 0; + // if (!mi_is_in_heap_region(p)) return 0; return mi_usable_size(p); } mi_decl_nodiscard size_t mi_malloc_usable_size(const void *p) mi_attr_noexcept { - //if (!mi_is_in_heap_region(p)) return 0; + // if (!mi_is_in_heap_region(p)) return 0; return mi_usable_size(p); } @@ -56,7 +56,8 @@ int mi_posix_memalign(void** p, size_t alignment, size_t size) mi_attr_noexcept // Note: The spec dictates we should not modify `*p` on an error. 
(issue#27) // if (p == NULL) return EINVAL; - if (alignment % sizeof(void*) != 0) return EINVAL; // natural alignment + if ((alignment % sizeof(void*)) != 0) return EINVAL; // natural alignment + // it is also required that alignment is a power of 2 and > 0; this is checked in `mi_malloc_aligned` if (alignment==0 || !_mi_is_power_of_two(alignment)) return EINVAL; // not a power of 2 void* q = mi_malloc_aligned(size, alignment); if (q==NULL && size != 0) return ENOMEM; @@ -91,7 +92,7 @@ mi_decl_nodiscard mi_decl_restrict void* mi_aligned_alloc(size_t alignment, size #endif return NULL; } - */ + */ // C11 also requires alignment to be a power-of-two (and > 0) which is checked in mi_malloc_aligned void* p = mi_malloc_aligned(size, alignment); mi_assert_internal(((uintptr_t)p % alignment) == 0); @@ -110,7 +111,7 @@ mi_decl_nodiscard int mi_reallocarr( void* p, size_t count, size_t size ) mi_att errno = EINVAL; return EINVAL; } - void** op = (void**)p; + void** op = (void**)p; void* newp = mi_reallocarray(*op, count, size); if mi_unlikely(newp == NULL) { return errno; } *op = newp; @@ -149,7 +150,7 @@ int mi_dupenv_s(char** buf, size_t* size, const char* name) mi_attr_noexcept { else { *buf = mi_strdup(p); if (*buf==NULL) return ENOMEM; - if (size != NULL) *size = strlen(p); + if (size != NULL) *size = _mi_strlen(p); } return 0; } diff --git a/depends/mimalloc/src/alloc.c b/depends/mimalloc/src/alloc.c index 02d009e2f7d4..3e912726ff46 100644 --- a/depends/mimalloc/src/alloc.c +++ b/depends/mimalloc/src/alloc.c @@ -1,5 +1,5 @@ /* ---------------------------------------------------------------------------- -Copyright (c) 2018-2022, Microsoft Research, Daan Leijen +Copyright (c) 2018-2024, Microsoft Research, Daan Leijen This is free software; you can redistribute it and/or modify it under the terms of the MIT license. A copy of the license can be found in the file "LICENSE" at the root of this distribution. @@ -9,15 +9,16 @@ terms of the MIT license. 
A copy of the license can be found in the file #endif #include "mimalloc.h" -#include "mimalloc-internal.h" -#include "mimalloc-atomic.h" +#include "mimalloc/internal.h" +#include "mimalloc/atomic.h" +#include "mimalloc/prim.h" // _mi_prim_thread_id() - -#include // memset, strlen -#include // malloc, exit +#include // memset, strlen (for mi_strdup) +#include // malloc, abort #define MI_IN_ALLOC_C #include "alloc-override.c" +#include "free.c" #undef MI_IN_ALLOC_C // ------------------------------------------------------ @@ -26,84 +27,135 @@ terms of the MIT license. A copy of the license can be found in the file // Fast allocation in a page: just pop from the free list. // Fall back to generic allocation only if the list is empty. -extern inline void* _mi_page_malloc(mi_heap_t* heap, mi_page_t* page, size_t size, bool zero) mi_attr_noexcept { - mi_assert_internal(page->xblock_size==0||mi_page_block_size(page) >= size); +// Note: in release mode the (inlined) routine is about 7 instructions with a single test. 
+extern inline void* _mi_page_malloc_zero(mi_heap_t* heap, mi_page_t* page, size_t size, bool zero) mi_attr_noexcept +{ + if (page->block_size != 0) { // not the empty heap + mi_assert_internal(mi_page_block_size(page) >= size); + mi_assert_internal(_mi_is_aligned(page, MI_PAGE_ALIGN)); + mi_assert_internal(_mi_ptr_page(page)==page); + } + + // check the free list mi_block_t* const block = page->free; if mi_unlikely(block == NULL) { - return _mi_malloc_generic(heap, size, zero); + return _mi_malloc_generic(heap, size, zero, 0); } mi_assert_internal(block != NULL && _mi_ptr_page(block) == page); + // pop from the free list - page->used++; page->free = mi_block_next(page, block); + page->used++; mi_assert_internal(page->free == NULL || _mi_ptr_page(page->free) == page); + mi_assert_internal(page->block_size < MI_MAX_ALIGN_SIZE || _mi_is_aligned(block, MI_MAX_ALIGN_SIZE)); + + #if MI_DEBUG>3 + if (page->free_is_zero && size > sizeof(*block)) { + mi_assert_expensive(mi_mem_is_zero(block+1,size - sizeof(*block))); + } + #endif - // allow use of the block internally + // allow use of the block internally // note: when tracking we need to avoid ever touching the MI_PADDING since - // that is tracked by valgrind etc. as non-accessible (through the red-zone, see `mimalloc-track.h`) + // that is tracked by valgrind etc. as non-accessible (through the red-zone, see `mimalloc/track.h`) mi_track_mem_undefined(block, mi_page_usable_block_size(page)); - + // zero the block? note: we need to zero the full block size (issue #63) if mi_unlikely(zero) { - mi_assert_internal(page->xblock_size != 0); // do not call with zero'ing for huge blocks (see _mi_malloc_generic) - const size_t zsize = (page->is_zero ? 
sizeof(block->next) + MI_PADDING_SIZE : page->xblock_size); - _mi_memzero_aligned(block, zsize - MI_PADDING_SIZE); + mi_assert_internal(page->block_size != 0); // do not call with zero'ing for huge blocks (see _mi_malloc_generic) + mi_assert_internal(!mi_page_is_huge(page)); + #if MI_PADDING + mi_assert_internal(page->block_size >= MI_PADDING_SIZE); + #endif + if (page->free_is_zero) { + block->next = 0; + mi_track_mem_defined(block, page->block_size - MI_PADDING_SIZE); + } + else { + _mi_memzero_aligned(block, page->block_size - MI_PADDING_SIZE); + } } -#if (MI_DEBUG>0) && !MI_TRACK_ENABLED - if (!page->is_zero && !zero) { memset(block, MI_DEBUG_UNINIT, mi_page_usable_block_size(page)); } -#elif (MI_SECURE!=0) + #if (MI_DEBUG>0) && !MI_TRACK_ENABLED && !MI_TSAN + if (!zero && !mi_page_is_huge(page)) { + memset(block, MI_DEBUG_UNINIT, mi_page_usable_block_size(page)); + } + #elif (MI_SECURE!=0) if (!zero) { block->next = 0; } // don't leak internal data -#endif + #endif -#if (MI_STAT>0) + #if (MI_STAT>0) const size_t bsize = mi_page_usable_block_size(page); - if (bsize <= MI_MEDIUM_OBJ_SIZE_MAX) { - mi_heap_stat_increase(heap, normal, bsize); - mi_heap_stat_counter_increase(heap, normal_count, 1); -#if (MI_STAT>1) + if (bsize <= MI_LARGE_MAX_OBJ_SIZE) { + mi_heap_stat_increase(heap, malloc_normal, bsize); + mi_heap_stat_counter_increase(heap, malloc_normal_count, 1); + #if (MI_STAT>1) const size_t bin = _mi_bin(bsize); - mi_heap_stat_increase(heap, normal_bins[bin], 1); -#endif + mi_heap_stat_increase(heap, malloc_bins[bin], 1); + mi_heap_stat_increase(heap, malloc_requested, size - MI_PADDING_SIZE); + #endif } -#endif + #endif -#if (MI_PADDING > 0) && defined(MI_ENCODE_FREELIST) && !MI_TRACK_ENABLED - mi_padding_t* const padding = (mi_padding_t*)((uint8_t*)block + mi_page_usable_block_size(page)); - ptrdiff_t delta = ((uint8_t*)padding - (uint8_t*)block - (size - MI_PADDING_SIZE)); - #if (MI_DEBUG>1) - mi_assert_internal(delta >= 0 && 
mi_page_usable_block_size(page) >= (size - MI_PADDING_SIZE + delta)); - mi_track_mem_defined(padding,sizeof(mi_padding_t)); // note: re-enable since mi_page_usable_block_size may set noaccess + #if MI_PADDING // && !MI_TRACK_ENABLED + mi_padding_t* const padding = (mi_padding_t*)((uint8_t*)block + mi_page_usable_block_size(page)); + ptrdiff_t delta = ((uint8_t*)padding - (uint8_t*)block - (size - MI_PADDING_SIZE)); + #if (MI_DEBUG>=2) + mi_assert_internal(delta >= 0 && mi_page_usable_block_size(page) >= (size - MI_PADDING_SIZE + delta)); + #endif + mi_track_mem_defined(padding,sizeof(mi_padding_t)); // note: re-enable since mi_page_usable_block_size may set noaccess + padding->canary = mi_ptr_encode_canary(page,block,page->keys); + padding->delta = (uint32_t)(delta); + #if MI_PADDING_CHECK + if (!mi_page_is_huge(page)) { + uint8_t* fill = (uint8_t*)padding - delta; + const size_t maxpad = (delta > MI_MAX_ALIGN_SIZE ? MI_MAX_ALIGN_SIZE : delta); // set at most N initial padding bytes + for (size_t i = 0; i < maxpad; i++) { fill[i] = MI_DEBUG_PADDING; } + } + #endif #endif - padding->canary = (uint32_t)(mi_ptr_encode(page,block,page->keys)); - padding->delta = (uint32_t)(delta); - uint8_t* fill = (uint8_t*)padding - delta; - const size_t maxpad = (delta > MI_MAX_ALIGN_SIZE ? MI_MAX_ALIGN_SIZE : delta); // set at most N initial padding bytes - for (size_t i = 0; i < maxpad; i++) { fill[i] = MI_DEBUG_PADDING; } -#endif return block; } +// extra entries for improved efficiency in `alloc-aligned.c`. 
+extern void* _mi_page_malloc(mi_heap_t* heap, mi_page_t* page, size_t size) mi_attr_noexcept { + return _mi_page_malloc_zero(heap,page,size,false); +} +extern void* _mi_page_malloc_zeroed(mi_heap_t* heap, mi_page_t* page, size_t size) mi_attr_noexcept { + return _mi_page_malloc_zero(heap,page,size,true); +} + +#if MI_GUARDED +mi_decl_restrict void* _mi_heap_malloc_guarded(mi_heap_t* heap, size_t size, bool zero) mi_attr_noexcept; +#endif + static inline mi_decl_restrict void* mi_heap_malloc_small_zero(mi_heap_t* heap, size_t size, bool zero) mi_attr_noexcept { mi_assert(heap != NULL); - mi_assert(heap->thread_id == 0 || heap->thread_id == _mi_thread_id()); // heaps are thread local mi_assert(size <= MI_SMALL_SIZE_MAX); -#if (MI_PADDING) - if (size == 0) { - size = sizeof(void*); + #if MI_DEBUG + const uintptr_t tid = _mi_thread_id(); + mi_assert(heap->tld->thread_id == 0 || heap->tld->thread_id == tid); // heaps are thread local + #endif + #if (MI_PADDING || MI_GUARDED) + if (size == 0) { size = sizeof(void*); } + #endif + #if MI_GUARDED + if (mi_heap_malloc_use_guarded(heap,size)) { + return _mi_heap_malloc_guarded(heap, size, zero); } -#endif + #endif + + // get page in constant time, and allocate from it mi_page_t* page = _mi_heap_get_free_small_page(heap, size + MI_PADDING_SIZE); - void* p = _mi_page_malloc(heap, page, size + MI_PADDING_SIZE, zero); - mi_assert_internal(p == NULL || mi_usable_size(p) >= size); -#if MI_STAT>1 - if (p != NULL) { - if (!mi_heap_is_initialized(heap)) { heap = mi_get_default_heap(); } - mi_heap_stat_increase(heap, malloc, mi_usable_size(p)); - } -#endif + void* const p = _mi_page_malloc_zero(heap, page, size + MI_PADDING_SIZE, zero); mi_track_malloc(p,size,zero); + + #if MI_DEBUG>3 + if (p != NULL && zero) { + mi_assert_expensive(mi_mem_is_zero(p, size)); + } + #endif return p; } @@ -113,41 +165,52 @@ mi_decl_nodiscard extern inline mi_decl_restrict void* mi_heap_malloc_small(mi_h } mi_decl_nodiscard extern inline mi_decl_restrict 
void* mi_malloc_small(size_t size) mi_attr_noexcept { - return mi_heap_malloc_small(mi_get_default_heap(), size); + return mi_heap_malloc_small(mi_prim_get_default_heap(), size); } // The main allocation function -extern inline void* _mi_heap_malloc_zero(mi_heap_t* heap, size_t size, bool zero) mi_attr_noexcept { +extern inline void* _mi_heap_malloc_zero_ex(mi_heap_t* heap, size_t size, bool zero, size_t huge_alignment) mi_attr_noexcept { + // fast path for small objects if mi_likely(size <= MI_SMALL_SIZE_MAX) { + mi_assert_internal(huge_alignment == 0); return mi_heap_malloc_small_zero(heap, size, zero); } + #if MI_GUARDED + else if (huge_alignment==0 && mi_heap_malloc_use_guarded(heap,size)) { + return _mi_heap_malloc_guarded(heap, size, zero); + } + #endif else { + // regular allocation mi_assert(heap!=NULL); - mi_assert(heap->thread_id == 0 || heap->thread_id == _mi_thread_id()); // heaps are thread local - void* const p = _mi_malloc_generic(heap, size + MI_PADDING_SIZE, zero); // note: size can overflow but it is detected in malloc_generic - mi_assert_internal(p == NULL || mi_usable_size(p) >= size); - #if MI_STAT>1 - if (p != NULL) { - if (!mi_heap_is_initialized(heap)) { heap = mi_get_default_heap(); } - mi_heap_stat_increase(heap, malloc, mi_usable_size(p)); + mi_assert(heap->tld->thread_id == 0 || heap->tld->thread_id == _mi_thread_id()); // heaps are thread local + void* const p = _mi_malloc_generic(heap, size + MI_PADDING_SIZE, zero, huge_alignment); // note: size can overflow but it is detected in malloc_generic + mi_track_malloc(p,size,zero); + + #if MI_DEBUG>3 + if (p != NULL && zero) { + mi_assert_expensive(mi_mem_is_zero(p, size)); } #endif - mi_track_malloc(p,size,zero); return p; } } +extern inline void* _mi_heap_malloc_zero(mi_heap_t* heap, size_t size, bool zero) mi_attr_noexcept { + return _mi_heap_malloc_zero_ex(heap, size, zero, 0); +} + mi_decl_nodiscard extern inline mi_decl_restrict void* mi_heap_malloc(mi_heap_t* heap, size_t size) 
mi_attr_noexcept { return _mi_heap_malloc_zero(heap, size, false); } mi_decl_nodiscard extern inline mi_decl_restrict void* mi_malloc(size_t size) mi_attr_noexcept { - return mi_heap_malloc(mi_get_default_heap(), size); + return mi_heap_malloc(mi_prim_get_default_heap(), size); } // zero initialized small block mi_decl_nodiscard mi_decl_restrict void* mi_zalloc_small(size_t size) mi_attr_noexcept { - return mi_heap_malloc_small_zero(mi_get_default_heap(), size, true); + return mi_heap_malloc_small_zero(mi_prim_get_default_heap(), size, true); } mi_decl_nodiscard extern inline mi_decl_restrict void* mi_heap_zalloc(mi_heap_t* heap, size_t size) mi_attr_noexcept { @@ -155,467 +218,9 @@ mi_decl_nodiscard extern inline mi_decl_restrict void* mi_heap_zalloc(mi_heap_t* } mi_decl_nodiscard mi_decl_restrict void* mi_zalloc(size_t size) mi_attr_noexcept { - return mi_heap_zalloc(mi_get_default_heap(),size); -} - - -// ------------------------------------------------------ -// Check for double free in secure and debug mode -// This is somewhat expensive so only enabled for secure mode 4 -// ------------------------------------------------------ - -#if (MI_ENCODE_FREELIST && (MI_SECURE>=4 || MI_DEBUG!=0)) -// linear check if the free list contains a specific element -static bool mi_list_contains(const mi_page_t* page, const mi_block_t* list, const mi_block_t* elem) { - while (list != NULL) { - if (elem==list) return true; - list = mi_block_next(page, list); - } - return false; -} - -static mi_decl_noinline bool mi_check_is_double_freex(const mi_page_t* page, const mi_block_t* block) { - // The decoded value is in the same page (or NULL). 
- // Walk the free lists to verify positively if it is already freed - if (mi_list_contains(page, page->free, block) || - mi_list_contains(page, page->local_free, block) || - mi_list_contains(page, mi_page_thread_free(page), block)) - { - _mi_error_message(EAGAIN, "double free detected of block %p with size %zu\n", block, mi_page_block_size(page)); - return true; - } - return false; -} - -#define mi_track_page(page,access) { size_t psize; void* pstart = _mi_page_start(_mi_page_segment(page),page,&psize); mi_track_mem_##access( pstart, psize); } - -static inline bool mi_check_is_double_free(const mi_page_t* page, const mi_block_t* block) { - bool is_double_free = false; - mi_block_t* n = mi_block_nextx(page, block, page->keys); // pretend it is freed, and get the decoded first field - if (((uintptr_t)n & (MI_INTPTR_SIZE-1))==0 && // quick check: aligned pointer? - (n==NULL || mi_is_in_same_page(block, n))) // quick check: in same page or NULL? - { - // Suspicous: decoded value a in block is in the same page (or NULL) -- maybe a double free? 
- // (continue in separate function to improve code generation) - is_double_free = mi_check_is_double_freex(page, block); - } - return is_double_free; -} -#else -static inline bool mi_check_is_double_free(const mi_page_t* page, const mi_block_t* block) { - MI_UNUSED(page); - MI_UNUSED(block); - return false; -} -#endif - -// --------------------------------------------------------------------------- -// Check for heap block overflow by setting up padding at the end of the block -// --------------------------------------------------------------------------- - -#if (MI_PADDING>0) && defined(MI_ENCODE_FREELIST) && !MI_TRACK_ENABLED -static bool mi_page_decode_padding(const mi_page_t* page, const mi_block_t* block, size_t* delta, size_t* bsize) { - *bsize = mi_page_usable_block_size(page); - const mi_padding_t* const padding = (mi_padding_t*)((uint8_t*)block + *bsize); - mi_track_mem_defined(padding,sizeof(mi_padding_t)); - *delta = padding->delta; - uint32_t canary = padding->canary; - uintptr_t keys[2]; - keys[0] = page->keys[0]; - keys[1] = page->keys[1]; - bool ok = ((uint32_t)mi_ptr_encode(page,block,keys) == canary && *delta <= *bsize); - mi_track_mem_noaccess(padding,sizeof(mi_padding_t)); - return ok; -} - -// Return the exact usable size of a block. -static size_t mi_page_usable_size_of(const mi_page_t* page, const mi_block_t* block) { - size_t bsize; - size_t delta; - bool ok = mi_page_decode_padding(page, block, &delta, &bsize); - mi_assert_internal(ok); mi_assert_internal(delta <= bsize); - return (ok ? bsize - delta : 0); -} - -static bool mi_verify_padding(const mi_page_t* page, const mi_block_t* block, size_t* size, size_t* wrong) { - size_t bsize; - size_t delta; - bool ok = mi_page_decode_padding(page, block, &delta, &bsize); - *size = *wrong = bsize; - if (!ok) return false; - mi_assert_internal(bsize >= delta); - *size = bsize - delta; - uint8_t* fill = (uint8_t*)block + bsize - delta; - const size_t maxpad = (delta > MI_MAX_ALIGN_SIZE ? 
MI_MAX_ALIGN_SIZE : delta); // check at most the first N padding bytes - mi_track_mem_defined(fill,maxpad); - for (size_t i = 0; i < maxpad; i++) { - if (fill[i] != MI_DEBUG_PADDING) { - *wrong = bsize - delta + i; - ok = false; - break; - } - } - mi_track_mem_noaccess(fill,maxpad); - return ok; -} - -static void mi_check_padding(const mi_page_t* page, const mi_block_t* block) { - size_t size; - size_t wrong; - if (!mi_verify_padding(page,block,&size,&wrong)) { - _mi_error_message(EFAULT, "buffer overflow in heap block %p of size %zu: write after %zu bytes\n", block, size, wrong ); - } -} - -// When a non-thread-local block is freed, it becomes part of the thread delayed free -// list that is freed later by the owning heap. If the exact usable size is too small to -// contain the pointer for the delayed list, then shrink the padding (by decreasing delta) -// so it will later not trigger an overflow error in `mi_free_block`. -static void mi_padding_shrink(const mi_page_t* page, const mi_block_t* block, const size_t min_size) { - size_t bsize; - size_t delta; - bool ok = mi_page_decode_padding(page, block, &delta, &bsize); - mi_assert_internal(ok); - if (!ok || (bsize - delta) >= min_size) return; // usually already enough space - mi_assert_internal(bsize >= min_size); - if (bsize < min_size) return; // should never happen - size_t new_delta = (bsize - min_size); - mi_assert_internal(new_delta < bsize); - mi_padding_t* padding = (mi_padding_t*)((uint8_t*)block + bsize); - padding->delta = (uint32_t)new_delta; -} -#else -static void mi_check_padding(const mi_page_t* page, const mi_block_t* block) { - MI_UNUSED(page); - MI_UNUSED(block); -} - -static size_t mi_page_usable_size_of(const mi_page_t* page, const mi_block_t* block) { - MI_UNUSED(block); - return mi_page_usable_block_size(page); -} - -static void mi_padding_shrink(const mi_page_t* page, const mi_block_t* block, const size_t min_size) { - MI_UNUSED(page); - MI_UNUSED(block); - MI_UNUSED(min_size); -} -#endif 
- -// only maintain stats for smaller objects if requested -#if (MI_STAT>0) -static void mi_stat_free(const mi_page_t* page, const mi_block_t* block) { - #if (MI_STAT < 2) - MI_UNUSED(block); - #endif - mi_heap_t* const heap = mi_heap_get_default(); - const size_t bsize = mi_page_usable_block_size(page); - #if (MI_STAT>1) - const size_t usize = mi_page_usable_size_of(page, block); - mi_heap_stat_decrease(heap, malloc, usize); - #endif - if (bsize <= MI_MEDIUM_OBJ_SIZE_MAX) { - mi_heap_stat_decrease(heap, normal, bsize); - #if (MI_STAT > 1) - mi_heap_stat_decrease(heap, normal_bins[_mi_bin(bsize)], 1); - #endif - } - else if (bsize <= MI_LARGE_OBJ_SIZE_MAX) { - mi_heap_stat_decrease(heap, large, bsize); - } - else { - mi_heap_stat_decrease(heap, huge, bsize); - } -} -#else -static void mi_stat_free(const mi_page_t* page, const mi_block_t* block) { - MI_UNUSED(page); MI_UNUSED(block); -} -#endif - -#if (MI_STAT>0) -// maintain stats for huge objects -static void mi_stat_huge_free(const mi_page_t* page) { - mi_heap_t* const heap = mi_heap_get_default(); - const size_t bsize = mi_page_block_size(page); // to match stats in `page.c:mi_page_huge_alloc` - if (bsize <= MI_LARGE_OBJ_SIZE_MAX) { - mi_heap_stat_decrease(heap, large, bsize); - } - else { - mi_heap_stat_decrease(heap, huge, bsize); - } -} -#else -static void mi_stat_huge_free(const mi_page_t* page) { - MI_UNUSED(page); -} -#endif - -// ------------------------------------------------------ -// Free -// ------------------------------------------------------ - -// multi-threaded free (or free in huge block) -static mi_decl_noinline void _mi_free_block_mt(mi_page_t* page, mi_block_t* block) -{ - // The padding check may access the non-thread-owned page for the key values. - // that is safe as these are constant and the page won't be freed (as the block is not freed yet). 
- mi_check_padding(page, block); - mi_padding_shrink(page, block, sizeof(mi_block_t)); // for small size, ensure we can fit the delayed thread pointers without triggering overflow detection - #if (MI_DEBUG!=0) && !MI_TRACK_ENABLED // note: when tracking, cannot use mi_usable_size with multi-threading - memset(block, MI_DEBUG_FREED, mi_usable_size(block)); - #endif - - // huge page segments are always abandoned and can be freed immediately - mi_segment_t* segment = _mi_page_segment(page); - if (segment->kind==MI_SEGMENT_HUGE) { - mi_stat_huge_free(page); - _mi_segment_huge_page_free(segment, page, block); - return; - } - - // Try to put the block on either the page-local thread free list, or the heap delayed free list. - mi_thread_free_t tfreex; - bool use_delayed; - mi_thread_free_t tfree = mi_atomic_load_relaxed(&page->xthread_free); - do { - use_delayed = (mi_tf_delayed(tfree) == MI_USE_DELAYED_FREE); - if mi_unlikely(use_delayed) { - // unlikely: this only happens on the first concurrent free in a page that is in the full list - tfreex = mi_tf_set_delayed(tfree,MI_DELAYED_FREEING); - } - else { - // usual: directly add to page thread_free list - mi_block_set_next(page, block, mi_tf_block(tfree)); - tfreex = mi_tf_set_block(tfree,block); - } - } while (!mi_atomic_cas_weak_release(&page->xthread_free, &tfree, tfreex)); - - if mi_unlikely(use_delayed) { - // racy read on `heap`, but ok because MI_DELAYED_FREEING is set (see `mi_heap_delete` and `mi_heap_collect_abandon`) - mi_heap_t* const heap = (mi_heap_t*)(mi_atomic_load_acquire(&page->xheap)); //mi_page_heap(page); - mi_assert_internal(heap != NULL); - if (heap != NULL) { - // add to the delayed free list of this heap. 
(do this atomically as the lock only protects heap memory validity) - mi_block_t* dfree = mi_atomic_load_ptr_relaxed(mi_block_t, &heap->thread_delayed_free); - do { - mi_block_set_nextx(heap,block,dfree, heap->keys); - } while (!mi_atomic_cas_ptr_weak_release(mi_block_t,&heap->thread_delayed_free, &dfree, block)); - } - - // and reset the MI_DELAYED_FREEING flag - tfree = mi_atomic_load_relaxed(&page->xthread_free); - do { - tfreex = tfree; - mi_assert_internal(mi_tf_delayed(tfree) == MI_DELAYED_FREEING); - tfreex = mi_tf_set_delayed(tfree,MI_NO_DELAYED_FREE); - } while (!mi_atomic_cas_weak_release(&page->xthread_free, &tfree, tfreex)); - } -} - -// regular free -static inline void _mi_free_block(mi_page_t* page, bool local, mi_block_t* block) -{ - // and push it on the free list - //const size_t bsize = mi_page_block_size(page); - if mi_likely(local) { - // owning thread can free a block directly - if mi_unlikely(mi_check_is_double_free(page, block)) return; - mi_check_padding(page, block); - #if (MI_DEBUG!=0) && !MI_TRACK_ENABLED - memset(block, MI_DEBUG_FREED, mi_page_block_size(page)); - #endif - mi_block_set_next(page, block, page->local_free); - page->local_free = block; - page->used--; - if mi_unlikely(mi_page_all_free(page)) { - _mi_page_retire(page); - } - else if mi_unlikely(mi_page_is_in_full(page)) { - _mi_page_unfull(page); - } - } - else { - _mi_free_block_mt(page,block); - } -} - - -// Adjust a block that was allocated aligned, to the actual start of the block in the page. 
-mi_block_t* _mi_page_ptr_unalign(const mi_segment_t* segment, const mi_page_t* page, const void* p) { - mi_assert_internal(page!=NULL && p!=NULL); - const size_t diff = (uint8_t*)p - _mi_page_start(segment, page, NULL); - const size_t adjust = (diff % mi_page_block_size(page)); - return (mi_block_t*)((uintptr_t)p - adjust); -} - - -static void mi_decl_noinline mi_free_generic(const mi_segment_t* segment, bool local, void* p) mi_attr_noexcept { - mi_page_t* const page = _mi_segment_page_of(segment, p); - mi_block_t* const block = (mi_page_has_aligned(page) ? _mi_page_ptr_unalign(segment, page, p) : (mi_block_t*)p); - mi_stat_free(page, block); // stat_free may access the padding - mi_track_free(p); - _mi_free_block(page, local, block); -} - -// Get the segment data belonging to a pointer -// This is just a single `and` in assembly but does further checks in debug mode -// (and secure mode) if this was a valid pointer. -static inline mi_segment_t* mi_checked_ptr_segment(const void* p, const char* msg) -{ - MI_UNUSED(msg); -#if (MI_DEBUG>0) - if mi_unlikely(((uintptr_t)p & (MI_INTPTR_SIZE - 1)) != 0) { - _mi_error_message(EINVAL, "%s: invalid (unaligned) pointer: %p\n", msg, p); - return NULL; - } -#endif - - mi_segment_t* const segment = _mi_ptr_segment(p); - if mi_unlikely(segment == NULL) return NULL; // checks also for (p==NULL) - -#if (MI_DEBUG>0) - if mi_unlikely(!mi_is_in_heap_region(p)) { - _mi_warning_message("%s: pointer might not point to a valid heap region: %p\n" - "(this may still be a valid very large allocation (over 64MiB))\n", msg, p); - if mi_likely(_mi_ptr_cookie(segment) == segment->cookie) { - _mi_warning_message("(yes, the previous pointer %p was valid after all)\n", p); - } - } -#endif -#if (MI_DEBUG>0 || MI_SECURE>=4) - if mi_unlikely(_mi_ptr_cookie(segment) != segment->cookie) { - _mi_error_message(EINVAL, "%s: pointer does not point to a valid heap space: %p\n", msg, p); - return NULL; - } -#endif - return segment; -} - -// Free a block 
-void mi_free(void* p) mi_attr_noexcept -{ - mi_segment_t* const segment = mi_checked_ptr_segment(p,"mi_free"); - if mi_unlikely(segment == NULL) return; - - mi_threadid_t tid = _mi_thread_id(); - mi_page_t* const page = _mi_segment_page_of(segment, p); - - if mi_likely(tid == mi_atomic_load_relaxed(&segment->thread_id) && page->flags.full_aligned == 0) { // the thread id matches and it is not a full page, nor has aligned blocks - // local, and not full or aligned - mi_block_t* block = (mi_block_t*)(p); - if mi_unlikely(mi_check_is_double_free(page,block)) return; - mi_check_padding(page, block); - mi_stat_free(page, block); - #if (MI_DEBUG!=0) && !MI_TRACK_ENABLED - memset(block, MI_DEBUG_FREED, mi_page_block_size(page)); - #endif - mi_track_free(p); - mi_block_set_next(page, block, page->local_free); - page->local_free = block; - if mi_unlikely(--page->used == 0) { // using this expression generates better code than: page->used--; if (mi_page_all_free(page)) - _mi_page_retire(page); - } - } - else { - // non-local, aligned blocks, or a full page; use the more generic path - // note: recalc page in generic to improve code generation - mi_free_generic(segment, tid == segment->thread_id, p); - } -} - -// return true if successful -bool _mi_free_delayed_block(mi_block_t* block) { - // get segment and page - const mi_segment_t* const segment = _mi_ptr_segment(block); - mi_assert_internal(_mi_ptr_cookie(segment) == segment->cookie); - mi_assert_internal(_mi_thread_id() == segment->thread_id); - mi_page_t* const page = _mi_segment_page_of(segment, block); - - // Clear the no-delayed flag so delayed freeing is used again for this page. - // This must be done before collecting the free lists on this page -- otherwise - // some blocks may end up in the page `thread_free` list with no blocks in the - // heap `thread_delayed_free` list which may cause the page to be never freed! 
- // (it would only be freed if we happen to scan it in `mi_page_queue_find_free_ex`) - if (!_mi_page_try_use_delayed_free(page, MI_USE_DELAYED_FREE, false /* dont overwrite never delayed */)) { - return false; - } - - // collect all other non-local frees to ensure up-to-date `used` count - _mi_page_free_collect(page, false); - - // and free the block (possibly freeing the page as well since used is updated) - _mi_free_block(page, true, block); - return true; -} - -// Bytes available in a block -mi_decl_noinline static size_t mi_page_usable_aligned_size_of(const mi_segment_t* segment, const mi_page_t* page, const void* p) mi_attr_noexcept { - const mi_block_t* block = _mi_page_ptr_unalign(segment, page, p); - const size_t size = mi_page_usable_size_of(page, block); - const ptrdiff_t adjust = (uint8_t*)p - (uint8_t*)block; - mi_assert_internal(adjust >= 0 && (size_t)adjust <= size); - return (size - adjust); -} - -static inline size_t _mi_usable_size(const void* p, const char* msg) mi_attr_noexcept { - const mi_segment_t* const segment = mi_checked_ptr_segment(p, msg); - if (segment==NULL) return 0; // also returns 0 if `p == NULL` - const mi_page_t* const page = _mi_segment_page_of(segment, p); - if mi_likely(!mi_page_has_aligned(page)) { - const mi_block_t* block = (const mi_block_t*)p; - return mi_page_usable_size_of(page, block); - } - else { - // split out to separate routine for improved code generation - return mi_page_usable_aligned_size_of(segment, page, p); - } -} - -mi_decl_nodiscard size_t mi_usable_size(const void* p) mi_attr_noexcept { - return _mi_usable_size(p, "mi_usable_size"); -} - - -// ------------------------------------------------------ -// ensure explicit external inline definitions are emitted! 
-// ------------------------------------------------------ - -#ifdef __cplusplus -void* _mi_externs[] = { - (void*)&_mi_page_malloc, - (void*)&_mi_heap_malloc_zero, - (void*)&mi_malloc, - (void*)&mi_malloc_small, - (void*)&mi_zalloc_small, - (void*)&mi_heap_malloc, - (void*)&mi_heap_zalloc, - (void*)&mi_heap_malloc_small -}; -#endif - - -// ------------------------------------------------------ -// Allocation extensions -// ------------------------------------------------------ - -void mi_free_size(void* p, size_t size) mi_attr_noexcept { - MI_UNUSED_RELEASE(size); - mi_assert(p == NULL || size <= _mi_usable_size(p,"mi_free_size")); - mi_free(p); + return mi_heap_zalloc(mi_prim_get_default_heap(),size); } -void mi_free_size_aligned(void* p, size_t size, size_t alignment) mi_attr_noexcept { - MI_UNUSED_RELEASE(alignment); - mi_assert(((uintptr_t)p % alignment) == 0); - mi_free_size(p,size); -} - -void mi_free_aligned(void* p, size_t alignment) mi_attr_noexcept { - MI_UNUSED_RELEASE(alignment); - mi_assert(((uintptr_t)p % alignment) == 0); - mi_free(p); -} mi_decl_nodiscard extern inline mi_decl_restrict void* mi_heap_calloc(mi_heap_t* heap, size_t count, size_t size) mi_attr_noexcept { size_t total; @@ -624,7 +229,7 @@ mi_decl_nodiscard extern inline mi_decl_restrict void* mi_heap_calloc(mi_heap_t* } mi_decl_nodiscard mi_decl_restrict void* mi_calloc(size_t count, size_t size) mi_attr_noexcept { - return mi_heap_calloc(mi_get_default_heap(),count,size); + return mi_heap_calloc(mi_prim_get_default_heap(),count,size); } // Uninitialized `calloc` @@ -635,13 +240,13 @@ mi_decl_nodiscard extern mi_decl_restrict void* mi_heap_mallocn(mi_heap_t* heap, } mi_decl_nodiscard mi_decl_restrict void* mi_mallocn(size_t count, size_t size) mi_attr_noexcept { - return mi_heap_mallocn(mi_get_default_heap(),count,size); + return mi_heap_mallocn(mi_prim_get_default_heap(),count,size); } // Expand (or shrink) in place (or fail) void* mi_expand(void* p, size_t newsize) mi_attr_noexcept { 
#if MI_PADDING - // we do not shrink/expand with padding enabled + // we do not shrink/expand with padding enabled MI_UNUSED(p); MI_UNUSED(newsize); return NULL; #else @@ -656,11 +261,12 @@ void* _mi_heap_realloc_zero(mi_heap_t* heap, void* p, size_t newsize, bool zero) // if p == NULL then behave as malloc. // else if size == 0 then reallocate to a zero-sized block (and don't return NULL, just as mi_malloc(0)). // (this means that returning NULL always indicates an error, and `p` will not have been freed in that case.) - const size_t size = _mi_usable_size(p,"mi_realloc"); // also works if p == NULL (with size 0) + const size_t size = (p==NULL ? 0 : _mi_usable_size(p,"mi_realloc")); if mi_unlikely(newsize <= size && newsize >= (size / 2) && newsize > 0) { // note: newsize must be > 0 or otherwise we return NULL for realloc(NULL,0) - // todo: adjust potential padding to reflect the new size? - mi_track_free(p); - mi_track_malloc(p,newsize,true); + mi_assert_internal(p!=NULL); + // todo: do not track as the usable size is still the same in the free; adjust potential padding? + // mi_track_resize(p,size,newsize) + // if (newsize < size) { mi_track_mem_noaccess((uint8_t*)p + newsize, size - newsize); } return p; // reallocation still fits and not more than 50% waste } void* newp = mi_heap_malloc(heap,newsize); @@ -668,14 +274,15 @@ void* _mi_heap_realloc_zero(mi_heap_t* heap, void* p, size_t newsize, bool zero) if (zero && newsize > size) { // also set last word in the previous allocation to zero to ensure any padding is zero-initialized const size_t start = (size >= sizeof(intptr_t) ? 
size - sizeof(intptr_t) : 0); - memset((uint8_t*)newp + start, 0, newsize - start); + _mi_memzero((uint8_t*)newp + start, newsize - start); + } + else if (newsize == 0) { + ((uint8_t*)newp)[0] = 0; // work around for applications that expect zero-reallocation to be zero initialized (issue #725) } if mi_likely(p != NULL) { - if mi_likely(_mi_is_aligned(p, sizeof(uintptr_t))) { // a client may pass in an arbitrary pointer `p`.. - const size_t copysize = (newsize > size ? size : newsize); - mi_track_mem_defined(p,copysize); // _mi_useable_size may be too large for byte precise memory tracking.. - _mi_memcpy_aligned(newp, p, copysize); - } + const size_t copysize = (newsize > size ? size : newsize); + mi_track_mem_defined(p,copysize); // _mi_useable_size may be too large for byte precise memory tracking.. + _mi_memcpy(newp, p, copysize); mi_free(p); // only free the original pointer if successful } } @@ -683,7 +290,7 @@ void* _mi_heap_realloc_zero(mi_heap_t* heap, void* p, size_t newsize, bool zero) } mi_decl_nodiscard void* mi_heap_realloc(mi_heap_t* heap, void* p, size_t newsize) mi_attr_noexcept { - return _mi_heap_realloc_zero(heap, p, newsize, false); + return _mi_heap_realloc_zero(heap, p, newsize, false); } mi_decl_nodiscard void* mi_heap_reallocn(mi_heap_t* heap, void* p, size_t count, size_t size) mi_attr_noexcept { @@ -712,24 +319,24 @@ mi_decl_nodiscard void* mi_heap_recalloc(mi_heap_t* heap, void* p, size_t count, mi_decl_nodiscard void* mi_realloc(void* p, size_t newsize) mi_attr_noexcept { - return mi_heap_realloc(mi_get_default_heap(),p,newsize); + return mi_heap_realloc(mi_prim_get_default_heap(),p,newsize); } mi_decl_nodiscard void* mi_reallocn(void* p, size_t count, size_t size) mi_attr_noexcept { - return mi_heap_reallocn(mi_get_default_heap(),p,count,size); + return mi_heap_reallocn(mi_prim_get_default_heap(),p,count,size); } // Reallocate but free `p` on errors mi_decl_nodiscard void* mi_reallocf(void* p, size_t newsize) mi_attr_noexcept { - return 
mi_heap_reallocf(mi_get_default_heap(),p,newsize); + return mi_heap_reallocf(mi_prim_get_default_heap(),p,newsize); } mi_decl_nodiscard void* mi_rezalloc(void* p, size_t newsize) mi_attr_noexcept { - return mi_heap_rezalloc(mi_get_default_heap(), p, newsize); + return mi_heap_rezalloc(mi_prim_get_default_heap(), p, newsize); } mi_decl_nodiscard void* mi_recalloc(void* p, size_t count, size_t size) mi_attr_noexcept { - return mi_heap_recalloc(mi_get_default_heap(), p, count, size); + return mi_heap_recalloc(mi_prim_get_default_heap(), p, count, size); } @@ -741,31 +348,31 @@ mi_decl_nodiscard void* mi_recalloc(void* p, size_t count, size_t size) mi_attr_ // `strdup` using mi_malloc mi_decl_nodiscard mi_decl_restrict char* mi_heap_strdup(mi_heap_t* heap, const char* s) mi_attr_noexcept { if (s == NULL) return NULL; - size_t n = strlen(s); - char* t = (char*)mi_heap_malloc(heap,n+1); - if (t != NULL) _mi_memcpy(t, s, n + 1); + size_t len = _mi_strlen(s); + char* t = (char*)mi_heap_malloc(heap,len+1); + if (t == NULL) return NULL; + _mi_memcpy(t, s, len); + t[len] = 0; return t; } mi_decl_nodiscard mi_decl_restrict char* mi_strdup(const char* s) mi_attr_noexcept { - return mi_heap_strdup(mi_get_default_heap(), s); + return mi_heap_strdup(mi_prim_get_default_heap(), s); } // `strndup` using mi_malloc mi_decl_nodiscard mi_decl_restrict char* mi_heap_strndup(mi_heap_t* heap, const char* s, size_t n) mi_attr_noexcept { if (s == NULL) return NULL; - const char* end = (const char*)memchr(s, 0, n); // find end of string in the first `n` characters (returns NULL if not found) - const size_t m = (end != NULL ? 
(size_t)(end - s) : n); // `m` is the minimum of `n` or the end-of-string - mi_assert_internal(m <= n); - char* t = (char*)mi_heap_malloc(heap, m+1); + const size_t len = _mi_strnlen(s,n); // len <= n + char* t = (char*)mi_heap_malloc(heap, len+1); if (t == NULL) return NULL; - _mi_memcpy(t, s, m); - t[m] = 0; + _mi_memcpy(t, s, len); + t[len] = 0; return t; } mi_decl_nodiscard mi_decl_restrict char* mi_strndup(const char* s, size_t n) mi_attr_noexcept { - return mi_heap_strndup(mi_get_default_heap(),s,n); + return mi_heap_strndup(mi_prim_get_default_heap(),s,n); } #ifndef __wasi__ @@ -774,7 +381,7 @@ mi_decl_nodiscard mi_decl_restrict char* mi_strndup(const char* s, size_t n) mi_ #ifndef PATH_MAX #define PATH_MAX MAX_PATH #endif -#include + mi_decl_nodiscard mi_decl_restrict char* mi_heap_realpath(mi_heap_t* heap, const char* fname, char* resolved_name) mi_attr_noexcept { // todo: use GetFullPathNameW to allow longer file names char buf[PATH_MAX]; @@ -793,6 +400,7 @@ mi_decl_nodiscard mi_decl_restrict char* mi_heap_realpath(mi_heap_t* heap, const } } #else +/* #include // pathconf static size_t mi_path_max(void) { static size_t path_max = 0; @@ -804,25 +412,37 @@ static size_t mi_path_max(void) { } return path_max; } - +*/ char* mi_heap_realpath(mi_heap_t* heap, const char* fname, char* resolved_name) mi_attr_noexcept { if (resolved_name != NULL) { return realpath(fname,resolved_name); } else { - size_t n = mi_path_max(); + char* rname = realpath(fname, NULL); + if (rname == NULL) return NULL; + char* result = mi_heap_strdup(heap, rname); + mi_cfree(rname); // use checked free (which may be redirected to our free but that's ok) + // note: with ASAN realpath is intercepted and mi_cfree may leak the returned pointer :-( + return result; + } + /* + const size_t n = mi_path_max(); char* buf = (char*)mi_malloc(n+1); - if (buf==NULL) return NULL; + if (buf == NULL) { + errno = ENOMEM; + return NULL; + } char* rname = realpath(fname,buf); char* result = 
mi_heap_strndup(heap,rname,n); // ok if `rname==NULL` mi_free(buf); return result; } + */ } #endif mi_decl_nodiscard mi_decl_restrict char* mi_realpath(const char* fname, char* resolved_name) mi_attr_noexcept { - return mi_heap_realpath(mi_get_default_heap(),fname,resolved_name); + return mi_heap_realpath(mi_prim_get_default_heap(),fname,resolved_name); } #endif @@ -843,12 +463,16 @@ static bool mi_try_new_handler(bool nothrow) { #else std::new_handler h = std::set_new_handler(); std::set_new_handler(h); - #endif + #endif if (h==NULL) { - _mi_error_message(ENOMEM, "out of memory in 'new'"); + _mi_error_message(ENOMEM, "out of memory in 'new'"); + #if defined(_CPPUNWIND) || defined(__cpp_exceptions) // exceptions are not always enabled if (!nothrow) { throw std::bad_alloc(); } + #else + MI_UNUSED(nothrow); + #endif return false; } else { @@ -876,7 +500,7 @@ static std_new_handler_t mi_get_new_handler() { static bool mi_try_new_handler(bool nothrow) { std_new_handler_t h = mi_get_new_handler(); if (h==NULL) { - _mi_error_message(ENOMEM, "out of memory in 'new'"); + _mi_error_message(ENOMEM, "out of memory in 'new'"); if (!nothrow) { abort(); // cannot throw in plain C, use abort } @@ -889,20 +513,46 @@ static bool mi_try_new_handler(bool nothrow) { } #endif -static mi_decl_noinline void* mi_try_new(size_t size, bool nothrow ) { +mi_decl_export mi_decl_noinline void* mi_heap_try_new(mi_heap_t* heap, size_t size, bool nothrow ) { void* p = NULL; while(p == NULL && mi_try_new_handler(nothrow)) { - p = mi_malloc(size); + p = mi_heap_malloc(heap,size); } return p; } -mi_decl_nodiscard mi_decl_restrict void* mi_new(size_t size) { - void* p = mi_malloc(size); - if mi_unlikely(p == NULL) return mi_try_new(size,false); +static mi_decl_noinline void* mi_try_new(size_t size, bool nothrow) { + return mi_heap_try_new(mi_prim_get_default_heap(), size, nothrow); +} + + +mi_decl_nodiscard mi_decl_restrict void* mi_heap_alloc_new(mi_heap_t* heap, size_t size) { + void* p = 
mi_heap_malloc(heap,size); + if mi_unlikely(p == NULL) return mi_heap_try_new(heap, size, false); return p; } +mi_decl_nodiscard mi_decl_restrict void* mi_new(size_t size) { + return mi_heap_alloc_new(mi_prim_get_default_heap(), size); +} + + +mi_decl_nodiscard mi_decl_restrict void* mi_heap_alloc_new_n(mi_heap_t* heap, size_t count, size_t size) { + size_t total; + if mi_unlikely(mi_count_size_overflow(count, size, &total)) { + mi_try_new_handler(false); // on overflow we invoke the try_new_handler once to potentially throw std::bad_alloc + return NULL; + } + else { + return mi_heap_alloc_new(heap,total); + } +} + +mi_decl_nodiscard mi_decl_restrict void* mi_new_n(size_t count, size_t size) { + return mi_heap_alloc_new_n(mi_prim_get_default_heap(), count, size); +} + + mi_decl_nodiscard mi_decl_restrict void* mi_new_nothrow(size_t size) mi_attr_noexcept { void* p = mi_malloc(size); if mi_unlikely(p == NULL) return mi_try_new(size, true); @@ -927,17 +577,6 @@ mi_decl_nodiscard mi_decl_restrict void* mi_new_aligned_nothrow(size_t size, siz return p; } -mi_decl_nodiscard mi_decl_restrict void* mi_new_n(size_t count, size_t size) { - size_t total; - if mi_unlikely(mi_count_size_overflow(count, size, &total)) { - mi_try_new_handler(false); // on overflow we invoke the try_new_handler once to potentially throw std::bad_alloc - return NULL; - } - else { - return mi_new(total); - } -} - mi_decl_nodiscard void* mi_new_realloc(void* p, size_t newsize) { void* q; do { @@ -956,3 +595,103 @@ mi_decl_nodiscard void* mi_new_reallocn(void* p, size_t newcount, size_t size) { return mi_new_realloc(p, total); } } + +#if MI_GUARDED +// We always allocate a guarded allocation at an offset (`mi_page_has_aligned` will be true). 
+// We then set the first word of the block to `0` for regular offset aligned allocations (in `alloc-aligned.c`) +// and the first word to `~0` for guarded allocations to have a correct `mi_usable_size` + +static void* mi_block_ptr_set_guarded(mi_block_t* block, size_t obj_size) { + // TODO: we can still make padding work by moving it out of the guard page area + mi_page_t* const page = _mi_ptr_page(block); + mi_page_set_has_aligned(page, true); + block->next = MI_BLOCK_TAG_GUARDED; + + // set guard page at the end of the block + const size_t block_size = mi_page_block_size(page); // must use `block_size` to match `mi_free_local` + const size_t os_page_size = _mi_os_page_size(); + mi_assert_internal(block_size >= obj_size + os_page_size + sizeof(mi_block_t)); + if (block_size < obj_size + os_page_size + sizeof(mi_block_t)) { + // should never happen + mi_free(block); + return NULL; + } + uint8_t* guard_page = (uint8_t*)block + block_size - os_page_size; + // note: the alignment of the guard page relies on blocks being os_page_size aligned which + // is ensured in `mi_arena_page_alloc_fresh`. + mi_assert_internal(_mi_is_aligned(block, os_page_size)); + mi_assert_internal(_mi_is_aligned(guard_page, os_page_size)); + if (!page->memid.is_pinned && _mi_is_aligned(guard_page, os_page_size)) { + _mi_os_protect(guard_page, os_page_size); + } + else { + _mi_warning_message("unable to set a guard page behind an object due to pinned memory (large OS pages?) 
(object %p of size %zu)\n", block, block_size); + } + + // align pointer just in front of the guard page + size_t offset = block_size - os_page_size - obj_size; + mi_assert_internal(offset > sizeof(mi_block_t)); + if (offset > MI_PAGE_MAX_OVERALLOC_ALIGN) { + // give up to place it right in front of the guard page if the offset is too large for unalignment + offset = MI_PAGE_MAX_OVERALLOC_ALIGN; + } + void* p = (uint8_t*)block + offset; + mi_track_align(block, p, offset, obj_size); + mi_track_mem_defined(block, sizeof(mi_block_t)); + return p; +} + +mi_decl_restrict void* _mi_heap_malloc_guarded(mi_heap_t* heap, size_t size, bool zero) mi_attr_noexcept +{ + #if defined(MI_PADDING_SIZE) + mi_assert(MI_PADDING_SIZE==0); + #endif + // allocate multiple of page size ending in a guard page + // ensure minimal alignment requirement? + const size_t os_page_size = _mi_os_page_size(); + const size_t obj_size = (mi_option_is_enabled(mi_option_guarded_precise) ? size : _mi_align_up(size, MI_MAX_ALIGN_SIZE)); + const size_t bsize = _mi_align_up(_mi_align_up(obj_size, MI_MAX_ALIGN_SIZE) + sizeof(mi_block_t), MI_MAX_ALIGN_SIZE); + const size_t req_size = _mi_align_up(bsize + os_page_size, os_page_size); + mi_block_t* const block = (mi_block_t*)_mi_malloc_generic(heap, req_size, zero, 0 /* huge_alignment */); + if (block==NULL) return NULL; + void* const p = mi_block_ptr_set_guarded(block, obj_size); + + // stats + mi_track_malloc(p, size, zero); + if (p != NULL) { + if (!mi_heap_is_initialized(heap)) { heap = mi_prim_get_default_heap(); } + #if MI_STAT>1 + mi_heap_stat_adjust_decrease(heap, malloc_requested, req_size); + mi_heap_stat_increase(heap, malloc_requested, size); + #endif + mi_heap_stat_counter_increase(heap, malloc_guarded_count, 1); + } + #if MI_DEBUG>3 + if (p != NULL && zero) { + mi_assert_expensive(mi_mem_is_zero(p, size)); + } + #endif + return p; +} +#endif + +// ------------------------------------------------------ +// ensure explicit external inline 
definitions are emitted! +// ------------------------------------------------------ + +#ifdef __cplusplus +void* _mi_externs[] = { + (void*)&_mi_page_malloc, + (void*)&_mi_page_malloc_zero, + (void*)&_mi_heap_malloc_zero, + (void*)&_mi_heap_malloc_zero_ex, + (void*)&mi_malloc, + (void*)&mi_malloc_small, + (void*)&mi_zalloc_small, + (void*)&mi_heap_malloc, + (void*)&mi_heap_zalloc, + (void*)&mi_heap_malloc_small + // (void*)&mi_heap_alloc_new, + // (void*)&mi_heap_alloc_new_n +}; +#endif diff --git a/depends/mimalloc/src/arena-meta.c b/depends/mimalloc/src/arena-meta.c new file mode 100644 index 000000000000..3b64ab9b6367 --- /dev/null +++ b/depends/mimalloc/src/arena-meta.c @@ -0,0 +1,179 @@ +/* ---------------------------------------------------------------------------- +Copyright (c) 2019-2024, Microsoft Research, Daan Leijen +This is free software; you can redistribute it and/or modify it under the +terms of the MIT license. A copy of the license can be found in the file +"LICENSE" at the root of this distribution. +-----------------------------------------------------------------------------*/ + +/* ---------------------------------------------------------------------------- + We have a special "mini" allocator just for allocation of meta-data like + the heap (`mi_heap_t`) or thread-local data (`mi_tld_t`). + + We reuse the bitmap of the arena's for allocation of 64b blocks inside + an arena slice (64KiB). 
+ We always ensure that meta data is zero'd (we zero on `free`) +-----------------------------------------------------------------------------*/ + +#include "mimalloc.h" +#include "mimalloc/internal.h" +#include "bitmap.h" + +/* ----------------------------------------------------------- + Meta data allocation +----------------------------------------------------------- */ + +#define MI_META_PAGE_SIZE MI_ARENA_SLICE_SIZE +#define MI_META_PAGE_ALIGN MI_ARENA_SLICE_ALIGN + +// large enough such that META_MAX_SIZE > 4k (even on 32-bit) +#define MI_META_BLOCK_SIZE (1 << (16 - MI_BCHUNK_BITS_SHIFT)) // 128 on 64-bit +#define MI_META_BLOCK_ALIGN MI_META_BLOCK_SIZE +#define MI_META_BLOCKS_PER_PAGE (MI_META_PAGE_SIZE / MI_META_BLOCK_SIZE) // 512 +#define MI_META_MAX_SIZE (MI_BCHUNK_SIZE * MI_META_BLOCK_SIZE) + +#if MI_META_MAX_SIZE <= 4096 +#error "max meta object size should be at least 4KiB" +#endif + +typedef struct mi_meta_page_s { + _Atomic(struct mi_meta_page_s*) next; // a linked list of meta-data pages (never released) + mi_memid_t memid; // provenance of the meta-page memory itself + mi_bbitmap_t blocks_free; // a small bitmap with 1 bit per block. 
+} mi_meta_page_t; + +static mi_decl_cache_align _Atomic(mi_meta_page_t*) mi_meta_pages = MI_ATOMIC_VAR_INIT(NULL); + + +#if MI_DEBUG > 1 +static mi_meta_page_t* mi_meta_page_of_ptr(void* p, size_t* block_idx) { + mi_meta_page_t* mpage = (mi_meta_page_t*)((uint8_t*)mi_align_down_ptr(p,MI_META_PAGE_ALIGN) + _mi_os_secure_guard_page_size()); + if (block_idx != NULL) { + *block_idx = ((uint8_t*)p - (uint8_t*)mpage) / MI_META_BLOCK_SIZE; + } + return mpage; +} +#endif + +static mi_meta_page_t* mi_meta_page_next( mi_meta_page_t* mpage ) { + return mi_atomic_load_ptr_acquire(mi_meta_page_t, &mpage->next); +} + +static void* mi_meta_block_start( mi_meta_page_t* mpage, size_t block_idx ) { + mi_assert_internal(_mi_is_aligned((uint8_t*)mpage - _mi_os_secure_guard_page_size(), MI_META_PAGE_ALIGN)); + mi_assert_internal(block_idx < MI_META_BLOCKS_PER_PAGE); + void* p = ((uint8_t*)mpage - _mi_os_secure_guard_page_size() + (block_idx * MI_META_BLOCK_SIZE)); + mi_assert_internal(mpage == mi_meta_page_of_ptr(p,NULL)); + return p; +} + +// allocate a fresh meta page and add it to the global list. +static mi_meta_page_t* mi_meta_page_zalloc(void) { + // allocate a fresh arena slice + // note: careful with _mi_subproc as it may recurse into mi_tld and meta_page_zalloc again.. (same with _mi_os_numa_node()...) + mi_memid_t memid; + uint8_t* base = (uint8_t*)_mi_arenas_alloc_aligned(_mi_subproc(), MI_META_PAGE_SIZE, MI_META_PAGE_ALIGN, 0, + true /* commit*/, (MI_SECURE==0) /* allow large? 
*/, + NULL /* req arena */, 0 /* thread_seq */, -1 /* numa node */, &memid); + if (base == NULL) return NULL; + mi_assert_internal(_mi_is_aligned(base,MI_META_PAGE_ALIGN)); + if (!memid.initially_zero) { + _mi_memzero_aligned(base, MI_ARENA_SLICE_SIZE); + } + + // guard pages + #if MI_SECURE >= 1 + _mi_os_secure_guard_page_set_at(base, memid); + _mi_os_secure_guard_page_set_before(base + MI_META_PAGE_SIZE, memid); + #endif + + // initialize the page and free block bitmap + mi_meta_page_t* mpage = (mi_meta_page_t*)(base + _mi_os_secure_guard_page_size()); + mpage->memid = memid; + mi_bbitmap_init(&mpage->blocks_free, MI_META_BLOCKS_PER_PAGE, true /* already_zero */); + const size_t mpage_size = offsetof(mi_meta_page_t,blocks_free) + mi_bbitmap_size(MI_META_BLOCKS_PER_PAGE, NULL); + const size_t info_blocks = _mi_divide_up(mpage_size,MI_META_BLOCK_SIZE); + const size_t guard_blocks = _mi_divide_up(_mi_os_secure_guard_page_size(), MI_META_BLOCK_SIZE); + mi_assert_internal(info_blocks + 2*guard_blocks < MI_META_BLOCKS_PER_PAGE); + mi_bbitmap_unsafe_setN(&mpage->blocks_free, info_blocks + guard_blocks, MI_META_BLOCKS_PER_PAGE - info_blocks - 2*guard_blocks); + + // push atomically in front of the meta page list + // (note: there is no ABA issue since we never free meta-pages) + mi_meta_page_t* old = mi_atomic_load_ptr_acquire(mi_meta_page_t,&mi_meta_pages); + do { + mi_atomic_store_ptr_release(mi_meta_page_t, &mpage->next, old); + } while(!mi_atomic_cas_ptr_weak_acq_rel(mi_meta_page_t,&mi_meta_pages,&old,mpage)); + return mpage; +} + + +// allocate meta-data +mi_decl_noinline void* _mi_meta_zalloc( size_t size, mi_memid_t* pmemid ) +{ + mi_assert_internal(pmemid != NULL); + size = _mi_align_up(size,MI_META_BLOCK_SIZE); + if (size == 0 || size > MI_META_MAX_SIZE) return NULL; + const size_t block_count = _mi_divide_up(size,MI_META_BLOCK_SIZE); + mi_assert_internal(block_count > 0 && block_count < MI_BCHUNK_BITS); + mi_meta_page_t* mpage0 = 
mi_atomic_load_ptr_acquire(mi_meta_page_t,&mi_meta_pages); + mi_meta_page_t* mpage = mpage0; + while (mpage != NULL) { + size_t block_idx; + if (mi_bbitmap_try_find_and_clearN(&mpage->blocks_free, block_count, 0, &block_idx)) { + // found and claimed `block_count` blocks + *pmemid = _mi_memid_create_meta(mpage, block_idx, block_count); + return mi_meta_block_start(mpage,block_idx); + } + else { + mpage = mi_meta_page_next(mpage); + } + } + // failed to find space in existing pages + if (mi_atomic_load_ptr_acquire(mi_meta_page_t,&mi_meta_pages) != mpage0) { + // the page list was updated by another thread in the meantime, retry + return _mi_meta_zalloc(size,pmemid); + } + // otherwise, allocate a fresh metapage and try once more + mpage = mi_meta_page_zalloc(); + if (mpage != NULL) { + size_t block_idx; + if (mi_bbitmap_try_find_and_clearN(&mpage->blocks_free, block_count, 0, &block_idx)) { + // found and claimed `block_count` blocks + *pmemid = _mi_memid_create_meta(mpage, block_idx, block_count); + return mi_meta_block_start(mpage,block_idx); + } + } + // if all this failed, allocate from the OS + return _mi_os_alloc(size, pmemid); +} + +// free meta-data +mi_decl_noinline void _mi_meta_free(void* p, size_t size, mi_memid_t memid) { + if (p==NULL) return; + if (memid.memkind == MI_MEM_META) { + mi_assert_internal(_mi_divide_up(size, MI_META_BLOCK_SIZE) == memid.mem.meta.block_count); + const size_t block_count = memid.mem.meta.block_count; + const size_t block_idx = memid.mem.meta.block_index; + mi_meta_page_t* mpage = (mi_meta_page_t*)memid.mem.meta.meta_page; + mi_assert_internal(mi_meta_page_of_ptr(p,NULL) == mpage); + mi_assert_internal(block_idx + block_count <= MI_META_BLOCKS_PER_PAGE); + mi_assert_internal(mi_bbitmap_is_clearN(&mpage->blocks_free, block_idx, block_count)); + // we zero on free (and on the initial page allocation) so we don't need a "dirty" map + _mi_memzero_aligned(mi_meta_block_start(mpage, block_idx), block_count*MI_META_BLOCK_SIZE); + 
mi_bbitmap_setN(&mpage->blocks_free, block_idx, block_count); + } + else { + _mi_arenas_free(p,size,memid); + } +} + +// used for debug output +bool _mi_meta_is_meta_page(void* p) +{ + mi_meta_page_t* mpage0 = mi_atomic_load_ptr_acquire(mi_meta_page_t, &mi_meta_pages); + mi_meta_page_t* mpage = mpage0; + while (mpage != NULL) { + if ((void*)mpage == p) return true; + mpage = mi_meta_page_next(mpage); + } + return false; +} diff --git a/depends/mimalloc/src/arena.c b/depends/mimalloc/src/arena.c index 239c56675619..b26f444288f5 100644 --- a/depends/mimalloc/src/arena.c +++ b/depends/mimalloc/src/arena.c @@ -1,5 +1,5 @@ /* ---------------------------------------------------------------------------- -Copyright (c) 2019-2022, Microsoft Research, Daan Leijen +Copyright (c) 2019-2024, Microsoft Research, Daan Leijen This is free software; you can redistribute it and/or modify it under the terms of the MIT license. A copy of the license can be found in the file "LICENSE" at the root of this distribution. @@ -11,140 +11,155 @@ large blocks (>= MI_ARENA_MIN_BLOCK_SIZE, 4MiB). In contrast to the rest of mimalloc, the arenas are shared between threads and need to be accessed using atomic operations. -Currently arenas are only used to for huge OS page (1GiB) reservations, -or direct OS memory reservations -- otherwise it delegates to direct allocation from the OS. -In the future, we can expose an API to manually add more kinds of arenas -which is sometimes needed for embedded devices or shared memory for example. -(We can also employ this with WASI or `sbrk` systems to reserve large arenas - on demand and be able to reuse them efficiently). +Arenas are also used to for huge OS page (1GiB) reservations or for reserving +OS memory upfront which can be improve performance or is sometimes needed +on embedded devices. We can also employ this with WASI or `sbrk` systems +to reserve large arenas upfront and be able to reuse the memory more effectively. 
-The arena allocation needs to be thread safe and we use an atomic bitmap to allocate. +The arena allocation needs to be thread safe and we use an atomic bitmap to allocate. -----------------------------------------------------------------------------*/ -#include "mimalloc.h" -#include "mimalloc-internal.h" -#include "mimalloc-atomic.h" - -#include // memset -#include // ENOMEM -#include "bitmap.h" // atomic bitmap +#include "mimalloc.h" +#include "mimalloc/internal.h" +#include "bitmap.h" -// os.c -void* _mi_os_alloc_aligned(size_t size, size_t alignment, bool commit, bool* large, mi_stats_t* stats); -void _mi_os_free_ex(void* p, size_t size, bool was_committed, mi_stats_t* stats); +/* ----------------------------------------------------------- + Arena id's +----------------------------------------------------------- */ -void* _mi_os_alloc_huge_os_pages(size_t pages, int numa_node, mi_msecs_t max_secs, size_t* pages_reserved, size_t* psize); -void _mi_os_free_huge_pages(void* p, size_t size, mi_stats_t* stats); +mi_arena_id_t _mi_arena_id_none(void) { + return NULL; +} -bool _mi_os_commit(void* p, size_t size, bool* is_zero, mi_stats_t* stats); -bool _mi_os_decommit(void* addr, size_t size, mi_stats_t* stats); +mi_arena_t* _mi_arena_from_id(mi_arena_id_t id) { + return (mi_arena_t*)id; +} -/* ----------------------------------------------------------- - Arena allocation ------------------------------------------------------------ */ +static bool mi_arena_id_is_suitable(mi_arena_t* arena, mi_arena_t* req_arena) { + return ((arena == req_arena) || // they match, + (req_arena == NULL && !arena->is_exclusive)); // or the arena is not exclusive, and we didn't request a specific one +} -// Block info: bit 0 contains the `in_use` bit, the upper bits the -// size in count of arena blocks. 
-typedef uintptr_t mi_block_info_t; -#define MI_ARENA_BLOCK_SIZE (MI_SEGMENT_SIZE) // 8MiB (must be at least MI_SEGMENT_ALIGN) -#define MI_ARENA_MIN_OBJ_SIZE (MI_ARENA_BLOCK_SIZE/2) // 4MiB -#define MI_MAX_ARENAS (64) // not more than 126 (since we use 7 bits in the memid and an arena index + 1) - -// A memory arena descriptor -typedef struct mi_arena_s { - mi_arena_id_t id; // arena id; 0 for non-specific - bool exclusive; // only allow allocations if specifically for this arena - _Atomic(uint8_t*) start; // the start of the memory area - size_t block_count; // size of the area in arena blocks (of `MI_ARENA_BLOCK_SIZE`) - size_t field_count; // number of bitmap fields (where `field_count * MI_BITMAP_FIELD_BITS >= block_count`) - int numa_node; // associated NUMA node - bool is_zero_init; // is the arena zero initialized? - bool allow_decommit; // is decommit allowed? if true, is_large should be false and blocks_committed != NULL - bool is_large; // large- or huge OS pages (always committed) - _Atomic(size_t) search_idx; // optimization to start the search for free blocks - mi_bitmap_field_t* blocks_dirty; // are the blocks potentially non-zero? - mi_bitmap_field_t* blocks_committed; // are the blocks committed? 
(can be NULL for memory that cannot be decommitted) - mi_bitmap_field_t blocks_inuse[1]; // in-place bitmap of in-use blocks (of size `field_count`) -} mi_arena_t; - - -// The available arenas -static mi_decl_cache_align _Atomic(mi_arena_t*) mi_arenas[MI_MAX_ARENAS]; -static mi_decl_cache_align _Atomic(size_t) mi_arena_count; // = 0 +bool _mi_arena_memid_is_suitable(mi_memid_t memid, mi_arena_t* request_arena) { + if (memid.memkind == MI_MEM_ARENA) { + return mi_arena_id_is_suitable(memid.mem.arena.arena, request_arena); + } + else { + return mi_arena_id_is_suitable(NULL, request_arena); + } +} +size_t mi_arenas_get_count(mi_subproc_t* subproc) { + return mi_atomic_load_relaxed(&subproc->arena_count); +} -/* ----------------------------------------------------------- - Arena id's - 0 is used for non-arena's (like OS memory) - id = arena_index + 1 ------------------------------------------------------------ */ +mi_arena_t* mi_arena_from_index(mi_subproc_t* subproc, size_t idx) { + mi_assert_internal(idx < mi_arenas_get_count(subproc)); + return mi_atomic_load_ptr_relaxed(mi_arena_t, &subproc->arenas[idx]); +} -static size_t mi_arena_id_index(mi_arena_id_t id) { - return (size_t)(id <= 0 ? 
MI_MAX_ARENAS : id - 1); +static size_t mi_arena_info_slices(mi_arena_t* arena) { + return arena->info_slices; } -static mi_arena_id_t mi_arena_id_create(size_t arena_index) { - mi_assert_internal(arena_index < MI_MAX_ARENAS); - mi_assert_internal(MI_MAX_ARENAS <= 126); - int id = (int)arena_index + 1; - mi_assert_internal(id >= 1 && id <= 127); - return id; +#if MI_DEBUG > 1 +static bool mi_arena_has_page(mi_arena_t* arena, mi_page_t* page) { + return (page->memid.memkind == MI_MEM_ARENA && + page->memid.mem.arena.arena == arena && + mi_bitmap_is_setN(arena->pages, page->memid.mem.arena.slice_index, 1)); } +#endif -mi_arena_id_t _mi_arena_id_none(void) { - return 0; +size_t mi_arena_min_alignment(void) { + return MI_ARENA_SLICE_ALIGN; } -static bool mi_arena_id_is_suitable(mi_arena_id_t arena_id, bool arena_is_exclusive, mi_arena_id_t req_arena_id) { - return ((!arena_is_exclusive && req_arena_id == _mi_arena_id_none()) || - (arena_id == req_arena_id)); +mi_decl_nodiscard static bool mi_arena_commit(mi_arena_t* arena, void* start, size_t size, bool* is_zero, size_t already_committed) { + if (arena != NULL && arena->commit_fun != NULL) { + return (*arena->commit_fun)(true, start, size, is_zero, arena->commit_fun_arg); + } + else if (already_committed > 0) { + return _mi_os_commit_ex(start, size, is_zero, already_committed); + } + else { + return _mi_os_commit(start, size, is_zero); + } } + /* ----------------------------------------------------------- - Arena allocations get a memory id where the lower 8 bits are - the arena id, and the upper bits the block index. + Util ----------------------------------------------------------- */ -// Use `0` as a special id for direct OS allocated memory. -#define MI_MEMID_OS 0 -static size_t mi_arena_memid_create(mi_arena_id_t id, bool exclusive, mi_bitmap_index_t bitmap_index) { - mi_assert_internal(((bitmap_index << 8) >> 8) == bitmap_index); // no overflow? 
- mi_assert_internal(id >= 0 && id <= 0x7F); - return ((bitmap_index << 8) | ((uint8_t)id & 0x7F) | (exclusive ? 0x80 : 0)); +// Size of an arena +static size_t mi_arena_size(mi_arena_t* arena) { + return mi_size_of_slices(arena->slice_count); } -static bool mi_arena_memid_indices(size_t arena_memid, size_t* arena_index, mi_bitmap_index_t* bitmap_index) { - *bitmap_index = (arena_memid >> 8); - mi_arena_id_t id = (int)(arena_memid & 0x7F); - *arena_index = mi_arena_id_index(id); - return ((arena_memid & 0x80) != 0); +// Start of the arena memory area +static uint8_t* mi_arena_start(mi_arena_t* arena) { + return ((uint8_t*)arena); } -bool _mi_arena_memid_is_suitable(size_t arena_memid, mi_arena_id_t request_arena_id) { - mi_arena_id_t id = (int)(arena_memid & 0x7F); - bool exclusive = ((arena_memid & 0x80) != 0); - return mi_arena_id_is_suitable(id, exclusive, request_arena_id); +// Start of a slice +uint8_t* mi_arena_slice_start(mi_arena_t* arena, size_t slice_index) { + return (mi_arena_start(arena) + mi_size_of_slices(slice_index)); } -static size_t mi_block_count_of_size(size_t size) { - return _mi_divide_up(size, MI_ARENA_BLOCK_SIZE); +// Arena area +void* mi_arena_area(mi_arena_id_t arena_id, size_t* size) { + if (size != NULL) *size = 0; + mi_arena_t* arena = _mi_arena_from_id(arena_id); + if (arena == NULL) return NULL; + if (size != NULL) { *size = mi_size_of_slices(arena->slice_count); } + return mi_arena_start(arena); } -/* ----------------------------------------------------------- - Thread safe allocation in an arena ------------------------------------------------------------ */ -static bool mi_arena_alloc(mi_arena_t* arena, size_t blocks, mi_bitmap_index_t* bitmap_idx) -{ - size_t idx = 0; // mi_atomic_load_relaxed(&arena->search_idx); // start from last search; ok to be relaxed as the exact start does not matter - if (_mi_bitmap_try_find_from_claim_across(arena->blocks_inuse, arena->field_count, idx, blocks, bitmap_idx)) { - 
mi_atomic_store_relaxed(&arena->search_idx, mi_bitmap_index_field(*bitmap_idx)); // start search from found location next time around - return true; - }; - return false; + +// Create an arena memid +static mi_memid_t mi_memid_create_arena(mi_arena_t* arena, size_t slice_index, size_t slice_count) { + mi_assert_internal(slice_index < UINT32_MAX); + mi_assert_internal(slice_count < UINT32_MAX); + mi_assert_internal(slice_count > 0); + mi_assert_internal(slice_index < arena->slice_count); + mi_memid_t memid = _mi_memid_create(MI_MEM_ARENA); + memid.mem.arena.arena = arena; + memid.mem.arena.slice_index = (uint32_t)slice_index; + memid.mem.arena.slice_count = (uint32_t)slice_count; + return memid; +} + +// get the arena and slice span +static mi_arena_t* mi_arena_from_memid(mi_memid_t memid, size_t* slice_index, size_t* slice_count) { + mi_assert_internal(memid.memkind == MI_MEM_ARENA); + mi_arena_t* arena = memid.mem.arena.arena; + if (slice_index) *slice_index = memid.mem.arena.slice_index; + if (slice_count) *slice_count = memid.mem.arena.slice_count; + return arena; +} + +static mi_arena_t* mi_page_arena(mi_page_t* page, size_t* slice_index, size_t* slice_count) { + // todo: maybe store the arena* directly in the page? + return mi_arena_from_memid(page->memid, slice_index, slice_count); +} + +static size_t mi_page_full_size(mi_page_t* page) { + if (page->memid.memkind == MI_MEM_ARENA) { + return page->memid.mem.arena.slice_count * MI_ARENA_SLICE_SIZE; + } + else if (mi_memid_is_os(page->memid) || page->memid.memkind == MI_MEM_EXTERNAL) { + mi_assert_internal((uint8_t*)page->memid.mem.os.base <= (uint8_t*)page); + const ptrdiff_t presize = (uint8_t*)page - (uint8_t*)page->memid.mem.os.base; + mi_assert_internal((ptrdiff_t)page->memid.mem.os.size >= presize); + return (presize > (ptrdiff_t)page->memid.mem.os.size ? 
0 : page->memid.mem.os.size - presize); + } + else { + return 0; + } } @@ -152,284 +167,1175 @@ static bool mi_arena_alloc(mi_arena_t* arena, size_t blocks, mi_bitmap_index_t* Arena Allocation ----------------------------------------------------------- */ -static mi_decl_noinline void* mi_arena_alloc_from(mi_arena_t* arena, size_t arena_index, size_t needed_bcount, - bool* commit, bool* large, bool* is_pinned, bool* is_zero, - mi_arena_id_t req_arena_id, size_t* memid, mi_os_tld_t* tld) +static mi_decl_noinline void* mi_arena_try_alloc_at( + mi_arena_t* arena, size_t slice_count, bool commit, size_t tseq, mi_memid_t* memid) { - MI_UNUSED(arena_index); - mi_assert_internal(mi_arena_id_index(arena->id) == arena_index); - if (!mi_arena_id_is_suitable(arena->id, arena->exclusive, req_arena_id)) return NULL; - - mi_bitmap_index_t bitmap_index; - if (!mi_arena_alloc(arena, needed_bcount, &bitmap_index)) return NULL; - - // claimed it! set the dirty bits (todo: no need for an atomic op here?) - void* p = arena->start + (mi_bitmap_index_bit(bitmap_index)*MI_ARENA_BLOCK_SIZE); - *memid = mi_arena_memid_create(arena->id, arena->exclusive, bitmap_index); - *is_zero = _mi_bitmap_claim_across(arena->blocks_dirty, arena->field_count, needed_bcount, bitmap_index, NULL); - *large = arena->is_large; - *is_pinned = (arena->is_large || !arena->allow_decommit); - if (arena->blocks_committed == NULL) { - // always committed - *commit = true; - } - else if (*commit) { - // arena not committed as a whole, but commit requested: ensure commit now - bool any_uncommitted; - _mi_bitmap_claim_across(arena->blocks_committed, arena->field_count, needed_bcount, bitmap_index, &any_uncommitted); - if (any_uncommitted) { - bool commit_zero; - _mi_os_commit(p, needed_bcount * MI_ARENA_BLOCK_SIZE, &commit_zero, tld->stats); - if (commit_zero) *is_zero = true; + size_t slice_index; + if (!mi_bbitmap_try_find_and_clearN(arena->slices_free, slice_count, tseq, &slice_index)) return NULL; + + // claimed 
it! + void* p = mi_arena_slice_start(arena, slice_index); + *memid = mi_memid_create_arena(arena, slice_index, slice_count); + memid->is_pinned = arena->memid.is_pinned; + + // set the dirty bits and track which slices become accessible + size_t touched_slices = slice_count; + if (arena->memid.initially_zero) { + size_t already_dirty = 0; + memid->initially_zero = mi_bitmap_setN(arena->slices_dirty, slice_index, slice_count, &already_dirty); + mi_assert_internal(already_dirty <= touched_slices); + touched_slices -= already_dirty; + } + + // set commit state + if (commit) { + // commit requested, but the range may not be committed as a whole: ensure it is committed now + const size_t already_committed = mi_bitmap_popcountN(arena->slices_committed, slice_index, slice_count); + if (already_committed < slice_count) { + // not all committed, try to commit now + bool commit_zero = false; + if (!_mi_os_commit_ex(p, mi_size_of_slices(slice_count), &commit_zero, mi_size_of_slices(slice_count - already_committed))) { + // if the commit fails, release ownership, and return NULL; + // note: this does not roll back dirty bits but that is ok. + mi_bbitmap_setN(arena->slices_free, slice_index, slice_count); + return NULL; + } + if (commit_zero) { + memid->initially_zero = true; + } + + // set the commit bits + mi_bitmap_setN(arena->slices_committed, slice_index, slice_count, NULL); + + // committed + #if MI_DEBUG > 1 + if (memid->initially_zero) { + if (!mi_mem_is_zero(p, mi_size_of_slices(slice_count))) { + _mi_error_message(EFAULT, "interal error: arena allocation was not zero-initialized!\n"); + memid->initially_zero = false; + } + } + #endif + } + else { + // already fully committed. 
+ _mi_os_reuse(p, mi_size_of_slices(slice_count)); + // if the OS has overcommit, and this is the first time we access these pages, then + // count the commit now (as at arena reserve we didn't count those commits as these are on-demand) + if (_mi_os_has_overcommit() && touched_slices > 0) { + mi_subproc_stat_increase( arena->subproc, committed, mi_size_of_slices(touched_slices)); + } + } + + mi_assert_internal(mi_bitmap_is_setN(arena->slices_committed, slice_index, slice_count)); + memid->initially_committed = true; + + // tool support + if (memid->initially_zero) { + mi_track_mem_defined(p, slice_count * MI_ARENA_SLICE_SIZE); + } + else { + mi_track_mem_undefined(p, slice_count * MI_ARENA_SLICE_SIZE); } } else { - // no need to commit, but check if already fully committed - *commit = _mi_bitmap_is_claimed_across(arena->blocks_committed, arena->field_count, needed_bcount, bitmap_index); + // no need to commit, but check if it is already fully committed + memid->initially_committed = mi_bitmap_is_setN(arena->slices_committed, slice_index, slice_count); + if (!memid->initially_committed) { + // partly committed.. 
adjust stats + size_t already_committed_count = 0; + mi_bitmap_setN(arena->slices_committed, slice_index, slice_count, &already_committed_count); + mi_bitmap_clearN(arena->slices_committed, slice_index, slice_count); + mi_os_stat_decrease(committed, mi_size_of_slices(already_committed_count)); + } } + + mi_assert_internal(mi_bbitmap_is_clearN(arena->slices_free, slice_index, slice_count)); + if (commit) { mi_assert_internal(mi_bitmap_is_setN(arena->slices_committed, slice_index, slice_count)); } + if (commit) { mi_assert_internal(memid->initially_committed); } + mi_assert_internal(mi_bitmap_is_setN(arena->slices_dirty, slice_index, slice_count)); + return p; } -static mi_decl_noinline void* mi_arena_allocate(int numa_node, size_t size, size_t alignment, bool* commit, bool* large, - bool* is_pinned, bool* is_zero, - mi_arena_id_t req_arena_id, size_t* memid, mi_os_tld_t* tld) -{ - MI_UNUSED_RELEASE(alignment); - mi_assert_internal(alignment <= MI_SEGMENT_ALIGN); - const size_t max_arena = mi_atomic_load_relaxed(&mi_arena_count); - const size_t bcount = mi_block_count_of_size(size); - if mi_likely(max_arena == 0) return NULL; - mi_assert_internal(size <= bcount*MI_ARENA_BLOCK_SIZE); - - size_t arena_index = mi_arena_id_index(req_arena_id); - if (arena_index < MI_MAX_ARENAS) { - // try a specific arena if requested - mi_arena_t* arena = mi_atomic_load_ptr_relaxed(mi_arena_t, &mi_arenas[arena_index]); - if (arena != NULL && - (arena->numa_node < 0 || arena->numa_node == numa_node) && // numa local? 
- (*large || !arena->is_large)) // large OS pages allowed, or arena is not large OS pages - { - void* p = mi_arena_alloc_from(arena, arena_index, bcount, commit, large, is_pinned, is_zero, req_arena_id, memid, tld); - mi_assert_internal((uintptr_t)p % alignment == 0); - if (p != NULL) return p; + +static int mi_reserve_os_memory_ex2(mi_subproc_t* subproc, size_t size, bool commit, bool allow_large, bool exclusive, mi_arena_id_t* arena_id); + +// try to reserve a fresh arena space +static bool mi_arena_reserve(mi_subproc_t* subproc, size_t req_size, bool allow_large, mi_arena_id_t* arena_id) +{ + const size_t arena_count = mi_arenas_get_count(subproc); + if (arena_count > (MI_MAX_ARENAS - 4)) return false; + + // calc reserve + size_t arena_reserve = mi_option_get_size(mi_option_arena_reserve); + if (arena_reserve == 0) return false; + + if (!_mi_os_has_virtual_reserve()) { + arena_reserve = arena_reserve/4; // be conservative if virtual reserve is not supported (for WASM for example) + } + arena_reserve = _mi_align_up(arena_reserve, MI_ARENA_SLICE_SIZE); + + if (arena_count >= 1 && arena_count <= 128) { + // scale up the arena sizes exponentially every 4 entries + const size_t multiplier = (size_t)1 << _mi_clamp(arena_count/4, 0, 16); + size_t reserve = 0; + if (!mi_mul_overflow(multiplier, arena_reserve, &reserve)) { + arena_reserve = reserve; } } - else { - // try numa affine allocation - for (size_t i = 0; i < max_arena; i++) { - mi_arena_t* arena = mi_atomic_load_ptr_relaxed(mi_arena_t, &mi_arenas[i]); - if (arena == NULL) break; // end reached - if ((arena->numa_node < 0 || arena->numa_node == numa_node) && // numa local? 
- (*large || !arena->is_large)) // large OS pages allowed, or arena is not large OS pages - { - void* p = mi_arena_alloc_from(arena, i, bcount, commit, large, is_pinned, is_zero, req_arena_id, memid, tld); - mi_assert_internal((uintptr_t)p % alignment == 0); - if (p != NULL) return p; - } + + // check arena bounds + const size_t min_reserve = MI_ARENA_MIN_SIZE; + const size_t max_reserve = MI_ARENA_MAX_SIZE; // 16 GiB + if (arena_reserve < min_reserve) { + arena_reserve = min_reserve; + } + else if (arena_reserve > max_reserve) { + arena_reserve = max_reserve; + } + + if (arena_reserve < req_size) return false; // should be able to at least handle the current allocation size + + // commit eagerly? + bool arena_commit = false; + const bool overcommit = _mi_os_has_overcommit(); + if (mi_option_get(mi_option_arena_eager_commit) == 2) { arena_commit = overcommit; } + else if (mi_option_get(mi_option_arena_eager_commit) == 1) { arena_commit = true; } + + // on an OS with overcommit (Linux) we don't count the commit yet as it is on-demand. Once a slice + // is actually allocated for the first time it will be counted. + const bool adjust = (overcommit && arena_commit); + if (adjust) { mi_subproc_stat_adjust_decrease( subproc, committed, arena_reserve); } + // and try to reserve the arena + int err = mi_reserve_os_memory_ex2(subproc, arena_reserve, arena_commit, allow_large, false /* exclusive? */, arena_id); + if (err != 0) { + if (adjust) { mi_subproc_stat_adjust_increase( subproc, committed, arena_reserve); } // roll back + // failed, try a smaller size? + const size_t small_arena_reserve = (MI_SIZE_BITS == 32 ? 128*MI_MiB : 1*MI_GiB); + if (adjust) { mi_subproc_stat_adjust_decrease( subproc, committed, arena_reserve); } + if (arena_reserve > small_arena_reserve) { + // try again + err = mi_reserve_os_memory_ex(small_arena_reserve, arena_commit, allow_large, false /* exclusive? 
*/, arena_id); + if (err != 0 && adjust) { mi_subproc_stat_adjust_increase( subproc, committed, arena_reserve); } // roll back } + } + return (err==0); +} - // try from another numa node instead.. - for (size_t i = 0; i < max_arena; i++) { - mi_arena_t* arena = mi_atomic_load_ptr_relaxed(mi_arena_t, &mi_arenas[i]); - if (arena == NULL) break; // end reached - if ((arena->numa_node >= 0 && arena->numa_node != numa_node) && // not numa local! - (*large || !arena->is_large)) // large OS pages allowed, or arena is not large OS pages - { - void* p = mi_arena_alloc_from(arena, i, bcount, commit, large, is_pinned, is_zero, req_arena_id, memid, tld); - mi_assert_internal((uintptr_t)p % alignment == 0); - if (p != NULL) return p; - } + + + +/* ----------------------------------------------------------- + Arena iteration +----------------------------------------------------------- */ + +static inline bool mi_arena_is_suitable(mi_arena_t* arena, mi_arena_t* req_arena, bool match_numa, int numa_node, bool allow_pinned) { + if (!allow_pinned && arena->memid.is_pinned) return false; + if (!mi_arena_id_is_suitable(arena, req_arena)) return false; + if (req_arena == NULL) { // if not specific, check numa affinity + const bool numa_suitable = (numa_node < 0 || arena->numa_node < 0 || arena->numa_node == numa_node); + if (match_numa) { if (!numa_suitable) return false; } + else { if (numa_suitable) return false; } + } + return true; +} + +#define mi_forall_arenas(subproc, req_arena, tseq, name_arena) { \ + const size_t _arena_count = mi_arenas_get_count(subproc); \ + const size_t _arena_cycle = (_arena_count == 0 ? 0 : _arena_count - 1); /* first search the arenas below the last one */ \ + /* always start searching in the arena's below the max */ \ + size_t _start = (_arena_cycle <= 1 ? 
0 : (tseq % _arena_cycle)); \ + for (size_t _i = 0; _i < _arena_count; _i++) { \ + mi_arena_t* name_arena; \ + if (req_arena != NULL) { \ + name_arena = req_arena; /* if there is a specific req_arena, only search that one */\ + if (_i > 0) break; /* only once */ \ + } \ + else { \ + size_t _idx; \ + if (_i < _arena_cycle) { \ + _idx = _i + _start; \ + if (_idx >= _arena_cycle) { _idx -= _arena_cycle; } /* adjust so we rotate through the cycle */ \ + } \ + else { \ + _idx = _i; /* remaining arena's */ \ + } \ + name_arena = mi_arena_from_index(subproc,_idx); \ + } \ + if (name_arena != NULL) \ + { + +#define mi_forall_arenas_end() \ + } \ + } \ + } + +#define mi_forall_suitable_arenas(subproc, req_arena, tseq, match_numa, numa_node, allow_large, name_arena) \ + mi_forall_arenas(subproc, req_arena,tseq,name_arena) { \ + if (mi_arena_is_suitable(name_arena, req_arena, match_numa, numa_node, allow_large)) { \ + +#define mi_forall_suitable_arenas_end() \ + }} \ + mi_forall_arenas_end() + +/* ----------------------------------------------------------- + Arena allocation +----------------------------------------------------------- */ + +// allocate slices from the arenas +static mi_decl_noinline void* mi_arenas_try_find_free( + mi_subproc_t* subproc, size_t slice_count, size_t alignment, + bool commit, bool allow_large, mi_arena_t* req_arena, size_t tseq, int numa_node, mi_memid_t* memid) +{ + mi_assert_internal(slice_count <= mi_slice_count_of_size(MI_ARENA_MAX_OBJ_SIZE)); + mi_assert(alignment <= MI_ARENA_SLICE_ALIGN); + if (alignment > MI_ARENA_SLICE_ALIGN) return NULL; + + // search arena's + mi_forall_suitable_arenas(subproc, req_arena, tseq, true /* only numa matching */, numa_node, allow_large, arena) + { + void* p = mi_arena_try_alloc_at(arena, slice_count, commit, tseq, memid); + if (p != NULL) return p; + } + mi_forall_suitable_arenas_end(); + if (numa_node < 0) return NULL; + + // search again but now regardless of preferred numa affinity + 
mi_forall_suitable_arenas(subproc, req_arena, tseq, false /* numa non-matching now */, numa_node, allow_large, arena) + { + void* p = mi_arena_try_alloc_at(arena, slice_count, commit, tseq, memid); + if (p != NULL) return p; + } + mi_forall_suitable_arenas_end(); + return NULL; +} + +// Allocate slices from the arena's -- potentially allocating a fresh arena +static mi_decl_noinline void* mi_arenas_try_alloc( + mi_subproc_t* subproc, + size_t slice_count, size_t alignment, + bool commit, bool allow_large, + mi_arena_t* req_arena, size_t tseq, int numa_node, mi_memid_t* memid) +{ + mi_assert(slice_count <= MI_ARENA_MAX_OBJ_SLICES); + mi_assert(alignment <= MI_ARENA_SLICE_ALIGN); + void* p; + + // try to find free slices in the arena's + p = mi_arenas_try_find_free(subproc, slice_count, alignment, commit, allow_large, req_arena, tseq, numa_node, memid); + if (p != NULL) return p; + + // did we need a specific arena? + if (req_arena != NULL) return NULL; + + // don't create arena's while preloading (todo: or should we?) + if (_mi_preloading()) return NULL; + + // otherwise, try to reserve a new arena -- but one thread at a time.. (todo: allow 2 or 4 to reduce contention?) 
+ const size_t arena_count = mi_arenas_get_count(subproc); + mi_lock(&subproc->arena_reserve_lock) { + if (arena_count == mi_arenas_get_count(subproc)) { + // we are the first to enter the lock, reserve a fresh arena + mi_arena_id_t arena_id = 0; + mi_arena_reserve(subproc, mi_size_of_slices(slice_count), allow_large, &arena_id); + } + else { + // another thread already reserved a new arena } } + // try once more to allocate in the new arena + mi_assert_internal(req_arena == NULL); + p = mi_arenas_try_find_free(subproc, slice_count, alignment, commit, allow_large, req_arena, tseq, numa_node, memid); + if (p != NULL) return p; + return NULL; } +// Allocate from the OS (if allowed) +static void* mi_arena_os_alloc_aligned( + size_t size, size_t alignment, size_t align_offset, + bool commit, bool allow_large, + mi_arena_id_t req_arena_id, mi_memid_t* memid) +{ + // if we cannot use OS allocation, return NULL + if (mi_option_is_enabled(mi_option_disallow_os_alloc) || req_arena_id != _mi_arena_id_none()) { + errno = ENOMEM; + return NULL; + } + + if (align_offset > 0) { + return _mi_os_alloc_aligned_at_offset(size, alignment, align_offset, commit, allow_large, memid); + } + else { + return _mi_os_alloc_aligned(size, alignment, commit, allow_large, memid); + } +} + -void* _mi_arena_alloc_aligned(size_t size, size_t alignment, bool* commit, bool* large, bool* is_pinned, bool* is_zero, - mi_arena_id_t req_arena_id, size_t* memid, mi_os_tld_t* tld) +// Allocate large sized memory +void* _mi_arenas_alloc_aligned( mi_subproc_t* subproc, + size_t size, size_t alignment, size_t align_offset, + bool commit, bool allow_large, + mi_arena_t* req_arena, size_t tseq, int numa_node, mi_memid_t* memid) { - mi_assert_internal(commit != NULL && is_pinned != NULL && is_zero != NULL && memid != NULL && tld != NULL); + mi_assert_internal(memid != NULL); mi_assert_internal(size > 0); - *memid = MI_MEMID_OS; - *is_zero = false; - *is_pinned = false; - - bool default_large = false; - if 
(large==NULL) large = &default_large; // ensure `large != NULL` - const int numa_node = _mi_os_numa_node(tld); // current numa node // try to allocate in an arena if the alignment is small enough and the object is not too small (as for heap meta data) - if (size >= MI_ARENA_MIN_OBJ_SIZE && alignment <= MI_SEGMENT_ALIGN) { - void* p = mi_arena_allocate(numa_node, size, alignment, commit, large, is_pinned, is_zero, req_arena_id, memid, tld); + if (!mi_option_is_enabled(mi_option_disallow_arena_alloc) && // is arena allocation allowed? + size >= MI_ARENA_MIN_OBJ_SIZE && size <= MI_ARENA_MAX_OBJ_SIZE && // and not too small/large + alignment <= MI_ARENA_SLICE_ALIGN && align_offset == 0) // and good alignment + { + const size_t slice_count = mi_slice_count_of_size(size); + void* p = mi_arenas_try_alloc(subproc,slice_count, alignment, commit, allow_large, req_arena, tseq, numa_node, memid); if (p != NULL) return p; } - // finally, fall back to the OS - if (mi_option_is_enabled(mi_option_limit_os_alloc) || req_arena_id != _mi_arena_id_none()) { - errno = ENOMEM; + // fall back to the OS + void* p = mi_arena_os_alloc_aligned(size, alignment, align_offset, commit, allow_large, req_arena, memid); + return p; +} + +void* _mi_arenas_alloc(mi_subproc_t* subproc, size_t size, bool commit, bool allow_large, mi_arena_t* req_arena, size_t tseq, int numa_node, mi_memid_t* memid) +{ + return _mi_arenas_alloc_aligned(subproc, size, MI_ARENA_SLICE_SIZE, 0, commit, allow_large, req_arena, tseq, numa_node, memid); +} + + + +/* ----------------------------------------------------------- + Arena page allocation +----------------------------------------------------------- */ + +static bool mi_arena_try_claim_abandoned(size_t slice_index, mi_arena_t* arena, mi_heaptag_t heap_tag, bool* keep_abandoned) { + // found an abandoned page of the right size + mi_page_t* const page = (mi_page_t*)mi_arena_slice_start(arena, slice_index); + // can we claim ownership? 
+ if (!mi_page_try_claim_ownership(page)) { + // there was a concurrent free .. + // we need to keep it in the abandoned map as the free will call `mi_arena_page_unabandon`, + // and wait for readers (us!) to finish. This is why it is very important to set the abandoned + // bit again (or otherwise the unabandon will never stop waiting). + *keep_abandoned = true; + return false; + } + if (heap_tag != page->heap_tag) { + // wrong heap_tag.. we need to unown again + // note: this normally never happens unless heaptags are actually used. + // (an unown might free the page, and depending on that we can keep it in the abandoned map or not) + // note: a minor wrinkle: the page will still be mapped but the abandoned map entry is (temporarily) clear at this point. + // so we cannot check in `mi_arenas_free` for this invariant to hold. + const bool freed = _mi_page_unown(page); + *keep_abandoned = !freed; + return false; + } + // yes, we can reclaim it, keep the abandoned map entry clear + *keep_abandoned = false; + return true; +} + +static mi_page_t* mi_arenas_page_try_find_abandoned(mi_subproc_t* subproc, size_t slice_count, size_t block_size, mi_arena_t* req_arena, mi_heaptag_t heaptag, size_t tseq) +{ + MI_UNUSED(slice_count); + const size_t bin = _mi_bin(block_size); + mi_assert_internal(bin < MI_BIN_COUNT); + + // any abandoned in our size class? 
+ mi_assert_internal(subproc != NULL); + if (mi_atomic_load_relaxed(&subproc->abandoned_count[bin]) == 0) { return NULL; } - *is_zero = true; - *memid = MI_MEMID_OS; - void* p = _mi_os_alloc_aligned(size, alignment, *commit, large, tld->stats); - if (p != NULL) *is_pinned = *large; - return p; + + // search arena's + const bool allow_large = true; + const int any_numa = -1; + const bool match_numa = true; + mi_forall_suitable_arenas(subproc, req_arena, tseq, match_numa, any_numa, allow_large, arena) + { + size_t slice_index; + mi_bitmap_t* const bitmap = arena->pages_abandoned[bin]; + + if (mi_bitmap_try_find_and_claim(bitmap, tseq, &slice_index, &mi_arena_try_claim_abandoned, arena, heaptag)) { + // found an abandoned page of the right size + // and claimed ownership. + mi_page_t* page = (mi_page_t*)mi_arena_slice_start(arena, slice_index); + mi_assert_internal(mi_page_is_owned(page)); + mi_assert_internal(mi_page_is_abandoned(page)); + mi_assert_internal(mi_arena_has_page(arena,page)); + mi_atomic_decrement_relaxed(&subproc->abandoned_count[bin]); + mi_tld_t* tld = _mi_thread_tld(); + mi_tld_stat_decrease( tld, pages_abandoned, 1); + mi_tld_stat_counter_increase( tld, pages_reclaim_on_alloc, 1); + + _mi_page_free_collect(page, false); // update `used` count + mi_assert_internal(mi_bbitmap_is_clearN(arena->slices_free, slice_index, slice_count)); + mi_assert_internal(page->slice_committed > 0 || mi_bitmap_is_setN(arena->slices_committed, slice_index, slice_count)); + mi_assert_internal(mi_bitmap_is_setN(arena->slices_dirty, slice_index, slice_count)); + mi_assert_internal(_mi_is_aligned(page, MI_PAGE_ALIGN)); + mi_assert_internal(_mi_ptr_page(page)==page); + mi_assert_internal(_mi_ptr_page(mi_page_start(page))==page); + mi_assert_internal(mi_page_block_size(page) == block_size); + mi_assert_internal(!mi_page_is_full(page)); + return page; + } + } + mi_forall_suitable_arenas_end(); + return NULL; } -void* _mi_arena_alloc(size_t size, bool* commit, bool* large, 
bool* is_pinned, bool* is_zero, mi_arena_id_t req_arena_id, size_t* memid, mi_os_tld_t* tld) +// Allocate a fresh page +static mi_page_t* mi_arenas_page_alloc_fresh(size_t slice_count, size_t block_size, size_t block_alignment, + mi_arena_t* req_arena, int numa_node, bool commit, mi_tld_t* tld) { - return _mi_arena_alloc_aligned(size, MI_ARENA_BLOCK_SIZE, commit, large, is_pinned, is_zero, req_arena_id, memid, tld); + const bool allow_large = (MI_SECURE < 2); // 2 = guard page at end of each arena page + const bool os_align = (block_alignment > MI_PAGE_MAX_OVERALLOC_ALIGN); + const size_t page_alignment = MI_ARENA_SLICE_ALIGN; + + // try to allocate from free space in arena's + mi_memid_t memid = _mi_memid_none(); + mi_page_t* page = NULL; + const size_t alloc_size = mi_size_of_slices(slice_count); + if (!mi_option_is_enabled(mi_option_disallow_arena_alloc) && // allowed to allocate from arena's? + !os_align && // not large alignment + slice_count <= MI_ARENA_MAX_OBJ_SLICES) // and not too large + { + page = (mi_page_t*)mi_arenas_try_alloc(tld->subproc, slice_count, page_alignment, commit, allow_large, req_arena, tld->thread_seq, numa_node, &memid); + if (page != NULL) { + mi_assert_internal(mi_bitmap_is_clearN(memid.mem.arena.arena->pages, memid.mem.arena.slice_index, memid.mem.arena.slice_count)); + mi_bitmap_set(memid.mem.arena.arena->pages, memid.mem.arena.slice_index); + } + } + + // otherwise fall back to the OS + if (page == NULL) { + if (os_align) { + // note: slice_count already includes the page + mi_assert_internal(slice_count >= mi_slice_count_of_size(block_size) + mi_slice_count_of_size(page_alignment)); + page = (mi_page_t*)mi_arena_os_alloc_aligned(alloc_size, block_alignment, page_alignment /* align offset */, commit, allow_large, req_arena, &memid); + } + else { + page = (mi_page_t*)mi_arena_os_alloc_aligned(alloc_size, page_alignment, 0 /* align offset */, commit, allow_large, req_arena, &memid); + } + } + + if (page == NULL) return NULL; + 
mi_assert_internal(_mi_is_aligned(page, MI_PAGE_ALIGN)); + mi_assert_internal(!os_align || _mi_is_aligned((uint8_t*)page + page_alignment, block_alignment)); + + // guard page at the end of mimalloc page? + #if MI_SECURE < 2 + const size_t page_noguard_size = alloc_size; + #else + mi_assert(alloc_size > _mi_os_secure_guard_page_size()); + const size_t page_noguard_size = alloc_size - _mi_os_secure_guard_page_size(); + if (memid.initially_committed) { + _mi_os_secure_guard_page_set_at((uint8_t*)page + page_noguard_size, memid); + } + #endif + + // claimed free slices: initialize the page partly + if (!memid.initially_zero && memid.initially_committed) { + mi_track_mem_undefined(page, slice_count * MI_ARENA_SLICE_SIZE); + _mi_memzero_aligned(page, sizeof(*page)); + } + else if (memid.initially_committed) { + mi_track_mem_defined(page, slice_count * MI_ARENA_SLICE_SIZE); + } + #if MI_DEBUG > 1 + if (memid.initially_zero && memid.initially_committed) { + if (!mi_mem_is_zero(page, page_noguard_size)) { + _mi_error_message(EFAULT, "internal error: page memory was not zero initialized.\n"); + memid.initially_zero = false; + _mi_memzero_aligned(page, sizeof(*page)); + } + } + #endif + mi_assert(MI_PAGE_INFO_SIZE >= mi_page_info_size()); + + size_t block_start; + #if MI_GUARDED + // in a guarded build, we align pages with blocks a multiple of an OS page size, to the OS page size + // this ensures that all blocks in such pages are OS page size aligned (which is needed for the guard pages) + const size_t os_page_size = _mi_os_page_size(); + mi_assert_internal(MI_PAGE_ALIGN >= os_page_size); + if (!os_align && block_size % os_page_size == 0 && block_size > os_page_size /* at least 2 or more */ ) { + block_start = _mi_align_up(mi_page_info_size(), os_page_size); + } + else + #endif + if (os_align) { + block_start = MI_PAGE_ALIGN; + } + else if (_mi_is_power_of_two(block_size) && block_size <= MI_PAGE_MAX_START_BLOCK_ALIGN2) { + // naturally align all power-of-2 blocks + 
block_start = _mi_align_up(mi_page_info_size(), block_size); + } + else { + // otherwise start after the info + block_start = mi_page_info_size(); + } + const size_t reserved = (os_align ? 1 : (page_noguard_size - block_start) / block_size); + mi_assert_internal(reserved > 0 && reserved <= UINT16_MAX); + + // commit first block? + size_t commit_size = 0; + if (!memid.initially_committed) { + commit_size = _mi_align_up(block_start + block_size, MI_PAGE_MIN_COMMIT_SIZE); + if (commit_size > page_noguard_size) { commit_size = page_noguard_size; } + bool is_zero; + if (!mi_arena_commit( mi_memid_arena(memid), page, commit_size, &is_zero, 0)) { + _mi_arenas_free(page, alloc_size, memid); + return NULL; + } + if (!memid.initially_zero && !is_zero) { + _mi_memzero_aligned(page, commit_size); + } + } + + // initialize + page->reserved = (uint16_t)reserved; + page->page_start = (uint8_t*)page + block_start; + page->block_size = block_size; + page->slice_committed = commit_size; + page->memid = memid; + page->free_is_zero = memid.initially_zero; + if (block_size > 0 && _mi_is_power_of_two(block_size)) { + page->block_size_shift = (uint8_t)mi_ctz(block_size); + } + else { + page->block_size_shift = 0; + } + // and own it + mi_page_try_claim_ownership(page); + + // register in the page map + _mi_page_map_register(page); + + // stats + mi_tld_stat_increase(tld, pages, 1); + mi_tld_stat_increase(tld, page_bins[_mi_page_bin(page)], 1); + + mi_assert_internal(_mi_ptr_page(page)==page); + mi_assert_internal(_mi_ptr_page(mi_page_start(page))==page); + mi_assert_internal(mi_page_block_size(page) == block_size); + mi_assert_internal(mi_page_is_abandoned(page)); + mi_assert_internal(mi_page_is_owned(page)); + + return page; } -void* mi_arena_area(mi_arena_id_t arena_id, size_t* size) { - if (size != NULL) *size = 0; - size_t arena_index = mi_arena_id_index(arena_id); - if (arena_index >= MI_MAX_ARENAS) return NULL; - mi_arena_t* arena = mi_atomic_load_ptr_relaxed(mi_arena_t, 
&mi_arenas[arena_index]); - if (arena == NULL) return NULL; - if (size != NULL) *size = arena->block_count * MI_ARENA_BLOCK_SIZE; - return arena->start; +// Allocate a regular small/medium/large page. +static mi_page_t* mi_arenas_page_regular_alloc(mi_heap_t* heap, size_t slice_count, size_t block_size) { + mi_arena_t* req_arena = heap->exclusive_arena; + mi_tld_t* const tld = heap->tld; + + // 1. look for an abandoned page + mi_page_t* page = mi_arenas_page_try_find_abandoned(tld->subproc, slice_count, block_size, req_arena, heap->tag, tld->thread_seq); + if (page != NULL) { + return page; // return as abandoned + } + + // 2. find a free block, potentially allocating a new arena + const long commit_on_demand = mi_option_get(mi_option_page_commit_on_demand); + const bool commit = (slice_count <= mi_slice_count_of_size(MI_PAGE_MIN_COMMIT_SIZE) || // always commit small pages + (commit_on_demand == 2 && _mi_os_has_overcommit()) || (commit_on_demand == 0)); + page = mi_arenas_page_alloc_fresh(slice_count, block_size, 1, req_arena, heap->numa_node, commit, tld); + if (page == NULL) return NULL; + + mi_assert_internal(page->memid.memkind != MI_MEM_ARENA || page->memid.mem.arena.slice_count == slice_count); + if (!_mi_page_init(heap, page)) { + _mi_arenas_free( page, mi_page_full_size(page), page->memid); + return NULL; + } + + return page; +} + +// Allocate a page containing one block (very large, or with large alignment) +static mi_page_t* mi_arenas_page_singleton_alloc(mi_heap_t* heap, size_t block_size, size_t block_alignment) { + mi_arena_t* req_arena = heap->exclusive_arena; + mi_tld_t* const tld = heap->tld; + const bool os_align = (block_alignment > MI_PAGE_MAX_OVERALLOC_ALIGN); + const size_t info_size = (os_align ? 
MI_PAGE_ALIGN : mi_page_info_size()); + #if MI_SECURE < 2 + const size_t slice_count = mi_slice_count_of_size(info_size + block_size); + #else + const size_t slice_count = mi_slice_count_of_size(_mi_align_up(info_size + block_size, _mi_os_secure_guard_page_size()) + _mi_os_secure_guard_page_size()); + #endif + + mi_page_t* page = mi_arenas_page_alloc_fresh(slice_count, block_size, block_alignment, req_arena, heap->numa_node, true /* commit singletons always */, tld); + if (page == NULL) return NULL; + + mi_assert(page->reserved == 1); + if (!_mi_page_init(heap, page)) { + _mi_arenas_free( page, mi_page_full_size(page), page->memid); + return NULL; + } + + return page; +} + + +mi_page_t* _mi_arenas_page_alloc(mi_heap_t* heap, size_t block_size, size_t block_alignment) { + mi_page_t* page; + if mi_unlikely(block_alignment > MI_PAGE_MAX_OVERALLOC_ALIGN) { + mi_assert_internal(_mi_is_power_of_two(block_alignment)); + page = mi_arenas_page_singleton_alloc(heap, block_size, block_alignment); + } + else if (block_size <= MI_SMALL_MAX_OBJ_SIZE) { + page = mi_arenas_page_regular_alloc(heap, mi_slice_count_of_size(MI_SMALL_PAGE_SIZE), block_size); + } + else if (block_size <= MI_MEDIUM_MAX_OBJ_SIZE) { + page = mi_arenas_page_regular_alloc(heap, mi_slice_count_of_size(MI_MEDIUM_PAGE_SIZE), block_size); + } + #if MI_ENABLE_LARGE_PAGES + else if (block_size <= MI_LARGE_MAX_OBJ_SIZE) { + page = mi_arenas_page_regular_alloc(heap, mi_slice_count_of_size(MI_LARGE_PAGE_SIZE), block_size); + } + #endif + else { + page = mi_arenas_page_singleton_alloc(heap, block_size, block_alignment); + } + // mi_assert_internal(page == NULL || _mi_page_segment(page)->subproc == tld->subproc); + mi_assert_internal(_mi_is_aligned(page, MI_PAGE_ALIGN)); + mi_assert_internal(_mi_ptr_page(page)==page); + mi_assert_internal(_mi_ptr_page(mi_page_start(page))==page); + mi_assert_internal(block_alignment <= MI_PAGE_MAX_OVERALLOC_ALIGN || _mi_is_aligned(mi_page_start(page), block_alignment)); + + return 
page; +} + +void _mi_arenas_page_free(mi_page_t* page, mi_tld_t* stats_tld /* can be NULL */) { + mi_assert_internal(_mi_is_aligned(page, MI_PAGE_ALIGN)); + mi_assert_internal(_mi_ptr_page(page)==page); + mi_assert_internal(mi_page_is_owned(page)); + mi_assert_internal(mi_page_all_free(page)); + mi_assert_internal(mi_page_is_abandoned(page)); + mi_assert_internal(page->next==NULL && page->prev==NULL); + + if (stats_tld != NULL) { + mi_tld_stat_decrease(stats_tld, page_bins[_mi_page_bin(page)], 1); + mi_tld_stat_decrease(stats_tld, pages, 1); + } + else { + mi_os_stat_decrease(page_bins[_mi_page_bin(page)], 1); + mi_os_stat_decrease(pages, 1); + } + + #if MI_DEBUG>1 + if (page->memid.memkind==MI_MEM_ARENA && !mi_page_is_full(page)) { + size_t bin = _mi_bin(mi_page_block_size(page)); + size_t slice_index; + size_t slice_count; + mi_arena_t* const arena = mi_page_arena(page, &slice_index, &slice_count); + mi_assert_internal(mi_bbitmap_is_clearN(arena->slices_free, slice_index, slice_count)); + mi_assert_internal(page->slice_committed > 0 || mi_bitmap_is_setN(arena->slices_committed, slice_index, slice_count)); + mi_assert_internal(mi_bitmap_is_clearN(arena->pages_abandoned[bin], slice_index, 1)); + mi_assert_internal(mi_bitmap_is_setN(page->memid.mem.arena.arena->pages, page->memid.mem.arena.slice_index, 1)); + // note: we cannot check for `!mi_page_is_abandoned_and_mapped` since that may + // be (temporarily) not true if the free happens while trying to reclaim + // see `mi_arana_try_claim_abandoned` + } + #endif + + // recommit guard page at the end? 
+ // we must do this since we may later allocate large spans over this page and cannot have a guard page in between + #if MI_SECURE >= 2 + if (!page->memid.is_pinned) { + _mi_os_secure_guard_page_reset_before((uint8_t*)page + mi_page_full_size(page), page->memid); + } + #endif + + // unregister page + _mi_page_map_unregister(page); + if (page->memid.memkind == MI_MEM_ARENA) { + mi_arena_t* const arena = page->memid.mem.arena.arena; + mi_bitmap_clear(arena->pages, page->memid.mem.arena.slice_index); + if (page->slice_committed > 0) { + // if committed on-demand, set the commit bits to account commit properly + mi_assert_internal(mi_page_full_size(page) >= page->slice_committed); + const size_t total_slices = page->slice_committed / MI_ARENA_SLICE_SIZE; // conservative + //mi_assert_internal(mi_bitmap_is_clearN(arena->slices_committed, page->memid.mem.arena.slice_index, total_slices)); + mi_assert_internal(page->memid.mem.arena.slice_count >= total_slices); + if (total_slices > 0) { + mi_bitmap_setN(arena->slices_committed, page->memid.mem.arena.slice_index, total_slices, NULL); + } + // any left over? 
+ const size_t extra = page->slice_committed % MI_ARENA_SLICE_SIZE; + if (extra > 0) { + // pretend it was decommitted already + mi_os_stat_decrease(committed, extra); + } + } + else { + mi_assert_internal(mi_bitmap_is_setN(arena->slices_committed, page->memid.mem.arena.slice_index, page->memid.mem.arena.slice_count)); + } + } + _mi_arenas_free(page, mi_page_full_size(page), page->memid); } +/* ----------------------------------------------------------- + Arena abandon +----------------------------------------------------------- */ + +void _mi_arenas_page_abandon(mi_page_t* page, mi_tld_t* tld) { + mi_assert_internal(_mi_is_aligned(page, MI_PAGE_ALIGN)); + mi_assert_internal(_mi_ptr_page(page)==page); + mi_assert_internal(mi_page_is_owned(page)); + mi_assert_internal(mi_page_is_abandoned(page)); + mi_assert_internal(!mi_page_all_free(page)); + mi_assert_internal(page->next==NULL && page->prev == NULL); + + if (page->memid.memkind==MI_MEM_ARENA && !mi_page_is_full(page)) { + // make available for allocations + size_t bin = _mi_bin(mi_page_block_size(page)); + size_t slice_index; + size_t slice_count; + mi_arena_t* arena = mi_page_arena(page, &slice_index, &slice_count); + mi_assert_internal(!mi_page_is_singleton(page)); + mi_assert_internal(mi_bbitmap_is_clearN(arena->slices_free, slice_index, slice_count)); + mi_assert_internal(page->slice_committed > 0 || mi_bitmap_is_setN(arena->slices_committed, slice_index, slice_count)); + mi_assert_internal(mi_bitmap_is_setN(arena->slices_dirty, slice_index, slice_count)); + + mi_page_set_abandoned_mapped(page); + const bool wasclear = mi_bitmap_set(arena->pages_abandoned[bin], slice_index); + MI_UNUSED(wasclear); mi_assert_internal(wasclear); + mi_atomic_increment_relaxed(&arena->subproc->abandoned_count[bin]); + mi_tld_stat_increase(tld, pages_abandoned, 1); + } + else { + // page is full (or a singleton), or the page is OS/externally allocated + // leave as is; it will be reclaimed when an object is free'd in the page + 
mi_subproc_t* subproc = _mi_subproc(); + // but for non-arena pages, add to the subproc list so these can be visited + if (page->memid.memkind != MI_MEM_ARENA && mi_option_is_enabled(mi_option_visit_abandoned)) { + mi_lock(&subproc->os_abandoned_pages_lock) { + // push in front + page->prev = NULL; + page->next = subproc->os_abandoned_pages; + if (page->next != NULL) { page->next->prev = page; } + subproc->os_abandoned_pages = page; + } + } + mi_tld_stat_increase(tld, pages_abandoned, 1); + } + _mi_page_unown(page); +} + +bool _mi_arenas_page_try_reabandon_to_mapped(mi_page_t* page) { + mi_assert_internal(_mi_is_aligned(page, MI_PAGE_ALIGN)); + mi_assert_internal(_mi_ptr_page(page)==page); + mi_assert_internal(mi_page_is_owned(page)); + mi_assert_internal(mi_page_is_abandoned(page)); + mi_assert_internal(!mi_page_is_abandoned_mapped(page)); + mi_assert_internal(!mi_page_is_full(page)); + mi_assert_internal(!mi_page_all_free(page)); + mi_assert_internal(!mi_page_is_singleton(page)); + if (mi_page_is_full(page) || mi_page_is_abandoned_mapped(page) || page->memid.memkind != MI_MEM_ARENA) { + return false; + } + else { + mi_tld_t* tld = _mi_thread_tld(); + mi_tld_stat_counter_increase( tld, pages_reabandon_full, 1); + mi_tld_stat_adjust_decrease( tld, pages_abandoned, 1); // adjust as we are not abandoning fresh + _mi_arenas_page_abandon(page,tld); + return true; + } +} + +// called from `mi_free` if trying to unabandon an abandoned page +void _mi_arenas_page_unabandon(mi_page_t* page) { + mi_assert_internal(_mi_is_aligned(page, MI_PAGE_ALIGN)); + mi_assert_internal(_mi_ptr_page(page)==page); + mi_assert_internal(mi_page_is_owned(page)); + mi_assert_internal(mi_page_is_abandoned(page)); + + if (mi_page_is_abandoned_mapped(page)) { + mi_assert_internal(page->memid.memkind==MI_MEM_ARENA); + // remove from the abandoned map + size_t bin = _mi_bin(mi_page_block_size(page)); + size_t slice_index; + size_t slice_count; + mi_arena_t* arena = mi_page_arena(page, &slice_index, 
&slice_count); + + mi_assert_internal(mi_bbitmap_is_clearN(arena->slices_free, slice_index, slice_count)); + mi_assert_internal(page->slice_committed > 0 || mi_bitmap_is_setN(arena->slices_committed, slice_index, slice_count)); + + // this busy waits until a concurrent reader (from alloc_abandoned) is done + mi_bitmap_clear_once_set(arena->pages_abandoned[bin], slice_index); + mi_page_clear_abandoned_mapped(page); + mi_atomic_decrement_relaxed(&arena->subproc->abandoned_count[bin]); + mi_tld_stat_decrease(_mi_thread_tld(), pages_abandoned, 1); + } + else { + // page is full (or a singleton), page is OS allocated + mi_tld_stat_decrease(_mi_thread_tld(), pages_abandoned, 1); + // if not an arena page, remove from the subproc os pages list + if (page->memid.memkind != MI_MEM_ARENA && mi_option_is_enabled(mi_option_visit_abandoned)) { + mi_subproc_t* subproc = _mi_subproc(); + mi_lock(&subproc->os_abandoned_pages_lock) { + if (page->prev != NULL) { page->prev->next = page->next; } + if (page->next != NULL) { page->next->prev = page->prev; } + if (subproc->os_abandoned_pages == page) { subproc->os_abandoned_pages = page->next; } + page->next = NULL; + page->prev = NULL; + } + } + } +} + + /* ----------------------------------------------------------- Arena free ----------------------------------------------------------- */ +static void mi_arena_schedule_purge(mi_arena_t* arena, size_t slice_index, size_t slices); +static void mi_arenas_try_purge(bool force, bool visit_all, mi_tld_t* tld); -void _mi_arena_free(void* p, size_t size, size_t memid, bool all_committed, mi_os_tld_t* tld) { - mi_assert_internal(size > 0 && tld->stats != NULL); +void _mi_arenas_free(void* p, size_t size, mi_memid_t memid) { if (p==NULL) return; if (size==0) return; - if (memid == MI_MEMID_OS) { + // need to set all memory to undefined as some parts may still be marked as no_access (like padding etc.) 
+ mi_track_mem_undefined(p, size); + + if (mi_memkind_is_os(memid.memkind)) { // was a direct OS allocation, pass through - _mi_os_free_ex(p, size, all_committed, tld->stats); + _mi_os_free(p, size, memid); } - else { + else if (memid.memkind == MI_MEM_ARENA) { // allocated in an arena - size_t arena_idx; - size_t bitmap_idx; - mi_arena_memid_indices(memid, &arena_idx, &bitmap_idx); - mi_assert_internal(arena_idx < MI_MAX_ARENAS); - mi_arena_t* arena = mi_atomic_load_ptr_relaxed(mi_arena_t,&mi_arenas[arena_idx]); - mi_assert_internal(arena != NULL); - const size_t blocks = mi_block_count_of_size(size); + size_t slice_count; + size_t slice_index; + mi_arena_t* arena = mi_arena_from_memid(memid, &slice_index, &slice_count); + mi_assert_internal((size%MI_ARENA_SLICE_SIZE)==0); + mi_assert_internal((slice_count*MI_ARENA_SLICE_SIZE)==size); + mi_assert_internal(mi_arena_slice_start(arena,slice_index) <= (uint8_t*)p); + mi_assert_internal(mi_arena_slice_start(arena,slice_index) + mi_size_of_slices(slice_count) > (uint8_t*)p); // checks if (arena == NULL) { - _mi_error_message(EINVAL, "trying to free from non-existent arena: %p, size %zu, memid: 0x%zx\n", p, size, memid); + _mi_error_message(EINVAL, "trying to free from an invalid arena: %p, size %zu, memid: 0x%zx\n", p, size, memid); return; } - mi_assert_internal(arena->field_count > mi_bitmap_index_field(bitmap_idx)); - if (arena->field_count <= mi_bitmap_index_field(bitmap_idx)) { - _mi_error_message(EINVAL, "trying to free from non-existent arena block: %p, size %zu, memid: 0x%zx\n", p, size, memid); + mi_assert_internal(slice_index < arena->slice_count); + mi_assert_internal(slice_index >= mi_arena_info_slices(arena)); + if (slice_index < mi_arena_info_slices(arena) || slice_index > arena->slice_count) { + _mi_error_message(EINVAL, "trying to free from an invalid arena block: %p, size %zu, memid: 0x%zx\n", p, size, memid); return; } + // potentially decommit - if (!arena->allow_decommit || arena->blocks_committed == 
NULL) { - mi_assert_internal(all_committed); // note: may be not true as we may "pretend" to be not committed (in segment.c) - } - else { - mi_assert_internal(arena->blocks_committed != NULL); - _mi_os_decommit(p, blocks * MI_ARENA_BLOCK_SIZE, tld->stats); // ok if this fails - _mi_bitmap_unclaim_across(arena->blocks_committed, arena->field_count, blocks, bitmap_idx); + if (!arena->memid.is_pinned /* && !arena->memid.initially_committed */) { // todo: allow decommit even if initially committed? + // (delay) purge the page + mi_arena_schedule_purge(arena, slice_index, slice_count); } - // and make it available to others again - bool all_inuse = _mi_bitmap_unclaim_across(arena->blocks_inuse, arena->field_count, blocks, bitmap_idx); + + // and make it available to others again + bool all_inuse = mi_bbitmap_setN(arena->slices_free, slice_index, slice_count); if (!all_inuse) { - _mi_error_message(EAGAIN, "trying to free an already freed block: %p, size %zu\n", p, size); + _mi_error_message(EAGAIN, "trying to free an already freed arena block: %p, size %zu\n", mi_arena_slice_start(arena,slice_index), mi_size_of_slices(slice_count)); return; }; } + else if (memid.memkind == MI_MEM_META) { + _mi_meta_free(p, size, memid); + } + else { + // arena was none, external, or static; nothing to do + mi_assert_internal(mi_memid_needs_no_free(memid)); + } + + // try to purge expired decommits + // mi_arenas_try_purge(false, false, NULL); +} + +// Purge the arenas; if `force_purge` is true, amenable parts are purged even if not yet expired +void _mi_arenas_collect(bool force_purge, bool visit_all, mi_tld_t* tld) { + mi_arenas_try_purge(force_purge, visit_all, tld); +} + + +// Is a pointer contained in the given arena area? 
+bool mi_arena_contains(mi_arena_id_t arena_id, const void* p) { + mi_arena_t* arena = _mi_arena_from_id(arena_id); + return (mi_arena_start(arena) <= (const uint8_t*)p && + mi_arena_start(arena) + mi_size_of_slices(arena->slice_count) >(const uint8_t*)p); +} + +// Is a pointer inside any of our arenas? +bool _mi_arenas_contain(const void* p) { + mi_subproc_t* subproc = _mi_subproc(); + const size_t max_arena = mi_arenas_get_count(subproc); + for (size_t i = 0; i < max_arena; i++) { + mi_arena_t* arena = mi_atomic_load_ptr_acquire(mi_arena_t, &subproc->arenas[i]); + if (arena != NULL && mi_arena_contains(arena,p)) { + return true; + } + } + return false; +} + + + +/* ----------------------------------------------------------- + Remove an arena. +----------------------------------------------------------- */ + +// destroy owned arenas; this is unsafe and should only be done using `mi_option_destroy_on_exit` +// for dynamic libraries that are unloaded and need to release all their allocated memory. +static void mi_arenas_unsafe_destroy(mi_subproc_t* subproc) { + mi_assert_internal(subproc != NULL); + const size_t max_arena = mi_arenas_get_count(subproc); + size_t new_max_arena = 0; + for (size_t i = 0; i < max_arena; i++) { + mi_arena_t* arena = mi_atomic_load_ptr_acquire(mi_arena_t, &subproc->arenas[i]); + if (arena != NULL) { + // mi_lock_done(&arena->abandoned_visit_lock); + mi_atomic_store_ptr_release(mi_arena_t, &subproc->arenas[i], NULL); + if (mi_memkind_is_os(arena->memid.memkind)) { + _mi_os_free_ex(mi_arena_start(arena), mi_arena_size(arena), true, arena->memid, subproc); // pass `subproc` to avoid accessing the heap pointer (in `_mi_subproc()`) + } + } + } + + // try to lower the max arena. 
+ size_t expected = max_arena; + mi_atomic_cas_strong_acq_rel(&subproc->arena_count, &expected, new_max_arena); } + +// destroy owned arenas; this is unsafe and should only be done using `mi_option_destroy_on_exit` +// for dynamic libraries that are unloaded and need to release all their allocated memory. +void _mi_arenas_unsafe_destroy_all(mi_tld_t* tld) { + mi_arenas_unsafe_destroy(tld->subproc); + _mi_arenas_collect(true /* force purge */, true /* visit all*/, tld); // purge non-owned arenas +} + + /* ----------------------------------------------------------- Add an arena. ----------------------------------------------------------- */ -static bool mi_arena_add(mi_arena_t* arena, mi_arena_id_t* arena_id) { +static bool mi_arenas_add(mi_subproc_t* subproc, mi_arena_t* arena, mi_arena_id_t* arena_id) { mi_assert_internal(arena != NULL); - mi_assert_internal((uintptr_t)mi_atomic_load_ptr_relaxed(uint8_t,&arena->start) % MI_SEGMENT_ALIGN == 0); - mi_assert_internal(arena->block_count > 0); - if (arena_id != NULL) *arena_id = -1; + mi_assert_internal(arena->slice_count > 0); + if (arena_id != NULL) { *arena_id = NULL; } + + // first try to find a NULL entry + const size_t count = mi_arenas_get_count(subproc); + size_t i; + for (i = 0; i < count; i++) { + if (mi_arena_from_index(subproc,i) == NULL) { + mi_arena_t* expected = NULL; + if (mi_atomic_cas_ptr_strong_release(mi_arena_t, &subproc->arenas[i], &expected, arena)) { + // success + if (arena_id != NULL) { *arena_id = arena; } + return true; + } + } + } - size_t i = mi_atomic_increment_acq_rel(&mi_arena_count); + // otherwise increase the max + i = mi_atomic_increment_acq_rel(&subproc->arena_count); if (i >= MI_MAX_ARENAS) { - mi_atomic_decrement_acq_rel(&mi_arena_count); + mi_atomic_decrement_acq_rel(&subproc->arena_count); + arena->subproc = NULL; return false; } - mi_atomic_store_ptr_release(mi_arena_t,&mi_arenas[i], arena); - arena->id = mi_arena_id_create(i); - if (arena_id != NULL) *arena_id = arena->id; + + 
mi_subproc_stat_counter_increase(arena->subproc, arena_count, 1); + mi_atomic_store_ptr_release(mi_arena_t,&subproc->arenas[i], arena); + if (arena_id != NULL) { *arena_id = arena; } return true; } -bool mi_manage_os_memory_ex(void* start, size_t size, bool is_committed, bool is_large, bool is_zero, int numa_node, bool exclusive, mi_arena_id_t* arena_id) mi_attr_noexcept +static size_t mi_arena_info_slices_needed(size_t slice_count, size_t* bitmap_base) { + if (slice_count == 0) slice_count = MI_BCHUNK_BITS; + mi_assert_internal((slice_count % MI_BCHUNK_BITS) == 0); + const size_t base_size = _mi_align_up(sizeof(mi_arena_t), MI_BCHUNK_SIZE); + const size_t bitmaps_count = 4 + MI_BIN_COUNT; // commit, dirty, purge, pages, and abandonded + const size_t bitmaps_size = bitmaps_count * mi_bitmap_size(slice_count, NULL) + mi_bbitmap_size(slice_count, NULL); // + free + const size_t size = base_size + bitmaps_size; + + const size_t os_page_size = _mi_os_page_size(); + const size_t info_size = _mi_align_up(size, os_page_size) + _mi_os_secure_guard_page_size(); + const size_t info_slices = mi_slice_count_of_size(info_size); + + if (bitmap_base != NULL) *bitmap_base = base_size; + return info_slices; +} + +static mi_bitmap_t* mi_arena_bitmap_init(size_t slice_count, uint8_t** base) { + mi_bitmap_t* bitmap = (mi_bitmap_t*)(*base); + *base = (*base) + mi_bitmap_init(bitmap, slice_count, true /* already zero */); + return bitmap; +} + +static mi_bbitmap_t* mi_arena_bbitmap_init(size_t slice_count, uint8_t** base) { + mi_bbitmap_t* bbitmap = (mi_bbitmap_t*)(*base); + *base = (*base) + mi_bbitmap_init(bbitmap, slice_count, true /* already zero */); + return bbitmap; +} + + +static bool mi_manage_os_memory_ex2(mi_subproc_t* subproc, void* start, size_t size, int numa_node, bool exclusive, + mi_memid_t memid, mi_commit_fun_t* commit_fun, void* commit_fun_arg, mi_arena_id_t* arena_id) mi_attr_noexcept { - if (arena_id != NULL) *arena_id = _mi_arena_id_none(); - if (size < 
MI_ARENA_BLOCK_SIZE) return false; + mi_assert(_mi_is_aligned(start,MI_ARENA_SLICE_SIZE)); + mi_assert(start!=NULL); + if (arena_id != NULL) { *arena_id = _mi_arena_id_none(); } + if (start==NULL) return false; + if (!_mi_is_aligned(start,MI_ARENA_SLICE_SIZE)) { + // we can align the start since the memid tracks the real base of the memory. + void* const aligned_start = _mi_align_up_ptr(start, MI_ARENA_SLICE_SIZE); + const size_t diff = (uint8_t*)aligned_start - (uint8_t*)start; + if (diff >= size || (size - diff) < MI_ARENA_SLICE_SIZE) { + _mi_warning_message("after alignment, the size of the arena becomes too small (memory at %p with size %zu)\n", start, size); + return false; + } + start = aligned_start; + size = size - diff; + } - if (is_large) { - mi_assert_internal(is_committed); - is_committed = true; + const size_t slice_count = _mi_align_down(size / MI_ARENA_SLICE_SIZE, MI_BCHUNK_BITS); + if (slice_count > MI_BITMAP_MAX_BIT_COUNT) { // 16 GiB for now + // todo: allow larger areas (either by splitting it up in arena's or having larger arena's) + _mi_warning_message("cannot use OS memory since it is too large (size %zu MiB, maximum is %zu MiB)", size/MI_MiB, mi_size_of_slices(MI_BITMAP_MAX_BIT_COUNT)/MI_MiB); + return false; } - - const size_t bcount = size / MI_ARENA_BLOCK_SIZE; - const size_t fields = _mi_divide_up(bcount, MI_BITMAP_FIELD_BITS); - const size_t bitmaps = (is_committed ? 2 : 3); - const size_t asize = sizeof(mi_arena_t) + (bitmaps*fields*sizeof(mi_bitmap_field_t)); - mi_arena_t* arena = (mi_arena_t*)_mi_os_alloc(asize, &_mi_stats_main); // TODO: can we avoid allocating from the OS? 
- if (arena == NULL) return false; - - arena->id = _mi_arena_id_none(); - arena->exclusive = exclusive; - arena->block_count = bcount; - arena->field_count = fields; - arena->start = (uint8_t*)start; + size_t bitmap_base; + const size_t info_slices = mi_arena_info_slices_needed(slice_count, &bitmap_base); + if (slice_count < info_slices+1) { + _mi_warning_message("cannot use OS memory since it is not large enough (size %zu KiB, minimum required is %zu KiB)", size/MI_KiB, mi_size_of_slices(info_slices+1)/MI_KiB); + return false; + } + else if (info_slices >= MI_ARENA_MAX_OBJ_SLICES) { + _mi_warning_message("cannot use OS memory since it is too large with respect to the maximum object size (size %zu MiB, meta-info slices %zu, maximum object slices are %zu)", size/MI_MiB, info_slices, MI_ARENA_MAX_OBJ_SLICES); + return false; + } + + mi_arena_t* arena = (mi_arena_t*)start; + + // commit & zero if needed + if (!memid.initially_committed) { + size_t commit_size = mi_size_of_slices(info_slices); + // leave a guard OS page decommitted at the end? 
+ if (!memid.is_pinned) { commit_size -= _mi_os_secure_guard_page_size(); } + bool ok = false; + if (commit_fun != NULL) { + ok = (*commit_fun)(true /* commit */, arena, commit_size, NULL, commit_fun_arg); + } + else { + ok = _mi_os_commit(arena, commit_size, NULL); + } + if (!ok) { + _mi_warning_message("unable to commit meta-data for OS memory"); + return false; + } + } + else if (!memid.is_pinned) { + // if MI_SECURE, set a guard page at the end + // todo: this does not respect the commit_fun as the memid is of external memory + _mi_os_secure_guard_page_set_before((uint8_t*)arena + mi_size_of_slices(info_slices), memid); + } + if (!memid.initially_zero) { + _mi_memzero(arena, mi_size_of_slices(info_slices) - _mi_os_secure_guard_page_size()); + } + + // init + arena->subproc = subproc; + arena->memid = memid; + arena->is_exclusive = exclusive; + arena->slice_count = slice_count; + arena->info_slices = info_slices; arena->numa_node = numa_node; // TODO: or get the current numa node if -1? (now it allows anyone to allocate on -1) - arena->is_large = is_large; - arena->is_zero_init = is_zero; - arena->allow_decommit = !is_large && !is_committed; // only allow decommit for initially uncommitted memory - arena->search_idx = 0; - arena->blocks_dirty = &arena->blocks_inuse[fields]; // just after inuse bitmap - arena->blocks_committed = (!arena->allow_decommit ? NULL : &arena->blocks_inuse[2*fields]); // just after dirty bitmap - // the bitmaps are already zero initialized due to os_alloc - // initialize committed bitmap? 
- if (arena->blocks_committed != NULL && is_committed) { - memset((void*)arena->blocks_committed, 0xFF, fields*sizeof(mi_bitmap_field_t)); // cast to void* to avoid atomic warning + arena->purge_expire = 0; + arena->commit_fun = commit_fun; + arena->commit_fun_arg = commit_fun_arg; + // mi_lock_init(&arena->abandoned_visit_lock); + + // init bitmaps + uint8_t* base = mi_arena_start(arena) + bitmap_base; + arena->slices_free = mi_arena_bbitmap_init(slice_count,&base); + arena->slices_committed = mi_arena_bitmap_init(slice_count,&base); + arena->slices_dirty = mi_arena_bitmap_init(slice_count,&base); + arena->slices_purge = mi_arena_bitmap_init(slice_count, &base); + arena->pages = mi_arena_bitmap_init(slice_count, &base); + for( size_t i = 0; i < MI_ARENA_BIN_COUNT; i++) { + arena->pages_abandoned[i] = mi_arena_bitmap_init(slice_count,&base); + } + mi_assert_internal(mi_size_of_slices(info_slices) >= (size_t)(base - mi_arena_start(arena))); + + // reserve our meta info (and reserve slices outside the memory area) + mi_bbitmap_unsafe_setN(arena->slices_free, info_slices /* start */, arena->slice_count - info_slices); + if (memid.initially_committed) { + mi_bitmap_unsafe_setN(arena->slices_committed, 0, arena->slice_count); + } + else { + mi_bitmap_setN(arena->slices_committed, 0, info_slices, NULL); + } + if (!memid.initially_zero) { + mi_bitmap_unsafe_setN(arena->slices_dirty, 0, arena->slice_count); } - // and claim leftover blocks if needed (so we never allocate there) - ptrdiff_t post = (fields * MI_BITMAP_FIELD_BITS) - bcount; - mi_assert_internal(post >= 0); - if (post > 0) { - // don't use leftover bits at the end - mi_bitmap_index_t postidx = mi_bitmap_index_create(fields - 1, MI_BITMAP_FIELD_BITS - post); - _mi_bitmap_claim(arena->blocks_inuse, fields, post, postidx, NULL); + else { + mi_bitmap_setN(arena->slices_dirty, 0, info_slices, NULL); } - return mi_arena_add(arena, arena_id); + return mi_arenas_add(subproc, arena, arena_id); +} + +bool 
mi_manage_os_memory_ex(void* start, size_t size, bool is_committed, bool is_pinned, bool is_zero, int numa_node, bool exclusive, mi_arena_id_t* arena_id) mi_attr_noexcept { + mi_memid_t memid = _mi_memid_create(MI_MEM_EXTERNAL); + memid.mem.os.base = start; + memid.mem.os.size = size; + memid.initially_committed = is_committed; + memid.initially_zero = is_zero; + memid.is_pinned = is_pinned; + return mi_manage_os_memory_ex2(_mi_subproc(), start, size, numa_node, exclusive, memid, NULL, NULL, arena_id); } -// Reserve a range of regular OS memory -int mi_reserve_os_memory_ex(size_t size, bool commit, bool allow_large, bool exclusive, mi_arena_id_t* arena_id) mi_attr_noexcept +bool mi_manage_memory(void* start, size_t size, bool is_committed, bool is_zero, bool is_pinned, int numa_node, bool exclusive, mi_commit_fun_t* commit_fun, void* commit_fun_arg, mi_arena_id_t* arena_id) mi_attr_noexcept { + mi_memid_t memid = _mi_memid_create(MI_MEM_EXTERNAL); + memid.mem.os.base = start; + memid.mem.os.size = size; + memid.initially_committed = is_committed; + memid.initially_zero = is_zero; + memid.is_pinned = is_pinned; + return mi_manage_os_memory_ex2(_mi_subproc(), start, size, numa_node, exclusive, memid, commit_fun, commit_fun_arg, arena_id); +} + + +// Reserve a range of regular OS memory +static int mi_reserve_os_memory_ex2(mi_subproc_t* subproc, size_t size, bool commit, bool allow_large, bool exclusive, mi_arena_id_t* arena_id) { if (arena_id != NULL) *arena_id = _mi_arena_id_none(); - size = _mi_align_up(size, MI_ARENA_BLOCK_SIZE); // at least one block - bool large = allow_large; - void* start = _mi_os_alloc_aligned(size, MI_SEGMENT_ALIGN, commit, &large, &_mi_stats_main); - if (start==NULL) return ENOMEM; - if (!mi_manage_os_memory_ex(start, size, (large || commit), large, true, -1, exclusive, arena_id)) { - _mi_os_free_ex(start, size, commit, &_mi_stats_main); - _mi_verbose_message("failed to reserve %zu k memory\n", _mi_divide_up(size,1024)); + size = 
_mi_align_up(size, MI_ARENA_SLICE_SIZE); // at least one slice + mi_memid_t memid; + void* start = _mi_os_alloc_aligned(size, MI_ARENA_SLICE_ALIGN, commit, allow_large, &memid); + if (start == NULL) return ENOMEM; + if (!mi_manage_os_memory_ex2(subproc, start, size, -1 /* numa node */, exclusive, memid, NULL, NULL, arena_id)) { + _mi_os_free_ex(start, size, commit, memid, NULL); + _mi_verbose_message("failed to reserve %zu KiB memory\n", _mi_divide_up(size, 1024)); return ENOMEM; } - _mi_verbose_message("reserved %zu KiB memory%s\n", _mi_divide_up(size,1024), large ? " (in large os pages)" : ""); + _mi_verbose_message("reserved %zu KiB memory%s\n", _mi_divide_up(size, 1024), memid.is_pinned ? " (in large os pages)" : ""); + // mi_debug_show_arenas(true, true, false); + return 0; } +// Reserve a range of regular OS memory +int mi_reserve_os_memory_ex(size_t size, bool commit, bool allow_large, bool exclusive, mi_arena_id_t* arena_id) mi_attr_noexcept { + return mi_reserve_os_memory_ex2(_mi_subproc(), size, commit, allow_large, exclusive, arena_id); +} + +// Manage a range of regular OS memory bool mi_manage_os_memory(void* start, size_t size, bool is_committed, bool is_large, bool is_zero, int numa_node) mi_attr_noexcept { - return mi_manage_os_memory_ex(start, size, is_committed, is_large, is_zero, numa_node, false, NULL); + return mi_manage_os_memory_ex(start, size, is_committed, is_large, is_zero, numa_node, false /* exclusive? 
*/, NULL); } +// Reserve a range of regular OS memory int mi_reserve_os_memory(size_t size, bool commit, bool allow_large) mi_attr_noexcept { return mi_reserve_os_memory_ex(size, commit, allow_large, false, NULL); } @@ -439,32 +1345,223 @@ int mi_reserve_os_memory(size_t size, bool commit, bool allow_large) mi_attr_noe Debugging ----------------------------------------------------------- */ -static size_t mi_debug_show_bitmap(const char* prefix, mi_bitmap_field_t* fields, size_t field_count ) { - size_t inuse_count = 0; - for (size_t i = 0; i < field_count; i++) { - char buf[MI_BITMAP_FIELD_BITS + 1]; - uintptr_t field = mi_atomic_load_relaxed(&fields[i]); - for (size_t bit = 0; bit < MI_BITMAP_FIELD_BITS; bit++) { - bool inuse = ((((uintptr_t)1 << bit) & field) != 0); - if (inuse) inuse_count++; - buf[MI_BITMAP_FIELD_BITS - 1 - bit] = (inuse ? 'x' : '.'); +// Return idx of the slice past the last used slice +static size_t mi_arena_used_slices(mi_arena_t* arena) { + size_t idx; + if (mi_bitmap_bsr(arena->pages, &idx)) { + mi_page_t* page = (mi_page_t*)mi_arena_slice_start(arena, idx); + const size_t page_slice_count = page->memid.mem.arena.slice_count; + return (idx + page_slice_count); + } + else { + return mi_arena_info_slices(arena); + } +} + +static size_t mi_debug_show_bfield(mi_bfield_t field, char* buf, size_t* k) { + size_t bit_set_count = 0; + for (int bit = 0; bit < MI_BFIELD_BITS; bit++) { + bool is_set = ((((mi_bfield_t)1 << bit) & field) != 0); + if (is_set) bit_set_count++; + buf[*k++] = (is_set ? 
'x' : '.'); + } + return bit_set_count; +} + +typedef enum mi_ansi_color_e { + MI_BLACK = 30, + MI_MAROON, + MI_DARKGREEN, + MI_ORANGE, + MI_NAVY, + MI_PURPLE, + MI_TEAL, + MI_GRAY, + MI_DARKGRAY = 90, + MI_RED, + MI_GREEN, + MI_YELLOW, + MI_BLUE, + MI_MAGENTA, + MI_CYAN, + MI_WHITE +} mi_ansi_color_t; + +static void mi_debug_color(char* buf, size_t* k, mi_ansi_color_t color) { + *k += _mi_snprintf(buf + *k, 32, "\x1B[%dm", (int)color); +} + +static int mi_page_commit_usage(mi_page_t* page) { + // if (mi_page_size(page) <= MI_PAGE_MIN_COMMIT_SIZE) return 100; + const size_t committed_size = mi_page_committed(page); + const size_t used_size = page->used * mi_page_block_size(page); + return (int)(used_size * 100 / committed_size); +} + +static size_t mi_debug_show_page_bfield(mi_bfield_t field, char* buf, size_t* k, mi_arena_t* arena, size_t slice_index, long* pbit_of_page, mi_ansi_color_t* pcolor_of_page ) { + size_t bit_set_count = 0; + long bit_of_page = *pbit_of_page; + mi_ansi_color_t color = *pcolor_of_page; + mi_ansi_color_t prev_color = MI_GRAY; + for (int bit = 0; bit < MI_BFIELD_BITS; bit++, bit_of_page--) { + bool is_set = ((((mi_bfield_t)1 << bit) & field) != 0); + void* start = mi_arena_slice_start(arena, slice_index + bit); + char c = ' '; + if (is_set) { + mi_assert_internal(bit_of_page <= 0); + bit_set_count++; + c = 'p'; + color = MI_GRAY; + mi_page_t* page = (mi_page_t*)start; + if (mi_page_is_singleton(page)) { c = 's'; } + else if (mi_page_is_full(page)) { c = 'f'; } + if (!mi_page_is_abandoned(page)) { c = _mi_toupper(c); } + int commit_usage = mi_page_commit_usage(page); + if (commit_usage < 25) { color = MI_MAROON; } + else if (commit_usage < 50) { color = MI_ORANGE; } + else if (commit_usage < 75) { color = MI_TEAL; } + else color = MI_DARKGREEN; + bit_of_page = (long)page->memid.mem.arena.slice_count; } - buf[MI_BITMAP_FIELD_BITS] = 0; - _mi_verbose_message("%s%s\n", prefix, buf); + else { + c = '?'; + if (bit_of_page > 0) { c = '-'; } + else 
if (_mi_meta_is_meta_page(start)) { c = 'm'; color = MI_GRAY; } + else if (slice_index + bit < arena->info_slices) { c = 'i'; color = MI_GRAY; } + // else if (mi_bitmap_is_setN(arena->pages_purge, slice_index + bit, NULL)) { c = '*'; } + else if (mi_bbitmap_is_setN(arena->slices_free, slice_index+bit,1)) { + if (mi_bitmap_is_set(arena->slices_purge, slice_index + bit)) { c = '~'; color = MI_ORANGE; } + else if (mi_bitmap_is_setN(arena->slices_committed, slice_index + bit, 1)) { c = '_'; color = MI_GRAY; } + else { c = '.'; color = MI_GRAY; } + } + if (bit==MI_BFIELD_BITS-1 && bit_of_page > 1) { c = '>'; } + } + if (color != prev_color) { + mi_debug_color(buf, k, color); + prev_color = color; + } + buf[*k] = c; *k += 1; } - return inuse_count; + mi_debug_color(buf, k, MI_GRAY); + *pbit_of_page = bit_of_page; + *pcolor_of_page = color; + return bit_set_count; } -void mi_debug_show_arenas(void) mi_attr_noexcept { - size_t max_arenas = mi_atomic_load_relaxed(&mi_arena_count); +static size_t mi_debug_show_chunks(const char* header1, const char* header2, const char* header3, + size_t slice_count, size_t chunk_count, + mi_bchunk_t* chunks, mi_bchunkmap_t* chunk_bins, bool invert, mi_arena_t* arena, bool narrow) +{ + _mi_raw_message("\x1B[37m%s%s%s (use/commit: \x1B[31m0 - 25%%\x1B[33m - 50%%\x1B[36m - 75%%\x1B[32m - 100%%\x1B[0m)\n", header1, header2, header3); + const size_t fields_per_line = (narrow ? 
2 : 4); + const size_t used_slice_count = mi_arena_used_slices(arena); + size_t bit_count = 0; + size_t bit_set_count = 0; + for (size_t i = 0; i < chunk_count && bit_count < slice_count; i++) { + char buf[5*MI_BCHUNK_BITS + 64]; _mi_memzero(buf, sizeof(buf)); + if (bit_count > used_slice_count && i+2 < chunk_count) { + const size_t diff = chunk_count - 1 - i; + bit_count += diff*MI_BCHUNK_BITS; + _mi_raw_message(" |\n"); + i = chunk_count-1; + } + + size_t k = 0; + mi_bchunk_t* chunk = &chunks[i]; + + if (i<10) { buf[k++] = ('0' + (char)i); buf[k++] = ' '; buf[k++] = ' '; } + else if (i<100) { buf[k++] = ('0' + (char)(i/10)); buf[k++] = ('0' + (char)(i%10)); buf[k++] = ' '; } + else if (i<1000) { buf[k++] = ('0' + (char)(i/100)); buf[k++] = ('0' + (char)((i%100)/10)); buf[k++] = ('0' + (char)(i%10)); } + + char chunk_kind = ' '; + if (chunk_bins != NULL) { + switch (mi_bbitmap_debug_get_bin(chunk_bins,i)) { + case MI_CBIN_SMALL: chunk_kind = 'S'; break; + case MI_CBIN_MEDIUM: chunk_kind = 'M'; break; + case MI_CBIN_LARGE: chunk_kind = 'L'; break; + case MI_CBIN_OTHER: chunk_kind = 'X'; break; + default: chunk_kind = ' '; break; // suppress warning + // case MI_CBIN_NONE: chunk_kind = 'N'; break; + } + } + buf[k++] = chunk_kind; + buf[k++] = ' '; + + long bit_of_page = 0; + mi_ansi_color_t color_of_page = MI_GRAY; + for (size_t j = 0; j < MI_BCHUNK_FIELDS; j++) { + if (j > 0 && (j % fields_per_line) == 0) { + // buf[k++] = '\n'; _mi_memset(buf+k,' ',7); k += 7; + _mi_raw_message(" %s\n\x1B[37m", buf); + _mi_memzero(buf, sizeof(buf)); + _mi_memset(buf, ' ', 5); k = 5; + } + if (bit_count < slice_count) { + mi_bfield_t bfield = chunk->bfields[j]; + if (invert) bfield = ~bfield; + size_t xcount = (arena!=NULL ? 
mi_debug_show_page_bfield(bfield, buf, &k, arena, bit_count, &bit_of_page, &color_of_page) + : mi_debug_show_bfield(bfield, buf, &k)); + if (invert) xcount = MI_BFIELD_BITS - xcount; + bit_set_count += xcount; + buf[k++] = ' '; + } + else { + _mi_memset(buf + k, 'o', MI_BFIELD_BITS); + k += MI_BFIELD_BITS; + } + bit_count += MI_BFIELD_BITS; + } + _mi_raw_message(" %s\n\x1B[37m", buf); + } + _mi_raw_message("\x1B[0m total ('x'): %zu\n", bit_set_count); + return bit_set_count; +} + +static size_t mi_debug_show_bitmap_binned(const char* header1, const char* header2, const char* header3, size_t slice_count, mi_bitmap_t* bitmap, mi_bchunkmap_t* chunk_bins, bool invert, mi_arena_t* arena, bool narrow) { + return mi_debug_show_chunks(header1, header2, header3, slice_count, mi_bitmap_chunk_count(bitmap), &bitmap->chunks[0], chunk_bins, invert, arena, narrow); +} + +static void mi_debug_show_arenas_ex(bool show_pages, bool narrow) mi_attr_noexcept { + mi_subproc_t* subproc = _mi_subproc(); + size_t max_arenas = mi_arenas_get_count(subproc); + //size_t free_total = 0; + //size_t slice_total = 0; + //size_t abandoned_total = 0; + size_t page_total = 0; for (size_t i = 0; i < max_arenas; i++) { - mi_arena_t* arena = mi_atomic_load_ptr_relaxed(mi_arena_t, &mi_arenas[i]); + mi_arena_t* arena = mi_atomic_load_ptr_acquire(mi_arena_t, &subproc->arenas[i]); if (arena == NULL) break; - size_t inuse_count = 0; - _mi_verbose_message("arena %zu: %zu blocks with %zu fields\n", i, arena->block_count, arena->field_count); - inuse_count += mi_debug_show_bitmap(" ", arena->blocks_inuse, arena->field_count); - _mi_verbose_message(" blocks in use ('x'): %zu\n", inuse_count); + mi_assert(arena->subproc == subproc); + // slice_total += arena->slice_count; + _mi_raw_message("arena %zu at %p: %zu slices (%zu MiB)%s, subproc: %p\n", i, arena, arena->slice_count, (size_t)(mi_size_of_slices(arena->slice_count)/MI_MiB), (arena->memid.is_pinned ? 
", pinned" : ""), arena->subproc); + //if (show_inuse) { + // free_total += mi_debug_show_bbitmap("in-use slices", arena->slice_count, arena->slices_free, true, NULL); + //} + //if (show_committed) { + // mi_debug_show_bitmap("committed slices", arena->slice_count, arena->slices_committed, false, NULL); + //} + // todo: abandoned slices + //if (show_purge) { + // purge_total += mi_debug_show_bitmap("purgeable slices", arena->slice_count, arena->slices_purge, false, NULL); + //} + if (show_pages) { + const char* header1 = "pages (p:page, f:full, s:singleton, P,F,S:not abandoned, i:arena-info, m:meta-data, ~:free-purgable, _:free-committed, .:free-reserved)"; + const char* header2 = (narrow ? "\n " : " "); + const char* header3 = "(chunk bin: S:small, M : medium, L : large, X : other)"; + page_total += mi_debug_show_bitmap_binned(header1, header2, header3, arena->slice_count, arena->pages, arena->slices_free->chunkmap_bins, false, arena, narrow); + } } + // if (show_inuse) _mi_raw_message("total inuse slices : %zu\n", slice_total - free_total); + // if (show_abandoned) _mi_raw_message("total abandoned slices: %zu\n", abandoned_total); + if (show_pages) _mi_raw_message("total pages in arenas: %zu\n", page_total); +} + +void mi_debug_show_arenas(void) mi_attr_noexcept { + mi_debug_show_arenas_ex(true /* show pages */, false /* narrow? 
*/); +} + +void mi_arenas_print(void) mi_attr_noexcept { + mi_debug_show_arenas(); } @@ -473,21 +1570,22 @@ void mi_debug_show_arenas(void) mi_attr_noexcept { ----------------------------------------------------------- */ // reserve at a specific numa node int mi_reserve_huge_os_pages_at_ex(size_t pages, int numa_node, size_t timeout_msecs, bool exclusive, mi_arena_id_t* arena_id) mi_attr_noexcept { - if (arena_id != NULL) *arena_id = -1; + if (arena_id != NULL) *arena_id = NULL; if (pages==0) return 0; if (numa_node < -1) numa_node = -1; if (numa_node >= 0) numa_node = numa_node % _mi_os_numa_node_count(); size_t hsize = 0; size_t pages_reserved = 0; - void* p = _mi_os_alloc_huge_os_pages(pages, numa_node, timeout_msecs, &pages_reserved, &hsize); + mi_memid_t memid; + void* p = _mi_os_alloc_huge_os_pages(pages, numa_node, timeout_msecs, &pages_reserved, &hsize, &memid); if (p==NULL || pages_reserved==0) { _mi_warning_message("failed to reserve %zu GiB huge pages\n", pages); return ENOMEM; } _mi_verbose_message("numa node %i: reserved %zu GiB huge pages (of the %zu GiB requested)\n", numa_node, pages_reserved, pages); - if (!mi_manage_os_memory_ex(p, hsize, true, true, true, numa_node, exclusive, arena_id)) { - _mi_os_free_huge_pages(p, hsize, &_mi_stats_main); + if (!mi_manage_os_memory_ex2(_mi_subproc(), p, hsize, numa_node, exclusive, memid, NULL, NULL, arena_id)) { + _mi_os_free(p, hsize, memid); return ENOMEM; } return 0; @@ -502,17 +1600,17 @@ int mi_reserve_huge_os_pages_interleave(size_t pages, size_t numa_nodes, size_t if (pages == 0) return 0; // pages per numa node - size_t numa_count = (numa_nodes > 0 ? numa_nodes : _mi_os_numa_node_count()); - if (numa_count <= 0) numa_count = 1; + int numa_count = (numa_nodes > 0 && numa_nodes <= INT_MAX ? 
(int)numa_nodes : _mi_os_numa_node_count()); + if (numa_count <= 0) { numa_count = 1; } const size_t pages_per = pages / numa_count; const size_t pages_mod = pages % numa_count; const size_t timeout_per = (timeout_msecs==0 ? 0 : (timeout_msecs / numa_count) + 50); // reserve evenly among numa nodes - for (size_t numa_node = 0; numa_node < numa_count && pages > 0; numa_node++) { + for (int numa_node = 0; numa_node < numa_count && pages > 0; numa_node++) { size_t node_pages = pages_per; // can be 0 - if (numa_node < pages_mod) node_pages++; - int err = mi_reserve_huge_os_pages_at(node_pages, (int)numa_node, timeout_per); + if ((size_t)numa_node < pages_mod) { node_pages++; } + int err = mi_reserve_huge_os_pages_at(node_pages, numa_node, timeout_per); if (err) return err; if (pages < node_pages) { pages = 0; @@ -533,3 +1631,363 @@ int mi_reserve_huge_os_pages(size_t pages, double max_secs, size_t* pages_reserv if (err==0 && pages_reserved!=NULL) *pages_reserved = pages; return err; } + + + + + +/* ----------------------------------------------------------- + Arena purge +----------------------------------------------------------- */ + +static long mi_arena_purge_delay(void) { + // <0 = no purging allowed, 0=immediate purging, >0=milli-second delay + return (mi_option_get(mi_option_purge_delay) * mi_option_get(mi_option_arena_purge_mult)); +} + +// reset or decommit in an arena and update the commit bitmap +// assumes we own the area (i.e. 
slices_free is claimed by us) +// returns if the memory is no longer committed (versus reset which keeps the commit) +static bool mi_arena_purge(mi_arena_t* arena, size_t slice_index, size_t slice_count) { + mi_assert_internal(!arena->memid.is_pinned); + mi_assert_internal(mi_bbitmap_is_clearN(arena->slices_free, slice_index, slice_count)); + + const size_t size = mi_size_of_slices(slice_count); + void* const p = mi_arena_slice_start(arena, slice_index); + //const bool all_committed = mi_bitmap_is_setN(arena->slices_committed, slice_index, slice_count); + size_t already_committed; + mi_bitmap_setN(arena->slices_committed, slice_index, slice_count, &already_committed); // pretend all committed.. (as we lack a clearN call that counts the already set bits..) + const bool all_committed = (already_committed == slice_count); + const bool needs_recommit = _mi_os_purge_ex(p, size, all_committed /* allow reset? */, mi_size_of_slices(already_committed), arena->commit_fun, arena->commit_fun_arg); + + if (needs_recommit) { + // no longer committed + mi_bitmap_clearN(arena->slices_committed, slice_index, slice_count); + // we just counted in the purge to decommit all, but the some part was not committed so adjust that here + // mi_os_stat_decrease(committed, mi_size_of_slices(slice_count - already_committed)); + } + else if (!all_committed) { + // we cannot assume any of these are committed any longer (even with reset since we did setN and may have marked uncommitted slices as committed) + mi_bitmap_clearN(arena->slices_committed, slice_index, slice_count); + // we adjust the commit count as parts will be re-committed + // mi_os_stat_decrease(committed, mi_size_of_slices(already_committed)); + } + + return needs_recommit; +} + + +// Schedule a purge. This is usually delayed to avoid repeated decommit/commit calls. 
+// Note: assumes we (still) own the area as we may purge immediately +static void mi_arena_schedule_purge(mi_arena_t* arena, size_t slice_index, size_t slice_count) { + const long delay = mi_arena_purge_delay(); + if (arena->memid.is_pinned || delay < 0 || _mi_preloading()) return; // is purging allowed at all? + + mi_assert_internal(mi_bbitmap_is_clearN(arena->slices_free, slice_index, slice_count)); + if (delay == 0) { + // purge directly + mi_arena_purge(arena, slice_index, slice_count); + } + else { + // schedule purge + const mi_msecs_t expire = _mi_clock_now() + delay; + mi_msecs_t expire0 = 0; + if (mi_atomic_casi64_strong_acq_rel(&arena->purge_expire, &expire0, expire)) { + // expiration was not yet set + // maybe set the global arenas expire as well (if it wasn't set already) + mi_assert_internal(expire0==0); + mi_atomic_casi64_strong_acq_rel(&arena->subproc->purge_expire, &expire0, expire); + } + else { + // already an expiration was set + } + mi_bitmap_setN(arena->slices_purge, slice_index, slice_count, NULL); + } +} + +typedef struct mi_purge_visit_info_s { + mi_msecs_t now; + mi_msecs_t delay; + bool all_purged; + bool any_purged; +} mi_purge_visit_info_t; + +static bool mi_arena_try_purge_range(mi_arena_t* arena, size_t slice_index, size_t slice_count) { + if (mi_bbitmap_try_clearN(arena->slices_free, slice_index, slice_count)) { + // purge + bool decommitted = mi_arena_purge(arena, slice_index, slice_count); MI_UNUSED(decommitted); + mi_assert_internal(!decommitted || mi_bitmap_is_clearN(arena->slices_committed, slice_index, slice_count)); + // and reset the free range + mi_bbitmap_setN(arena->slices_free, slice_index, slice_count); + return true; + } + else { + // was allocated again already + return false; + } +} + +static bool mi_arena_try_purge_visitor(size_t slice_index, size_t slice_count, mi_arena_t* arena, void* arg) { + mi_purge_visit_info_t* vinfo = (mi_purge_visit_info_t*)arg; + // try to purge: first claim the free blocks + if 
(mi_arena_try_purge_range(arena, slice_index, slice_count)) { + vinfo->any_purged = true; + vinfo->all_purged = true; + } + else if (slice_count > 1) + { + // failed to claim the full range, try per slice instead + for (size_t i = 0; i < slice_count; i++) { + const bool purged = mi_arena_try_purge_range(arena, slice_index + i, 1); + vinfo->any_purged = vinfo->any_purged || purged; + vinfo->all_purged = vinfo->all_purged && purged; + } + } + // don't clear the purge bits as that is done atomically be the _bitmap_forall_set_ranges + // mi_bitmap_clearN(arena->slices_purge, slice_index, slice_count); + return true; // continue +} + +// returns true if anything was purged +static bool mi_arena_try_purge(mi_arena_t* arena, mi_msecs_t now, bool force) +{ + // check pre-conditions + if (arena->memid.is_pinned) return false; + + // expired yet? + mi_msecs_t expire = mi_atomic_loadi64_relaxed(&arena->purge_expire); + if (!force && (expire == 0 || expire > now)) return false; + + // reset expire + mi_atomic_storei64_release(&arena->purge_expire, (mi_msecs_t)0); + mi_subproc_stat_counter_increase(arena->subproc, arena_purges, 1); + + // go through all purge info's (with max MI_BFIELD_BITS ranges at a time) + // this also clears those ranges atomically (so any newly freed blocks will get purged next + // time around) + mi_purge_visit_info_t vinfo = { now, mi_arena_purge_delay(), true /*all?*/, false /*any?*/}; + _mi_bitmap_forall_setc_ranges(arena->slices_purge, &mi_arena_try_purge_visitor, arena, &vinfo); + + return vinfo.any_purged; +} + + +static void mi_arenas_try_purge(bool force, bool visit_all, mi_tld_t* tld) +{ + // try purge can be called often so try to only run when needed + const long delay = mi_arena_purge_delay(); + if (_mi_preloading() || delay <= 0) return; // nothing will be scheduled + + // check if any arena needs purging? 
+ mi_subproc_t* subproc = tld->subproc; + const mi_msecs_t now = _mi_clock_now(); + const mi_msecs_t arenas_expire = mi_atomic_loadi64_acquire(&subproc->purge_expire); + if (!visit_all && !force && (arenas_expire == 0 || arenas_expire > now)) return; + + const size_t max_arena = mi_arenas_get_count(subproc); + if (max_arena == 0) return; + + // allow only one thread to purge at a time (todo: allow concurrent purging?) + static mi_atomic_guard_t purge_guard; + mi_atomic_guard(&purge_guard) + { + // increase global expire: at most one purge per delay cycle + if (arenas_expire > now) { mi_atomic_storei64_release(&subproc->purge_expire, now + (delay/10)); } + const size_t arena_start = tld->thread_seq % max_arena; + size_t max_purge_count = (visit_all ? max_arena : (max_arena/4)+1); + bool all_visited = true; + bool any_purged = false; + for (size_t _i = 0; _i < max_arena; _i++) { + size_t i = _i + arena_start; + if (i >= max_arena) { i -= max_arena; } + mi_arena_t* arena = mi_arena_from_index(subproc,i); + if (arena != NULL) { + if (mi_arena_try_purge(arena, now, force)) { + any_purged = true; + if (max_purge_count <= 1) { + all_visited = false; + break; + } + max_purge_count--; + } + } + } + if (all_visited && !any_purged) { + mi_atomic_storei64_release(&subproc->purge_expire, 0); + } + } +} + +/* ----------------------------------------------------------- + Visit abandoned pages +----------------------------------------------------------- */ + +typedef struct mi_abandoned_page_visit_info_s { + int heap_tag; + mi_block_visit_fun* visitor; + void* arg; + bool visit_blocks; +} mi_abandoned_page_visit_info_t; + +static bool abandoned_page_visit(mi_page_t* page, mi_abandoned_page_visit_info_t* vinfo) { + if (page->heap_tag != vinfo->heap_tag) { return true; } // continue + mi_heap_area_t area; + _mi_heap_area_init(&area, page); + if (!vinfo->visitor(NULL, &area, NULL, area.block_size, vinfo->arg)) { + return false; + } + if (vinfo->visit_blocks) { + return 
_mi_heap_area_visit_blocks(&area, page, vinfo->visitor, vinfo->arg); + } + else { + return true; + } +} + +static bool abandoned_page_visit_at(size_t slice_index, size_t slice_count, mi_arena_t* arena, void* arg) { + MI_UNUSED(slice_count); + mi_abandoned_page_visit_info_t* vinfo = (mi_abandoned_page_visit_info_t*)arg; + mi_page_t* page = (mi_page_t*)mi_arena_slice_start(arena, slice_index); + mi_assert_internal(mi_page_is_abandoned_mapped(page)); + return abandoned_page_visit(page, vinfo); +} + +// Visit all abandoned pages in this subproc. +bool mi_abandoned_visit_blocks(mi_subproc_id_t subproc_id, int heap_tag, bool visit_blocks, mi_block_visit_fun* visitor, void* arg) { + mi_abandoned_page_visit_info_t visit_info = { heap_tag, visitor, arg, visit_blocks }; + MI_UNUSED(subproc_id); MI_UNUSED(heap_tag); MI_UNUSED(visit_blocks); MI_UNUSED(visitor); MI_UNUSED(arg); + + // visit abandoned pages in the arenas + // we don't have to claim because we assume we are the only thread running (in this subproc). + // (but we could atomically claim as well by first doing abandoned_reclaim and afterwards reabandoning). + bool ok = true; + mi_subproc_t* subproc = _mi_subproc_from_id(subproc_id); + mi_forall_arenas(subproc, NULL, 0, arena) { + mi_assert_internal(arena->subproc == subproc); + for (size_t bin = 0; ok && bin < MI_BIN_COUNT; bin++) { + // todo: if we had a single abandoned page map as well, this can be faster. 
+ if (mi_atomic_load_relaxed(&subproc->abandoned_count[bin]) > 0) { + ok = _mi_bitmap_forall_set(arena->pages_abandoned[bin], &abandoned_page_visit_at, arena, &visit_info); + } + } + } + mi_forall_arenas_end(); + if (!ok) return false; + + // visit abandoned pages in OS allocated memory + // (technically we don't need the lock as we assume we are the only thread running in this subproc) + mi_lock(&subproc->os_abandoned_pages_lock) { + for (mi_page_t* page = subproc->os_abandoned_pages; ok && page != NULL; page = page->next) { + ok = abandoned_page_visit(page, &visit_info); + } + } + + return ok; +} + + +/* ----------------------------------------------------------- + Unloading and reloading an arena. +----------------------------------------------------------- */ +static bool mi_arena_page_register(size_t slice_index, size_t slice_count, mi_arena_t* arena, void* arg) { + MI_UNUSED(arg); MI_UNUSED(slice_count); + mi_assert_internal(slice_count == 1); + mi_page_t* page = (mi_page_t*)mi_arena_slice_start(arena, slice_index); + mi_assert_internal(mi_bitmap_is_setN(page->memid.mem.arena.arena->pages, page->memid.mem.arena.slice_index, 1)); + _mi_page_map_register(page); + mi_assert_internal(_mi_ptr_page(page)==page); + return true; +} + +static bool mi_arena_pages_reregister(mi_arena_t* arena) { + return _mi_bitmap_forall_set(arena->pages, &mi_arena_page_register, arena, NULL); +} + +mi_decl_export bool mi_arena_unload(mi_arena_id_t arena_id, void** base, size_t* accessed_size, size_t* full_size) { + mi_arena_t* arena = _mi_arena_from_id(arena_id); + if (arena==NULL) { + return false; + } + else if (!arena->is_exclusive) { + _mi_warning_message("cannot unload a non-exclusive arena (id %zu at %p)\n", arena_id, arena); + return false; + } + else if (arena->memid.memkind != MI_MEM_EXTERNAL) { + _mi_warning_message("can only unload managed arena's for external memory (id %zu at %p)\n", arena_id, arena); + return false; + } + + // find accessed size + const size_t asize = 
mi_size_of_slices(mi_arena_used_slices(arena)); + if (base != NULL) { *base = (void*)arena; } + if (full_size != NULL) { *full_size = arena->memid.mem.os.size; } + if (accessed_size != NULL) { *accessed_size = asize; } + + // adjust abandoned page count + mi_subproc_t* const subproc = arena->subproc; + for (size_t bin = 0; bin < MI_BIN_COUNT; bin++) { + const size_t count = mi_bitmap_popcount(arena->pages_abandoned[bin]); + if (count > 0) { mi_atomic_decrement_acq_rel(&subproc->abandoned_count[bin]); } + } + + // unregister the pages + _mi_page_map_unregister_range(arena, asize); + + // set arena entry to NULL + const size_t count = mi_arenas_get_count(subproc); + for(size_t i = 0; i < count; i++) { + if (mi_arena_from_index(subproc, i) == arena) { + mi_atomic_store_ptr_release(mi_arena_t, &subproc->arenas[i], NULL); + if (i + 1 == count) { // try adjust the count? + size_t expected = count; + mi_atomic_cas_strong_acq_rel(&subproc->arena_count, &expected, count-1); + } + break; + } + } + return true; +} + +mi_decl_export bool mi_arena_reload(void* start, size_t size, mi_commit_fun_t* commit_fun, void* commit_fun_arg, mi_arena_id_t* arena_id) { + // assume the memory area is already containing the arena + if (arena_id != NULL) { *arena_id = _mi_arena_id_none(); } + if (start == NULL || size == 0) return false; + mi_arena_t* arena = (mi_arena_t*)start; + mi_memid_t memid = arena->memid; + if (memid.memkind != MI_MEM_EXTERNAL) { + _mi_warning_message("can only reload arena's from external memory (%p)\n", arena); + return false; + } + if (memid.mem.os.base != start) { + _mi_warning_message("the reloaded arena base address differs from the external memory (arena: %p, external: %p)\n", arena, start); + return false; + } + if (memid.mem.os.size != size) { + _mi_warning_message("the reloaded arena size differs from the external memory (arena size: %zu, external size: %zu)\n", arena->memid.mem.os.size, size); + return false; + } + if (!arena->is_exclusive) { + 
_mi_warning_message("the reloaded arena is not exclusive\n"); + return false; + } + + // re-initialize + arena->is_exclusive = true; + arena->commit_fun = commit_fun; + arena->commit_fun_arg = commit_fun_arg; + arena->subproc = _mi_subproc(); + if (!mi_arenas_add(arena->subproc, arena, arena_id)) { + return false; + } + mi_arena_pages_reregister(arena); + + // adjust abandoned page count + for (size_t bin = 0; bin < MI_BIN_COUNT; bin++) { + const size_t count = mi_bitmap_popcount(arena->pages_abandoned[bin]); + if (count > 0) { mi_atomic_decrement_acq_rel(&arena->subproc->abandoned_count[bin]); } + } + + return true; +} + + diff --git a/depends/mimalloc/src/bitmap.c b/depends/mimalloc/src/bitmap.c index 4fc7a1f3d140..66ebc157b04c 100644 --- a/depends/mimalloc/src/bitmap.c +++ b/depends/mimalloc/src/bitmap.c @@ -1,414 +1,1730 @@ /* ---------------------------------------------------------------------------- -Copyright (c) 2019-2021 Microsoft Research, Daan Leijen +Copyright (c) 2019-2024 Microsoft Research, Daan Leijen This is free software; you can redistribute it and/or modify it under the terms of the MIT license. A copy of the license can be found in the file "LICENSE" at the root of this distribution. -----------------------------------------------------------------------------*/ /* ---------------------------------------------------------------------------- -Concurrent bitmap that can set/reset sequences of bits atomically, -represeted as an array of fields where each field is a machine word (`size_t`) - -There are two api's; the standard one cannot have sequences that cross -between the bitmap fields (and a sequence must be <= MI_BITMAP_FIELD_BITS). -(this is used in region allocation) - -The `_across` postfixed functions do allow sequences that can cross over -between the fields. 
(This is used in arena allocation) +Concurrent bitmap that can set/reset sequences of bits atomically ---------------------------------------------------------------------------- */ #include "mimalloc.h" -#include "mimalloc-internal.h" +#include "mimalloc/internal.h" +#include "mimalloc/bits.h" #include "bitmap.h" -/* ----------------------------------------------------------- - Bitmap definition ------------------------------------------------------------ */ +#ifndef MI_OPT_SIMD +#define MI_OPT_SIMD 0 +#endif + +/* -------------------------------------------------------------------------------- + bfields +-------------------------------------------------------------------------------- */ -// The bit mask for a given number of blocks at a specified bit index. -static inline size_t mi_bitmap_mask_(size_t count, size_t bitidx) { - mi_assert_internal(count + bitidx <= MI_BITMAP_FIELD_BITS); - mi_assert_internal(count > 0); - if (count >= MI_BITMAP_FIELD_BITS) return MI_BITMAP_FIELD_FULL; - if (count == 0) return 0; - return ((((size_t)1 << count) - 1) << bitidx); +static inline size_t mi_bfield_ctz(mi_bfield_t x) { + return mi_ctz(x); } +static inline size_t mi_bfield_clz(mi_bfield_t x) { + return mi_clz(x); +} -/* ----------------------------------------------------------- - Claim a bit sequence atomically ------------------------------------------------------------ */ +static inline size_t mi_bfield_popcount(mi_bfield_t x) { + return mi_popcount(x); +} -// Try to atomically claim a sequence of `count` bits in a single -// field at `idx` in `bitmap`. Returns `true` on success. 
-inline bool _mi_bitmap_try_find_claim_field(mi_bitmap_t bitmap, size_t idx, const size_t count, mi_bitmap_index_t* bitmap_idx) -{ - mi_assert_internal(bitmap_idx != NULL); - mi_assert_internal(count <= MI_BITMAP_FIELD_BITS); - mi_assert_internal(count > 0); - mi_bitmap_field_t* field = &bitmap[idx]; - size_t map = mi_atomic_load_relaxed(field); - if (map==MI_BITMAP_FIELD_FULL) return false; // short cut - - // search for 0-bit sequence of length count - const size_t mask = mi_bitmap_mask_(count, 0); - const size_t bitidx_max = MI_BITMAP_FIELD_BITS - count; - -#ifdef MI_HAVE_FAST_BITSCAN - size_t bitidx = mi_ctz(~map); // quickly find the first zero bit if possible +static inline mi_bfield_t mi_bfield_clear_least_bit(mi_bfield_t x) { + return (x & (x-1)); +} + +// find the least significant bit that is set (i.e. count trailing zero's) +// return false if `x==0` (with `*idx` undefined) and true otherwise, +// with the `idx` is set to the bit index (`0 <= *idx < MI_BFIELD_BITS`). +static inline bool mi_bfield_find_least_bit(mi_bfield_t x, size_t* idx) { + return mi_bsf(x,idx); +} + +// find the most significant bit that is set. +// return false if `x==0` (with `*idx` undefined) and true otherwise, +// with the `idx` is set to the bit index (`0 <= *idx < MI_BFIELD_BITS`). +static inline bool mi_bfield_find_highest_bit(mi_bfield_t x, size_t* idx) { + return mi_bsr(x, idx); +} + + + +// find each set bit in a bit field `x` and clear it, until it becomes zero. 
+static inline bool mi_bfield_foreach_bit(mi_bfield_t* x, size_t* idx) { + const bool found = mi_bfield_find_least_bit(*x, idx); + *x = mi_bfield_clear_least_bit(*x); + return found; +} + +static inline mi_bfield_t mi_bfield_zero(void) { + return 0; +} + +static inline mi_bfield_t mi_bfield_one(void) { + return 1; +} + +static inline mi_bfield_t mi_bfield_all_set(void) { + return ~((mi_bfield_t)0); +} + +// mask of `bit_count` bits set shifted to the left by `shiftl` +static inline mi_bfield_t mi_bfield_mask(size_t bit_count, size_t shiftl) { + mi_assert_internal(bit_count > 0); + mi_assert_internal(bit_count + shiftl <= MI_BFIELD_BITS); + const mi_bfield_t mask0 = (bit_count < MI_BFIELD_BITS ? (mi_bfield_one() << bit_count)-1 : mi_bfield_all_set()); + return (mask0 << shiftl); +} + + +// ------- mi_bfield_atomic_set --------------------------------------- +// the `_set` functions return also the count of bits that were already set (for commit statistics) +// the `_clear` functions return also whether the new bfield is all clear or not (for the chunk_map) + +// Set a bit atomically. Returns `true` if the bit transitioned from 0 to 1 +static inline bool mi_bfield_atomic_set(_Atomic(mi_bfield_t)*b, size_t idx) { + mi_assert_internal(idx < MI_BFIELD_BITS); + const mi_bfield_t mask = mi_bfield_mask(1, idx);; + const mi_bfield_t old = mi_atomic_or_acq_rel(b, mask); + return ((old&mask) == 0); +} + +// Clear a bit atomically. Returns `true` if the bit transitioned from 1 to 0. +// `all_clear` is set if the new bfield is zero. +static inline bool mi_bfield_atomic_clear(_Atomic(mi_bfield_t)*b, size_t idx, bool* all_clear) { + mi_assert_internal(idx < MI_BFIELD_BITS); + const mi_bfield_t mask = mi_bfield_mask(1, idx);; + mi_bfield_t old = mi_atomic_and_acq_rel(b, ~mask); + if (all_clear != NULL) { *all_clear = ((old&~mask)==0); } + return ((old&mask) == mask); +} + +// Clear a bit but only when/once it is set. 
This is used by concurrent free's while +// the page is abandoned and mapped. This can incure a busy wait :-( but it should +// happen almost never (and is accounted for in the stats) +static inline void mi_bfield_atomic_clear_once_set(_Atomic(mi_bfield_t)*b, size_t idx) { + mi_assert_internal(idx < MI_BFIELD_BITS); + const mi_bfield_t mask = mi_bfield_mask(1, idx);; + mi_bfield_t old = mi_atomic_load_relaxed(b); + do { + if mi_unlikely((old&mask) == 0) { + old = mi_atomic_load_acquire(b); + if ((old&mask)==0) { + mi_subproc_stat_counter_increase(_mi_subproc(), pages_unabandon_busy_wait, 1); + } + while ((old&mask)==0) { // busy wait + mi_atomic_yield(); + old = mi_atomic_load_acquire(b); + } + } + } while (!mi_atomic_cas_weak_acq_rel(b,&old, (old&~mask))); + mi_assert_internal((old&mask)==mask); // we should only clear when it was set +} + +// Set a mask set of bits atomically, and return true of the mask bits transitioned from all 0's to 1's. +// `already_set` contains the count of bits that were already set (used when committing ranges to account +// statistics correctly). +static inline bool mi_bfield_atomic_set_mask(_Atomic(mi_bfield_t)*b, mi_bfield_t mask, size_t* already_set) { + mi_assert_internal(mask != 0); + mi_bfield_t old = mi_atomic_load_relaxed(b); + while (!mi_atomic_cas_weak_acq_rel(b, &old, old|mask)) {}; // try to atomically set the mask bits until success + if (already_set!=NULL) { *already_set = mi_bfield_popcount(old&mask); } + return ((old&mask) == 0); +} + +// Clear a mask set of bits atomically, and return true of the mask bits transitioned from all 1's to 0's +// `all_clear` is set to `true` if the new bfield became zero. 
+static inline bool mi_bfield_atomic_clear_mask(_Atomic(mi_bfield_t)*b, mi_bfield_t mask, bool* all_clear) { + mi_assert_internal(mask != 0); + mi_bfield_t old = mi_atomic_load_relaxed(b); + while (!mi_atomic_cas_weak_acq_rel(b, &old, old&~mask)) {}; // try to atomically clear the mask bits until success + if (all_clear != NULL) { *all_clear = ((old&~mask)==0); } + return ((old&mask) == mask); +} + +static inline bool mi_bfield_atomic_setX(_Atomic(mi_bfield_t)*b, size_t* already_set) { + const mi_bfield_t old = mi_atomic_exchange_release(b, mi_bfield_all_set()); + if (already_set!=NULL) { *already_set = mi_bfield_popcount(old); } + return (old==0); +} + +// static inline bool mi_bfield_atomic_clearX(_Atomic(mi_bfield_t)*b, bool* all_clear) { +// const mi_bfield_t old = mi_atomic_exchange_release(b, mi_bfield_zero()); +// if (all_clear!=NULL) { *all_clear = true; } +// return (~old==0); +// } + +// ------- mi_bfield_atomic_try_clear --------------------------------------- + + +// Tries to clear a mask atomically, and returns true if the mask bits atomically transitioned from mask to 0 +// and false otherwise (leaving the bit field as is). +// `all_clear` is set to `true` if the new bfield became zero. +static inline bool mi_bfield_atomic_try_clear_mask_of(_Atomic(mi_bfield_t)*b, mi_bfield_t mask, mi_bfield_t expect, bool* all_clear) { + mi_assert_internal(mask != 0); + // try to atomically clear the mask bits + do { + if ((expect & mask) != mask) { // are all bits still set? 
+ if (all_clear != NULL) { *all_clear = (expect == 0); } + return false; + } + } while (!mi_atomic_cas_weak_acq_rel(b, &expect, expect & ~mask)); + if (all_clear != NULL) { *all_clear = ((expect & ~mask) == 0); } + return true; +} + +static inline bool mi_bfield_atomic_try_clear_mask(_Atomic(mi_bfield_t)* b, mi_bfield_t mask, bool* all_clear) { + mi_assert_internal(mask != 0); + const mi_bfield_t expect = mi_atomic_load_relaxed(b); + return mi_bfield_atomic_try_clear_mask_of(b, mask, expect, all_clear); +} + +// Tries to clear a bit atomically. Returns `true` if the bit transitioned from 1 to 0 +// and `false` otherwise leaving the bfield `b` as-is. +// `all_clear` is set to true if the new bfield became zero (and false otherwise) +mi_decl_maybe_unused static inline bool mi_bfield_atomic_try_clear(_Atomic(mi_bfield_t)* b, size_t idx, bool* all_clear) { + mi_assert_internal(idx < MI_BFIELD_BITS); + const mi_bfield_t mask = mi_bfield_one()<bfields[i], idx); + if (already_set != NULL) { *already_set = (was_clear ? 0 : 1); } + return was_clear; +} + +// Set `0 < n <= MI_BFIELD_BITS`, and return true of the mask bits transitioned from all 0's to 1's. +// `already_set` contains the count of bits that were already set (used when committing ranges to account +// statistics correctly). +// Can cross over two bfields. 
+static inline bool mi_bchunk_setNX(mi_bchunk_t* chunk, size_t cidx, size_t n, size_t* already_set) { + mi_assert_internal(cidx < MI_BCHUNK_BITS); + mi_assert_internal(n > 0 && n <= MI_BFIELD_BITS); + const size_t i = cidx / MI_BFIELD_BITS; + const size_t idx = cidx % MI_BFIELD_BITS; + if mi_likely(idx + n <= MI_BFIELD_BITS) { + // within one field + return mi_bfield_atomic_set_mask(&chunk->bfields[i], mi_bfield_mask(n,idx), already_set); + } + else { + // spanning two fields + const size_t m = MI_BFIELD_BITS - idx; // bits to clear in the first field + mi_assert_internal(m < n); + mi_assert_internal(i < MI_BCHUNK_FIELDS - 1); + mi_assert_internal(idx + m <= MI_BFIELD_BITS); + size_t already_set1; + const bool all_set1 = mi_bfield_atomic_set_mask(&chunk->bfields[i], mi_bfield_mask(m, idx), &already_set1); + mi_assert_internal(n - m > 0); + mi_assert_internal(n - m < MI_BFIELD_BITS); + size_t already_set2; + const bool all_set2 = mi_bfield_atomic_set_mask(&chunk->bfields[i+1], mi_bfield_mask(n - m, 0), &already_set2); + if (already_set != NULL) { *already_set = already_set1 + already_set2; } + return (all_set1 && all_set2); + } +} + +// Set a sequence of `n` bits within a chunk. +// Returns true if all bits transitioned from 0 to 1 (or 1 to 0). 
+mi_decl_noinline static bool mi_bchunk_xsetN_(mi_xset_t set, mi_bchunk_t* chunk, size_t cidx, size_t n, size_t* palready_set, bool* pmaybe_all_clear) { + mi_assert_internal(cidx + n <= MI_BCHUNK_BITS); + mi_assert_internal(n>0); + bool all_transition = true; + bool maybe_all_clear = true; + size_t total_already_set = 0; + size_t idx = cidx % MI_BFIELD_BITS; + size_t field = cidx / MI_BFIELD_BITS; + while (n > 0) { + size_t m = MI_BFIELD_BITS - idx; // m is the bits to xset in this field + if (m > n) { m = n; } + mi_assert_internal(idx + m <= MI_BFIELD_BITS); + mi_assert_internal(field < MI_BCHUNK_FIELDS); + const mi_bfield_t mask = mi_bfield_mask(m, idx); + size_t already_set = 0; + bool all_clear = false; + const bool transition = (set ? mi_bfield_atomic_set_mask(&chunk->bfields[field], mask, &already_set) + : mi_bfield_atomic_clear_mask(&chunk->bfields[field], mask, &all_clear)); + mi_assert_internal((transition && already_set == 0) || (!transition && already_set > 0)); + all_transition = all_transition && transition; + total_already_set += already_set; + maybe_all_clear = maybe_all_clear && all_clear; + // next field + field++; + idx = 0; + mi_assert_internal(m <= n); + n -= m; + } + if (palready_set!=NULL) { *palready_set = total_already_set; } + if (pmaybe_all_clear!=NULL) { *pmaybe_all_clear = maybe_all_clear; } + return all_transition; +} + +static inline bool mi_bchunk_setN(mi_bchunk_t* chunk, size_t cidx, size_t n, size_t* already_set) { + mi_assert_internal(n>0 && n <= MI_BCHUNK_BITS); + if (n==1) return mi_bchunk_set(chunk, cidx, already_set); + // if (n==8 && (cidx%8) == 0) return mi_bchunk_set8(chunk, cidx, already_set); + // if (n==MI_BFIELD_BITS) return mi_bchunk_setX(chunk, cidx, already_set); + if (n<=MI_BFIELD_BITS) return mi_bchunk_setNX(chunk, cidx, n, already_set); + return mi_bchunk_xsetN_(MI_BIT_SET, chunk, cidx, n, already_set, NULL); +} + +// ------- mi_bchunk_clear --------------------------------------- + +static inline bool 
mi_bchunk_clear(mi_bchunk_t* chunk, size_t cidx, bool* all_clear) { + mi_assert_internal(cidx < MI_BCHUNK_BITS); + const size_t i = cidx / MI_BFIELD_BITS; + const size_t idx = cidx % MI_BFIELD_BITS; + return mi_bfield_atomic_clear(&chunk->bfields[i], idx, all_clear); +} + +static inline bool mi_bchunk_clearN(mi_bchunk_t* chunk, size_t cidx, size_t n, bool* maybe_all_clear) { + mi_assert_internal(n>0 && n <= MI_BCHUNK_BITS); + if (n==1) return mi_bchunk_clear(chunk, cidx, maybe_all_clear); + // if (n==8) return mi_bchunk_clear8(chunk, cidx, maybe_all_clear); + // if (n==MI_BFIELD_BITS) return mi_bchunk_clearX(chunk, cidx, maybe_all_clear); + // TODO: implement mi_bchunk_xsetNX instead of setNX + return mi_bchunk_xsetN_(MI_BIT_CLEAR, chunk, cidx, n, NULL, maybe_all_clear); +} + +// Check if a sequence of `n` bits within a chunk are all set/cleared. +// This can cross bfield's +mi_decl_noinline static size_t mi_bchunk_popcountN_(mi_bchunk_t* chunk, size_t field_idx, size_t idx, size_t n) { + mi_assert_internal((field_idx*MI_BFIELD_BITS) + idx + n <= MI_BCHUNK_BITS); + size_t count = 0; + while (n > 0) { + size_t m = MI_BFIELD_BITS - idx; // m is the bits to xset in this field + if (m > n) { m = n; } + mi_assert_internal(idx + m <= MI_BFIELD_BITS); + mi_assert_internal(field_idx < MI_BCHUNK_FIELDS); + const size_t mask = mi_bfield_mask(m, idx); + count += mi_bfield_atomic_popcount_mask(&chunk->bfields[field_idx], mask); + // next field + field_idx++; + idx = 0; + n -= m; + } + return count; +} + +// Count set bits a sequence of `n` bits. +static inline size_t mi_bchunk_popcountN(mi_bchunk_t* chunk, size_t cidx, size_t n) { + mi_assert_internal(cidx + n <= MI_BCHUNK_BITS); + mi_assert_internal(n>0); + if (n==0) return 0; + const size_t i = cidx / MI_BFIELD_BITS; + const size_t idx = cidx % MI_BFIELD_BITS; + if (n==1) { return (mi_bfield_atomic_is_set(&chunk->bfields[i], idx) ? 
1 : 0); } + if (idx + n <= MI_BFIELD_BITS) { return mi_bfield_atomic_popcount_mask(&chunk->bfields[i], mi_bfield_mask(n, idx)); } + return mi_bchunk_popcountN_(chunk, i, idx, n); +} + + +// ------- mi_bchunk_is_xset --------------------------------------- + +// Check if a sequence of `n` bits within a chunk are all set/cleared. +// This can cross bfield's +mi_decl_noinline static bool mi_bchunk_is_xsetN_(mi_xset_t set, const mi_bchunk_t* chunk, size_t field_idx, size_t idx, size_t n) { + mi_assert_internal((field_idx*MI_BFIELD_BITS) + idx + n <= MI_BCHUNK_BITS); + while (n > 0) { + size_t m = MI_BFIELD_BITS - idx; // m is the bits to xset in this field + if (m > n) { m = n; } + mi_assert_internal(idx + m <= MI_BFIELD_BITS); + mi_assert_internal(field_idx < MI_BCHUNK_FIELDS); + const size_t mask = mi_bfield_mask(m, idx); + if (!mi_bfield_atomic_is_xset_mask(set, &chunk->bfields[field_idx], mask)) { + return false; + } + // next field + field_idx++; + idx = 0; + n -= m; + } + return true; +} + +// Check if a sequence of `n` bits within a chunk are all set/cleared. +static inline bool mi_bchunk_is_xsetN(mi_xset_t set, const mi_bchunk_t* chunk, size_t cidx, size_t n) { + mi_assert_internal(cidx + n <= MI_BCHUNK_BITS); + mi_assert_internal(n>0); + if (n==0) return true; + const size_t i = cidx / MI_BFIELD_BITS; + const size_t idx = cidx % MI_BFIELD_BITS; + if (n==1) { return mi_bfield_atomic_is_xset(set, &chunk->bfields[i], idx); } + if (idx + n <= MI_BFIELD_BITS) { return mi_bfield_atomic_is_xset_mask(set, &chunk->bfields[i], mi_bfield_mask(n, idx)); } + return mi_bchunk_is_xsetN_(set, chunk, i, idx, n); +} + + +// ------- mi_bchunk_try_clear --------------------------------------- + +// Clear `0 < n <= MI_BITFIELD_BITS`. Can cross over a bfield boundary. 
+static inline bool mi_bchunk_try_clearNX(mi_bchunk_t* chunk, size_t cidx, size_t n, bool* pmaybe_all_clear) { + mi_assert_internal(cidx < MI_BCHUNK_BITS); + mi_assert_internal(n <= MI_BFIELD_BITS); + const size_t i = cidx / MI_BFIELD_BITS; + const size_t idx = cidx % MI_BFIELD_BITS; + if mi_likely(idx + n <= MI_BFIELD_BITS) { + // within one field + return mi_bfield_atomic_try_clear_mask(&chunk->bfields[i], mi_bfield_mask(n, idx), pmaybe_all_clear); + } + else { + // spanning two fields (todo: use double-word atomic ops?) + const size_t m = MI_BFIELD_BITS - idx; // bits to clear in the first field + mi_assert_internal(m < n); + mi_assert_internal(i < MI_BCHUNK_FIELDS - 1); + bool field1_is_clear; + if (!mi_bfield_atomic_try_clear_mask(&chunk->bfields[i], mi_bfield_mask(m, idx), &field1_is_clear)) return false; + // try the second field as well + mi_assert_internal(n - m > 0); + mi_assert_internal(n - m < MI_BFIELD_BITS); + bool field2_is_clear; + if (!mi_bfield_atomic_try_clear_mask(&chunk->bfields[i+1], mi_bfield_mask(n - m, 0), &field2_is_clear)) { + // we failed to clear the second field, restore the first one + mi_bfield_atomic_set_mask(&chunk->bfields[i], mi_bfield_mask(m, idx), NULL); + return false; + } + if (pmaybe_all_clear != NULL) { *pmaybe_all_clear = field1_is_clear && field2_is_clear; } + return true; + } +} + +// Clear a full aligned bfield. +// static inline bool mi_bchunk_try_clearX(mi_bchunk_t* chunk, size_t cidx, bool* pmaybe_all_clear) { +// mi_assert_internal(cidx < MI_BCHUNK_BITS); +// mi_assert_internal((cidx%MI_BFIELD_BITS) == 0); +// const size_t i = cidx / MI_BFIELD_BITS; +// return mi_bfield_atomic_try_clearX(&chunk->bfields[i], pmaybe_all_clear); +// } + +// Try to atomically clear a sequence of `n` bits within a chunk. +// Returns true if all bits transitioned from 1 to 0, +// and false otherwise leaving all bit fields as is. +// Note: this is the complex one as we need to unwind partial atomic operations if we fail halfway.. 
+// `maybe_all_clear` is set to `true` if all the bfields involved become zero. +mi_decl_noinline static bool mi_bchunk_try_clearN_(mi_bchunk_t* chunk, size_t cidx, size_t n, bool* pmaybe_all_clear) { + mi_assert_internal(cidx + n <= MI_BCHUNK_BITS); + mi_assert_internal(n>0); + if (pmaybe_all_clear != NULL) { *pmaybe_all_clear = true; } + if (n==0) return true; + + // first field + const size_t start_idx = cidx % MI_BFIELD_BITS; + const size_t start_field = cidx / MI_BFIELD_BITS; + size_t field = start_field; + size_t m = MI_BFIELD_BITS - start_idx; // m are the bits to clear in this field + if (m > n) { m = n; } + mi_assert_internal(start_idx + m <= MI_BFIELD_BITS); + mi_assert_internal(start_field < MI_BCHUNK_FIELDS); + const mi_bfield_t mask_start = mi_bfield_mask(m, start_idx); + bool maybe_all_clear; + if (!mi_bfield_atomic_try_clear_mask(&chunk->bfields[field], mask_start, &maybe_all_clear)) return false; + + // done? + mi_assert_internal(m <= n); + n -= m; + + // continue with mid fields and last field: if these fail we need to recover by unsetting previous fields + // mid fields? + while (n >= MI_BFIELD_BITS) { + field++; + mi_assert_internal(field < MI_BCHUNK_FIELDS); + bool field_is_clear; + if (!mi_bfield_atomic_try_clearX(&chunk->bfields[field], &field_is_clear)) goto restore; + maybe_all_clear = maybe_all_clear && field_is_clear; + n -= MI_BFIELD_BITS; + } + + // last field? 
+ if (n > 0) { + mi_assert_internal(n < MI_BFIELD_BITS); + field++; + mi_assert_internal(field < MI_BCHUNK_FIELDS); + const mi_bfield_t mask_end = mi_bfield_mask(n, 0); + bool field_is_clear; + if (!mi_bfield_atomic_try_clear_mask(&chunk->bfields[field], mask_end, &field_is_clear)) goto restore; + maybe_all_clear = maybe_all_clear && field_is_clear; + } + + if (pmaybe_all_clear != NULL) { *pmaybe_all_clear = maybe_all_clear; } + return true; + +restore: + // `field` is the index of the field that failed to set atomically; we need to restore all previous fields + mi_assert_internal(field > start_field); + while( field > start_field) { + field--; + if (field == start_field) { + mi_bfield_atomic_set_mask(&chunk->bfields[field], mask_start, NULL); + } + else { + mi_bfield_atomic_setX(&chunk->bfields[field], NULL); // mid-field: set all bits again + } + } + return false; +} + + +static inline bool mi_bchunk_try_clearN(mi_bchunk_t* chunk, size_t cidx, size_t n, bool* maybe_all_clear) { + mi_assert_internal(n>0); + // if (n==MI_BFIELD_BITS) return mi_bchunk_try_clearX(chunk, cidx, maybe_all_clear); + if (n<=MI_BFIELD_BITS) return mi_bchunk_try_clearNX(chunk, cidx, n, maybe_all_clear); + return mi_bchunk_try_clearN_(chunk, cidx, n, maybe_all_clear); +} + + +// ------- mi_bchunk_try_find_and_clear --------------------------------------- + +#if MI_OPT_SIMD && defined(__AVX2__) +mi_decl_maybe_unused static inline __m256i mi_mm256_zero(void) { + return _mm256_setzero_si256(); +} +mi_decl_maybe_unused static inline __m256i mi_mm256_ones(void) { + return _mm256_set1_epi64x(~0); +} +mi_decl_maybe_unused static inline bool mi_mm256_is_ones(__m256i vec) { + return _mm256_testc_si256(vec, _mm256_cmpeq_epi32(vec, vec)); +} +mi_decl_maybe_unused static inline bool mi_mm256_is_zero( __m256i vec) { + return _mm256_testz_si256(vec,vec); +} +#endif + +static inline bool mi_bchunk_try_find_and_clear_at(mi_bchunk_t* chunk, size_t chunk_idx, size_t* pidx) { + mi_assert_internal(chunk_idx < 
MI_BCHUNK_FIELDS); + // note: this must be acquire (and not relaxed), or otherwise the AVX code below can loop forever + // as the compiler won't reload the registers vec1 and vec2 from memory again. + const mi_bfield_t b = mi_atomic_load_acquire(&chunk->bfields[chunk_idx]); + size_t idx; + if (mi_bfield_find_least_bit(b, &idx)) { // find the least bit + if mi_likely(mi_bfield_atomic_try_clear_mask_of(&chunk->bfields[chunk_idx], mi_bfield_mask(1,idx), b, NULL)) { // clear it atomically + *pidx = (chunk_idx*MI_BFIELD_BITS) + idx; + mi_assert_internal(*pidx < MI_BCHUNK_BITS); + return true; + } + } + return false; +} + +// Find least 1-bit in a chunk and try to clear it atomically +// set `*pidx` to the bit index (0 <= *pidx < MI_BCHUNK_BITS) on success. +// This is used to find free slices and abandoned pages and should be efficient. +// todo: try neon version +static inline bool mi_bchunk_try_find_and_clear(mi_bchunk_t* chunk, size_t* pidx) { + #if MI_OPT_SIMD && defined(__AVX2__) && (MI_BCHUNK_BITS==256) + while (true) { + const __m256i vec = _mm256_load_si256((const __m256i*)chunk->bfields); + const __m256i vcmp = _mm256_cmpeq_epi64(vec, mi_mm256_zero()); // (elem64 == 0 ? 
0xFF : 0) + const uint32_t mask = ~_mm256_movemask_epi8(vcmp); // mask of most significant bit of each byte (so each 8 bits are all set or clear) + // mask is inverted, so each 8-bits is 0xFF iff the corresponding elem64 has a bit set (and thus can be cleared) + if (mask==0) return false; + mi_assert_internal((_tzcnt_u32(mask)%8) == 0); // tzcnt == 0, 8, 16, or 24 + const size_t chunk_idx = _tzcnt_u32(mask) / 8; + if (mi_bchunk_try_find_and_clear_at(chunk, chunk_idx, pidx)) return true; + // try again + // note: there must be an atomic release/acquire in between or otherwise the registers may not be reloaded + } + #elif MI_OPT_SIMD && defined(__AVX2__) && (MI_BCHUNK_BITS==512) + while (true) { + size_t chunk_idx = 0; + #if 0 + // one vector at a time + __m256i vec = _mm256_load_si256((const __m256i*)chunk->bfields); + if (mi_mm256_is_zero(vec)) { + chunk_idx += 4; + vec = _mm256_load_si256(((const __m256i*)chunk->bfields) + 1); + } + const __m256i vcmp = _mm256_cmpeq_epi64(vec, mi_mm256_zero()); // (elem64 == 0 ? 0xFF : 0) + const uint32_t mask = ~_mm256_movemask_epi8(vcmp); // mask of most significant bit of each byte (so each 8 bits are all set or clear) + // mask is inverted, so each 8-bits is 0xFF iff the corresponding elem64 has a bit set (and thus can be cleared) + if (mask==0) return false; + mi_assert_internal((_tzcnt_u32(mask)%8) == 0); // tzcnt == 0, 8, 16, or 24 + chunk_idx += _tzcnt_u32(mask) / 8; + #else + // a cache line is 64b so we can just as well load all at the same time + const __m256i vec1 = _mm256_load_si256((const __m256i*)chunk->bfields); + const __m256i vec2 = _mm256_load_si256(((const __m256i*)chunk->bfields)+1); + const __m256i cmpv = mi_mm256_zero(); + const __m256i vcmp1 = _mm256_cmpeq_epi64(vec1, cmpv); // (elem64 == 0 ? 0xFF : 0) + const __m256i vcmp2 = _mm256_cmpeq_epi64(vec2, cmpv); // (elem64 == 0 ? 
0xFF : 0) + const uint32_t mask1 = ~_mm256_movemask_epi8(vcmp1); // mask of most significant bit of each byte (so each 8 bits are all set or clear) + const uint32_t mask2 = ~_mm256_movemask_epi8(vcmp2); // mask of most significant bit of each byte (so each 8 bits are all set or clear) + const uint64_t mask = ((uint64_t)mask2 << 32) | mask1; + // mask is inverted, so each 8-bits is 0xFF iff the corresponding elem64 has a bit set (and thus can be cleared) + if (mask==0) return false; + mi_assert_internal((_tzcnt_u64(mask)%8) == 0); // tzcnt == 0, 8, 16, 24 , .. + chunk_idx = mi_ctz(mask) / 8; + #endif + if (mi_bchunk_try_find_and_clear_at(chunk, chunk_idx, pidx)) return true; + // try again + // note: there must be an atomic release/acquire in between or otherwise the registers may not be reloaded + } + #elif MI_OPT_SIMD && (MI_BCHUNK_BITS==512) && MI_ARCH_ARM64 + while(true) { + // a cache line is 64b so we can just as well load all at the same time (?) + const uint64x2_t vzero1_lo = vceqzq_u64(vld1q_u64((uint64_t*)chunk->bfields)); // 2x64 bit is_zero + const uint64x2_t vzero1_hi = vceqzq_u64(vld1q_u64((uint64_t*)chunk->bfields + 2)); // 2x64 bit is_zero + const uint64x2_t vzero2_lo = vceqzq_u64(vld1q_u64((uint64_t*)chunk->bfields + 4)); // 2x64 bit is_zero + const uint64x2_t vzero2_hi = vceqzq_u64(vld1q_u64((uint64_t*)chunk->bfields + 6)); // 2x64 bit is_zero + const uint32x4_t vzero1 = vuzp1q_u32(vreinterpretq_u32_u64(vzero1_lo),vreinterpretq_u32_u64(vzero1_hi)); // unzip even elements: narrow to 4x32 bit is_zero () + const uint32x4_t vzero2 = vuzp1q_u32(vreinterpretq_u32_u64(vzero2_lo),vreinterpretq_u32_u64(vzero2_hi)); // unzip even elements: narrow to 4x32 bit is_zero () + const uint32x4_t vzero1x = vreinterpretq_u32_u64(vshrq_n_u64(vreinterpretq_u64_u32(vzero1), 24)); // shift-right 2x32bit elem by 24: lo 16 bits contain the 2 lo bytes + const uint32x4_t vzero2x = vreinterpretq_u32_u64(vshrq_n_u64(vreinterpretq_u64_u32(vzero2), 24)); + const uint16x8_t 
vzero12 = vreinterpretq_u16_u32(vuzp1q_u32(vzero1x,vzero2x)); // unzip even 32-bit elements into one vector + const uint8x8_t vzero = vmovn_u32(vzero12); // narrow the bottom 16-bits + const uint64_t mask = ~vget_lane_u64(vreinterpret_u64_u8(vzero), 0); // 1 byte for each bfield (0xFF => bfield has a bit set) + if (mask==0) return false; + mi_assert_internal((mi_ctz(mask)%8) == 0); // tzcnt == 0, 8, 16, 24 , .. + const size_t chunk_idx = mi_ctz(mask) / 8; + if (mi_bchunk_try_find_and_clear_at(chunk, chunk_idx, pidx)) return true; + // try again + // note: there must be an atomic release/acquire in between or otherwise the registers may not be reloaded + } + #else + for (int i = 0; i < MI_BCHUNK_FIELDS; i++) { + if (mi_bchunk_try_find_and_clear_at(chunk, i, pidx)) return true; + } + return false; + #endif +} + +static inline bool mi_bchunk_try_find_and_clear_1(mi_bchunk_t* chunk, size_t n, size_t* pidx) { + mi_assert_internal(n==1); MI_UNUSED(n); + return mi_bchunk_try_find_and_clear(chunk, pidx); +} + +mi_decl_maybe_unused static inline bool mi_bchunk_try_find_and_clear8_at(mi_bchunk_t* chunk, size_t chunk_idx, size_t* pidx) { + const mi_bfield_t b = mi_atomic_load_relaxed(&chunk->bfields[chunk_idx]); + // has_set8 has low bit in each byte set if the byte in x == 0xFF + const mi_bfield_t has_set8 = + ((~b - MI_BFIELD_LO_BIT8) & // high bit set if byte in x is 0xFF or < 0x7F + (b & MI_BFIELD_HI_BIT8)) // high bit set if byte in x is >= 0x80 + >> 7; // shift high bit to low bit + size_t idx; + if (mi_bfield_find_least_bit(has_set8, &idx)) { // find least 1-bit + mi_assert_internal(idx <= (MI_BFIELD_BITS - 8)); + mi_assert_internal((idx%8)==0); + if mi_likely(mi_bfield_atomic_try_clear_mask_of(&chunk->bfields[chunk_idx], (mi_bfield_t)0xFF << idx, b, NULL)) { // unset the byte atomically + *pidx = (chunk_idx*MI_BFIELD_BITS) + idx; + mi_assert_internal(*pidx + 8 <= MI_BCHUNK_BITS); + return true; + } + } + return false; +} + +// find least aligned byte in a chunk with 
all bits set, and try unset it atomically +// set `*pidx` to its bit index (0 <= *pidx < MI_BCHUNK_BITS) on success. +// Used to find medium size pages in the free blocks. +// todo: try neon version +static mi_decl_noinline bool mi_bchunk_try_find_and_clear8(mi_bchunk_t* chunk, size_t* pidx) { + #if MI_OPT_SIMD && defined(__AVX2__) && (MI_BCHUNK_BITS==512) + while (true) { + // since a cache-line is 64b, load all at once + const __m256i vec1 = _mm256_load_si256((const __m256i*)chunk->bfields); + const __m256i vec2 = _mm256_load_si256((const __m256i*)chunk->bfields+1); + const __m256i cmpv = mi_mm256_ones(); + const __m256i vcmp1 = _mm256_cmpeq_epi8(vec1, cmpv); // (byte == ~0 ? 0xFF : 0) + const __m256i vcmp2 = _mm256_cmpeq_epi8(vec2, cmpv); // (byte == ~0 ? 0xFF : 0) + const uint32_t mask1 = _mm256_movemask_epi8(vcmp1); // mask of most significant bit of each byte + const uint32_t mask2 = _mm256_movemask_epi8(vcmp2); // mask of most significant bit of each byte + const uint64_t mask = ((uint64_t)mask2 << 32) | mask1; + // mask is inverted, so each bit is 0xFF iff the corresponding byte has a bit set (and thus can be cleared) + if (mask==0) return false; + const size_t bidx = _tzcnt_u64(mask); // byte-idx of the byte in the chunk + const size_t chunk_idx = bidx / 8; + const size_t idx = (bidx % 8)*8; + mi_assert_internal(chunk_idx < MI_BCHUNK_FIELDS); + if mi_likely(mi_bfield_atomic_try_clear8(&chunk->bfields[chunk_idx], idx, NULL)) { // clear it atomically + *pidx = (chunk_idx*MI_BFIELD_BITS) + idx; + mi_assert_internal(*pidx + 8 <= MI_BCHUNK_BITS); + return true; + } + // try again + // note: there must be an atomic release/acquire in between or otherwise the registers may not be reloaded } + } + #else + for (int i = 0; i < MI_BCHUNK_FIELDS; i++) { + if (mi_bchunk_try_find_and_clear8_at(chunk, i, pidx)) return true; + } + return false; + #endif +} + +static inline bool mi_bchunk_try_find_and_clear_8(mi_bchunk_t* chunk, size_t n, size_t* pidx) { + 
mi_assert_internal(n==8); MI_UNUSED(n); + return mi_bchunk_try_find_and_clear8(chunk, pidx); +} + + +// find least aligned bfield in a chunk with all bits set, and try unset it atomically +// set `*pidx` to its bit index (0 <= *pidx < MI_BCHUNK_BITS) on success. +// Used to find large size pages in the free blocks. +// todo: try neon version +/* +static mi_decl_noinline bool mi_bchunk_try_find_and_clearX(mi_bchunk_t* chunk, size_t* pidx) { + #if MI_OPT_SIMD && defined(__AVX2__) && (MI_BCHUNK_BITS==512) + while (true) { + // since a cache-line is 64b, load all at once + const __m256i vec1 = _mm256_load_si256((const __m256i*)chunk->bfields); + const __m256i vec2 = _mm256_load_si256((const __m256i*)chunk->bfields+1); + const __m256i cmpv = mi_mm256_ones(); + const __m256i vcmp1 = _mm256_cmpeq_epi64(vec1, cmpv); // (bfield == ~0 ? -1 : 0) + const __m256i vcmp2 = _mm256_cmpeq_epi64(vec2, cmpv); // (bfield == ~0 ? -1 : 0) + const uint32_t mask1 = _mm256_movemask_epi8(vcmp1); // mask of most significant bit of each byte + const uint32_t mask2 = _mm256_movemask_epi8(vcmp2); // mask of most significant bit of each byte + const uint64_t mask = ((uint64_t)mask2 << 32) | mask1; + // mask is inverted, so each 8-bits are set iff the corresponding elem64 has all bits set (and thus can be cleared) + if (mask==0) return false; + mi_assert_internal((_tzcnt_u64(mask)%8) == 0); // tzcnt == 0, 8, 16, 24 , .. 
+ const size_t chunk_idx = _tzcnt_u64(mask) / 8; + mi_assert_internal(chunk_idx < MI_BCHUNK_FIELDS); + if mi_likely(mi_bfield_atomic_try_clearX(&chunk->bfields[chunk_idx],NULL)) { + *pidx = chunk_idx*MI_BFIELD_BITS; + mi_assert_internal(*pidx + MI_BFIELD_BITS <= MI_BCHUNK_BITS); + return true; + } + // try again + // note: there must be an atomic release/acquire in between or otherwise the registers may not be reloaded + } #else - size_t bitidx = 0; // otherwise start at 0 + for (int i = 0; i < MI_BCHUNK_FIELDS; i++) { + const mi_bfield_t b = mi_atomic_load_relaxed(&chunk->bfields[i]); + if (~b==0 && mi_bfield_atomic_try_clearX(&chunk->bfields[i], NULL)) { + *pidx = i*MI_BFIELD_BITS; + mi_assert_internal(*pidx + MI_BFIELD_BITS <= MI_BCHUNK_BITS); + return true; + } + } + return false; #endif - size_t m = (mask << bitidx); // invariant: m == mask shifted by bitidx - - // scan linearly for a free range of zero bits - while (bitidx <= bitidx_max) { - const size_t mapm = map & m; - if (mapm == 0) { // are the mask bits free at bitidx? - mi_assert_internal((m >> bitidx) == mask); // no overflow? - const size_t newmap = map | m; - mi_assert_internal((newmap^map) >> bitidx == mask); - if (!mi_atomic_cas_weak_acq_rel(field, &map, newmap)) { // TODO: use strong cas here? - // no success, another thread claimed concurrently.. keep going (with updated `map`) - continue; +} + +static inline bool mi_bchunk_try_find_and_clear_X(mi_bchunk_t* chunk, size_t n, size_t* pidx) { + mi_assert_internal(n==MI_BFIELD_BITS); MI_UNUSED(n); + return mi_bchunk_try_find_and_clearX(chunk, pidx); +} +*/ + +// find a sequence of `n` bits in a chunk with `0 < n <= MI_BFIELD_BITS` with all bits set, +// and try to clear them atomically. +// set `*pidx` to its bit index (0 <= *pidx <= MI_BCHUNK_BITS - n) on success. +// will cross bfield boundaries. 
+mi_decl_noinline static bool mi_bchunk_try_find_and_clearNX(mi_bchunk_t* chunk, size_t n, size_t* pidx) {
+  if (n == 0 || n > MI_BFIELD_BITS) return false;
+  const mi_bfield_t mask = mi_bfield_mask(n, 0);
+  // for all fields in the chunk
+  for (int i = 0; i < MI_BCHUNK_FIELDS; i++) {
+    mi_bfield_t b0 = mi_atomic_load_relaxed(&chunk->bfields[i]);
+    mi_bfield_t b = b0;
+    size_t idx;
+
+    // is there a range inside the field?
+    while (mi_bfield_find_least_bit(b, &idx)) { // find least 1-bit
+      if (idx + n > MI_BFIELD_BITS) break; // too short: maybe cross over, or continue with the next field
+
+      const size_t bmask = mask<<idx;
+      mi_assert_internal(bmask>>idx == mask);
+      if ((b&bmask) == bmask) { // found a match with all bits set, try clearing atomically
+        if mi_likely(mi_bfield_atomic_try_clear_mask_of(&chunk->bfields[i], bmask, b0, NULL)) {
+          *pidx = (i*MI_BFIELD_BITS) + idx;
+          mi_assert_internal(*pidx < MI_BCHUNK_BITS);
+          mi_assert_internal(*pidx + n <= MI_BCHUNK_BITS);
+          return true;
+        }
+        else {
+          // if we failed to atomically commit, reload b and try again from the start
+          b = b0 = mi_atomic_load_acquire(&chunk->bfields[i]);
+        }
+      }
    }
    else {
-      // success, we claimed the bits!
-      *bitmap_idx = mi_bitmap_index_create(idx, bitidx);
-      return true;
+      else {
+        // advance by clearing the least run of ones, for example, with n>=4, idx=2:
+        // b                 = 1111 1101 1010 1100
+        //   .. + (1<<idx)   = 1111 1101 1011 0000
+        //   .. & b          = 1111 1101 1010 0000
+        b = b & (b + (mi_bfield_one() << idx));
+      }
+    }
+
+    // check if we can cross over into the next bfield
+    if (i < MI_BCHUNK_FIELDS-1) {
+      const size_t post = mi_bfield_clz(~b);
+      if (post > 0) {
+        const size_t pre = mi_bfield_ctz(~mi_atomic_load_relaxed(&chunk->bfields[i+1]));
+        if (post + pre >= n) {
+          // it fits -- try to claim it atomically
+          const size_t cidx = (i*MI_BFIELD_BITS) + (MI_BFIELD_BITS - post);
+          if (mi_bchunk_try_clearNX(chunk, cidx, n, NULL)) {
+            // we cleared all atomically
+            *pidx = cidx;
+            mi_assert_internal(*pidx < MI_BCHUNK_BITS);
+            mi_assert_internal(*pidx + n <= MI_BCHUNK_BITS);
+            return true;
+          }
+        }
+      }
    }
  }
+  }
+  return false;
+}
+
+// find a sequence of `n` bits in a chunk with `n < MI_BCHUNK_BITS` with all bits set,
+// and try to clear them atomically.
+// set `*pidx` to its bit index (0 <= *pidx <= MI_BCHUNK_BITS - n) on success. +// This can cross bfield boundaries. +static mi_decl_noinline bool mi_bchunk_try_find_and_clearN_(mi_bchunk_t* chunk, size_t n, size_t* pidx) { + if (n == 0 || n > MI_BCHUNK_BITS) return false; // cannot be more than a chunk + + // we first scan ahead to see if there is a range of `n` set bits, and only then try to clear atomically + mi_assert_internal(n>0); + const size_t skip_count = (n-1)/MI_BFIELD_BITS; + size_t cidx; + for (size_t i = 0; i < MI_BCHUNK_FIELDS - skip_count; i++) + { + size_t m = n; // bits to go + + // first field + mi_bfield_t b = mi_atomic_load_relaxed(&chunk->bfields[i]); + size_t ones = mi_bfield_clz(~b); + cidx = (i*MI_BFIELD_BITS) + (MI_BFIELD_BITS - ones); // start index + if (ones >= m) { + // we found enough bits! + m = 0; + } else { - // on to the next bit range -#ifdef MI_HAVE_FAST_BITSCAN - const size_t shift = (count == 1 ? 1 : mi_bsr(mapm) - bitidx + 1); - mi_assert_internal(shift > 0 && shift <= count); + m -= ones; + + // keep scanning further fields? + size_t j = 1; // field count from i + while (i+j < MI_BCHUNK_FIELDS) { + mi_assert_internal(m > 0); + b = mi_atomic_load_relaxed(&chunk->bfields[i+j]); + ones = mi_bfield_ctz(~b); + if (ones >= m) { + // we found enough bits + m = 0; + break; + } + else if (ones == MI_BFIELD_BITS) { + // not enough yet, proceed to the next field + j++; + m -= MI_BFIELD_BITS; + } + else { + // the range was not enough, start from scratch + i = i + j - 1; // no need to re-scan previous fields, except the last one (with clz this time) + mi_assert_internal(m>0); + break; + } + } + } + + // did we find a range? 
+ if (m==0) { + if (mi_bchunk_try_clearN(chunk, cidx, n, NULL)) { + // we cleared all atomically + *pidx = cidx; + mi_assert_internal(*pidx < MI_BCHUNK_BITS); + mi_assert_internal(*pidx + n <= MI_BCHUNK_BITS); + return true; + } + // note: if we fail for a small `n` on the first field, we don't rescan that field (as `i` is incremented) + } + // otherwise continue searching + } + return false; +} + + + +// ------- mi_bchunk_clear_once_set --------------------------------------- + +static inline void mi_bchunk_clear_once_set(mi_bchunk_t* chunk, size_t cidx) { + mi_assert_internal(cidx < MI_BCHUNK_BITS); + const size_t i = cidx / MI_BFIELD_BITS; + const size_t idx = cidx % MI_BFIELD_BITS; + mi_bfield_atomic_clear_once_set(&chunk->bfields[i], idx); +} + + +// ------- mi_bitmap_all_are_clear --------------------------------------- + + +// are all bits in a bitmap chunk clear? +static inline bool mi_bchunk_all_are_clear_relaxed(mi_bchunk_t* chunk) { + #if MI_OPT_SIMD && defined(__AVX2__) && (MI_BCHUNK_BITS==256) + const __m256i vec = _mm256_load_si256((const __m256i*)chunk->bfields); + return mi_mm256_is_zero(vec); + #elif MI_OPT_SIMD && defined(__AVX2__) && (MI_BCHUNK_BITS==512) + // a 64b cache-line contains the entire chunk anyway so load both at once + const __m256i vec1 = _mm256_load_si256((const __m256i*)chunk->bfields); + const __m256i vec2 = _mm256_load_si256(((const __m256i*)chunk->bfields)+1); + return (mi_mm256_is_zero(_mm256_or_si256(vec1,vec2))); + #elif MI_OPT_SIMD && (MI_BCHUNK_BITS==512) && MI_ARCH_ARM64 + const uint64x2_t v0 = vld1q_u64((uint64_t*)chunk->bfields); + const uint64x2_t v1 = vld1q_u64((uint64_t*)chunk->bfields + 2); + const uint64x2_t v2 = vld1q_u64((uint64_t*)chunk->bfields + 4); + const uint64x2_t v3 = vld1q_u64((uint64_t*)chunk->bfields + 6); + const uint64x2_t v = vorrq_u64(vorrq_u64(v0,v1),vorrq_u64(v2,v3)); + return (vmaxvq_u32(vreinterpretq_u32_u64(v)) == 0); + #else + for (int i = 0; i < MI_BCHUNK_FIELDS; i++) { + if 
(mi_atomic_load_relaxed(&chunk->bfields[i]) != 0) return false; + } + return true; + #endif +} + +// are all bits in a bitmap chunk set? +static inline bool mi_bchunk_all_are_set_relaxed(mi_bchunk_t* chunk) { +#if MI_OPT_SIMD && defined(__AVX2__) && (MI_BCHUNK_BITS==256) + const __m256i vec = _mm256_load_si256((const __m256i*)chunk->bfields); + return mi_mm256_is_ones(vec); +#elif MI_OPT_SIMD && defined(__AVX2__) && (MI_BCHUNK_BITS==512) + // a 64b cache-line contains the entire chunk anyway so load both at once + const __m256i vec1 = _mm256_load_si256((const __m256i*)chunk->bfields); + const __m256i vec2 = _mm256_load_si256(((const __m256i*)chunk->bfields)+1); + return (mi_mm256_is_ones(_mm256_and_si256(vec1, vec2))); +#elif MI_OPT_SIMD && (MI_BCHUNK_BITS==512) && MI_ARCH_ARM64 + const uint64x2_t v0 = vld1q_u64((uint64_t*)chunk->bfields); + const uint64x2_t v1 = vld1q_u64((uint64_t*)chunk->bfields + 2); + const uint64x2_t v2 = vld1q_u64((uint64_t*)chunk->bfields + 4); + const uint64x2_t v3 = vld1q_u64((uint64_t*)chunk->bfields + 6); + const uint64x2_t v = vandq_u64(vandq_u64(v0,v1),vandq_u64(v2,v3)); + return (vminvq_u32(vreinterpretq_u32_u64(v)) == 0xFFFFFFFFUL); #else - const size_t shift = 1; + for (int i = 0; i < MI_BCHUNK_FIELDS; i++) { + if (~mi_atomic_load_relaxed(&chunk->bfields[i]) != 0) return false; + } + return true; #endif - bitidx += shift; - m <<= shift; +} + + +static bool mi_bchunk_bsr(mi_bchunk_t* chunk, size_t* pidx) { + for (size_t i = MI_BCHUNK_FIELDS; i > 0; ) { + i--; + mi_bfield_t b = mi_atomic_load_relaxed(&chunk->bfields[i]); + size_t idx; + if (mi_bsr(b, &idx)) { + *pidx = (i*MI_BFIELD_BITS) + idx; + return true; + } + } + return false; +} + +static size_t mi_bchunk_popcount(mi_bchunk_t* chunk) { + size_t popcount = 0; + for (size_t i = 0; i < MI_BCHUNK_FIELDS; i++) { + const mi_bfield_t b = mi_atomic_load_relaxed(&chunk->bfields[i]); + popcount += mi_bfield_popcount(b); + } + return popcount; +} + + +/* 
-------------------------------------------------------------------------------- + bitmap chunkmap +-------------------------------------------------------------------------------- */ + +static void mi_bitmap_chunkmap_set(mi_bitmap_t* bitmap, size_t chunk_idx) { + mi_assert(chunk_idx < mi_bitmap_chunk_count(bitmap)); + mi_bchunk_set(&bitmap->chunkmap, chunk_idx, NULL); +} + +static bool mi_bitmap_chunkmap_try_clear(mi_bitmap_t* bitmap, size_t chunk_idx) { + mi_assert(chunk_idx < mi_bitmap_chunk_count(bitmap)); + // check if the corresponding chunk is all clear + if (!mi_bchunk_all_are_clear_relaxed(&bitmap->chunks[chunk_idx])) return false; + // clear the chunkmap bit + mi_bchunk_clear(&bitmap->chunkmap, chunk_idx, NULL); + // .. but a concurrent set may have happened in between our all-clear test and the clearing of the + // bit in the mask. We check again to catch this situation. + if (!mi_bchunk_all_are_clear_relaxed(&bitmap->chunks[chunk_idx])) { + mi_bchunk_set(&bitmap->chunkmap, chunk_idx, NULL); + return false; + } + return true; +} + + +/* -------------------------------------------------------------------------------- + bitmap +-------------------------------------------------------------------------------- */ + +size_t mi_bitmap_size(size_t bit_count, size_t* pchunk_count) { + mi_assert_internal((bit_count % MI_BCHUNK_BITS) == 0); + bit_count = _mi_align_up(bit_count, MI_BCHUNK_BITS); + mi_assert_internal(bit_count <= MI_BITMAP_MAX_BIT_COUNT); + mi_assert_internal(bit_count > 0); + const size_t chunk_count = bit_count / MI_BCHUNK_BITS; + mi_assert_internal(chunk_count >= 1); + const size_t size = offsetof(mi_bitmap_t,chunks) + (chunk_count * MI_BCHUNK_SIZE); + mi_assert_internal( (size%MI_BCHUNK_SIZE) == 0 ); + if (pchunk_count != NULL) { *pchunk_count = chunk_count; } + return size; +} + + +// initialize a bitmap to all unset; avoid a mem_zero if `already_zero` is true +// returns the size of the bitmap +size_t mi_bitmap_init(mi_bitmap_t* bitmap, size_t 
bit_count, bool already_zero) { + size_t chunk_count; + const size_t size = mi_bitmap_size(bit_count, &chunk_count); + if (!already_zero) { + _mi_memzero_aligned(bitmap, size); + } + mi_atomic_store_release(&bitmap->chunk_count, chunk_count); + mi_assert_internal(mi_atomic_load_relaxed(&bitmap->chunk_count) <= MI_BITMAP_MAX_CHUNK_COUNT); + return size; +} + + +// Set a sequence of `n` bits in the bitmap (and can cross chunks). Not atomic so only use if local to a thread. +static void mi_bchunks_unsafe_setN(mi_bchunk_t* chunks, mi_bchunkmap_t* cmap, size_t idx, size_t n) { + mi_assert_internal(n>0); + + // start chunk and index + size_t chunk_idx = idx / MI_BCHUNK_BITS; + const size_t cidx = idx % MI_BCHUNK_BITS; + const size_t ccount = _mi_divide_up(n, MI_BCHUNK_BITS); + + // first update the chunkmap + mi_bchunk_setN(cmap, chunk_idx, ccount, NULL); + + // first chunk + size_t m = MI_BCHUNK_BITS - cidx; + if (m > n) { m = n; } + mi_bchunk_setN(&chunks[chunk_idx], cidx, m, NULL); + + // n can be large so use memset for efficiency for all in-between chunks + chunk_idx++; + n -= m; + const size_t mid_chunks = n / MI_BCHUNK_BITS; + if (mid_chunks > 0) { + _mi_memset(&chunks[chunk_idx], ~0, mid_chunks * MI_BCHUNK_SIZE); + chunk_idx += mid_chunks; + n -= (mid_chunks * MI_BCHUNK_BITS); + } + + // last chunk + if (n > 0) { + mi_assert_internal(n < MI_BCHUNK_BITS); + mi_assert_internal(chunk_idx < MI_BCHUNK_FIELDS); + mi_bchunk_setN(&chunks[chunk_idx], 0, n, NULL); + } +} + +// Set a sequence of `n` bits in the bitmap (and can cross chunks). Not atomic so only use if local to a thread. 
+void mi_bitmap_unsafe_setN(mi_bitmap_t* bitmap, size_t idx, size_t n) { + mi_assert_internal(n>0); + mi_assert_internal(idx + n <= mi_bitmap_max_bits(bitmap)); + mi_bchunks_unsafe_setN(&bitmap->chunks[0], &bitmap->chunkmap, idx, n); +} + + + + +// ------- mi_bitmap_xset --------------------------------------- + +// Set a sequence of `n` bits in the bitmap; returns `true` if atomically transitioned from 0's to 1's (or 1's to 0's). +// `n` cannot cross chunk boundaries (and `n <= MI_BCHUNK_BITS`)! +bool mi_bitmap_setN(mi_bitmap_t* bitmap, size_t idx, size_t n, size_t* already_set) { + mi_assert_internal(n>0); + mi_assert_internal(n<=MI_BCHUNK_BITS); + + const size_t chunk_idx = idx / MI_BCHUNK_BITS; + const size_t cidx = idx % MI_BCHUNK_BITS; + mi_assert_internal(cidx + n <= MI_BCHUNK_BITS); // don't cross chunks (for now) + mi_assert_internal(chunk_idx < mi_bitmap_chunk_count(bitmap)); + if (cidx + n > MI_BCHUNK_BITS) { n = MI_BCHUNK_BITS - cidx; } // paranoia + + const bool were_allclear = mi_bchunk_setN(&bitmap->chunks[chunk_idx], cidx, n, already_set); + mi_bitmap_chunkmap_set(bitmap, chunk_idx); // set afterwards + return were_allclear; +} + +// Clear a sequence of `n` bits in the bitmap; returns `true` if atomically transitioned from 1's to 0's. +// `n` cannot cross chunk boundaries (and `n <= MI_BCHUNK_BITS`)! 
+bool mi_bitmap_clearN(mi_bitmap_t* bitmap, size_t idx, size_t n) { + mi_assert_internal(n>0); + mi_assert_internal(n<=MI_BCHUNK_BITS); + + const size_t chunk_idx = idx / MI_BCHUNK_BITS; + const size_t cidx = idx % MI_BCHUNK_BITS; + mi_assert_internal(cidx + n <= MI_BCHUNK_BITS); // don't cross chunks (for now) + mi_assert_internal(chunk_idx < mi_bitmap_chunk_count(bitmap)); + if (cidx + n > MI_BCHUNK_BITS) { n = MI_BCHUNK_BITS - cidx; } // paranoia + + bool maybe_all_clear; + const bool were_allset = mi_bchunk_clearN(&bitmap->chunks[chunk_idx], cidx, n, &maybe_all_clear); + if (maybe_all_clear) { mi_bitmap_chunkmap_try_clear(bitmap, chunk_idx); } + return were_allset; +} + +// Count bits set in a range of `n` bits. +// `n` cannot cross chunk boundaries (and `n <= MI_BCHUNK_BITS`)! +size_t mi_bitmap_popcountN( mi_bitmap_t* bitmap, size_t idx, size_t n) { + mi_assert_internal(n>0); + mi_assert_internal(n<=MI_BCHUNK_BITS); + + const size_t chunk_idx = idx / MI_BCHUNK_BITS; + const size_t cidx = idx % MI_BCHUNK_BITS; + mi_assert_internal(cidx + n <= MI_BCHUNK_BITS); // don't cross chunks (for now) + mi_assert_internal(chunk_idx < mi_bitmap_chunk_count(bitmap)); + if (cidx + n > MI_BCHUNK_BITS) { n = MI_BCHUNK_BITS - cidx; } // paranoia + return mi_bchunk_popcountN(&bitmap->chunks[chunk_idx], cidx, n); +} + + +// Set/clear a bit in the bitmap; returns `true` if atomically transitioned from 0 to 1 (or 1 to 0) +bool mi_bitmap_set(mi_bitmap_t* bitmap, size_t idx) { + return mi_bitmap_setN(bitmap, idx, 1, NULL); +} + +bool mi_bitmap_clear(mi_bitmap_t* bitmap, size_t idx) { + return mi_bitmap_clearN(bitmap, idx, 1); +} + + + +// ------- mi_bitmap_is_xset --------------------------------------- + +// Is a sequence of n bits already all set/cleared? 
+bool mi_bitmap_is_xsetN(mi_xset_t set, mi_bitmap_t* bitmap, size_t idx, size_t n) { + mi_assert_internal(n>0); + mi_assert_internal(n<=MI_BCHUNK_BITS); + mi_assert_internal(idx + n <= mi_bitmap_max_bits(bitmap)); + + const size_t chunk_idx = idx / MI_BCHUNK_BITS; + const size_t cidx = idx % MI_BCHUNK_BITS; + mi_assert_internal(cidx + n <= MI_BCHUNK_BITS); // don't cross chunks (for now) + mi_assert_internal(chunk_idx < mi_bitmap_chunk_count(bitmap)); + if (cidx + n > MI_BCHUNK_BITS) { n = MI_BCHUNK_BITS - cidx; } // paranoia + + return mi_bchunk_is_xsetN(set, &bitmap->chunks[chunk_idx], cidx, n); +} + + + + +/* -------------------------------------------------------------------------------- + Iterate through a bfield +-------------------------------------------------------------------------------- */ + +// Cycle iteration through a bitfield. This is used to space out threads +// so there is less chance of contention. When searching for a free page we +// like to first search only the accessed part (so we reuse better). This +// high point is called the `cycle`. +// +// We then iterate through the bitfield as: +// first: [start, cycle> +// then : [0, start> +// then : [cycle, MI_BFIELD_BITS> +// +// The start is determined usually as `tseq % cycle` to have each thread +// start at a different spot. +// - We use `popcount` to improve branch prediction (maybe not needed? can we simplify?) +// - The `cycle_mask` is the part `[start, cycle>`. 
+#define mi_bfield_iterate(bfield,start,cycle,name_idx,SUF) { \ + mi_assert_internal(start <= cycle); \ + mi_assert_internal(start < MI_BFIELD_BITS); \ + mi_assert_internal(cycle <= MI_BFIELD_BITS); \ + mi_bfield_t _cycle_mask##SUF = mi_bfield_mask(cycle - start, start); \ + size_t _bcount##SUF = mi_bfield_popcount(bfield); \ + mi_bfield_t _b##SUF = bfield & _cycle_mask##SUF; /* process [start, cycle> first*/\ + while(_bcount##SUF > 0) { \ + _bcount##SUF--;\ + if (_b##SUF==0) { _b##SUF = bfield & ~_cycle_mask##SUF; } /* process [0,start> + [cycle, MI_BFIELD_BITS> next */ \ + /* size_t name_idx; */ \ + bool _found##SUF = mi_bfield_find_least_bit(_b##SUF,&name_idx); \ + mi_assert_internal(_found##SUF); MI_UNUSED(_found##SUF); \ + { \ + +#define mi_bfield_iterate_end(SUF) \ + } \ + _b##SUF = mi_bfield_clear_least_bit(_b##SUF); \ + } \ +} + +#define mi_bfield_cycle_iterate(bfield,tseq,cycle,name_idx,SUF) { \ + const size_t _start##SUF = (uint32_t)(tseq) % (uint32_t)(cycle); /* or: 0 to always search from the start? */\ + mi_bfield_iterate(bfield,_start##SUF,cycle,name_idx,SUF) + +#define mi_bfield_cycle_iterate_end(SUF) \ + mi_bfield_iterate_end(SUF); } + + +/* -------------------------------------------------------------------------------- + mi_bitmap_find + (used to find free pages) +-------------------------------------------------------------------------------- */ + +typedef bool (mi_bitmap_visit_fun_t)(mi_bitmap_t* bitmap, size_t chunk_idx, size_t n, size_t* idx, void* arg1, void* arg2); + +// Go through the bitmap and for every sequence of `n` set bits, call the visitor function. +// If it returns `true` stop the search. 
+static inline bool mi_bitmap_find(mi_bitmap_t* bitmap, size_t tseq, size_t n, size_t* pidx, mi_bitmap_visit_fun_t* on_find, void* arg1, void* arg2) +{ + const size_t chunkmap_max = _mi_divide_up(mi_bitmap_chunk_count(bitmap), MI_BFIELD_BITS); + for (size_t i = 0; i < chunkmap_max; i++) { + // and for each chunkmap entry we iterate over its bits to find the chunks + const mi_bfield_t cmap_entry = mi_atomic_load_relaxed(&bitmap->chunkmap.bfields[i]); + size_t hi; + if (mi_bfield_find_highest_bit(cmap_entry, &hi)) { + size_t eidx = 0; + mi_bfield_cycle_iterate(cmap_entry, tseq%8, hi+1, eidx, Y) // reduce the tseq to 8 bins to reduce using extra memory (see `mstress`) + { + mi_assert_internal(eidx <= MI_BFIELD_BITS); + const size_t chunk_idx = i*MI_BFIELD_BITS + eidx; + mi_assert_internal(chunk_idx < mi_bitmap_chunk_count(bitmap)); + if ((*on_find)(bitmap, chunk_idx, n, pidx, arg1, arg2)) { + return true; + } + } + mi_bfield_cycle_iterate_end(Y); } } - // no bits found return false; } -// Find `count` bits of 0 and set them to 1 atomically; returns `true` on success. -// Starts at idx, and wraps around to search in all `bitmap_fields` fields. -// `count` can be at most MI_BITMAP_FIELD_BITS and will never cross fields. 
-bool _mi_bitmap_try_find_from_claim(mi_bitmap_t bitmap, const size_t bitmap_fields, const size_t start_field_idx, const size_t count, mi_bitmap_index_t* bitmap_idx) { - size_t idx = start_field_idx; - for (size_t visited = 0; visited < bitmap_fields; visited++, idx++) { - if (idx >= bitmap_fields) idx = 0; // wrap - if (_mi_bitmap_try_find_claim_field(bitmap, idx, count, bitmap_idx)) { + +/* -------------------------------------------------------------------------------- + Bitmap: try_find_and_claim -- used to allocate abandoned pages + note: the compiler will fully inline the indirect function call +-------------------------------------------------------------------------------- */ + +typedef struct mi_claim_fun_data_s { + mi_arena_t* arena; + mi_heaptag_t heap_tag; +} mi_claim_fun_data_t; + +static bool mi_bitmap_try_find_and_claim_visit(mi_bitmap_t* bitmap, size_t chunk_idx, size_t n, size_t* pidx, void* arg1, void* arg2) +{ + mi_assert_internal(n==1); MI_UNUSED(n); + mi_claim_fun_t* claim_fun = (mi_claim_fun_t*)arg1; + mi_claim_fun_data_t* claim_data = (mi_claim_fun_data_t*)arg2; + size_t cidx; + if mi_likely(mi_bchunk_try_find_and_clear(&bitmap->chunks[chunk_idx], &cidx)) { + const size_t slice_index = (chunk_idx * MI_BCHUNK_BITS) + cidx; + mi_assert_internal(slice_index < mi_bitmap_max_bits(bitmap)); + bool keep_set = true; + if ((*claim_fun)(slice_index, claim_data->arena, claim_data->heap_tag, &keep_set)) { + // success! + mi_assert_internal(!keep_set); + *pidx = slice_index; return true; } + else { + // failed to claim it, set abandoned mapping again (unless the page was freed) + if (keep_set) { + const bool wasclear = mi_bchunk_set(&bitmap->chunks[chunk_idx], cidx, NULL); + mi_assert_internal(wasclear); MI_UNUSED(wasclear); + } + } + } + else { + // we may find that all are cleared only on a second iteration but that is ok as + // the chunkmap is a conservative approximation. 
+ mi_bitmap_chunkmap_try_clear(bitmap, chunk_idx); } return false; } -// Like _mi_bitmap_try_find_from_claim but with an extra predicate that must be fullfilled -bool _mi_bitmap_try_find_from_claim_pred(mi_bitmap_t bitmap, const size_t bitmap_fields, - const size_t start_field_idx, const size_t count, - mi_bitmap_pred_fun_t pred_fun, void* pred_arg, - mi_bitmap_index_t* bitmap_idx) { - size_t idx = start_field_idx; - for (size_t visited = 0; visited < bitmap_fields; visited++, idx++) { - if (idx >= bitmap_fields) idx = 0; // wrap - if (_mi_bitmap_try_find_claim_field(bitmap, idx, count, bitmap_idx)) { - if (pred_fun == NULL || pred_fun(*bitmap_idx, pred_arg)) { +// Find a set bit in the bitmap and try to atomically clear it and claim it. +// (Used to find pages in the pages_abandoned bitmaps.) +mi_decl_nodiscard bool mi_bitmap_try_find_and_claim(mi_bitmap_t* bitmap, size_t tseq, size_t* pidx, + mi_claim_fun_t* claim, mi_arena_t* arena, mi_heaptag_t heap_tag) +{ + mi_claim_fun_data_t claim_data = { arena, heap_tag }; + return mi_bitmap_find(bitmap, tseq, 1, pidx, &mi_bitmap_try_find_and_claim_visit, (void*)claim, &claim_data); +} + + +bool mi_bitmap_bsr(mi_bitmap_t* bitmap, size_t* idx) { + const size_t chunkmap_max = _mi_divide_up(mi_bitmap_chunk_count(bitmap), MI_BFIELD_BITS); + for (size_t i = chunkmap_max; i > 0; ) { + i--; + mi_bfield_t cmap = mi_atomic_load_relaxed(&bitmap->chunkmap.bfields[i]); + size_t cmap_idx; + if (mi_bsr(cmap,&cmap_idx)) { + // highest chunk + const size_t chunk_idx = i*MI_BFIELD_BITS + cmap_idx; + size_t cidx; + if (mi_bchunk_bsr(&bitmap->chunks[chunk_idx], &cidx)) { + *idx = (chunk_idx * MI_BCHUNK_BITS) + cidx; return true; } - // predicate returned false, unclaim and look further - _mi_bitmap_unclaim(bitmap, bitmap_fields, count, *bitmap_idx); } } return false; } -/* -// Find `count` bits of 0 and set them to 1 atomically; returns `true` on success. -// For now, `count` can be at most MI_BITMAP_FIELD_BITS and will never span fields. 
-bool _mi_bitmap_try_find_claim(mi_bitmap_t bitmap, const size_t bitmap_fields, const size_t count, mi_bitmap_index_t* bitmap_idx) { - return _mi_bitmap_try_find_from_claim(bitmap, bitmap_fields, 0, count, bitmap_idx); +// Return count of all set bits in a bitmap. +size_t mi_bitmap_popcount(mi_bitmap_t* bitmap) { + // for all chunkmap entries + size_t popcount = 0; + const size_t chunkmap_max = _mi_divide_up(mi_bitmap_chunk_count(bitmap), MI_BFIELD_BITS); + for (size_t i = 0; i < chunkmap_max; i++) { + mi_bfield_t cmap_entry = mi_atomic_load_relaxed(&bitmap->chunkmap.bfields[i]); + size_t cmap_idx; + // for each chunk (corresponding to a set bit in a chunkmap entry) + while (mi_bfield_foreach_bit(&cmap_entry, &cmap_idx)) { + const size_t chunk_idx = i*MI_BFIELD_BITS + cmap_idx; + // count bits in a chunk + popcount += mi_bchunk_popcount(&bitmap->chunks[chunk_idx]); + } + } + return popcount; } -*/ -// Set `count` bits at `bitmap_idx` to 0 atomically -// Returns `true` if all `count` bits were 1 previously. -bool _mi_bitmap_unclaim(mi_bitmap_t bitmap, size_t bitmap_fields, size_t count, mi_bitmap_index_t bitmap_idx) { - const size_t idx = mi_bitmap_index_field(bitmap_idx); - const size_t bitidx = mi_bitmap_index_bit_in_field(bitmap_idx); - const size_t mask = mi_bitmap_mask_(count, bitidx); - mi_assert_internal(bitmap_fields > idx); MI_UNUSED(bitmap_fields); - // mi_assert_internal((bitmap[idx] & mask) == mask); - size_t prev = mi_atomic_and_acq_rel(&bitmap[idx], ~mask); - return ((prev & mask) == mask); + + +// Clear a bit once it is set. 
+void mi_bitmap_clear_once_set(mi_bitmap_t* bitmap, size_t idx) { + mi_assert_internal(idx < mi_bitmap_max_bits(bitmap)); + const size_t chunk_idx = idx / MI_BCHUNK_BITS; + const size_t cidx = idx % MI_BCHUNK_BITS; + mi_assert_internal(chunk_idx < mi_bitmap_chunk_count(bitmap)); + mi_bchunk_clear_once_set(&bitmap->chunks[chunk_idx], cidx); } -// Set `count` bits at `bitmap_idx` to 1 atomically -// Returns `true` if all `count` bits were 0 previously. `any_zero` is `true` if there was at least one zero bit. -bool _mi_bitmap_claim(mi_bitmap_t bitmap, size_t bitmap_fields, size_t count, mi_bitmap_index_t bitmap_idx, bool* any_zero) { - const size_t idx = mi_bitmap_index_field(bitmap_idx); - const size_t bitidx = mi_bitmap_index_bit_in_field(bitmap_idx); - const size_t mask = mi_bitmap_mask_(count, bitidx); - mi_assert_internal(bitmap_fields > idx); MI_UNUSED(bitmap_fields); - //mi_assert_internal(any_zero != NULL || (bitmap[idx] & mask) == 0); - size_t prev = mi_atomic_or_acq_rel(&bitmap[idx], mask); - if (any_zero != NULL) *any_zero = ((prev & mask) != mask); - return ((prev & mask) == 0); +// Visit all set bits in a bitmap. +// todo: optimize further? maybe use avx512 to directly get all indices using a mask_compressstore? 
+bool _mi_bitmap_forall_set(mi_bitmap_t* bitmap, mi_forall_set_fun_t* visit, mi_arena_t* arena, void* arg) { + // for all chunkmap entries + const size_t chunkmap_max = _mi_divide_up(mi_bitmap_chunk_count(bitmap), MI_BFIELD_BITS); + for(size_t i = 0; i < chunkmap_max; i++) { + mi_bfield_t cmap_entry = mi_atomic_load_relaxed(&bitmap->chunkmap.bfields[i]); + size_t cmap_idx; + // for each chunk (corresponding to a set bit in a chunkmap entry) + while (mi_bfield_foreach_bit(&cmap_entry, &cmap_idx)) { + const size_t chunk_idx = i*MI_BFIELD_BITS + cmap_idx; + // for each chunk field + mi_bchunk_t* const chunk = &bitmap->chunks[chunk_idx]; + for (size_t j = 0; j < MI_BCHUNK_FIELDS; j++) { + const size_t base_idx = (chunk_idx*MI_BCHUNK_BITS) + (j*MI_BFIELD_BITS); + mi_bfield_t b = mi_atomic_load_relaxed(&chunk->bfields[j]); + size_t bidx; + while (mi_bfield_foreach_bit(&b, &bidx)) { + const size_t idx = base_idx + bidx; + if (!visit(idx, 1, arena, arg)) return false; + } + } + } + } + return true; } -// Returns `true` if all `count` bits were 1. `any_ones` is `true` if there was at least one bit set to one. -static bool mi_bitmap_is_claimedx(mi_bitmap_t bitmap, size_t bitmap_fields, size_t count, mi_bitmap_index_t bitmap_idx, bool* any_ones) { - const size_t idx = mi_bitmap_index_field(bitmap_idx); - const size_t bitidx = mi_bitmap_index_bit_in_field(bitmap_idx); - const size_t mask = mi_bitmap_mask_(count, bitidx); - mi_assert_internal(bitmap_fields > idx); MI_UNUSED(bitmap_fields); - size_t field = mi_atomic_load_relaxed(&bitmap[idx]); - if (any_ones != NULL) *any_ones = ((field & mask) != 0); - return ((field & mask) == mask); +// Visit all set bits in a bitmap but try to return ranges (within bfields) if possible. +// Also clear those ranges atomically. +// Used by purging to purge larger ranges when possible +// todo: optimize further? maybe use avx512 to directly get all indices using a mask_compressstore? 
+bool _mi_bitmap_forall_setc_ranges(mi_bitmap_t* bitmap, mi_forall_set_fun_t* visit, mi_arena_t* arena, void* arg) { + // for all chunkmap entries + const size_t chunkmap_max = _mi_divide_up(mi_bitmap_chunk_count(bitmap), MI_BFIELD_BITS); + for (size_t i = 0; i < chunkmap_max; i++) { + mi_bfield_t cmap_entry = mi_atomic_load_relaxed(&bitmap->chunkmap.bfields[i]); + size_t cmap_idx; + // for each chunk (corresponding to a set bit in a chunkmap entry) + while (mi_bfield_foreach_bit(&cmap_entry, &cmap_idx)) { + const size_t chunk_idx = i*MI_BFIELD_BITS + cmap_idx; + // for each chunk field + mi_bchunk_t* const chunk = &bitmap->chunks[chunk_idx]; + for (size_t j = 0; j < MI_BCHUNK_FIELDS; j++) { + const size_t base_idx = (chunk_idx*MI_BCHUNK_BITS) + (j*MI_BFIELD_BITS); + mi_bfield_t b = mi_atomic_exchange_acq_rel(&chunk->bfields[j], 0); // can be relaxed? + #if MI_DEBUG > 1 + const size_t bpopcount = mi_popcount(b); + size_t rngcount = 0; + #endif + size_t bidx; + while (mi_bfield_find_least_bit(b, &bidx)) { + const size_t rng = mi_ctz(~(b>>bidx)); // all the set bits from bidx + #if MI_DEBUG > 1 + rngcount += rng; + #endif + mi_assert_internal(rng>=1 && rng<=MI_BFIELD_BITS); + const size_t idx = base_idx + bidx; + mi_assert_internal((idx % MI_BFIELD_BITS) + rng <= MI_BFIELD_BITS); + mi_assert_internal((idx / MI_BCHUNK_BITS) < mi_bitmap_chunk_count(bitmap)); + if (!visit(idx, rng, arena, arg)) return false; + // clear rng bits in b + b = b & ~mi_bfield_mask(rng, bidx); + } + mi_assert_internal(rngcount == bpopcount); + } + } + } + return true; } -bool _mi_bitmap_is_claimed(mi_bitmap_t bitmap, size_t bitmap_fields, size_t count, mi_bitmap_index_t bitmap_idx) { - return mi_bitmap_is_claimedx(bitmap, bitmap_fields, count, bitmap_idx, NULL); + + +/* -------------------------------------------------------------------------------- + binned bitmap's +-------------------------------------------------------------------------------- */ + + +size_t mi_bbitmap_size(size_t 
bit_count, size_t* pchunk_count) { + // mi_assert_internal((bit_count % MI_BCHUNK_BITS) == 0); + bit_count = _mi_align_up(bit_count, MI_BCHUNK_BITS); + mi_assert_internal(bit_count <= MI_BITMAP_MAX_BIT_COUNT); + mi_assert_internal(bit_count > 0); + const size_t chunk_count = bit_count / MI_BCHUNK_BITS; + mi_assert_internal(chunk_count >= 1); + const size_t size = offsetof(mi_bbitmap_t,chunks) + (chunk_count * MI_BCHUNK_SIZE); + mi_assert_internal( (size%MI_BCHUNK_SIZE) == 0 ); + if (pchunk_count != NULL) { *pchunk_count = chunk_count; } + return size; } -bool _mi_bitmap_is_any_claimed(mi_bitmap_t bitmap, size_t bitmap_fields, size_t count, mi_bitmap_index_t bitmap_idx) { - bool any_ones; - mi_bitmap_is_claimedx(bitmap, bitmap_fields, count, bitmap_idx, &any_ones); - return any_ones; +// initialize a bitmap to all unset; avoid a mem_zero if `already_zero` is true +// returns the size of the bitmap +size_t mi_bbitmap_init(mi_bbitmap_t* bbitmap, size_t bit_count, bool already_zero) { + size_t chunk_count; + const size_t size = mi_bbitmap_size(bit_count, &chunk_count); + if (!already_zero) { + _mi_memzero_aligned(bbitmap, size); + } + mi_atomic_store_release(&bbitmap->chunk_count, chunk_count); + mi_assert_internal(mi_atomic_load_relaxed(&bbitmap->chunk_count) <= MI_BITMAP_MAX_CHUNK_COUNT); + return size; } +void mi_bbitmap_unsafe_setN(mi_bbitmap_t* bbitmap, size_t idx, size_t n) { + mi_assert_internal(n>0); + mi_assert_internal(idx + n <= mi_bbitmap_max_bits(bbitmap)); + mi_bchunks_unsafe_setN(&bbitmap->chunks[0], &bbitmap->chunkmap, idx, n); +} -//-------------------------------------------------------------------------- -// the `_across` functions work on bitmaps where sequences can cross over -// between the fields. This is used in arena allocation -//-------------------------------------------------------------------------- -// Try to atomically claim a sequence of `count` bits starting from the field -// at `idx` in `bitmap` and crossing into subsequent fields. 
Returns `true` on success. -static bool mi_bitmap_try_find_claim_field_across(mi_bitmap_t bitmap, size_t bitmap_fields, size_t idx, const size_t count, const size_t retries, mi_bitmap_index_t* bitmap_idx) -{ - mi_assert_internal(bitmap_idx != NULL); - - // check initial trailing zeros - mi_bitmap_field_t* field = &bitmap[idx]; - size_t map = mi_atomic_load_relaxed(field); - const size_t initial = mi_clz(map); // count of initial zeros starting at idx - mi_assert_internal(initial <= MI_BITMAP_FIELD_BITS); - if (initial == 0) return false; - if (initial >= count) return _mi_bitmap_try_find_claim_field(bitmap, idx, count, bitmap_idx); // no need to cross fields - if (_mi_divide_up(count - initial, MI_BITMAP_FIELD_BITS) >= (bitmap_fields - idx)) return false; // not enough entries - - // scan ahead - size_t found = initial; - size_t mask = 0; // mask bits for the final field - while(found < count) { - field++; - map = mi_atomic_load_relaxed(field); - const size_t mask_bits = (found + MI_BITMAP_FIELD_BITS <= count ? 
MI_BITMAP_FIELD_BITS : (count - found)); - mask = mi_bitmap_mask_(mask_bits, 0); - if ((map & mask) != 0) return false; - found += mask_bits; - } - mi_assert_internal(field < &bitmap[bitmap_fields]); - - // found range of zeros up to the final field; mask contains mask in the final field - // now claim it atomically - mi_bitmap_field_t* const final_field = field; - const size_t final_mask = mask; - mi_bitmap_field_t* const initial_field = &bitmap[idx]; - const size_t initial_mask = mi_bitmap_mask_(initial, MI_BITMAP_FIELD_BITS - initial); - - // initial field - size_t newmap; - field = initial_field; - map = mi_atomic_load_relaxed(field); - do { - newmap = map | initial_mask; - if ((map & initial_mask) != 0) { goto rollback; }; - } while (!mi_atomic_cas_strong_acq_rel(field, &map, newmap)); - - // intermediate fields - while (++field < final_field) { - newmap = MI_BITMAP_FIELD_FULL; - map = 0; - if (!mi_atomic_cas_strong_acq_rel(field, &map, newmap)) { goto rollback; } - } - - // final field - mi_assert_internal(field == final_field); - map = mi_atomic_load_relaxed(field); - do { - newmap = map | final_mask; - if ((map & final_mask) != 0) { goto rollback; } - } while (!mi_atomic_cas_strong_acq_rel(field, &map, newmap)); - // claimed! 
- *bitmap_idx = mi_bitmap_index_create(idx, MI_BITMAP_FIELD_BITS - initial); - return true; +/* -------------------------------------------------------------------------------- + binned bitmap used to track free slices +-------------------------------------------------------------------------------- */ -rollback: - // roll back intermediate fields - while (--field > initial_field) { - newmap = 0; - map = MI_BITMAP_FIELD_FULL; - mi_assert_internal(mi_atomic_load_relaxed(field) == map); - mi_atomic_store_release(field, newmap); - } - if (field == initial_field) { - map = mi_atomic_load_relaxed(field); - do { - mi_assert_internal((map & initial_mask) == initial_mask); - newmap = map & ~initial_mask; - } while (!mi_atomic_cas_strong_acq_rel(field, &map, newmap)); +// Assign a specific size bin to a chunk +static void mi_bbitmap_set_chunk_bin(mi_bbitmap_t* bbitmap, size_t chunk_idx, mi_chunkbin_t bin) { + mi_assert_internal(chunk_idx < mi_bbitmap_chunk_count(bbitmap)); + for (mi_chunkbin_t ibin = MI_CBIN_SMALL; ibin < MI_CBIN_NONE; ibin = mi_chunkbin_inc(ibin)) { + if (ibin == bin) { + const bool was_clear = mi_bchunk_set(& bbitmap->chunkmap_bins[ibin], chunk_idx, NULL); + if (was_clear) { mi_os_stat_increase(chunk_bins[ibin],1); } + } + else { + const bool was_set = mi_bchunk_clear(&bbitmap->chunkmap_bins[ibin], chunk_idx, NULL); + if (was_set) { mi_os_stat_decrease(chunk_bins[ibin],1); } + } } - // retry? 
(we make a recursive call instead of goto to be able to use const declarations) - if (retries < 4) { - return mi_bitmap_try_find_claim_field_across(bitmap, bitmap_fields, idx, count, retries+1, bitmap_idx); +} + +mi_chunkbin_t mi_bbitmap_debug_get_bin(const mi_bchunkmap_t* chunkmap_bins, size_t chunk_idx) { + for (mi_chunkbin_t ibin = MI_CBIN_SMALL; ibin < MI_CBIN_NONE; ibin = mi_chunkbin_inc(ibin)) { + if (mi_bchunk_is_xsetN(MI_BIT_SET, &chunkmap_bins[ibin], chunk_idx, 1)) { + return ibin; + } } - else { + return MI_CBIN_NONE; +} + +// Track the index of the highest chunk that is accessed. +static void mi_bbitmap_chunkmap_set_max(mi_bbitmap_t* bbitmap, size_t chunk_idx) { + size_t oldmax = mi_atomic_load_relaxed(&bbitmap->chunk_max_accessed); + if mi_unlikely(chunk_idx > oldmax) { + mi_atomic_cas_strong_relaxed(&bbitmap->chunk_max_accessed, &oldmax, chunk_idx); + } +} + +// Set a bit in the chunkmap +static void mi_bbitmap_chunkmap_set(mi_bbitmap_t* bbitmap, size_t chunk_idx, bool check_all_set) { + mi_assert(chunk_idx < mi_bbitmap_chunk_count(bbitmap)); + if (check_all_set) { + if (mi_bchunk_all_are_set_relaxed(&bbitmap->chunks[chunk_idx])) { + // all slices are free in this chunk: return back to the NONE bin + mi_bbitmap_set_chunk_bin(bbitmap, chunk_idx, MI_CBIN_NONE); + } + } + mi_bchunk_set(&bbitmap->chunkmap, chunk_idx, NULL); + mi_bbitmap_chunkmap_set_max(bbitmap, chunk_idx); +} + +static bool mi_bbitmap_chunkmap_try_clear(mi_bbitmap_t* bbitmap, size_t chunk_idx) { + mi_assert(chunk_idx < mi_bbitmap_chunk_count(bbitmap)); + // check if the corresponding chunk is all clear + if (!mi_bchunk_all_are_clear_relaxed(&bbitmap->chunks[chunk_idx])) return false; + // clear the chunkmap bit + mi_bchunk_clear(&bbitmap->chunkmap, chunk_idx, NULL); + // .. but a concurrent set may have happened in between our all-clear test and the clearing of the + // bit in the mask. We check again to catch this situation. 
(note: mi_bchunk_clear must be acq-rel) + if (!mi_bchunk_all_are_clear_relaxed(&bbitmap->chunks[chunk_idx])) { + mi_bchunk_set(&bbitmap->chunkmap, chunk_idx, NULL); return false; } + mi_bbitmap_chunkmap_set_max(bbitmap, chunk_idx); + return true; } -// Find `count` bits of zeros and set them to 1 atomically; returns `true` on success. -// Starts at idx, and wraps around to search in all `bitmap_fields` fields. -bool _mi_bitmap_try_find_from_claim_across(mi_bitmap_t bitmap, const size_t bitmap_fields, const size_t start_field_idx, const size_t count, mi_bitmap_index_t* bitmap_idx) { - mi_assert_internal(count > 0); - if (count==1) return _mi_bitmap_try_find_from_claim(bitmap, bitmap_fields, start_field_idx, count, bitmap_idx); - size_t idx = start_field_idx; - for (size_t visited = 0; visited < bitmap_fields; visited++, idx++) { - if (idx >= bitmap_fields) idx = 0; // wrap - // try to claim inside the field - if (count <= MI_BITMAP_FIELD_BITS) { - if (_mi_bitmap_try_find_claim_field(bitmap, idx, count, bitmap_idx)) { - return true; - } +/* -------------------------------------------------------------------------------- + mi_bbitmap_setN, try_clearN, and is_xsetN + (used to find free pages) +-------------------------------------------------------------------------------- */ + +// Set a sequence of `n` bits in the bitmap; returns `true` if atomically transitioned from 0's to 1's (or 1's to 0's). +// `n` cannot cross chunk boundaries (and `n <= MI_BCHUNK_BITS`)! 
+bool mi_bbitmap_setN(mi_bbitmap_t* bbitmap, size_t idx, size_t n) { + mi_assert_internal(n>0); + mi_assert_internal(n<=MI_BCHUNK_BITS); + + const size_t chunk_idx = idx / MI_BCHUNK_BITS; + const size_t cidx = idx % MI_BCHUNK_BITS; + mi_assert_internal(cidx + n <= MI_BCHUNK_BITS); // don't cross chunks (for now) + mi_assert_internal(chunk_idx < mi_bbitmap_chunk_count(bbitmap)); + if (cidx + n > MI_BCHUNK_BITS) { n = MI_BCHUNK_BITS - cidx; } // paranoia + + const bool were_allclear = mi_bchunk_setN(&bbitmap->chunks[chunk_idx], cidx, n, NULL); + mi_bbitmap_chunkmap_set(bbitmap, chunk_idx, true); // set after + return were_allclear; +} + + +// ------- mi_bbitmap_try_clearN --------------------------------------- + +bool mi_bbitmap_try_clearN(mi_bbitmap_t* bbitmap, size_t idx, size_t n) { + mi_assert_internal(n>0); + mi_assert_internal(n<=MI_BCHUNK_BITS); + mi_assert_internal(idx + n <= mi_bbitmap_max_bits(bbitmap)); + + const size_t chunk_idx = idx / MI_BCHUNK_BITS; + const size_t cidx = idx % MI_BCHUNK_BITS; + mi_assert_internal(cidx + n <= MI_BCHUNK_BITS); // don't cross chunks (for now) + mi_assert_internal(chunk_idx < mi_bbitmap_chunk_count(bbitmap)); + if (cidx + n > MI_BCHUNK_BITS) return false; + bool maybe_all_clear; + const bool cleared = mi_bchunk_try_clearN(&bbitmap->chunks[chunk_idx], cidx, n, &maybe_all_clear); + if (cleared && maybe_all_clear) { mi_bbitmap_chunkmap_try_clear(bbitmap, chunk_idx); } + // note: we don't set the size class for an explicit try_clearN (only used by purging) + return cleared; +} + + +// ------- mi_bbitmap_is_xset --------------------------------------- + +// Is a sequence of n bits already all set/cleared? 
+bool mi_bbitmap_is_xsetN(mi_xset_t set, mi_bbitmap_t* bbitmap, size_t idx, size_t n) { + mi_assert_internal(n>0); + mi_assert_internal(n<=MI_BCHUNK_BITS); + mi_assert_internal(idx + n <= mi_bbitmap_max_bits(bbitmap)); + + const size_t chunk_idx = idx / MI_BCHUNK_BITS; + const size_t cidx = idx % MI_BCHUNK_BITS; + mi_assert_internal(cidx + n <= MI_BCHUNK_BITS); // don't cross chunks (for now) + mi_assert_internal(chunk_idx < mi_bbitmap_chunk_count(bbitmap)); + if (cidx + n > MI_BCHUNK_BITS) { n = MI_BCHUNK_BITS - cidx; } // paranoia + + return mi_bchunk_is_xsetN(set, &bbitmap->chunks[chunk_idx], cidx, n); +} + + + + +/* -------------------------------------------------------------------------------- + mi_bbitmap_find + (used to find free pages) +-------------------------------------------------------------------------------- */ + +typedef bool (mi_bchunk_try_find_and_clear_fun_t)(mi_bchunk_t* chunk, size_t n, size_t* idx); + +// Go through the bbitmap and for every sequence of `n` set bits, call the visitor function. +// If it returns `true` stop the search. +// +// This is used for finding free blocks and it is important to be efficient (with 2-level bitscan) +// but also reduce fragmentation (through size bins). 
+static inline bool mi_bbitmap_try_find_and_clear_generic(mi_bbitmap_t* bbitmap, size_t tseq, size_t n, size_t* pidx, mi_bchunk_try_find_and_clear_fun_t* on_find) +{ + // we space out threads to reduce contention + const size_t cmap_max_count = _mi_divide_up(mi_bbitmap_chunk_count(bbitmap),MI_BFIELD_BITS); + const size_t chunk_acc = mi_atomic_load_relaxed(&bbitmap->chunk_max_accessed); + const size_t cmap_acc = chunk_acc / MI_BFIELD_BITS; + const size_t cmap_acc_bits = 1 + (chunk_acc % MI_BFIELD_BITS); + + // create a mask over the chunkmap entries to iterate over them efficiently + mi_assert_internal(MI_BFIELD_BITS >= MI_BCHUNK_FIELDS); + const mi_bfield_t cmap_mask = mi_bfield_mask(cmap_max_count,0); + const size_t cmap_cycle = cmap_acc+1; + const mi_chunkbin_t bbin = mi_chunkbin_of(n); + // visit each cmap entry + size_t cmap_idx = 0; + mi_bfield_cycle_iterate(cmap_mask, tseq, cmap_cycle, cmap_idx, X) + { + // and for each chunkmap entry we iterate over its bits to find the chunks + const mi_bfield_t cmap_entry = mi_atomic_load_relaxed(&bbitmap->chunkmap.bfields[cmap_idx]); + const size_t cmap_entry_cycle = (cmap_idx != cmap_acc ? 
MI_BFIELD_BITS : cmap_acc_bits); + if (cmap_entry == 0) continue; + + // get size bin masks + mi_bfield_t cmap_bins[MI_CBIN_COUNT] = { 0 }; + cmap_bins[MI_CBIN_NONE] = cmap_entry; + for (mi_chunkbin_t ibin = MI_CBIN_SMALL; ibin < MI_CBIN_NONE; ibin = mi_chunkbin_inc(ibin)) { + const mi_bfield_t cmap_bin = mi_atomic_load_relaxed(&bbitmap->chunkmap_bins[ibin].bfields[cmap_idx]); + cmap_bins[ibin] = cmap_bin & cmap_entry; + cmap_bins[MI_CBIN_NONE] &= ~cmap_bin; // clear bits that are in an assigned size bin } - // try to claim across fields - if (mi_bitmap_try_find_claim_field_across(bitmap, bitmap_fields, idx, count, 0, bitmap_idx)) { - return true; + + // consider only chunks for a particular size bin at a time + // this picks the best bin only within a cmap entry (~ 1GiB address space), but avoids multiple + // iterations through all entries. + mi_assert_internal(bbin < MI_CBIN_NONE); + for (mi_chunkbin_t ibin = MI_CBIN_SMALL; ibin <= MI_CBIN_NONE; + // skip from bbin to NONE (so, say, a SMALL will never be placed in a OTHER, MEDIUM, or LARGE chunk to reduce fragmentation) + ibin = (ibin == bbin ? 
MI_CBIN_NONE : mi_chunkbin_inc(ibin))) + { + mi_assert_internal(ibin < MI_CBIN_COUNT); + const mi_bfield_t cmap_bin = cmap_bins[ibin]; + size_t eidx = 0; + mi_bfield_cycle_iterate(cmap_bin, tseq, cmap_entry_cycle, eidx, Y) + { + // assertion doesn't quite hold as the max_accessed may be out-of-date + // mi_assert_internal(cmap_entry_cycle > eidx || ibin == MI_CBIN_NONE); + + // get the chunk + const size_t chunk_idx = cmap_idx*MI_BFIELD_BITS + eidx; + mi_bchunk_t* chunk = &bbitmap->chunks[chunk_idx]; + + size_t cidx; + if ((*on_find)(chunk, n, &cidx)) { + if (cidx==0 && ibin == MI_CBIN_NONE) { // only the first block determines the size bin + // this chunk is now reserved for the `bbin` size class + mi_bbitmap_set_chunk_bin(bbitmap, chunk_idx, bbin); + } + *pidx = (chunk_idx * MI_BCHUNK_BITS) + cidx; + mi_assert_internal(*pidx + n <= mi_bbitmap_max_bits(bbitmap)); + return true; + } + else { + // todo: should _on_find_ return a boolen if there is a chance all are clear to avoid calling `try_clear?` + // we may find that all are cleared only on a second iteration but that is ok as the chunkmap is a conservative approximation. 
+ mi_bbitmap_chunkmap_try_clear(bbitmap, chunk_idx); + } + } + mi_bfield_cycle_iterate_end(Y); } } + mi_bfield_cycle_iterate_end(X); return false; } -// Helper for masks across fields; returns the mid count, post_mask may be 0 -static size_t mi_bitmap_mask_across(mi_bitmap_index_t bitmap_idx, size_t bitmap_fields, size_t count, size_t* pre_mask, size_t* mid_mask, size_t* post_mask) { - MI_UNUSED_RELEASE(bitmap_fields); - const size_t bitidx = mi_bitmap_index_bit_in_field(bitmap_idx); - if mi_likely(bitidx + count <= MI_BITMAP_FIELD_BITS) { - *pre_mask = mi_bitmap_mask_(count, bitidx); - *mid_mask = 0; - *post_mask = 0; - mi_assert_internal(mi_bitmap_index_field(bitmap_idx) < bitmap_fields); - return 0; - } - else { - const size_t pre_bits = MI_BITMAP_FIELD_BITS - bitidx; - mi_assert_internal(pre_bits < count); - *pre_mask = mi_bitmap_mask_(pre_bits, bitidx); - count -= pre_bits; - const size_t mid_count = (count / MI_BITMAP_FIELD_BITS); - *mid_mask = MI_BITMAP_FIELD_FULL; - count %= MI_BITMAP_FIELD_BITS; - *post_mask = (count==0 ? 0 : mi_bitmap_mask_(count, 0)); - mi_assert_internal(mi_bitmap_index_field(bitmap_idx) + mid_count + (count==0 ? 0 : 1) < bitmap_fields); - return mid_count; - } -} - -// Set `count` bits at `bitmap_idx` to 0 atomically -// Returns `true` if all `count` bits were 1 previously. 
-bool _mi_bitmap_unclaim_across(mi_bitmap_t bitmap, size_t bitmap_fields, size_t count, mi_bitmap_index_t bitmap_idx) { - size_t idx = mi_bitmap_index_field(bitmap_idx); - size_t pre_mask; - size_t mid_mask; - size_t post_mask; - size_t mid_count = mi_bitmap_mask_across(bitmap_idx, bitmap_fields, count, &pre_mask, &mid_mask, &post_mask); - bool all_one = true; - mi_bitmap_field_t* field = &bitmap[idx]; - size_t prev = mi_atomic_and_acq_rel(field++, ~pre_mask); - if ((prev & pre_mask) != pre_mask) all_one = false; - while(mid_count-- > 0) { - prev = mi_atomic_and_acq_rel(field++, ~mid_mask); - if ((prev & mid_mask) != mid_mask) all_one = false; - } - if (post_mask!=0) { - prev = mi_atomic_and_acq_rel(field, ~post_mask); - if ((prev & post_mask) != post_mask) all_one = false; - } - return all_one; -} - -// Set `count` bits at `bitmap_idx` to 1 atomically -// Returns `true` if all `count` bits were 0 previously. `any_zero` is `true` if there was at least one zero bit. -bool _mi_bitmap_claim_across(mi_bitmap_t bitmap, size_t bitmap_fields, size_t count, mi_bitmap_index_t bitmap_idx, bool* pany_zero) { - size_t idx = mi_bitmap_index_field(bitmap_idx); - size_t pre_mask; - size_t mid_mask; - size_t post_mask; - size_t mid_count = mi_bitmap_mask_across(bitmap_idx, bitmap_fields, count, &pre_mask, &mid_mask, &post_mask); - bool all_zero = true; - bool any_zero = false; - _Atomic(size_t)*field = &bitmap[idx]; - size_t prev = mi_atomic_or_acq_rel(field++, pre_mask); - if ((prev & pre_mask) != 0) all_zero = false; - if ((prev & pre_mask) != pre_mask) any_zero = true; - while (mid_count-- > 0) { - prev = mi_atomic_or_acq_rel(field++, mid_mask); - if ((prev & mid_mask) != 0) all_zero = false; - if ((prev & mid_mask) != mid_mask) any_zero = true; - } - if (post_mask!=0) { - prev = mi_atomic_or_acq_rel(field, post_mask); - if ((prev & post_mask) != 0) all_zero = false; - if ((prev & post_mask) != post_mask) any_zero = true; - } - if (pany_zero != NULL) *pany_zero = any_zero; - 
return all_zero; -} - - -// Returns `true` if all `count` bits were 1. -// `any_ones` is `true` if there was at least one bit set to one. -static bool mi_bitmap_is_claimedx_across(mi_bitmap_t bitmap, size_t bitmap_fields, size_t count, mi_bitmap_index_t bitmap_idx, bool* pany_ones) { - size_t idx = mi_bitmap_index_field(bitmap_idx); - size_t pre_mask; - size_t mid_mask; - size_t post_mask; - size_t mid_count = mi_bitmap_mask_across(bitmap_idx, bitmap_fields, count, &pre_mask, &mid_mask, &post_mask); - bool all_ones = true; - bool any_ones = false; - mi_bitmap_field_t* field = &bitmap[idx]; - size_t prev = mi_atomic_load_relaxed(field++); - if ((prev & pre_mask) != pre_mask) all_ones = false; - if ((prev & pre_mask) != 0) any_ones = true; - while (mid_count-- > 0) { - prev = mi_atomic_load_relaxed(field++); - if ((prev & mid_mask) != mid_mask) all_ones = false; - if ((prev & mid_mask) != 0) any_ones = true; - } - if (post_mask!=0) { - prev = mi_atomic_load_relaxed(field); - if ((prev & post_mask) != post_mask) all_ones = false; - if ((prev & post_mask) != 0) any_ones = true; - } - if (pany_ones != NULL) *pany_ones = any_ones; - return all_ones; + +/* -------------------------------------------------------------------------------- + mi_bbitmap_try_find_and_clear -- used to find free pages + note: the compiler will fully inline the indirect function calls +-------------------------------------------------------------------------------- */ + +bool mi_bbitmap_try_find_and_clear(mi_bbitmap_t* bbitmap, size_t tseq, size_t* pidx) { + return mi_bbitmap_try_find_and_clear_generic(bbitmap, tseq, 1, pidx, &mi_bchunk_try_find_and_clear_1); } -bool _mi_bitmap_is_claimed_across(mi_bitmap_t bitmap, size_t bitmap_fields, size_t count, mi_bitmap_index_t bitmap_idx) { - return mi_bitmap_is_claimedx_across(bitmap, bitmap_fields, count, bitmap_idx, NULL); +bool mi_bbitmap_try_find_and_clear8(mi_bbitmap_t* bbitmap, size_t tseq, size_t* pidx) { + return 
mi_bbitmap_try_find_and_clear_generic(bbitmap, tseq, 8, pidx, &mi_bchunk_try_find_and_clear_8); +} + +// bool mi_bbitmap_try_find_and_clearX(mi_bbitmap_t* bbitmap, size_t tseq, size_t* pidx) { +// return mi_bbitmap_try_find_and_clear_generic(bbitmap, tseq, MI_BFIELD_BITS, pidx, &mi_bchunk_try_find_and_clear_X); +// } + +bool mi_bbitmap_try_find_and_clearNX(mi_bbitmap_t* bbitmap, size_t tseq, size_t n, size_t* pidx) { + mi_assert_internal(n<=MI_BFIELD_BITS); + return mi_bbitmap_try_find_and_clear_generic(bbitmap, tseq, n, pidx, &mi_bchunk_try_find_and_clearNX); } -bool _mi_bitmap_is_any_claimed_across(mi_bitmap_t bitmap, size_t bitmap_fields, size_t count, mi_bitmap_index_t bitmap_idx) { - bool any_ones; - mi_bitmap_is_claimedx_across(bitmap, bitmap_fields, count, bitmap_idx, &any_ones); - return any_ones; +bool mi_bbitmap_try_find_and_clearN_(mi_bbitmap_t* bbitmap, size_t tseq, size_t n, size_t* pidx) { + mi_assert_internal(n<=MI_BCHUNK_BITS); + return mi_bbitmap_try_find_and_clear_generic(bbitmap, tseq, n, pidx, &mi_bchunk_try_find_and_clearN_); } diff --git a/depends/mimalloc/src/bitmap.h b/depends/mimalloc/src/bitmap.h index 0c501ec1feac..b9c9dfb41b06 100644 --- a/depends/mimalloc/src/bitmap.h +++ b/depends/mimalloc/src/bitmap.h @@ -1,111 +1,330 @@ /* ---------------------------------------------------------------------------- -Copyright (c) 2019-2020 Microsoft Research, Daan Leijen +Copyright (c) 2019-2024 Microsoft Research, Daan Leijen This is free software; you can redistribute it and/or modify it under the terms of the MIT license. A copy of the license can be found in the file "LICENSE" at the root of this distribution. 
-----------------------------------------------------------------------------*/ /* ---------------------------------------------------------------------------- -Concurrent bitmap that can set/reset sequences of bits atomically, -represeted as an array of fields where each field is a machine word (`size_t`) - -There are two api's; the standard one cannot have sequences that cross -between the bitmap fields (and a sequence must be <= MI_BITMAP_FIELD_BITS). -(this is used in region allocation) - -The `_across` postfixed functions do allow sequences that can cross over -between the fields. (This is used in arena allocation) +Concurrent bitmap that can set/reset sequences of bits atomically ---------------------------------------------------------------------------- */ #pragma once #ifndef MI_BITMAP_H #define MI_BITMAP_H -/* ----------------------------------------------------------- - Bitmap definition ------------------------------------------------------------ */ +/* -------------------------------------------------------------------------------- + Atomic bitmaps with release/acquire guarantees: + + `mi_bfield_t`: is a single machine word that can efficiently be bit counted (usually `size_t`) + each bit usually represents a single MI_ARENA_SLICE_SIZE in an arena (64 KiB). + We need 16K bits to represent a 1GiB arena. + + `mi_bchunk_t`: a chunk of bfield's of a total of MI_BCHUNK_BITS (= 512 on 64-bit, 256 on 32-bit) + allocations never span across chunks -- so MI_ARENA_MAX_OBJ_SIZE is the number + of bits in a chunk times the MI_ARENA_SLICE_SIZE (512 * 64KiB = 32 MiB). + These chunks are cache-aligned and we can use AVX2/AVX512/NEON/SVE/SVE2/etc. instructions + to scan for bits (perhaps) more efficiently. + + We allocate byte-sized ranges aligned to bytes in the bfield, and bfield-sized + ranges aligned to a bfield. + + Searching linearly through the chunks would be too slow (16K bits per GiB). 
+ Instead we add a "chunkmap" to do a two-level search (more or less a btree of depth 2). + + `mi_bchunkmap_t` (== `mi_bchunk_t`): for each chunk we track if it has (potentially) any bit set. + The chunkmap has 1 bit per chunk that is set if the chunk potentially has a bit set. + This is used to avoid scanning every chunk. (and thus strictly an optimization) + It is conservative: it is fine to set a bit in the chunk map even if the chunk turns out + to have no bits set. It is also allowed to briefly have a clear bit even if the + chunk has bits set -- as long as we guarantee that the bit will be set later on; + (this allows us to set the chunkmap bit right after we set a bit in the corresponding chunk). + + However, when we clear a bit in a chunk, and the chunk is indeed all clear, we + cannot safely clear the bit corresponding to the chunk in the chunkmap since it + may race with another thread setting a bit in the same chunk. Therefore, when + clearing, we first test if a chunk is clear, then clear the chunkmap bit, and + then test again to catch any set bits that we may have missed. + + Since the chunkmap may thus be briefly out-of-sync, this means that we may sometimes + not find a free page even though it's there (but we accept this as we avoid taking + full locks). (Another way to do this is to use an epoch but we like to avoid that complexity + for now). + + `mi_bitmap_t`: a bitmap with N chunks. A bitmap has a chunkmap of MI_BCHUNK_BITS (512) + and thus has at most 512 chunks (=2^18 bits x 64 KiB slices = 16 GiB max arena size). + The minimum is 1 chunk which is a 32 MiB arena. + + For now, the implementation assumes MI_HAS_FAST_BITSCAN and uses trailing-zero-count + and pop-count (but we think it can be adapted work reasonably well on older hardware too) +--------------------------------------------------------------------------------------------- */ + +// A word-size bit field. 
+typedef size_t mi_bfield_t; + +#define MI_BFIELD_BITS_SHIFT (MI_SIZE_SHIFT+3) +#define MI_BFIELD_BITS (1 << MI_BFIELD_BITS_SHIFT) +#define MI_BFIELD_SIZE (MI_BFIELD_BITS/8) +#define MI_BFIELD_LO_BIT8 (((~(mi_bfield_t)0))/0xFF) // 0x01010101 .. +#define MI_BFIELD_HI_BIT8 (MI_BFIELD_LO_BIT8 << 7) // 0x80808080 .. + +#define MI_BCHUNK_SIZE (MI_BCHUNK_BITS / 8) +#define MI_BCHUNK_FIELDS (MI_BCHUNK_BITS / MI_BFIELD_BITS) // 8 on both 64- and 32-bit + + +// some compiler (msvc in C mode) cannot have expressions in the alignment attribute +#if MI_BCHUNK_SIZE==64 +#define mi_decl_bchunk_align mi_decl_align(64) +#elif MI_BCHUNK_SIZE==32 +#define mi_decl_bchunk_align mi_decl_align(32) +#else +#define mi_decl_bchunk_align mi_decl_align(MI_BCHUNK_SIZE) +#endif + + +// A bitmap chunk contains 512 bits on 64-bit (256 on 32-bit) +typedef mi_decl_bchunk_align struct mi_bchunk_s { + _Atomic(mi_bfield_t) bfields[MI_BCHUNK_FIELDS]; +} mi_bchunk_t; + + +// The chunkmap has one bit per corresponding chunk that is set if the chunk potentially has bits set. +// The chunkmap is itself a chunk. 
+typedef mi_bchunk_t mi_bchunkmap_t; + +#define MI_BCHUNKMAP_BITS MI_BCHUNK_BITS + +#define MI_BITMAP_MAX_CHUNK_COUNT (MI_BCHUNKMAP_BITS) +#define MI_BITMAP_MIN_CHUNK_COUNT (1) +#if MI_SIZE_BITS > 32 +#define MI_BITMAP_DEFAULT_CHUNK_COUNT (64) // 2 GiB on 64-bit -- this is for the page map +#else +#define MI_BITMAP_DEFAULT_CHUNK_COUNT (1) +#endif +#define MI_BITMAP_MAX_BIT_COUNT (MI_BITMAP_MAX_CHUNK_COUNT * MI_BCHUNK_BITS) // 16 GiB arena +#define MI_BITMAP_MIN_BIT_COUNT (MI_BITMAP_MIN_CHUNK_COUNT * MI_BCHUNK_BITS) // 32 MiB arena +#define MI_BITMAP_DEFAULT_BIT_COUNT (MI_BITMAP_DEFAULT_CHUNK_COUNT * MI_BCHUNK_BITS) // 2 GiB arena + + +// An atomic bitmap +typedef mi_decl_bchunk_align struct mi_bitmap_s { + _Atomic(size_t) chunk_count; // total count of chunks (0 < N <= MI_BCHUNKMAP_BITS) + size_t _padding[MI_BCHUNK_SIZE/MI_SIZE_SIZE - 1]; // suppress warning on msvc + mi_bchunkmap_t chunkmap; + mi_bchunk_t chunks[MI_BITMAP_DEFAULT_CHUNK_COUNT]; // usually dynamic MI_BITMAP_MAX_CHUNK_COUNT +} mi_bitmap_t; + + +static inline size_t mi_bitmap_chunk_count(const mi_bitmap_t* bitmap) { + return mi_atomic_load_relaxed(&((mi_bitmap_t*)bitmap)->chunk_count); +} + +static inline size_t mi_bitmap_max_bits(const mi_bitmap_t* bitmap) { + return (mi_bitmap_chunk_count(bitmap) * MI_BCHUNK_BITS); +} + + + +/* -------------------------------------------------------------------------------- + Atomic bitmap operations +-------------------------------------------------------------------------------- */ + +// Many operations are generic over setting or clearing the bit sequence: we use `mi_xset_t` for this (true if setting, false if clearing) +typedef bool mi_xset_t; +#define MI_BIT_SET (true) +#define MI_BIT_CLEAR (false) + + +// Required size of a bitmap to represent `bit_count` bits. +size_t mi_bitmap_size(size_t bit_count, size_t* chunk_count); + +// Initialize a bitmap to all clear; avoid a mem_zero if `already_zero` is true +// returns the size of the bitmap. 
+size_t mi_bitmap_init(mi_bitmap_t* bitmap, size_t bit_count, bool already_zero); + +// Set/clear a sequence of `n` bits in the bitmap (and can cross chunks). +// Not atomic so only use if still local to a thread. +void mi_bitmap_unsafe_setN(mi_bitmap_t* bitmap, size_t idx, size_t n); -#define MI_BITMAP_FIELD_BITS (8*MI_SIZE_SIZE) -#define MI_BITMAP_FIELD_FULL (~((size_t)0)) // all bits set -// An atomic bitmap of `size_t` fields -typedef _Atomic(size_t) mi_bitmap_field_t; -typedef mi_bitmap_field_t* mi_bitmap_t; +// Set a bit in the bitmap; returns `true` if it atomically transitioned from 0 to 1 +bool mi_bitmap_set(mi_bitmap_t* bitmap, size_t idx); -// A bitmap index is the index of the bit in a bitmap. -typedef size_t mi_bitmap_index_t; +// Clear a bit in the bitmap; returns `true` if it atomically transitioned from 1 to 0 +bool mi_bitmap_clear(mi_bitmap_t* bitmap, size_t idx); -// Create a bit index. -static inline mi_bitmap_index_t mi_bitmap_index_create(size_t idx, size_t bitidx) { - mi_assert_internal(bitidx < MI_BITMAP_FIELD_BITS); - return (idx*MI_BITMAP_FIELD_BITS) + bitidx; +// Set a sequence of `n` bits in the bitmap; returns `true` if atomically transitioned from all 0's to 1's +// `n` cannot cross chunk boundaries (and `n <= MI_BCHUNK_BITS`)! +// If `already_set` is not NULL, it is set to count of bits were already all set. +// (this is used for correct statistics if commiting over a partially committed area) +bool mi_bitmap_setN(mi_bitmap_t* bitmap, size_t idx, size_t n, size_t* already_set); + +// Clear a sequence of `n` bits in the bitmap; returns `true` if atomically transitioned from all 1's to 0's +// `n` cannot cross chunk boundaries (and `n <= MI_BCHUNK_BITS`)! +bool mi_bitmap_clearN(mi_bitmap_t* bitmap, size_t idx, size_t n); + + +// Is a sequence of n bits already all set/cleared? +bool mi_bitmap_is_xsetN(mi_xset_t set, mi_bitmap_t* bitmap, size_t idx, size_t n); + +// Is a sequence of n bits already set? 
+// (Used to check if a memory range is already committed) +static inline bool mi_bitmap_is_setN(mi_bitmap_t* bitmap, size_t idx, size_t n) { + return mi_bitmap_is_xsetN(MI_BIT_SET, bitmap, idx, n); +} + +// Is a sequence of n bits already clear? +static inline bool mi_bitmap_is_clearN(mi_bitmap_t* bitmap, size_t idx, size_t n) { + return mi_bitmap_is_xsetN(MI_BIT_CLEAR, bitmap, idx, n); +} + +static inline bool mi_bitmap_is_set(mi_bitmap_t* bitmap, size_t idx) { + return mi_bitmap_is_setN(bitmap, idx, 1); +} + +static inline bool mi_bitmap_is_clear(mi_bitmap_t* bitmap, size_t idx) { + return mi_bitmap_is_clearN(bitmap, idx, 1); +} + +// Called once a bit is cleared to see if the memory slice can be claimed. +typedef bool (mi_claim_fun_t)(size_t slice_index, mi_arena_t* arena, mi_heaptag_t heap_tag, bool* keep_set); + +// Find a set bits in the bitmap, atomically clear it, and check if `claim` returns true. +// If not claimed, continue on (potentially setting the bit again depending on `keep_set`). +// Returns true on success, and in that case sets the index: `0 <= *pidx <= MI_BITMAP_MAX_BITS-n`. +mi_decl_nodiscard bool mi_bitmap_try_find_and_claim(mi_bitmap_t* bitmap, size_t tseq, size_t* pidx, + mi_claim_fun_t* claim, mi_arena_t* arena, mi_heaptag_t heap_tag ); + + +// Atomically clear a bit but only if it is set. Will block otherwise until the bit is set. +// This is used to delay free-ing a page that it at the same time being considered to be +// allocated from `mi_arena_try_abandoned` (and is in the `claim` function of `mi_bitmap_try_find_and_claim`). +void mi_bitmap_clear_once_set(mi_bitmap_t* bitmap, size_t idx); + + +// If a bit is set in the bitmap, return `true` and set `idx` to the index of the highest bit. +// Otherwise return `false` (and `*idx` is undefined). +// Used for unloading arena's +bool mi_bitmap_bsr(mi_bitmap_t* bitmap, size_t* idx); + +// Return count of all set bits in a bitmap. 
+size_t mi_bitmap_popcount(mi_bitmap_t* bitmap); + + +typedef bool (mi_forall_set_fun_t)(size_t slice_index, size_t slice_count, mi_arena_t* arena, void* arg2); + +// Visit all set bits in a bitmap (`slice_count == 1`) +bool _mi_bitmap_forall_set(mi_bitmap_t* bitmap, mi_forall_set_fun_t* visit, mi_arena_t* arena, void* arg); + +// Visit all set bits in a bitmap with larger ranges if possible (`slice_count >= 1`) +bool _mi_bitmap_forall_setc_ranges(mi_bitmap_t* bitmap, mi_forall_set_fun_t* visit, mi_arena_t* arena, void* arg); + + +// Count all set bits in given range in the bitmap. (cannot cross chunks) +size_t mi_bitmap_popcountN( mi_bitmap_t* bitmap, size_t idx, size_t n); + +/* ---------------------------------------------------------------------------- + Binned concurrent bitmap + Assigns a size class to each chunk such that small blocks don't cause too + much fragmentation since we keep chunks for larger blocks separate. +---------------------------------------------------------------------------- */ + +// mi_chunkbin_t is defined in mimalloc-stats.h + +static inline mi_chunkbin_t mi_chunkbin_inc(mi_chunkbin_t bbin) { + mi_assert_internal(bbin < MI_CBIN_COUNT); + return (mi_chunkbin_t)((int)bbin + 1); } -// Create a bit index. -static inline mi_bitmap_index_t mi_bitmap_index_create_from_bit(size_t full_bitidx) { - return mi_bitmap_index_create(full_bitidx / MI_BITMAP_FIELD_BITS, full_bitidx % MI_BITMAP_FIELD_BITS); +static inline mi_chunkbin_t mi_chunkbin_dec(mi_chunkbin_t bbin) { + mi_assert_internal(bbin > MI_CBIN_NONE); + return (mi_chunkbin_t)((int)bbin - 1); } -// Get the field index from a bit index. 
-static inline size_t mi_bitmap_index_field(mi_bitmap_index_t bitmap_idx) { - return (bitmap_idx / MI_BITMAP_FIELD_BITS); +static inline mi_chunkbin_t mi_chunkbin_of(size_t slice_count) { + if (slice_count==1) return MI_CBIN_SMALL; + if (slice_count==8) return MI_CBIN_MEDIUM; + #if MI_ENABLE_LARGE_PAGES + if (slice_count==MI_BFIELD_BITS) return MI_CBIN_LARGE; + #endif + return MI_CBIN_OTHER; } -// Get the bit index in a bitmap field -static inline size_t mi_bitmap_index_bit_in_field(mi_bitmap_index_t bitmap_idx) { - return (bitmap_idx % MI_BITMAP_FIELD_BITS); +// An atomic "binned" bitmap for the free slices where we keep chunks reserved for particalar size classes +typedef mi_decl_bchunk_align struct mi_bbitmap_s { + _Atomic(size_t) chunk_count; // total count of chunks (0 < N <= MI_BCHUNKMAP_BITS) + _Atomic(size_t) chunk_max_accessed; // max chunk index that was once cleared or set + #if (MI_BCHUNK_SIZE / MI_SIZE_SIZE) > 2 + size_t _padding[MI_BCHUNK_SIZE/MI_SIZE_SIZE - 2]; // suppress warning on msvc by aligning manually + #endif + mi_bchunkmap_t chunkmap; + mi_bchunkmap_t chunkmap_bins[MI_CBIN_COUNT - 1]; // chunkmaps with bit set if the chunk is in that size class (excluding MI_CBIN_NONE) + mi_bchunk_t chunks[MI_BITMAP_DEFAULT_CHUNK_COUNT]; // usually dynamic MI_BITMAP_MAX_CHUNK_COUNT +} mi_bbitmap_t; + + +static inline size_t mi_bbitmap_chunk_count(const mi_bbitmap_t* bbitmap) { + return mi_atomic_load_relaxed(&((mi_bbitmap_t*)bbitmap)->chunk_count); } -// Get the full bit index -static inline size_t mi_bitmap_index_bit(mi_bitmap_index_t bitmap_idx) { - return bitmap_idx; +static inline size_t mi_bbitmap_max_bits(const mi_bbitmap_t* bbitmap) { + return (mi_bbitmap_chunk_count(bbitmap) * MI_BCHUNK_BITS); } -/* ----------------------------------------------------------- - Claim a bit sequence atomically ------------------------------------------------------------ */ +mi_chunkbin_t mi_bbitmap_debug_get_bin(const mi_bchunk_t* chunkmap_bins, size_t chunk_idx); -// 
Try to atomically claim a sequence of `count` bits in a single -// field at `idx` in `bitmap`. Returns `true` on success. -bool _mi_bitmap_try_find_claim_field(mi_bitmap_t bitmap, size_t idx, const size_t count, mi_bitmap_index_t* bitmap_idx); +size_t mi_bbitmap_size(size_t bit_count, size_t* chunk_count); -// Starts at idx, and wraps around to search in all `bitmap_fields` fields. -// For now, `count` can be at most MI_BITMAP_FIELD_BITS and will never cross fields. -bool _mi_bitmap_try_find_from_claim(mi_bitmap_t bitmap, const size_t bitmap_fields, const size_t start_field_idx, const size_t count, mi_bitmap_index_t* bitmap_idx); -// Like _mi_bitmap_try_find_from_claim but with an extra predicate that must be fullfilled -typedef bool (mi_cdecl *mi_bitmap_pred_fun_t)(mi_bitmap_index_t bitmap_idx, void* pred_arg); -bool _mi_bitmap_try_find_from_claim_pred(mi_bitmap_t bitmap, const size_t bitmap_fields, const size_t start_field_idx, const size_t count, mi_bitmap_pred_fun_t pred_fun, void* pred_arg, mi_bitmap_index_t* bitmap_idx); +// Initialize a bitmap to all clear; avoid a mem_zero if `already_zero` is true +// returns the size of the bitmap. +size_t mi_bbitmap_init(mi_bbitmap_t* bbitmap, size_t bit_count, bool already_zero); -// Set `count` bits at `bitmap_idx` to 0 atomically -// Returns `true` if all `count` bits were 1 previously. -bool _mi_bitmap_unclaim(mi_bitmap_t bitmap, size_t bitmap_fields, size_t count, mi_bitmap_index_t bitmap_idx); +// Set/clear a sequence of `n` bits in the bitmap (and can cross chunks). +// Not atomic so only use if still local to a thread. +void mi_bbitmap_unsafe_setN(mi_bbitmap_t* bbitmap, size_t idx, size_t n); -// Set `count` bits at `bitmap_idx` to 1 atomically -// Returns `true` if all `count` bits were 0 previously. `any_zero` is `true` if there was at least one zero bit. 
-bool _mi_bitmap_claim(mi_bitmap_t bitmap, size_t bitmap_fields, size_t count, mi_bitmap_index_t bitmap_idx, bool* any_zero); -bool _mi_bitmap_is_claimed(mi_bitmap_t bitmap, size_t bitmap_fields, size_t count, mi_bitmap_index_t bitmap_idx); -bool _mi_bitmap_is_any_claimed(mi_bitmap_t bitmap, size_t bitmap_fields, size_t count, mi_bitmap_index_t bitmap_idx); +// Set a sequence of `n` bits in the bbitmap; returns `true` if atomically transitioned from all 0's to 1's +// `n` cannot cross chunk boundaries (and `n <= MI_BCHUNK_BITS`)! +bool mi_bbitmap_setN(mi_bbitmap_t* bbitmap, size_t idx, size_t n); -//-------------------------------------------------------------------------- -// the `_across` functions work on bitmaps where sequences can cross over -// between the fields. This is used in arena allocation -//-------------------------------------------------------------------------- +// Is a sequence of n bits already all set/cleared? +bool mi_bbitmap_is_xsetN(mi_xset_t set, mi_bbitmap_t* bbitmap, size_t idx, size_t n); -// Find `count` bits of zeros and set them to 1 atomically; returns `true` on success. -// Starts at idx, and wraps around to search in all `bitmap_fields` fields. -bool _mi_bitmap_try_find_from_claim_across(mi_bitmap_t bitmap, const size_t bitmap_fields, const size_t start_field_idx, const size_t count, mi_bitmap_index_t* bitmap_idx); +// Is a sequence of n bits already set? +// (Used to check if a memory range is already committed) +static inline bool mi_bbitmap_is_setN(mi_bbitmap_t* bbitmap, size_t idx, size_t n) { + return mi_bbitmap_is_xsetN(MI_BIT_SET, bbitmap, idx, n); +} -// Set `count` bits at `bitmap_idx` to 0 atomically -// Returns `true` if all `count` bits were 1 previously. -bool _mi_bitmap_unclaim_across(mi_bitmap_t bitmap, size_t bitmap_fields, size_t count, mi_bitmap_index_t bitmap_idx); +// Is a sequence of n bits already clear? 
+static inline bool mi_bbitmap_is_clearN(mi_bbitmap_t* bbitmap, size_t idx, size_t n) { + return mi_bbitmap_is_xsetN(MI_BIT_CLEAR, bbitmap, idx, n); +} -// Set `count` bits at `bitmap_idx` to 1 atomically -// Returns `true` if all `count` bits were 0 previously. `any_zero` is `true` if there was at least one zero bit. -bool _mi_bitmap_claim_across(mi_bitmap_t bitmap, size_t bitmap_fields, size_t count, mi_bitmap_index_t bitmap_idx, bool* pany_zero); -bool _mi_bitmap_is_claimed_across(mi_bitmap_t bitmap, size_t bitmap_fields, size_t count, mi_bitmap_index_t bitmap_idx); -bool _mi_bitmap_is_any_claimed_across(mi_bitmap_t bitmap, size_t bitmap_fields, size_t count, mi_bitmap_index_t bitmap_idx); +// Try to atomically transition `n` bits from all set to all clear. Returns `true` on succes. +// `n` cannot cross chunk boundaries, where `n <= MI_CHUNK_BITS`. +bool mi_bbitmap_try_clearN(mi_bbitmap_t* bbitmap, size_t idx, size_t n); -#endif +// Specialized versions for common bit sequence sizes +bool mi_bbitmap_try_find_and_clear(mi_bbitmap_t* bbitmap, size_t tseq, size_t* pidx); // 1-bit +bool mi_bbitmap_try_find_and_clear8(mi_bbitmap_t* bbitmap, size_t tseq, size_t* pidx); // 8-bits +// bool mi_bbitmap_try_find_and_clearX(mi_bbitmap_t* bbitmap, size_t tseq, size_t* pidx); // MI_BFIELD_BITS +bool mi_bbitmap_try_find_and_clearNX(mi_bbitmap_t* bbitmap, size_t n, size_t tseq, size_t* pidx); // < MI_BFIELD_BITS +bool mi_bbitmap_try_find_and_clearN_(mi_bbitmap_t* bbitmap, size_t n, size_t tseq, size_t* pidx); // > MI_BFIELD_BITS <= MI_BCHUNK_BITS + +// Find a sequence of `n` bits in the bbitmap with all bits set, and try to atomically clear all. +// Returns true on success, and in that case sets the index: `0 <= *pidx <= MI_BITMAP_MAX_BITS-n`. 
+mi_decl_nodiscard static inline bool mi_bbitmap_try_find_and_clearN(mi_bbitmap_t* bbitmap, size_t n, size_t tseq, size_t* pidx) { + if (n==1) return mi_bbitmap_try_find_and_clear(bbitmap, tseq, pidx); // small pages + if (n==8) return mi_bbitmap_try_find_and_clear8(bbitmap, tseq, pidx); // medium pages + // if (n==MI_BFIELD_BITS) return mi_bbitmap_try_find_and_clearX(bbitmap, tseq, pidx); // large pages + if (n==0 || n>MI_BCHUNK_BITS) return false; // cannot be more than a chunk + if (n<=MI_BFIELD_BITS) return mi_bbitmap_try_find_and_clearNX(bbitmap, tseq, n, pidx); + return mi_bbitmap_try_find_and_clearN_(bbitmap, tseq, n, pidx); +} + + +#endif // MI_BITMAP_H diff --git a/depends/mimalloc/src/free.c b/depends/mimalloc/src/free.c new file mode 100644 index 000000000000..40e813809336 --- /dev/null +++ b/depends/mimalloc/src/free.c @@ -0,0 +1,577 @@ +/* ---------------------------------------------------------------------------- +Copyright (c) 2018-2024, Microsoft Research, Daan Leijen +This is free software; you can redistribute it and/or modify it under the +terms of the MIT license. A copy of the license can be found in the file +"LICENSE" at the root of this distribution. 
+-----------------------------------------------------------------------------*/ +#if !defined(MI_IN_ALLOC_C) +#error "this file should be included from 'alloc.c' (so aliases can work from alloc-override)" +// add includes help an IDE +#include "mimalloc.h" +#include "mimalloc/internal.h" +#include "mimalloc/prim.h" // _mi_prim_thread_id() +#endif + +// forward declarations +static void mi_check_padding(const mi_page_t* page, const mi_block_t* block); +static bool mi_check_is_double_free(const mi_page_t* page, const mi_block_t* block); +static size_t mi_page_usable_size_of(const mi_page_t* page, const mi_block_t* block); +static void mi_stat_free(const mi_page_t* page, const mi_block_t* block); + + +// ------------------------------------------------------ +// Free +// ------------------------------------------------------ + +// regular free of a (thread local) block pointer +// fast path written carefully to prevent spilling on the stack +static inline void mi_free_block_local(mi_page_t* page, mi_block_t* block, bool track_stats, bool check_full) +{ + // checks + if mi_unlikely(mi_check_is_double_free(page, block)) return; + mi_check_padding(page, block); + if (track_stats) { mi_stat_free(page, block); } + #if (MI_DEBUG>0) && !MI_TRACK_ENABLED && !MI_TSAN && !MI_GUARDED + memset(block, MI_DEBUG_FREED, mi_page_block_size(page)); + #endif + if (track_stats) { mi_track_free_size(block, mi_page_usable_size_of(page, block)); } // faster then mi_usable_size as we already know the page and that p is unaligned + + // actual free: push on the local free list + mi_block_set_next(page, block, page->local_free); + page->local_free = block; + if mi_unlikely(--page->used == 0) { + _mi_page_retire(page); + } + else if mi_unlikely(check_full && mi_page_is_in_full(page)) { + _mi_page_unfull(page); + } +} + +// Forward declaration for multi-threaded collect +static void mi_decl_noinline mi_free_try_collect_mt(mi_page_t* page, mi_block_t* mt_free) mi_attr_noexcept; + +// Free a 
block multi-threaded +static inline void mi_free_block_mt(mi_page_t* page, mi_block_t* block) mi_attr_noexcept +{ + // adjust stats (after padding check and potentially recursive `mi_free` above) + mi_stat_free(page, block); // stat_free may access the padding + mi_track_free_size(block, mi_page_usable_size_of(page, block)); + + // _mi_padding_shrink(page, block, sizeof(mi_block_t)); +#if (MI_DEBUG>0) && !MI_TRACK_ENABLED && !MI_TSAN // note: when tracking, cannot use mi_usable_size with multi-threading + size_t dbgsize = mi_usable_size(block); + if (dbgsize > MI_MiB) { dbgsize = MI_MiB; } + _mi_memset_aligned(block, MI_DEBUG_FREED, dbgsize); +#endif + + // push atomically on the page thread free list + mi_thread_free_t tf_new; + mi_thread_free_t tf_old = mi_atomic_load_relaxed(&page->xthread_free); + do { + mi_block_set_next(page, block, mi_tf_block(tf_old)); + tf_new = mi_tf_create(block, true /* always use owned: try to claim it if the page is abandoned */); + } while (!mi_atomic_cas_weak_acq_rel(&page->xthread_free, &tf_old, tf_new)); // todo: release is enough? + + // and atomically try to collect the page if it was abandoned + const bool is_owned_now = !mi_tf_is_owned(tf_old); + if (is_owned_now) { + mi_assert_internal(mi_page_is_abandoned(page)); + mi_free_try_collect_mt(page,block); + } +} + + +// Adjust a block that was allocated aligned, to the actual start of the block in the page. +// note: this can be called from `mi_free_generic_mt` where a non-owning thread accesses the +// `page_start` and `block_size` fields; however these are constant and the page won't be +// deallocated (as the block we are freeing keeps it alive) and thus safe to read concurrently. 
+mi_block_t* _mi_page_ptr_unalign(const mi_page_t* page, const void* p) { + mi_assert_internal(page!=NULL && p!=NULL); + + size_t diff = (uint8_t*)p - mi_page_start(page); + size_t adjust; + if mi_likely(page->block_size_shift != 0) { + adjust = diff & (((size_t)1 << page->block_size_shift) - 1); + } + else { + adjust = diff % mi_page_block_size(page); + } + + return (mi_block_t*)((uintptr_t)p - adjust); +} + +// forward declaration for a MI_GUARDED build +#if MI_GUARDED +static void mi_block_unguard(mi_page_t* page, mi_block_t* block, void* p); // forward declaration +static inline void mi_block_check_unguard(mi_page_t* page, mi_block_t* block, void* p) { + if (mi_block_ptr_is_guarded(block, p)) { mi_block_unguard(page, block, p); } +} +#else +static inline void mi_block_check_unguard(mi_page_t* page, mi_block_t* block, void* p) { + MI_UNUSED(page); MI_UNUSED(block); MI_UNUSED(p); +} +#endif + + +// free a local pointer (page parameter comes first for better codegen) +static void mi_decl_noinline mi_free_generic_local(mi_page_t* page, void* p) mi_attr_noexcept { + mi_assert_internal(p!=NULL && page != NULL); + mi_block_t* const block = (mi_page_has_aligned(page) ? 
_mi_page_ptr_unalign(page, p) : (mi_block_t*)p); + mi_block_check_unguard(page, block, p); + mi_free_block_local(page, block, true /* track stats */, true /* check for a full page */); +} + +// free a pointer owned by another thread (page parameter comes first for better codegen) +static void mi_decl_noinline mi_free_generic_mt(mi_page_t* page, void* p) mi_attr_noexcept { + mi_assert_internal(p!=NULL && page != NULL); + mi_block_t* const block = _mi_page_ptr_unalign(page, p); // don't check `has_aligned` flag to avoid a race (issue #865) + mi_block_check_unguard(page, block, p); + mi_free_block_mt(page, block); +} + +// generic free (for runtime integration) +void mi_decl_noinline _mi_free_generic(mi_page_t* page, bool is_local, void* p) mi_attr_noexcept { + if (is_local) mi_free_generic_local(page,p); + else mi_free_generic_mt(page,p); +} + + +// Get the page belonging to a pointer +// Does further checks in debug mode to see if this was a valid pointer. +static inline mi_page_t* mi_validate_ptr_page(const void* p, const char* msg) +{ + MI_UNUSED_RELEASE(msg); + #if MI_DEBUG + if mi_unlikely(((uintptr_t)p & (MI_INTPTR_SIZE - 1)) != 0 && !mi_option_is_enabled(mi_option_guarded_precise)) { + _mi_error_message(EINVAL, "%s: invalid (unaligned) pointer: %p\n", msg, p); + return NULL; + } + mi_page_t* page = _mi_safe_ptr_page(p); + if (p != NULL && page == NULL) { + _mi_error_message(EINVAL, "%s: invalid pointer: %p\n", msg, p); + } + return page; + #else + return _mi_ptr_page(p); + #endif +} + +// Free a block +// Fast path written carefully to prevent register spilling on the stack +void mi_free(void* p) mi_attr_noexcept +{ + mi_page_t* const page = mi_validate_ptr_page(p,"mi_free"); + if mi_unlikely(page==NULL) return; // page will be NULL if p==NULL + mi_assert_internal(p!=NULL && page!=NULL); + + const mi_threadid_t xtid = (_mi_prim_thread_id() ^ mi_page_xthread_id(page)); + if mi_likely(xtid == 0) { // `tid == mi_page_thread_id(page) && mi_page_flags(page) == 0` + 
// thread-local, aligned, and not a full page + mi_block_t* const block = (mi_block_t*)p; + mi_free_block_local(page, block, true /* track stats */, false /* no need to check if the page is full */); + } + else if (xtid <= MI_PAGE_FLAG_MASK) { // `tid == mi_page_thread_id(page) && mi_page_flags(page) != 0` + // page is local, but is full or contains (inner) aligned blocks; use generic path + mi_free_generic_local(page, p); + } + // free-ing in a page owned by a heap in another thread, or an abandoned page (not belonging to a heap) + else if ((xtid & MI_PAGE_FLAG_MASK) == 0) { // `tid != mi_page_thread_id(page) && mi_page_flags(page) == 0` + // blocks are aligned (and not a full page); push on the thread_free list + mi_block_t* const block = (mi_block_t*)p; + mi_free_block_mt(page,block); + } + else { + // page is full or contains (inner) aligned blocks; use generic multi-thread path + mi_free_generic_mt(page, p); + } +} + + +// ------------------------------------------------------ +// Multi-threaded Free (`_mt`) +// ------------------------------------------------------ +static bool mi_page_unown_from_free(mi_page_t* page, mi_block_t* mt_free); + +static inline bool mi_page_queue_len_is_atmost( mi_heap_t* heap, size_t block_size, long atmost) { + mi_page_queue_t* const pq = mi_page_queue(heap,block_size); + mi_assert_internal(pq!=NULL); + return (pq->count <= (size_t)atmost); +} + +static void mi_decl_noinline mi_free_try_collect_mt(mi_page_t* page, mi_block_t* mt_free) mi_attr_noexcept { + mi_assert_internal(mi_page_is_owned(page)); + mi_assert_internal(mi_page_is_abandoned(page)); + + // we own the page now.. + // safe to collect the thread atomic free list + // use the `_partly` version to avoid atomic operations since we already have the `mt_free` pointing into the thread free list + _mi_page_free_collect_partly(page, mt_free); + + #if MI_DEBUG > 1 + if (mi_page_is_singleton(page)) { mi_assert_internal(mi_page_all_free(page)); } + #endif + + // 1. 
free if the page is free now (this is updated by `_mi_page_free_collect_partly`) + if (mi_page_all_free(page)) + { + // first remove it from the abandoned pages in the arena (if mapped, this might wait for any readers to finish) + _mi_arenas_page_unabandon(page); + // we can free the page directly + _mi_arenas_page_free(page,NULL); + return; + } + + // 2. we can try to reclaim the page for ourselves + // note: reclaiming can improve benchmarks like `larson` or `rbtree-ck` a lot even in the single-threaded case, + // since free-ing from an owned page avoids atomic operations. However, if we reclaim too eagerly in + // a multi-threaded scenario we may start to hold on to too much memory and reduce reuse among threads. + // If the current heap is where the page originally came from, we reclaim much more eagerly while + // 'cross-thread' reclaiming on free is by default off (and we only 'reclaim' these by finding the abandoned + // pages when we allocate a fresh page). + if (page->block_size <= MI_SMALL_MAX_OBJ_SIZE) // only for small sized blocks + { + const long reclaim_on_free = _mi_option_get_fast(mi_option_page_reclaim_on_free); + if (reclaim_on_free >= 0) { // and reclaiming is allowed + // get our heap (with the right tag) + // note: don't use `mi_heap_get_default()` as we may just have terminated this thread and we should + // not reinitialize the heap for this thread. (can happen due to thread-local destructors for example -- issue #944) + mi_heap_t* heap = mi_prim_get_default_heap(); + if (heap != page->heap) { + if (mi_heap_is_initialized(heap)) { + heap = _mi_heap_by_tag(heap, page->heap_tag); + } + } + // can we reclaim into this heap? + if (heap != NULL && heap->allow_page_reclaim) + { + long max_reclaim = 0; + if mi_likely(heap == page->heap) { // did this page originate from the current heap? + // originating heap + max_reclaim = _mi_option_get_fast(heap->tld->is_in_threadpool ? 
mi_option_page_cross_thread_max_reclaim : mi_option_page_max_reclaim); + } + else if (reclaim_on_free == 1 && // if cross-thread is allowed + !heap->tld->is_in_threadpool && // and we are not part of a threadpool + !mi_page_is_used_at_frac(page,8) && // and the page is not too full + _mi_arena_memid_is_suitable(page->memid, heap->exclusive_arena)) { // and it fits our memory + // across threads + max_reclaim = _mi_option_get_fast(mi_option_page_cross_thread_max_reclaim); + } + + if (max_reclaim < 0 || mi_page_queue_len_is_atmost(heap, page->block_size, max_reclaim)) { // are we within the reclaim limit? + // reclaim the page into this heap + // first remove it from the abandoned pages in the arena -- this might wait for any readers to finish + _mi_arenas_page_unabandon(page); + _mi_heap_page_reclaim(heap, page); + mi_heap_stat_counter_increase(heap, pages_reclaim_on_free, 1); + return; + } + } + } + } + + // 3. if the page is unmapped, try to reabandon so it can possibly be mapped and found for allocations + if (!mi_page_is_used_at_frac(page, 8) && // only reabandon if a full page starts to have enough blocks available to prevent immediate re-abandon of a full page + !mi_page_is_abandoned_mapped(page) && page->memid.memkind == MI_MEM_ARENA && + _mi_arenas_page_try_reabandon_to_mapped(page)) + { + return; + } + + + // not reclaimed or free'd, unown again + // _mi_page_unown(page); + mi_page_unown_from_free(page, mt_free); +} + + +// release ownership of a page. This may free the page if all (other) blocks were concurrently +// freed in the meantime. Returns true if the page was freed. +// This is a specialized version of `mi_page_unown` to (try to) avoid calling `mi_page_free_collect` again. 
+static bool mi_page_unown_from_free(mi_page_t* page, mi_block_t* mt_free) { + mi_assert_internal(mi_page_is_owned(page)); + mi_assert_internal(mi_page_is_abandoned(page)); + mi_assert_internal(mt_free != NULL); + mi_assert_internal(page->used > 1); + mi_thread_free_t tf_expect = mi_tf_create(mt_free, true); + mi_thread_free_t tf_new = mi_tf_create(mt_free, false); + while mi_unlikely(!mi_atomic_cas_weak_acq_rel(&page->xthread_free, &tf_expect, tf_new)) { + mi_assert_internal(mi_tf_is_owned(tf_expect)); + while (mi_tf_block(tf_expect) != NULL) { + _mi_page_free_collect(page,false); // update used + if (mi_page_all_free(page)) { // it may become free just before unowning it + _mi_arenas_page_unabandon(page); + _mi_arenas_page_free(page,NULL); + return true; + } + tf_expect = mi_atomic_load_relaxed(&page->xthread_free); + } + mi_assert_internal(mi_tf_block(tf_expect)==NULL); + tf_new = mi_tf_create(NULL, false); + } + return false; +} + + +// ------------------------------------------------------ +// Usable size +// ------------------------------------------------------ + +// Bytes available in a block +static size_t mi_decl_noinline mi_page_usable_aligned_size_of(const mi_page_t* page, const void* p) mi_attr_noexcept { + const mi_block_t* block = _mi_page_ptr_unalign(page, p); + const size_t size = mi_page_usable_size_of(page, block); + const ptrdiff_t adjust = (uint8_t*)p - (uint8_t*)block; + mi_assert_internal(adjust >= 0 && (size_t)adjust <= size); + const size_t aligned_size = (size - adjust); + #if MI_GUARDED + if (mi_block_ptr_is_guarded(block, p)) { + return aligned_size - _mi_os_page_size(); + } + #endif + return aligned_size; +} + +static inline size_t _mi_usable_size(const void* p, const char* msg) mi_attr_noexcept { + const mi_page_t* const page = mi_validate_ptr_page(p,msg); + if mi_unlikely(page==NULL) return 0; + if mi_likely(!mi_page_has_aligned(page)) { + const mi_block_t* block = (const mi_block_t*)p; + return mi_page_usable_size_of(page, block); + 
} + else { + // split out to separate routine for improved code generation + return mi_page_usable_aligned_size_of(page, p); + } +} + +mi_decl_nodiscard size_t mi_usable_size(const void* p) mi_attr_noexcept { + return _mi_usable_size(p, "mi_usable_size"); +} + + +// ------------------------------------------------------ +// Free variants +// ------------------------------------------------------ + +void mi_free_size(void* p, size_t size) mi_attr_noexcept { + MI_UNUSED_RELEASE(size); + #if MI_DEBUG + const size_t available = _mi_usable_size(p,"mi_free_size"); + mi_assert(p == NULL || size <= available || available == 0 /* invalid pointer */ ); + #endif + mi_free(p); +} + +void mi_free_size_aligned(void* p, size_t size, size_t alignment) mi_attr_noexcept { + MI_UNUSED_RELEASE(alignment); + mi_assert(((uintptr_t)p % alignment) == 0); + mi_free_size(p,size); +} + +void mi_free_aligned(void* p, size_t alignment) mi_attr_noexcept { + MI_UNUSED_RELEASE(alignment); + mi_assert(((uintptr_t)p % alignment) == 0); + mi_free(p); +} + + +// ------------------------------------------------------ +// Check for double free in secure and debug mode +// This is somewhat expensive so only enabled for secure mode 4 +// ------------------------------------------------------ + +#if (MI_ENCODE_FREELIST && (MI_SECURE>=4 || MI_DEBUG!=0)) +// linear check if the free list contains a specific element +static bool mi_list_contains(const mi_page_t* page, const mi_block_t* list, const mi_block_t* elem) { + while (list != NULL) { + if (elem==list) return true; + list = mi_block_next(page, list); + } + return false; +} + +static mi_decl_noinline bool mi_check_is_double_freex(const mi_page_t* page, const mi_block_t* block) { + // The decoded value is in the same page (or NULL). 
+ // Walk the free lists to verify positively if it is already freed + if (mi_list_contains(page, page->free, block) || + mi_list_contains(page, page->local_free, block) || + mi_list_contains(page, mi_page_thread_free(page), block)) + { + _mi_error_message(EAGAIN, "double free detected of block %p with size %zu\n", block, mi_page_block_size(page)); + return true; + } + return false; +} + +#define mi_track_page(page,access) { size_t psize; void* pstart = _mi_page_start(_mi_page_segment(page),page,&psize); mi_track_mem_##access( pstart, psize); } + +static inline bool mi_check_is_double_free(const mi_page_t* page, const mi_block_t* block) { + bool is_double_free = false; + mi_block_t* n = mi_block_nextx(page, block, page->keys); // pretend it is freed, and get the decoded first field + if (((uintptr_t)n & (MI_INTPTR_SIZE-1))==0 && // quick check: aligned pointer? + (n==NULL || mi_is_in_same_page(block, n))) // quick check: in same page or NULL? + { + // Suspicious: decoded value a in block is in the same page (or NULL) -- maybe a double free? 
+ // (continue in separate function to improve code generation) + is_double_free = mi_check_is_double_freex(page, block); + } + return is_double_free; +} +#else +static inline bool mi_check_is_double_free(const mi_page_t* page, const mi_block_t* block) { + MI_UNUSED(page); + MI_UNUSED(block); + return false; +} +#endif + + +// --------------------------------------------------------------------------- +// Check for heap block overflow by setting up padding at the end of the block +// --------------------------------------------------------------------------- + +#if MI_PADDING // && !MI_TRACK_ENABLED +static bool mi_page_decode_padding(const mi_page_t* page, const mi_block_t* block, size_t* delta, size_t* bsize) { + *bsize = mi_page_usable_block_size(page); + const mi_padding_t* const padding = (mi_padding_t*)((uint8_t*)block + *bsize); + mi_track_mem_defined(padding,sizeof(mi_padding_t)); + *delta = padding->delta; + uint32_t canary = padding->canary; + uintptr_t keys[2]; + keys[0] = page->keys[0]; + keys[1] = page->keys[1]; + bool ok = (mi_ptr_encode_canary(page,block,keys) == canary && *delta <= *bsize); + mi_track_mem_noaccess(padding,sizeof(mi_padding_t)); + return ok; +} + +// Return the exact usable size of a block. +static size_t mi_page_usable_size_of(const mi_page_t* page, const mi_block_t* block) { + size_t bsize; + size_t delta; + bool ok = mi_page_decode_padding(page, block, &delta, &bsize); + mi_assert_internal(ok); mi_assert_internal(delta <= bsize); + return (ok ? bsize - delta : 0); +} + +// When a non-thread-local block is freed, it becomes part of the thread delayed free +// list that is freed later by the owning heap. If the exact usable size is too small to +// contain the pointer for the delayed list, then shrink the padding (by decreasing delta) +// so it will later not trigger an overflow error in `mi_free_block`. 
+void _mi_padding_shrink(const mi_page_t* page, const mi_block_t* block, const size_t min_size) { + size_t bsize; + size_t delta; + bool ok = mi_page_decode_padding(page, block, &delta, &bsize); + mi_assert_internal(ok); + if (!ok || (bsize - delta) >= min_size) return; // usually already enough space + mi_assert_internal(bsize >= min_size); + if (bsize < min_size) return; // should never happen + size_t new_delta = (bsize - min_size); + mi_assert_internal(new_delta < bsize); + mi_padding_t* padding = (mi_padding_t*)((uint8_t*)block + bsize); + mi_track_mem_defined(padding,sizeof(mi_padding_t)); + padding->delta = (uint32_t)new_delta; + mi_track_mem_noaccess(padding,sizeof(mi_padding_t)); +} +#else +static size_t mi_page_usable_size_of(const mi_page_t* page, const mi_block_t* block) { + MI_UNUSED(block); + return mi_page_usable_block_size(page); +} + +void _mi_padding_shrink(const mi_page_t* page, const mi_block_t* block, const size_t min_size) { + MI_UNUSED(page); + MI_UNUSED(block); + MI_UNUSED(min_size); +} +#endif + +#if MI_PADDING && MI_PADDING_CHECK + +static bool mi_verify_padding(const mi_page_t* page, const mi_block_t* block, size_t* size, size_t* wrong) { + size_t bsize; + size_t delta; + bool ok = mi_page_decode_padding(page, block, &delta, &bsize); + *size = *wrong = bsize; + if (!ok) return false; + mi_assert_internal(bsize >= delta); + *size = bsize - delta; + if (!mi_page_is_huge(page)) { + uint8_t* fill = (uint8_t*)block + bsize - delta; + const size_t maxpad = (delta > MI_MAX_ALIGN_SIZE ? 
MI_MAX_ALIGN_SIZE : delta); // check at most the first N padding bytes + mi_track_mem_defined(fill, maxpad); + for (size_t i = 0; i < maxpad; i++) { + if (fill[i] != MI_DEBUG_PADDING) { + *wrong = bsize - delta + i; + ok = false; + break; + } + } + mi_track_mem_noaccess(fill, maxpad); + } + return ok; +} + +static void mi_check_padding(const mi_page_t* page, const mi_block_t* block) { + size_t size; + size_t wrong; + if (!mi_verify_padding(page,block,&size,&wrong)) { + _mi_error_message(EFAULT, "buffer overflow in heap block %p of size %zu: write after %zu bytes\n", block, size, wrong ); + } +} + +#else + +static void mi_check_padding(const mi_page_t* page, const mi_block_t* block) { + MI_UNUSED(page); + MI_UNUSED(block); +} + +#endif + +// only maintain stats for smaller objects if requested +#if (MI_STAT>0) +static void mi_stat_free(const mi_page_t* page, const mi_block_t* block) { + MI_UNUSED(block); + mi_heap_t* const heap = mi_heap_get_default(); + const size_t bsize = mi_page_usable_block_size(page); + // #if (MI_STAT>1) + // const size_t usize = mi_page_usable_size_of(page, block); + // mi_heap_stat_decrease(heap, malloc_requested, usize); + // #endif + if (bsize <= MI_LARGE_MAX_OBJ_SIZE) { + mi_heap_stat_decrease(heap, malloc_normal, bsize); + #if (MI_STAT > 1) + mi_heap_stat_decrease(heap, malloc_bins[_mi_bin(bsize)], 1); + #endif + } + else { + const size_t bpsize = mi_page_block_size(page); // match stat in page.c:mi_huge_page_alloc + mi_heap_stat_decrease(heap, malloc_huge, bpsize); + } +} +#else +void mi_stat_free(const mi_page_t* page, const mi_block_t* block) { + MI_UNUSED(page); MI_UNUSED(block); +} +#endif + + +// Remove guard page when building with MI_GUARDED +#if MI_GUARDED +static void mi_block_unguard(mi_page_t* page, mi_block_t* block, void* p) { + MI_UNUSED(p); + mi_assert_internal(mi_block_ptr_is_guarded(block, p)); + mi_assert_internal(mi_page_has_aligned(page)); + mi_assert_internal((uint8_t*)p - (uint8_t*)block >= 
(ptrdiff_t)sizeof(mi_block_t)); + mi_assert_internal(block->next == MI_BLOCK_TAG_GUARDED); + + const size_t bsize = mi_page_block_size(page); + const size_t psize = _mi_os_page_size(); + mi_assert_internal(bsize > psize); + mi_assert_internal(!page->memid.is_pinned); + void* gpage = (uint8_t*)block + bsize - psize; + mi_assert_internal(_mi_is_aligned(gpage, psize)); + _mi_os_unprotect(gpage, psize); +} +#endif diff --git a/depends/mimalloc/src/heap.c b/depends/mimalloc/src/heap.c index 15ca360312f1..fb0a6c9d379f 100644 --- a/depends/mimalloc/src/heap.c +++ b/depends/mimalloc/src/heap.c @@ -6,10 +6,8 @@ terms of the MIT license. A copy of the license can be found in the file -----------------------------------------------------------------------------*/ #include "mimalloc.h" -#include "mimalloc-internal.h" -#include "mimalloc-atomic.h" - -#include // memset, memcpy +#include "mimalloc/internal.h" +#include "mimalloc/prim.h" // mi_prim_get_default_heap #if defined(_MSC_VER) && (_MSC_VER < 1920) #pragma warning(disable:4204) // non-constant aggregate initializer @@ -30,15 +28,18 @@ static bool mi_heap_visit_pages(mi_heap_t* heap, heap_page_visitor_fun* fn, void // visit all pages #if MI_DEBUG>1 size_t total = heap->page_count; - #endif size_t count = 0; + #endif + for (size_t i = 0; i <= MI_BIN_FULL; i++) { mi_page_queue_t* pq = &heap->pages[i]; mi_page_t* page = pq->first; while(page != NULL) { mi_page_t* next = page->next; // save next in case the page gets removed from the queue mi_assert_internal(mi_page_heap(page) == heap); + #if MI_DEBUG>1 count++; + #endif if (!fn(heap, pq, page, arg1, arg2)) return false; page = next; // and continue } @@ -54,8 +55,6 @@ static bool mi_heap_page_is_valid(mi_heap_t* heap, mi_page_queue_t* pq, mi_page_ MI_UNUSED(arg2); MI_UNUSED(pq); mi_assert_internal(mi_page_heap(page) == heap); - mi_segment_t* segment = _mi_page_segment(page); - mi_assert_internal(segment->thread_id == heap->thread_id); 
mi_assert_expensive(_mi_page_is_valid(page)); return true; } @@ -64,6 +63,9 @@ static bool mi_heap_page_is_valid(mi_heap_t* heap, mi_page_queue_t* pq, mi_page_ static bool mi_heap_is_valid(mi_heap_t* heap) { mi_assert_internal(heap!=NULL); mi_heap_visit_pages(heap, &mi_heap_page_is_valid, NULL, NULL); + for (size_t bin = 0; bin < MI_BIN_COUNT; bin++) { + mi_assert_internal(_mi_page_queue_is_valid(heap, &heap->pages[bin])); + } return true; } #endif @@ -92,9 +94,9 @@ static bool mi_heap_page_collect(mi_heap_t* heap, mi_page_queue_t* pq, mi_page_t mi_collect_t collect = *((mi_collect_t*)arg_collect); _mi_page_free_collect(page, collect >= MI_FORCE); if (mi_page_all_free(page)) { - // no more used blocks, free the page. + // no more used blocks, free the page. // note: this will free retired pages as well. - _mi_page_free(page, pq, collect >= MI_FORCE); + _mi_page_free(page, pq); } else if (collect == MI_ABANDON) { // still used blocks but the thread is done; abandon the page @@ -103,70 +105,32 @@ static bool mi_heap_page_collect(mi_heap_t* heap, mi_page_queue_t* pq, mi_page_t return true; // don't break } -static bool mi_heap_page_never_delayed_free(mi_heap_t* heap, mi_page_queue_t* pq, mi_page_t* page, void* arg1, void* arg2) { - MI_UNUSED(arg1); - MI_UNUSED(arg2); - MI_UNUSED(heap); - MI_UNUSED(pq); - _mi_page_use_delayed_free(page, MI_NEVER_DELAYED_FREE, false); - return true; // don't break -} static void mi_heap_collect_ex(mi_heap_t* heap, mi_collect_t collect) { if (heap==NULL || !mi_heap_is_initialized(heap)) return; + mi_assert_expensive(mi_heap_is_valid(heap)); - const bool force = collect >= MI_FORCE; + const bool force = (collect >= MI_FORCE); _mi_deferred_free(heap, force); - // note: never reclaim on collect but leave it to threads that need storage to reclaim - const bool force_main = - #ifdef NDEBUG - collect == MI_FORCE - #else - collect >= MI_FORCE - #endif - && _mi_is_main_thread() && mi_heap_is_backing(heap) && !heap->no_reclaim; + // 
python/cpython#112532: we may be called from a thread that is not the owner of the heap + // const bool is_main_thread = (_mi_is_main_thread() && heap->thread_id == _mi_thread_id()); - if (force_main) { - // the main thread is abandoned (end-of-program), try to reclaim all abandoned segments. - // if all memory is freed by now, all segments should be freed. - _mi_abandoned_reclaim_all(heap, &heap->tld->segments); - } - - // if abandoning, mark all pages to no longer add to delayed_free - if (collect == MI_ABANDON) { - mi_heap_visit_pages(heap, &mi_heap_page_never_delayed_free, NULL, NULL); - } - - // free all current thread delayed blocks. - // (if abandoning, after this there are no more thread-delayed references into the pages.) - _mi_heap_delayed_free_all(heap); + // if (_mi_is_main_thread()) { mi_debug_show_arenas(true, false, false); } // collect retired pages _mi_heap_collect_retired(heap, force); // collect all pages owned by this thread mi_heap_visit_pages(heap, &mi_heap_page_collect, &collect, NULL); - mi_assert_internal( collect != MI_ABANDON || mi_atomic_load_ptr_acquire(mi_block_t,&heap->thread_delayed_free) == NULL ); - - // collect abandoned segments (in particular, decommit expired parts of segments in the abandoned segment list) - // note: forced decommit can be quite expensive if many threads are created/destroyed so we do not force on abandonment - _mi_abandoned_collect(heap, collect == MI_FORCE /* force? 
*/, &heap->tld->segments); - - // collect segment local caches - if (force) { - _mi_segment_thread_collect(&heap->tld->segments); - } - // decommit in global segment caches - // note: forced decommit can be quite expensive if many threads are created/destroyed so we do not force on abandonment - _mi_segment_cache_collect( collect == MI_FORCE, &heap->tld->os); + // collect arenas (this is program wide so don't force purges on abandonment of threads) + //mi_atomic_storei64_release(&heap->tld->subproc->purge_expire, 1); + _mi_arenas_collect(collect == MI_FORCE /* force purge? */, collect >= MI_FORCE /* visit all? */, heap->tld); - // collect regions on program-exit (or shared library unload) - if (force && _mi_is_main_thread() && mi_heap_is_backing(heap)) { - //_mi_mem_collect(&heap->tld->os); - } + // merge statistics + if (collect <= MI_FORCE) { _mi_stats_merge_thread(heap->tld); } } void _mi_heap_collect_abandon(mi_heap_t* heap) { @@ -178,7 +142,7 @@ void mi_heap_collect(mi_heap_t* heap, bool force) mi_attr_noexcept { } void mi_collect(bool force) mi_attr_noexcept { - mi_heap_collect(mi_get_default_heap(), force); + mi_heap_collect(mi_prim_get_default_heap(), force); } @@ -187,66 +151,133 @@ void mi_collect(bool force) mi_attr_noexcept { ----------------------------------------------------------- */ mi_heap_t* mi_heap_get_default(void) { - mi_thread_init(); - return mi_get_default_heap(); + mi_heap_t* heap = mi_prim_get_default_heap(); + if mi_unlikely(!mi_heap_is_initialized(heap)) { + mi_thread_init(); + heap = mi_prim_get_default_heap(); + } + return heap; +} + +static bool mi_heap_is_default(const mi_heap_t* heap) { + return (heap == mi_prim_get_default_heap()); } + mi_heap_t* mi_heap_get_backing(void) { mi_heap_t* heap = mi_heap_get_default(); mi_assert_internal(heap!=NULL); mi_heap_t* bheap = heap->tld->heap_backing; mi_assert_internal(bheap!=NULL); - mi_assert_internal(bheap->thread_id == _mi_thread_id()); + mi_assert_internal(bheap->tld->thread_id == 
_mi_thread_id()); return bheap; } -mi_decl_nodiscard mi_heap_t* mi_heap_new_in_arena( mi_arena_id_t arena_id ) { - mi_heap_t* bheap = mi_heap_get_backing(); - mi_heap_t* heap = mi_heap_malloc_tp(bheap, mi_heap_t); // todo: OS allocate in secure mode? - if (heap==NULL) return NULL; +// todo: make order of parameters consistent (but would that break compat with CPython?) +void _mi_heap_init(mi_heap_t* heap, mi_arena_id_t arena_id, bool allow_destroy, uint8_t heap_tag, mi_tld_t* tld) +{ + mi_assert_internal(heap!=NULL); + mi_memid_t memid = heap->memid; _mi_memcpy_aligned(heap, &_mi_heap_empty, sizeof(mi_heap_t)); - heap->tld = bheap->tld; - heap->thread_id = _mi_thread_id(); - heap->arena_id = arena_id; - _mi_random_split(&bheap->random, &heap->random); + heap->memid = memid; + heap->tld = tld; // avoid reading the thread-local tld during initialization + heap->tag = heap_tag; + heap->numa_node = tld->numa_node; + heap->exclusive_arena = _mi_arena_from_id(arena_id); + heap->allow_page_reclaim = (!allow_destroy && mi_option_get(mi_option_page_reclaim_on_free) >= 0); + heap->allow_page_abandon = (!allow_destroy && mi_option_get(mi_option_page_full_retain) >= 0); + heap->page_full_retain = mi_option_get_clamp(mi_option_page_full_retain, -1, 32); + if (heap->tld->is_in_threadpool) { + // if we run as part of a thread pool it is better to not arbitrarily reclaim abandoned pages into our heap. + // this is checked in `free.c:mi_free_try_collect_mt` + // .. 
but abandoning is good in this case: halve the full page retain (possibly to 0) + // (so blocked threads do not hold on to too much memory) + if (heap->page_full_retain > 0) { + heap->page_full_retain = heap->page_full_retain / 4; + } + } + + if (heap->tld->heap_backing == NULL) { + heap->tld->heap_backing = heap; // first heap becomes the backing heap + _mi_random_init(&heap->random); + } + else { + _mi_random_split(&heap->tld->heap_backing->random, &heap->random); + } heap->cookie = _mi_heap_random_next(heap) | 1; - heap->keys[0] = _mi_heap_random_next(heap); - heap->keys[1] = _mi_heap_random_next(heap); - heap->no_reclaim = true; // don't reclaim abandoned pages or otherwise destroy is unsafe + //heap->keys[0] = _mi_heap_random_next(heap); + //heap->keys[1] = _mi_heap_random_next(heap);*/ + _mi_heap_guarded_init(heap); + // push on the thread local heaps list heap->next = heap->tld->heaps; heap->tld->heaps = heap; +} + +mi_heap_t* _mi_heap_create(int heap_tag, bool allow_destroy, mi_arena_id_t arena_id, mi_tld_t* tld) { + mi_assert_internal(tld!=NULL); + mi_assert(heap_tag >= 0 && heap_tag < 256); + // allocate and initialize a heap + mi_memid_t memid; + mi_heap_t* heap; + if (arena_id == _mi_arena_id_none()) { + heap = (mi_heap_t*)_mi_meta_zalloc(sizeof(mi_heap_t), &memid); + } + else { + // heaps associated wita a specific arena are allocated in that arena + // note: takes up at least one slice which is quite wasteful... 
+ heap = (mi_heap_t*)_mi_arenas_alloc(_mi_subproc(), _mi_align_up(sizeof(mi_heap_t),MI_ARENA_MIN_OBJ_SIZE), true, true, _mi_arena_from_id(arena_id), tld->thread_seq, tld->numa_node, &memid); + } + if (heap==NULL) { + _mi_error_message(ENOMEM, "unable to allocate heap meta-data\n"); + return NULL; + } + heap->memid = memid; + _mi_heap_init(heap, arena_id, allow_destroy, (uint8_t)heap_tag, tld); return heap; } +mi_decl_nodiscard mi_heap_t* mi_heap_new_ex(int heap_tag, bool allow_destroy, mi_arena_id_t arena_id) { + mi_heap_t* bheap = mi_heap_get_backing(); + mi_assert_internal(bheap != NULL); + return _mi_heap_create(heap_tag, allow_destroy, arena_id, bheap->tld); +} + +mi_decl_nodiscard mi_heap_t* mi_heap_new_in_arena(mi_arena_id_t arena_id) { + return mi_heap_new_ex(0 /* default heap tag */, false /* allow destroy? */, arena_id); +} + mi_decl_nodiscard mi_heap_t* mi_heap_new(void) { - return mi_heap_new_in_arena(_mi_arena_id_none()); + // don't reclaim abandoned memory or otherwise destroy is unsafe + return mi_heap_new_ex(0 /* default heap tag */, true /* allow destroy? */, _mi_arena_id_none()); } -bool _mi_heap_memid_is_suitable(mi_heap_t* heap, size_t memid) { - return _mi_arena_memid_is_suitable(memid, heap->arena_id); +bool _mi_heap_memid_is_suitable(mi_heap_t* heap, mi_memid_t memid) { + return _mi_arena_memid_is_suitable(memid, heap->exclusive_arena); } uintptr_t _mi_heap_random_next(mi_heap_t* heap) { return _mi_random_next(&heap->random); } +void mi_heap_set_numa_affinity(mi_heap_t* heap, int numa_node) { + if (heap == NULL) return; + heap->numa_node = (numa_node < 0 ? -1 : numa_node % _mi_os_numa_node_count()); +} + // zero out the page queues static void mi_heap_reset_pages(mi_heap_t* heap) { mi_assert_internal(heap != NULL); mi_assert_internal(mi_heap_is_initialized(heap)); // TODO: copy full empty heap instead? 
- memset(&heap->pages_free_direct, 0, sizeof(heap->pages_free_direct)); -#ifdef MI_MEDIUM_DIRECT - memset(&heap->pages_free_medium, 0, sizeof(heap->pages_free_medium)); -#endif + _mi_memset(&heap->pages_free_direct, 0, sizeof(heap->pages_free_direct)); _mi_memcpy_aligned(&heap->pages, &_mi_heap_empty.pages, sizeof(heap->pages)); - heap->thread_delayed_free = NULL; + // heap->thread_delayed_free = NULL; heap->page_count = 0; } // called from `mi_heap_destroy` and `mi_heap_delete` to free the internal heap resources. -static void mi_heap_free(mi_heap_t* heap) { +static void mi_heap_free(mi_heap_t* heap, bool do_free_mem) { mi_assert(heap != NULL); mi_assert_internal(mi_heap_is_initialized(heap)); if (heap==NULL || !mi_heap_is_initialized(heap)) return; @@ -260,7 +291,7 @@ static void mi_heap_free(mi_heap_t* heap) { // remove ourselves from the thread local heaps list // linear search but we expect the number of heaps to be relatively small mi_heap_t* prev = NULL; - mi_heap_t* curr = heap->tld->heaps; + mi_heap_t* curr = heap->tld->heaps; while (curr != heap && curr != NULL) { prev = curr; curr = curr->next; @@ -273,9 +304,23 @@ static void mi_heap_free(mi_heap_t* heap) { mi_assert_internal(heap->tld->heaps != NULL); // and free the used memory - mi_free(heap); + if (do_free_mem) { + _mi_meta_free(heap, sizeof(*heap), heap->memid); + } } +// return a heap on the same thread as `heap` specialized for the specified tag (if it exists) +mi_heap_t* _mi_heap_by_tag(mi_heap_t* heap, uint8_t tag) { + if (heap->tag == tag) { + return heap; + } + for (mi_heap_t *curr = heap->tld->heaps; curr != NULL; curr = curr->next) { + if (curr->tag == tag) { + return curr; + } + } + return NULL; +} /* ----------------------------------------------------------- Heap destroy @@ -284,33 +329,27 @@ static void mi_heap_free(mi_heap_t* heap) { static bool _mi_heap_page_destroy(mi_heap_t* heap, mi_page_queue_t* pq, mi_page_t* page, void* arg1, void* arg2) { MI_UNUSED(arg1); MI_UNUSED(arg2); - 
MI_UNUSED(heap); MI_UNUSED(pq); // ensure no more thread_delayed_free will be added - _mi_page_use_delayed_free(page, MI_NEVER_DELAYED_FREE, false); + //_mi_page_use_delayed_free(page, MI_NEVER_DELAYED_FREE, false); // stats const size_t bsize = mi_page_block_size(page); - if (bsize > MI_MEDIUM_OBJ_SIZE_MAX) { - if (bsize <= MI_LARGE_OBJ_SIZE_MAX) { - mi_heap_stat_decrease(heap, large, bsize); - } - else { - mi_heap_stat_decrease(heap, huge, bsize); - } + if (bsize > MI_LARGE_MAX_OBJ_SIZE) { + mi_heap_stat_decrease(heap, malloc_huge, bsize); } -#if (MI_STAT) + #if (MI_STAT>0) _mi_page_free_collect(page, false); // update used count const size_t inuse = page->used; - if (bsize <= MI_LARGE_OBJ_SIZE_MAX) { - mi_heap_stat_decrease(heap, normal, bsize * inuse); -#if (MI_STAT>1) - mi_heap_stat_decrease(heap, normal_bins[_mi_bin(bsize)], inuse); -#endif + if (bsize <= MI_LARGE_MAX_OBJ_SIZE) { + mi_heap_stat_decrease(heap, malloc_normal, bsize * inuse); + #if (MI_STAT>1) + mi_heap_stat_decrease(heap, malloc_bins[_mi_bin(bsize)], inuse); + #endif } - mi_heap_stat_decrease(heap, malloc, bsize * inuse); // todo: off for aligned blocks... -#endif + // mi_heap_stat_decrease(heap, malloc_requested, bsize * inuse); // todo: off for aligned blocks... + #endif /// pretend it is all free now mi_assert_internal(mi_page_thread_free(page) == NULL); @@ -320,7 +359,8 @@ static bool _mi_heap_page_destroy(mi_heap_t* heap, mi_page_queue_t* pq, mi_page_ // mi_page_free(page,false); page->next = NULL; page->prev = NULL; - _mi_segment_page_free(page,false /* no force? 
*/, &heap->tld->segments); + mi_page_set_heap(page, NULL); + _mi_arenas_page_free(page, heap->tld); return true; // keep going } @@ -330,62 +370,89 @@ void _mi_heap_destroy_pages(mi_heap_t* heap) { mi_heap_reset_pages(heap); } +#if MI_TRACK_HEAP_DESTROY +static bool mi_cdecl mi_heap_track_block_free(const mi_heap_t* heap, const mi_heap_area_t* area, void* block, size_t block_size, void* arg) { + MI_UNUSED(heap); MI_UNUSED(area); MI_UNUSED(arg); MI_UNUSED(block_size); + mi_track_free_size(block,mi_usable_size(block)); + return true; +} +#endif + void mi_heap_destroy(mi_heap_t* heap) { mi_assert(heap != NULL); mi_assert(mi_heap_is_initialized(heap)); - mi_assert(heap->no_reclaim); + mi_assert(!heap->allow_page_reclaim); + mi_assert(!heap->allow_page_abandon); mi_assert_expensive(mi_heap_is_valid(heap)); if (heap==NULL || !mi_heap_is_initialized(heap)) return; - if (!heap->no_reclaim) { - // don't free in case it may contain reclaimed pages + #if MI_GUARDED + // _mi_warning_message("'mi_heap_destroy' called but MI_GUARDED is enabled -- using `mi_heap_delete` instead (heap at %p)\n", heap); + mi_heap_delete(heap); + return; + #else + if (heap->allow_page_reclaim) { + _mi_warning_message("'mi_heap_destroy' called but ignored as the heap was not created with 'allow_destroy' (heap at %p)\n", heap); + // don't free in case it may contain reclaimed pages, mi_heap_delete(heap); } else { + // track all blocks as freed + #if MI_TRACK_HEAP_DESTROY + mi_heap_visit_blocks(heap, true, mi_heap_track_block_free, NULL); + #endif // free all pages _mi_heap_destroy_pages(heap); - mi_heap_free(heap); + mi_heap_free(heap,true); } + #endif } - +// forcefully destroy all heaps in the current thread +void _mi_heap_unsafe_destroy_all(mi_heap_t* heap) { + mi_assert_internal(heap != NULL); + if (heap == NULL) return; + mi_heap_t* curr = heap->tld->heaps; + while (curr != NULL) { + mi_heap_t* next = curr->next; + if (!curr->allow_page_reclaim) { + mi_heap_destroy(curr); + } + else { + 
_mi_heap_destroy_pages(curr); + } + curr = next; + } +} /* ----------------------------------------------------------- Safe Heap delete ----------------------------------------------------------- */ // Transfer the pages from one heap to the other -static void mi_heap_absorb(mi_heap_t* heap, mi_heap_t* from) { - mi_assert_internal(heap!=NULL); - if (from==NULL || from->page_count == 0) return; - - // reduce the size of the delayed frees - _mi_heap_delayed_free_partial(from); - - // transfer all pages by appending the queues; this will set a new heap field - // so threads may do delayed frees in either heap for a while. - // note: appending waits for each page to not be in the `MI_DELAYED_FREEING` state - // so after this only the new heap will get delayed frees - for (size_t i = 0; i <= MI_BIN_FULL; i++) { - mi_page_queue_t* pq = &heap->pages[i]; - mi_page_queue_t* append = &from->pages[i]; - size_t pcount = _mi_page_queue_append(heap, pq, append); - heap->page_count += pcount; - from->page_count -= pcount; - } - mi_assert_internal(from->page_count == 0); - - // and do outstanding delayed frees in the `from` heap - // note: be careful here as the `heap` field in all those pages no longer point to `from`, - // turns out to be ok as `_mi_heap_delayed_free` only visits the list and calls a - // the regular `_mi_free_delayed_block` which is safe. 
- _mi_heap_delayed_free_all(from); - #if !defined(_MSC_VER) || (_MSC_VER > 1900) // somehow the following line gives an error in VS2015, issue #353 - mi_assert_internal(mi_atomic_load_ptr_relaxed(mi_block_t,&from->thread_delayed_free) == NULL); - #endif - - // and reset the `from` heap - mi_heap_reset_pages(from); -} +//static void mi_heap_absorb(mi_heap_t* heap, mi_heap_t* from) { +// mi_assert_internal(heap!=NULL); +// if (from==NULL || from->page_count == 0) return; +// +// // transfer all pages by appending the queues; this will set a new heap field +// for (size_t i = 0; i <= MI_BIN_FULL; i++) { +// mi_page_queue_t* pq = &heap->pages[i]; +// mi_page_queue_t* append = &from->pages[i]; +// size_t pcount = _mi_page_queue_append(heap, pq, append); +// heap->page_count += pcount; +// from->page_count -= pcount; +// } +// mi_assert_internal(from->page_count == 0); +// +// // and reset the `from` heap +// mi_heap_reset_pages(from); +//} + +//// are two heaps compatible with respect to heap-tag, exclusive arena etc. +//static bool mi_heaps_are_compatible(mi_heap_t* heap1, mi_heap_t* heap2) { +// return (heap1->tag == heap2->tag && // store same kind of objects +// heap1->tld->subproc == heap2->tld->subproc && // same sub-process +// heap1->arena_id == heap2->arena_id); // same arena preference +//} // Safe delete a heap without freeing any still allocated blocks in that heap. 
void mi_heap_delete(mi_heap_t* heap) @@ -395,16 +462,11 @@ void mi_heap_delete(mi_heap_t* heap) mi_assert_expensive(mi_heap_is_valid(heap)); if (heap==NULL || !mi_heap_is_initialized(heap)) return; - if (!mi_heap_is_backing(heap)) { - // tranfer still used pages to the backing heap - mi_heap_absorb(heap->tld->heap_backing, heap); - } - else { - // the backing heap abandons its pages - _mi_heap_collect_abandon(heap); - } + // abandon all pages + _mi_heap_collect_abandon(heap); + mi_assert_internal(heap->page_count==0); - mi_heap_free(heap); + mi_heap_free(heap,true); } mi_heap_t* mi_heap_set_default(mi_heap_t* heap) { @@ -412,13 +474,69 @@ mi_heap_t* mi_heap_set_default(mi_heap_t* heap) { mi_assert(mi_heap_is_initialized(heap)); if (heap==NULL || !mi_heap_is_initialized(heap)) return NULL; mi_assert_expensive(mi_heap_is_valid(heap)); - mi_heap_t* old = mi_get_default_heap(); + mi_heap_t* old = mi_prim_get_default_heap(); _mi_heap_set_default_direct(heap); return old; } +/* ----------------------------------------------------------- + Load/unload heaps +----------------------------------------------------------- */ +void mi_heap_unload(mi_heap_t* heap) { + mi_assert(mi_heap_is_initialized(heap)); + mi_assert_expensive(mi_heap_is_valid(heap)); + if (heap==NULL || !mi_heap_is_initialized(heap)) return; + if (heap->exclusive_arena == NULL) { + _mi_warning_message("cannot unload heaps that are not associated with an exclusive arena\n"); + return; + } + // abandon all pages so all thread'id in the pages are cleared + _mi_heap_collect_abandon(heap); + mi_assert_internal(heap->page_count==0); + + // remove from heap list + mi_heap_free(heap, false /* but don't actually free the memory */); + + // disassociate from the current thread-local and static state + heap->tld = NULL; + return; +} + +bool mi_heap_reload(mi_heap_t* heap, mi_arena_id_t arena_id) { + mi_assert(mi_heap_is_initialized(heap)); + if (heap==NULL || !mi_heap_is_initialized(heap)) return false; + if 
(heap->exclusive_arena == NULL) { + _mi_warning_message("cannot reload heaps that were not associated with an exclusive arena\n"); + return false; + } + if (heap->tld != NULL) { + _mi_warning_message("cannot reload heaps that were not unloaded first\n"); + return false; + } + mi_arena_t* arena = _mi_arena_from_id(arena_id); + if (heap->exclusive_arena != arena) { + _mi_warning_message("trying to reload a heap at a different arena address: %p vs %p\n", heap->exclusive_arena, arena); + return false; + } + + mi_assert_internal(heap->page_count==0); + + // re-associate with the current thread-local and static state + heap->tld = mi_heap_get_default()->tld; + + // reinit direct pages (as we may be in a different process) + mi_assert_internal(heap->page_count == 0); + for (size_t i = 0; i < MI_PAGES_DIRECT; i++) { + heap->pages_free_direct[i] = (mi_page_t*)&_mi_page_empty; + } + + // push on the thread local heaps list + heap->next = heap->tld->heaps; + heap->tld->heaps = heap; + return true; +} /* ----------------------------------------------------------- Analysis @@ -427,11 +545,8 @@ mi_heap_t* mi_heap_set_default(mi_heap_t* heap) { // static since it is not thread safe to access heaps from other threads. static mi_heap_t* mi_heap_of_block(const void* p) { if (p == NULL) return NULL; - mi_segment_t* segment = _mi_ptr_segment(p); - bool valid = (_mi_ptr_cookie(segment) == segment->cookie); - mi_assert_internal(valid); - if mi_unlikely(!valid) return NULL; - return mi_page_heap(_mi_segment_page_of(segment,p)); + mi_page_t* page = _mi_ptr_page(p); // TODO: check pointer validity? 
+ return mi_page_heap(page); } bool mi_heap_contains_block(mi_heap_t* heap, const void* p) { @@ -445,8 +560,7 @@ static bool mi_heap_page_check_owned(mi_heap_t* heap, mi_page_queue_t* pq, mi_pa MI_UNUSED(heap); MI_UNUSED(pq); bool* found = (bool*)vfound; - mi_segment_t* segment = _mi_page_segment(page); - void* start = _mi_page_start(segment, page, NULL); + void* start = mi_page_start(page); void* end = (uint8_t*)start + (page->capacity * mi_page_block_size(page)); *found = (p >= start && p < end); return (!*found); // continue if not found @@ -462,7 +576,7 @@ bool mi_heap_check_owned(mi_heap_t* heap, const void* p) { } bool mi_check_owned(const void* p) { - return mi_heap_check_owned(mi_get_default_heap(), p); + return mi_heap_check_owned(mi_prim_get_default_heap(), p); } /* ----------------------------------------------------------- @@ -471,90 +585,152 @@ bool mi_check_owned(const void* p) { enable visiting all blocks of all heaps across threads ----------------------------------------------------------- */ -// Separate struct to keep `mi_page_t` out of the public interface -typedef struct mi_heap_area_ex_s { - mi_heap_area_t area; - mi_page_t* page; -} mi_heap_area_ex_t; +void _mi_heap_area_init(mi_heap_area_t* area, mi_page_t* page) { + const size_t bsize = mi_page_block_size(page); + const size_t ubsize = mi_page_usable_block_size(page); + area->reserved = page->reserved * bsize; + area->committed = page->capacity * bsize; + area->blocks = mi_page_start(page); + area->used = page->used; // number of blocks in use (#553) + area->block_size = ubsize; + area->full_block_size = bsize; + area->heap_tag = page->heap_tag; +} -static bool mi_heap_area_visit_blocks(const mi_heap_area_ex_t* xarea, mi_block_visit_fun* visitor, void* arg) { - mi_assert(xarea != NULL); - if (xarea==NULL) return true; - const mi_heap_area_t* area = &xarea->area; - mi_page_t* page = xarea->page; + +static void mi_get_fast_divisor(size_t divisor, uint64_t* magic, size_t* shift) { + 
mi_assert_internal(divisor > 0 && divisor <= UINT32_MAX); + *shift = MI_SIZE_BITS - mi_clz(divisor - 1); + *magic = ((((uint64_t)1 << 32) * (((uint64_t)1 << *shift) - divisor)) / divisor + 1); +} + +static size_t mi_fast_divide(size_t n, uint64_t magic, size_t shift) { + mi_assert_internal(n <= UINT32_MAX); + const uint64_t hi = ((uint64_t)n * magic) >> 32; + return (size_t)((hi + n) >> shift); +} + +bool _mi_heap_area_visit_blocks(const mi_heap_area_t* area, mi_page_t* page, mi_block_visit_fun* visitor, void* arg) { + mi_assert(area != NULL); + if (area==NULL) return true; mi_assert(page != NULL); if (page == NULL) return true; - _mi_page_free_collect(page,true); + _mi_page_free_collect(page,true); // collect both thread_delayed and local_free mi_assert_internal(page->local_free == NULL); if (page->used == 0) return true; - const size_t bsize = mi_page_block_size(page); - const size_t ubsize = mi_page_usable_block_size(page); // without padding - size_t psize; - uint8_t* pstart = _mi_page_start(_mi_page_segment(page), page, &psize); + size_t psize; + uint8_t* const pstart = mi_page_area(page, &psize); + mi_heap_t* const heap = mi_page_heap(page); + const size_t bsize = mi_page_block_size(page); + const size_t ubsize = mi_page_usable_block_size(page); // without padding + // optimize page with one block if (page->capacity == 1) { - // optimize page with one block mi_assert_internal(page->used == 1 && page->free == NULL); return visitor(mi_page_heap(page), area, pstart, ubsize, arg); } + mi_assert(bsize <= UINT32_MAX); + + // optimize full pages + if (page->used == page->capacity) { + uint8_t* block = pstart; + for (size_t i = 0; i < page->capacity; i++) { + if (!visitor(heap, area, block, ubsize, arg)) return false; + block += bsize; + } + return true; + } // create a bitmap of free blocks. 
#define MI_MAX_BLOCKS (MI_SMALL_PAGE_SIZE / sizeof(void*)) - uintptr_t free_map[MI_MAX_BLOCKS / sizeof(uintptr_t)]; - memset(free_map, 0, sizeof(free_map)); + uintptr_t free_map[MI_MAX_BLOCKS / MI_INTPTR_BITS]; + const uintptr_t bmapsize = _mi_divide_up(page->capacity, MI_INTPTR_BITS); + memset(free_map, 0, bmapsize * sizeof(intptr_t)); + if (page->capacity % MI_INTPTR_BITS != 0) { + // mark left-over bits at the end as free + size_t shift = (page->capacity % MI_INTPTR_BITS); + uintptr_t mask = (UINTPTR_MAX << shift); + free_map[bmapsize - 1] = mask; + } + + // fast repeated division by the block size + uint64_t magic; + size_t shift; + mi_get_fast_divisor(bsize, &magic, &shift); + #if MI_DEBUG>1 size_t free_count = 0; - for (mi_block_t* block = page->free; block != NULL; block = mi_block_next(page,block)) { + #endif + for (mi_block_t* block = page->free; block != NULL; block = mi_block_next(page, block)) { + #if MI_DEBUG>1 free_count++; + #endif mi_assert_internal((uint8_t*)block >= pstart && (uint8_t*)block < (pstart + psize)); size_t offset = (uint8_t*)block - pstart; mi_assert_internal(offset % bsize == 0); - size_t blockidx = offset / bsize; // Todo: avoid division? 
- mi_assert_internal( blockidx < MI_MAX_BLOCKS); - size_t bitidx = (blockidx / sizeof(uintptr_t)); - size_t bit = blockidx - (bitidx * sizeof(uintptr_t)); + mi_assert_internal(offset <= UINT32_MAX); + size_t blockidx = mi_fast_divide(offset, magic, shift); + mi_assert_internal(blockidx == offset / bsize); + mi_assert_internal(blockidx < MI_MAX_BLOCKS); + size_t bitidx = (blockidx / MI_INTPTR_BITS); + size_t bit = blockidx - (bitidx * MI_INTPTR_BITS); free_map[bitidx] |= ((uintptr_t)1 << bit); } mi_assert_internal(page->capacity == (free_count + page->used)); // walk through all blocks skipping the free ones + #if MI_DEBUG>1 size_t used_count = 0; - for (size_t i = 0; i < page->capacity; i++) { - size_t bitidx = (i / sizeof(uintptr_t)); - size_t bit = i - (bitidx * sizeof(uintptr_t)); - uintptr_t m = free_map[bitidx]; - if (bit == 0 && m == UINTPTR_MAX) { - i += (sizeof(uintptr_t) - 1); // skip a run of free blocks + #endif + uint8_t* block = pstart; + for (size_t i = 0; i < bmapsize; i++) { + if (free_map[i] == 0) { + // every block is in use + for (size_t j = 0; j < MI_INTPTR_BITS; j++) { + #if MI_DEBUG>1 + used_count++; + #endif + if (!visitor(heap, area, block, ubsize, arg)) return false; + block += bsize; + } } - else if ((m & ((uintptr_t)1 << bit)) == 0) { - used_count++; - uint8_t* block = pstart + (i * bsize); - if (!visitor(mi_page_heap(page), area, block, ubsize, arg)) return false; + else { + // visit the used blocks in the mask + uintptr_t m = ~free_map[i]; + while (m != 0) { + #if MI_DEBUG>1 + used_count++; + #endif + size_t bitidx = mi_ctz(m); + if (!visitor(heap, area, block + (bitidx * bsize), ubsize, arg)) return false; + m &= m - 1; // clear least significant bit + } + block += bsize * MI_INTPTR_BITS; } } mi_assert_internal(page->used == used_count); return true; } -typedef bool (mi_heap_area_visit_fun)(const mi_heap_t* heap, const mi_heap_area_ex_t* area, void* arg); +// Separate struct to keep `mi_page_t` out of the public interface +typedef 
struct mi_heap_area_ex_s { + mi_heap_area_t area; + mi_page_t* page; +} mi_heap_area_ex_t; + +typedef bool (mi_heap_area_visit_fun)(const mi_heap_t* heap, const mi_heap_area_ex_t* area, void* arg); + static bool mi_heap_visit_areas_page(mi_heap_t* heap, mi_page_queue_t* pq, mi_page_t* page, void* vfun, void* arg) { MI_UNUSED(heap); MI_UNUSED(pq); mi_heap_area_visit_fun* fun = (mi_heap_area_visit_fun*)vfun; mi_heap_area_ex_t xarea; - const size_t bsize = mi_page_block_size(page); - const size_t ubsize = mi_page_usable_block_size(page); xarea.page = page; - xarea.area.reserved = page->reserved * bsize; - xarea.area.committed = page->capacity * bsize; - xarea.area.blocks = _mi_page_start(_mi_page_segment(page), page, NULL); - xarea.area.used = page->used; // number of blocks in use (#553) - xarea.area.block_size = ubsize; - xarea.area.full_block_size = bsize; + _mi_heap_area_init(&xarea.area, page); return fun(heap, &xarea, arg); } @@ -575,7 +751,7 @@ static bool mi_heap_area_visitor(const mi_heap_t* heap, const mi_heap_area_ex_t* mi_visit_blocks_args_t* args = (mi_visit_blocks_args_t*)arg; if (!args->visitor(heap, &xarea->area, NULL, xarea->area.block_size, args->arg)) return false; if (args->visit_blocks) { - return mi_heap_area_visit_blocks(xarea, args->visitor, args->arg); + return _mi_heap_area_visit_blocks(&xarea->area, xarea->page, args->visitor, args->arg); } else { return true; diff --git a/depends/mimalloc/src/init.c b/depends/mimalloc/src/init.c index 4f37b71761b3..73847a4b6af1 100644 --- a/depends/mimalloc/src/init.c +++ b/depends/mimalloc/src/init.c @@ -5,32 +5,37 @@ terms of the MIT license. A copy of the license can be found in the file "LICENSE" at the root of this distribution. 
-----------------------------------------------------------------------------*/ #include "mimalloc.h" -#include "mimalloc-internal.h" +#include "mimalloc/internal.h" +#include "mimalloc/prim.h" #include // memcpy, memset #include // atexit +#define MI_MEMID_INIT(kind) {{{NULL,0}}, kind, true /* pinned */, true /* committed */, false /* zero */ } +#define MI_MEMID_STATIC MI_MEMID_INIT(MI_MEM_STATIC) + // Empty page used to initialize the small free pages array const mi_page_t _mi_page_empty = { - 0, false, false, false, false, - 0, // capacity - 0, // reserved capacity - { 0 }, // flags - false, // is_zero - 0, // retire_expire - NULL, // free - #if MI_ENCODE_FREELIST - { 0, 0 }, - #endif - 0, // used - 0, // xblock_size - NULL, // local_free - MI_ATOMIC_VAR_INIT(0), // xthread_free - MI_ATOMIC_VAR_INIT(0), // xheap - NULL, NULL - #if MI_INTPTR_SIZE==8 - , { 0 } // padding + MI_ATOMIC_VAR_INIT(0), // xthread_id + NULL, // free + 0, // used + 0, // capacity + 0, // reserved capacity + 0, // block size shift + 0, // retire_expire + NULL, // local_free + MI_ATOMIC_VAR_INIT(0), // xthread_free + 0, // block_size + NULL, // page_start + 0, // heap tag + false, // is_zero + #if (MI_PADDING || MI_ENCODE_FREELIST) + { 0, 0 }, // keys #endif + NULL, // xheap + NULL, NULL, // next, prev + MI_ARENA_SLICE_SIZE, // page_committed + MI_MEMID_STATIC // memid }; #define MI_PAGE_EMPTY() ((mi_page_t*)&_mi_page_empty) @@ -45,7 +50,7 @@ const mi_page_t _mi_page_empty = { // Empty page queues for every bin -#define QNULL(sz) { NULL, NULL, (sz)*sizeof(uintptr_t) } +#define QNULL(sz) { NULL, NULL, 0, (sz)*sizeof(uintptr_t) } #define MI_PAGE_QUEUES_EMPTY \ { QNULL(1), \ QNULL( 1), QNULL( 2), QNULL( 3), QNULL( 4), QNULL( 5), QNULL( 6), QNULL( 7), QNULL( 8), /* 8 */ \ @@ -57,41 +62,29 @@ const mi_page_t _mi_page_empty = { QNULL( 10240), QNULL( 12288), QNULL( 14336), QNULL( 16384), QNULL( 20480), QNULL( 24576), QNULL( 28672), QNULL( 32768), /* 56 */ \ QNULL( 40960), QNULL( 49152), QNULL( 
57344), QNULL( 65536), QNULL( 81920), QNULL( 98304), QNULL(114688), QNULL(131072), /* 64 */ \ QNULL(163840), QNULL(196608), QNULL(229376), QNULL(262144), QNULL(327680), QNULL(393216), QNULL(458752), QNULL(524288), /* 72 */ \ - QNULL(MI_MEDIUM_OBJ_WSIZE_MAX + 1 /* 655360, Huge queue */), \ - QNULL(MI_MEDIUM_OBJ_WSIZE_MAX + 2) /* Full queue */ } + QNULL(MI_LARGE_MAX_OBJ_WSIZE + 1 /* 655360, Huge queue */), \ + QNULL(MI_LARGE_MAX_OBJ_WSIZE + 2) /* Full queue */ } -#define MI_STAT_COUNT_NULL() {0,0,0,0} +#define MI_STAT_COUNT_NULL() {0,0,0} // Empty statistics -#if MI_STAT>1 -#define MI_STAT_COUNT_END_NULL() , { MI_STAT_COUNT_NULL(), MI_INIT32(MI_STAT_COUNT_NULL) } -#else -#define MI_STAT_COUNT_END_NULL() -#endif - #define MI_STATS_NULL \ - MI_STAT_COUNT_NULL(), MI_STAT_COUNT_NULL(), \ - MI_STAT_COUNT_NULL(), MI_STAT_COUNT_NULL(), \ - MI_STAT_COUNT_NULL(), MI_STAT_COUNT_NULL(), \ - MI_STAT_COUNT_NULL(), MI_STAT_COUNT_NULL(), \ - MI_STAT_COUNT_NULL(), MI_STAT_COUNT_NULL(), \ - MI_STAT_COUNT_NULL(), MI_STAT_COUNT_NULL(), \ - MI_STAT_COUNT_NULL(), MI_STAT_COUNT_NULL(), \ - { 0, 0 }, { 0, 0 }, { 0, 0 }, { 0, 0 }, \ - { 0, 0 }, { 0, 0 }, { 0, 0 }, { 0, 0 } \ - MI_STAT_COUNT_END_NULL() - - -// Empty slice span queues for every bin -#define SQNULL(sz) { NULL, NULL, sz } -#define MI_SEGMENT_SPAN_QUEUES_EMPTY \ - { SQNULL(1), \ - SQNULL( 1), SQNULL( 2), SQNULL( 3), SQNULL( 4), SQNULL( 5), SQNULL( 6), SQNULL( 7), SQNULL( 10), /* 8 */ \ - SQNULL( 12), SQNULL( 14), SQNULL( 16), SQNULL( 20), SQNULL( 24), SQNULL( 28), SQNULL( 32), SQNULL( 40), /* 16 */ \ - SQNULL( 48), SQNULL( 56), SQNULL( 64), SQNULL( 80), SQNULL( 96), SQNULL( 112), SQNULL( 128), SQNULL( 160), /* 24 */ \ - SQNULL( 192), SQNULL( 224), SQNULL( 256), SQNULL( 320), SQNULL( 384), SQNULL( 448), SQNULL( 512), SQNULL( 640), /* 32 */ \ - SQNULL( 768), SQNULL( 896), SQNULL( 1024) /* 35 */ } - + MI_STAT_COUNT_NULL(), MI_STAT_COUNT_NULL(), MI_STAT_COUNT_NULL(), MI_STAT_COUNT_NULL(), \ + MI_STAT_COUNT_NULL(), 
MI_STAT_COUNT_NULL(), MI_STAT_COUNT_NULL(), MI_STAT_COUNT_NULL(), \ + MI_STAT_COUNT_NULL(), MI_STAT_COUNT_NULL(), MI_STAT_COUNT_NULL(), \ + { 0 }, { 0 }, { 0 }, { 0 }, \ + { 0 }, { 0 }, { 0 }, { 0 }, \ + \ + { 0 }, { 0 }, { 0 }, { 0 }, { 0 }, \ + MI_INIT4(MI_STAT_COUNT_NULL), \ + { 0 }, { 0 }, { 0 }, { 0 }, \ + \ + { MI_INIT4(MI_STAT_COUNT_NULL) }, \ + { { 0 }, { 0 }, { 0 }, { 0 } }, \ + \ + { MI_INIT74(MI_STAT_COUNT_NULL) }, \ + { MI_INIT74(MI_STAT_COUNT_NULL) }, \ + { MI_INIT5(MI_STAT_COUNT_NULL) } // -------------------------------------------------------- // Statically allocate an empty heap as the initial @@ -102,198 +95,380 @@ const mi_page_t _mi_page_empty = { // may lead to allocation itself on some platforms) // -------------------------------------------------------- +static mi_decl_cache_align mi_subproc_t subproc_main +#if __cplusplus += { }; // empty initializer to prevent running the constructor (with msvc) +#else += { 0 }; // C zero initialize +#endif + +static mi_decl_cache_align mi_tld_t tld_empty = { + 0, // thread_id + 0, // thread_seq + 0, // default numa node + &subproc_main, // subproc + NULL, // heap_backing + NULL, // heaps list + 0, // heartbeat + false, // recurse + false, // is_in_threadpool + { MI_STAT_VERSION, MI_STATS_NULL }, // stats + MI_MEMID_STATIC // memid +}; + mi_decl_cache_align const mi_heap_t _mi_heap_empty = { - NULL, + &tld_empty, // tld + NULL, // exclusive_arena + 0, // preferred numa node + 0, // cookie + //{ 0, 0 }, // keys + { {0}, {0}, 0, true }, // random + 0, // page count + MI_BIN_FULL, 0, // page retired min/max + 0, 0, // generic count + NULL, // next + 0, // full page retain + false, // can reclaim + true, // can eager abandon + 0, // tag + #if MI_GUARDED + 0, 0, 0, 1, // count is 1 so we never write to it (see `internal.h:mi_heap_malloc_use_guarded`) + #endif MI_SMALL_PAGES_EMPTY, MI_PAGE_QUEUES_EMPTY, - MI_ATOMIC_VAR_INIT(NULL), - 0, // tid - 0, // cookie - 0, // arena id - { 0, 0 }, // keys - { {0}, {0}, 0 }, 
- 0, // page count - MI_BIN_FULL, 0, // page retired min/max - NULL, // next - false + MI_MEMID_STATIC }; -#define tld_empty_stats ((mi_stats_t*)((uint8_t*)&tld_empty + offsetof(mi_tld_t,stats))) -#define tld_empty_os ((mi_os_tld_t*)((uint8_t*)&tld_empty + offsetof(mi_tld_t,os))) +extern mi_decl_hidden mi_decl_cache_align mi_heap_t heap_main; + +static mi_decl_cache_align mi_tld_t tld_main = { + 0, // thread_id + 0, // thread_seq + 0, // numa node + &subproc_main, // subproc + &heap_main, // heap_backing + &heap_main, // heaps list + 0, // heartbeat + false, // recurse + false, // is_in_threadpool + { MI_STAT_VERSION, MI_STATS_NULL }, // stats + MI_MEMID_STATIC // memid +}; -mi_decl_cache_align static const mi_tld_t tld_empty = { - 0, - false, - NULL, NULL, - { MI_SEGMENT_SPAN_QUEUES_EMPTY, 0, 0, 0, 0, tld_empty_stats, tld_empty_os }, // segments - { 0, tld_empty_stats }, // os - { MI_STATS_NULL } // stats +mi_decl_cache_align mi_heap_t heap_main = { + &tld_main, // thread local data + NULL, // exclusive arena + 0, // preferred numa node + 0, // initial cookie + //{ 0, 0 }, // the key of the main heap can be fixed (unlike page keys that need to be secure!) 
+ { {0x846ca68b}, {0}, 0, true }, // random + 0, // page count + MI_BIN_FULL, 0, // page retired min/max + 0, 0, // generic count + NULL, // next heap + 2, // full page retain + true, // allow page reclaim + true, // allow page abandon + 0, // tag + #if MI_GUARDED + 0, 0, 0, 0, + #endif + MI_SMALL_PAGES_EMPTY, + MI_PAGE_QUEUES_EMPTY, + MI_MEMID_STATIC }; + +mi_threadid_t _mi_thread_id(void) mi_attr_noexcept { + return _mi_prim_thread_id(); +} + // the thread-local default heap for allocation mi_decl_thread mi_heap_t* _mi_heap_default = (mi_heap_t*)&_mi_heap_empty; -extern mi_heap_t _mi_heap_main; -static mi_tld_t tld_main = { - 0, false, - &_mi_heap_main, & _mi_heap_main, - { MI_SEGMENT_SPAN_QUEUES_EMPTY, 0, 0, 0, 0, &tld_main.stats, &tld_main.os }, // segments - { 0, &tld_main.stats }, // os - { MI_STATS_NULL } // stats -}; +bool _mi_process_is_initialized = false; // set to `true` in `mi_process_init`. -mi_heap_t _mi_heap_main = { - &tld_main, - MI_SMALL_PAGES_EMPTY, - MI_PAGE_QUEUES_EMPTY, - MI_ATOMIC_VAR_INIT(NULL), - 0, // thread id - 0, // initial cookie - 0, // arena id - { 0, 0 }, // the key of the main heap can be fixed (unlike page keys that need to be secure!) - { {0x846ca68b}, {0}, 0 }, // random - 0, // page count - MI_BIN_FULL, 0, // page retired min/max - NULL, // next heap - false // can reclaim -}; +mi_stats_t _mi_stats_main = { MI_STAT_VERSION, MI_STATS_NULL }; -bool _mi_process_is_initialized = false; // set to `true` in `mi_process_init`. 
+#if MI_GUARDED +mi_decl_export void mi_heap_guarded_set_sample_rate(mi_heap_t* heap, size_t sample_rate, size_t seed) { + heap->guarded_sample_rate = sample_rate; + heap->guarded_sample_count = sample_rate; // count down samples + if (heap->guarded_sample_rate > 1) { + if (seed == 0) { + seed = _mi_heap_random_next(heap); + } + heap->guarded_sample_count = (seed % heap->guarded_sample_rate) + 1; // start at random count between 1 and `sample_rate` + } +} + +mi_decl_export void mi_heap_guarded_set_size_bound(mi_heap_t* heap, size_t min, size_t max) { + heap->guarded_size_min = min; + heap->guarded_size_max = (min > max ? min : max); +} + +void _mi_heap_guarded_init(mi_heap_t* heap) { + mi_heap_guarded_set_sample_rate(heap, + (size_t)mi_option_get_clamp(mi_option_guarded_sample_rate, 0, LONG_MAX), + (size_t)mi_option_get(mi_option_guarded_sample_seed)); + mi_heap_guarded_set_size_bound(heap, + (size_t)mi_option_get_clamp(mi_option_guarded_min, 0, LONG_MAX), + (size_t)mi_option_get_clamp(mi_option_guarded_max, 0, LONG_MAX) ); +} +#else +mi_decl_export void mi_heap_guarded_set_sample_rate(mi_heap_t* heap, size_t sample_rate, size_t seed) { + MI_UNUSED(heap); MI_UNUSED(sample_rate); MI_UNUSED(seed); +} + +mi_decl_export void mi_heap_guarded_set_size_bound(mi_heap_t* heap, size_t min, size_t max) { + MI_UNUSED(heap); MI_UNUSED(min); MI_UNUSED(max); +} +void _mi_heap_guarded_init(mi_heap_t* heap) { + MI_UNUSED(heap); +} +#endif -mi_stats_t _mi_stats_main = { MI_STATS_NULL }; +// Initialize main subproc +static void mi_subproc_main_init(void) { + if (subproc_main.memid.memkind != MI_MEM_STATIC) { + subproc_main.memid = _mi_memid_create(MI_MEM_STATIC); + mi_lock_init(&subproc_main.os_abandoned_pages_lock); + mi_lock_init(&subproc_main.arena_reserve_lock); + } +} +// Initialize main tld +static void mi_tld_main_init(void) { + if (tld_main.thread_id == 0) { + tld_main.thread_id = _mi_prim_thread_id(); + } +} +// Initialization of the (statically allocated) main heap, and the 
main tld and subproc. static void mi_heap_main_init(void) { - if (_mi_heap_main.cookie == 0) { - _mi_heap_main.thread_id = _mi_thread_id(); - _mi_heap_main.cookie = _mi_os_random_weak((uintptr_t)&mi_heap_main_init); - _mi_random_init(&_mi_heap_main.random); - _mi_heap_main.keys[0] = _mi_heap_random_next(&_mi_heap_main); - _mi_heap_main.keys[1] = _mi_heap_random_next(&_mi_heap_main); + if (heap_main.cookie == 0) { + // heap + heap_main.cookie = 1; + #if defined(__APPLE__) || defined(_WIN32) && !defined(MI_SHARED_LIB) + _mi_random_init_weak(&heap_main.random); // prevent allocation failure during bcrypt dll initialization with static linking + #else + _mi_random_init(&heap_main.random); + #endif + heap_main.cookie = _mi_heap_random_next(&heap_main); + //heap_main.keys[0] = _mi_heap_random_next(&heap_main); + //heap_main.keys[1] = _mi_heap_random_next(&heap_main); + _mi_heap_guarded_init(&heap_main); + heap_main.allow_page_reclaim = (mi_option_get(mi_option_page_reclaim_on_free) >= 0); + heap_main.allow_page_abandon = (mi_option_get(mi_option_page_full_retain) >= 0); + heap_main.page_full_retain = mi_option_get_clamp(mi_option_page_full_retain, -1, 32); + + mi_subproc_main_init(); + mi_tld_main_init(); } } mi_heap_t* _mi_heap_main_get(void) { mi_heap_main_init(); - return &_mi_heap_main; + return &heap_main; } /* ----------------------------------------------------------- - Initialization and freeing of the thread local heaps + Thread local data ----------------------------------------------------------- */ -// note: in x64 in release build `sizeof(mi_thread_data_t)` is under 4KiB (= OS page size). -typedef struct mi_thread_data_s { - mi_heap_t heap; // must come first due to cast in `_mi_heap_done` - mi_tld_t tld; -} mi_thread_data_t; - - -// Thread meta-data is allocated directly from the OS. 
For -// some programs that do not use thread pools and allocate and -// destroy many OS threads, this may causes too much overhead -// per thread so we maintain a small cache of recently freed metadata. - -#define TD_CACHE_SIZE (8) -static _Atomic(mi_thread_data_t*) td_cache[TD_CACHE_SIZE]; - -static mi_thread_data_t* mi_thread_data_alloc(void) { - // try to find thread metadata in the cache - mi_thread_data_t* td; - for (int i = 0; i < TD_CACHE_SIZE; i++) { - td = mi_atomic_load_ptr_relaxed(mi_thread_data_t, &td_cache[i]); - if (td != NULL) { - td = mi_atomic_exchange_ptr_acq_rel(mi_thread_data_t, &td_cache[i], NULL); - if (td != NULL) { - return td; - } - } +// Count current and total created threads +static _Atomic(size_t) thread_count = MI_ATOMIC_VAR_INIT(1); +static _Atomic(size_t) thread_total_count; + +size_t _mi_current_thread_count(void) { + return mi_atomic_load_relaxed(&thread_count); +} + + +// The mimalloc thread local data +mi_decl_thread mi_tld_t* thread_tld = &tld_empty; + +// Allocate fresh tld +static mi_tld_t* mi_tld_alloc(void) { + mi_atomic_increment_relaxed(&thread_count); + if (_mi_is_main_thread()) { + return &tld_main; } - // if that fails, allocate directly from the OS - td = (mi_thread_data_t*)_mi_os_alloc(sizeof(mi_thread_data_t), &_mi_stats_main); - if (td == NULL) { - // if this fails, try once more. (issue #257) - td = (mi_thread_data_t*)_mi_os_alloc(sizeof(mi_thread_data_t), &_mi_stats_main); - if (td == NULL) { - // really out of memory - _mi_error_message(ENOMEM, "unable to allocate thread local heap metadata (%zu bytes)\n", sizeof(mi_thread_data_t)); + else { + // allocate tld meta-data + // note: we need to be careful to not access the tld from `_mi_meta_zalloc` + // (and in turn from `_mi_arena_alloc_aligned` and `_mi_os_alloc_aligned`). 
+ mi_memid_t memid; + mi_tld_t* tld = (mi_tld_t*)_mi_meta_zalloc(sizeof(mi_tld_t), &memid); + if (tld==NULL) { + _mi_error_message(ENOMEM, "unable to allocate memory for thread local data\n"); + return NULL; } + tld->memid = memid; + tld->heap_backing = NULL; + tld->heaps = NULL; + tld->subproc = &subproc_main; + tld->numa_node = _mi_os_numa_node(); + tld->thread_id = _mi_prim_thread_id(); + tld->thread_seq = mi_atomic_add_acq_rel(&thread_total_count, 1); + tld->is_in_threadpool = _mi_prim_thread_is_in_threadpool(); + return tld; } - return td; -} - -static void mi_thread_data_free( mi_thread_data_t* tdfree ) { - // try to add the thread metadata to the cache - for (int i = 0; i < TD_CACHE_SIZE; i++) { - mi_thread_data_t* td = mi_atomic_load_ptr_relaxed(mi_thread_data_t, &td_cache[i]); - if (td == NULL) { - mi_thread_data_t* expected = NULL; - if (mi_atomic_cas_ptr_weak_acq_rel(mi_thread_data_t, &td_cache[i], &expected, tdfree)) { - return; - } - } +} + +#define MI_TLD_INVALID ((mi_tld_t*)1) + +mi_decl_noinline static void mi_tld_free(mi_tld_t* tld) { + if (tld != NULL && tld != MI_TLD_INVALID) { + _mi_stats_done(&tld->stats); + _mi_meta_free(tld, sizeof(mi_tld_t), tld->memid); + } + #if 0 + // do not read/write to `thread_tld` on older macOS <= 14 as that will re-initialize the thread local storage + // (since we are calling this during pthread shutdown) + // (and this could happen on other systems as well, so let's never do it) + thread_tld = MI_TLD_INVALID; + #endif + mi_atomic_decrement_relaxed(&thread_count); +} + +static mi_tld_t* mi_tld(void) { + mi_tld_t* tld = thread_tld; + if (tld == MI_TLD_INVALID) { + _mi_error_message(EFAULT, "internal error: tld is accessed after the thread terminated\n"); + thread_tld = &tld_empty; + } + if (tld==&tld_empty) { + thread_tld = tld = mi_tld_alloc(); + } + return tld; +} + +mi_subproc_t* _mi_subproc(void) { + // should work without doing initialization (as it may be called from `_mi_tld -> mi_tld_alloc ... 
-> os_alloc -> _mi_subproc()` + // todo: this will still fail on OS systems where the first access to a thread-local causes allocation. + // on such systems we can check for this with the _mi_prim_get_default_heap as those are protected (by being + // stored in a TLS slot for example) + mi_heap_t* heap = mi_prim_get_default_heap(); + if (heap == NULL) { + return _mi_subproc_main(); + } + else { + return heap->tld->subproc; // avoid using thread local storage (`thread_tld`) + } +} + + +mi_tld_t* _mi_thread_tld(void) mi_attr_noexcept { + // should work without doing initialization (as it may be called from `_mi_tld -> mi_tld_alloc ... -> os_alloc -> _mi_subproc()` + mi_heap_t* heap = mi_prim_get_default_heap(); + if (heap == NULL) { + return &tld_empty; + } + else { + return heap->tld; } - // if that fails, just free it directly - _mi_os_free(tdfree, sizeof(mi_thread_data_t), &_mi_stats_main); -} - -static void mi_thread_data_collect(void) { - // free all thread metadata from the cache - for (int i = 0; i < TD_CACHE_SIZE; i++) { - mi_thread_data_t* td = mi_atomic_load_ptr_relaxed(mi_thread_data_t, &td_cache[i]); - if (td != NULL) { - td = mi_atomic_exchange_ptr_acq_rel(mi_thread_data_t, &td_cache[i], NULL); - if (td != NULL) { - _mi_os_free( td, sizeof(mi_thread_data_t), &_mi_stats_main ); - } +} + +/* ----------------------------------------------------------- + Sub process +----------------------------------------------------------- */ + +mi_subproc_t* _mi_subproc_main(void) { + return &subproc_main; +} + +mi_subproc_id_t mi_subproc_main(void) { + return NULL; +} + +mi_subproc_id_t mi_subproc_new(void) { + mi_memid_t memid; + mi_subproc_t* subproc = (mi_subproc_t*)_mi_meta_zalloc(sizeof(mi_subproc_t),&memid); + if (subproc == NULL) return NULL; + subproc->memid = memid; + mi_lock_init(&subproc->os_abandoned_pages_lock); + mi_lock_init(&subproc->arena_reserve_lock); + return subproc; +} + +mi_subproc_t* _mi_subproc_from_id(mi_subproc_id_t subproc_id) { + return 
(subproc_id == NULL ? &subproc_main : (mi_subproc_t*)subproc_id); +} + +void mi_subproc_delete(mi_subproc_id_t subproc_id) { + if (subproc_id == NULL) return; + mi_subproc_t* subproc = _mi_subproc_from_id(subproc_id); + // check if there are os pages still.. + bool safe_to_delete = false; + mi_lock(&subproc->os_abandoned_pages_lock) { + if (subproc->os_abandoned_pages == NULL) { + safe_to_delete = true; } } + if (!safe_to_delete) return; + + // merge stats back into the main subproc? + _mi_stats_merge_from(&_mi_subproc_main()->stats, &subproc->stats); + + // safe to release + // todo: should we refcount subprocesses? + mi_lock_done(&subproc->os_abandoned_pages_lock); + mi_lock_done(&subproc->arena_reserve_lock); + _mi_meta_free(subproc, sizeof(mi_subproc_t), subproc->memid); } +void mi_subproc_add_current_thread(mi_subproc_id_t subproc_id) { + mi_tld_t* tld = mi_tld(); + if (tld == NULL) return; + mi_assert(tld->subproc == &subproc_main); + if (tld->subproc != &subproc_main) return; + tld->subproc = _mi_subproc_from_id(subproc_id); +} + + +/* ----------------------------------------------------------- + Allocate heap data +----------------------------------------------------------- */ + // Initialize the thread local default heap, called from `mi_thread_init` -static bool _mi_heap_init(void) { - if (mi_heap_is_initialized(mi_get_default_heap())) return true; +static bool _mi_thread_heap_init(void) { + if (mi_heap_is_initialized(mi_prim_get_default_heap())) return true; if (_mi_is_main_thread()) { - // mi_assert_internal(_mi_heap_main.thread_id != 0); // can happen on freeBSD where alloc is called before any initialization + // mi_assert_internal(heap_main.thread_id != 0); // can happen on freeBSD where alloc is called before any initialization // the main heap is statically allocated mi_heap_main_init(); - _mi_heap_set_default_direct(&_mi_heap_main); - //mi_assert_internal(_mi_heap_default->tld->heap_backing == mi_get_default_heap()); + 
_mi_heap_set_default_direct(&heap_main); + //mi_assert_internal(_mi_heap_default->tld->heap_backing == mi_prim_get_default_heap()); } else { - // use `_mi_os_alloc` to allocate directly from the OS - mi_thread_data_t* td = mi_thread_data_alloc(); - if (td == NULL) return false; - - // OS allocated so already zero initialized - mi_tld_t* tld = &td->tld; - mi_heap_t* heap = &td->heap; - _mi_memcpy_aligned(tld, &tld_empty, sizeof(*tld)); - _mi_memcpy_aligned(heap, &_mi_heap_empty, sizeof(*heap)); - heap->thread_id = _mi_thread_id(); - _mi_random_init(&heap->random); - heap->cookie = _mi_heap_random_next(heap) | 1; - heap->keys[0] = _mi_heap_random_next(heap); - heap->keys[1] = _mi_heap_random_next(heap); - heap->tld = tld; - tld->heap_backing = heap; - tld->heaps = heap; - tld->segments.stats = &tld->stats; - tld->segments.os = &tld->os; - tld->os.stats = &tld->stats; - _mi_heap_set_default_direct(heap); + // allocates tld data + // note: we cannot access thread-locals yet as that can cause (recursive) allocation + // (on macOS <= 14 for example where the loader allocates thread-local data on demand). + mi_tld_t* tld = mi_tld_alloc(); + + // allocate and initialize the heap + mi_heap_t* heap = _mi_heap_create(0 /* default tag */, false /* allow destroy? */, _mi_arena_id_none(), tld); + + // associate the heap with this thread + // (this is safe, on macOS for example, the heap is set in a dedicated TLS slot and thus does not cause recursive allocation) + _mi_heap_set_default_direct(heap); + + // now that the heap is set for this thread, we can set the thread-local tld. + thread_tld = tld; } return false; } + // Free the thread local default heap (called from `mi_thread_done`) -static bool _mi_heap_done(mi_heap_t* heap) { +static bool _mi_thread_heap_done(mi_heap_t* heap) { if (!mi_heap_is_initialized(heap)) return true; // reset default heap - _mi_heap_set_default_direct(_mi_is_main_thread() ? 
&_mi_heap_main : (mi_heap_t*)&_mi_heap_empty); + _mi_heap_set_default_direct(_mi_is_main_thread() ? &heap_main : (mi_heap_t*)&_mi_heap_empty); // switch to backing heap heap = heap->tld->heap_backing; @@ -313,30 +488,22 @@ static bool _mi_heap_done(mi_heap_t* heap) { mi_assert_internal(mi_heap_is_backing(heap)); // collect if not the main thread - if (heap != &_mi_heap_main) { + if (heap != &heap_main) { _mi_heap_collect_abandon(heap); } - - // merge stats - _mi_stats_done(&heap->tld->stats); - - // free if not the main thread - if (heap != &_mi_heap_main) { - // the following assertion does not always hold for huge segments as those are always treated - // as abondened: one may allocate it in one thread, but deallocate in another in which case - // the count can be too large or negative. todo: perhaps not count huge segments? see issue #363 - // mi_assert_internal(heap->tld->segments.count == 0 || heap->thread_id != _mi_thread_id()); - mi_thread_data_free((mi_thread_data_t*)heap); - } - else { - mi_thread_data_collect(); // free cached thread metadata - #if 0 + + // free heap meta data + _mi_meta_free(heap, sizeof(mi_heap_t), heap->memid); + + if (heap == &heap_main) { + #if 0 // never free the main thread even in debug mode; if a dll is linked statically with mimalloc, // there may still be delete/free calls after the mi_fls_done is called. Issue #207 _mi_heap_destroy_pages(heap); - mi_assert_internal(heap->tld->heap_backing == &_mi_heap_main); + mi_assert_internal(heap->tld->heap_backing == &heap_main); #endif } + return false; } @@ -350,7 +517,7 @@ static bool _mi_heap_done(mi_heap_t* heap) { // 1. windows dynamic library: // call from DllMain on DLL_THREAD_DETACH // 2. windows static library: -// use `FlsAlloc` to call a destructor when the thread is done +// use special linker section to call a destructor when the thread is done // 3. 
unix, pthreads: // use a pthread key to call a destructor when a pthread is done // @@ -358,101 +525,74 @@ static bool _mi_heap_done(mi_heap_t* heap) { // to set up the thread local keys. // -------------------------------------------------------- -static void _mi_thread_done(mi_heap_t* default_heap); - -#if defined(_WIN32) && defined(MI_SHARED_LIB) - // nothing to do as it is done in DllMain -#elif defined(_WIN32) && !defined(MI_SHARED_LIB) - // use thread local storage keys to detect thread ending - #include - #include - #if (_WIN32_WINNT < 0x600) // before Windows Vista - WINBASEAPI DWORD WINAPI FlsAlloc( _In_opt_ PFLS_CALLBACK_FUNCTION lpCallback ); - WINBASEAPI PVOID WINAPI FlsGetValue( _In_ DWORD dwFlsIndex ); - WINBASEAPI BOOL WINAPI FlsSetValue( _In_ DWORD dwFlsIndex, _In_opt_ PVOID lpFlsData ); - WINBASEAPI BOOL WINAPI FlsFree(_In_ DWORD dwFlsIndex); - #endif - static DWORD mi_fls_key = (DWORD)(-1); - static void NTAPI mi_fls_done(PVOID value) { - if (value!=NULL) _mi_thread_done((mi_heap_t*)value); - } -#elif defined(MI_USE_PTHREADS) - // use pthread local storage keys to detect thread ending - // (and used with MI_TLS_PTHREADS for the default heap) - pthread_key_t _mi_heap_default_key = (pthread_key_t)(-1); - static void mi_pthread_done(void* value) { - if (value!=NULL) _mi_thread_done((mi_heap_t*)value); - } -#elif defined(__wasi__) -// no pthreads in the WebAssembly Standard Interface -#else - #pragma message("define a way to call mi_thread_done when a thread is done") -#endif - // Set up handlers so `mi_thread_done` is called automatically static void mi_process_setup_auto_thread_done(void) { static bool tls_initialized = false; // fine if it races if (tls_initialized) return; tls_initialized = true; - #if defined(_WIN32) && defined(MI_SHARED_LIB) - // nothing to do as it is done in DllMain - #elif defined(_WIN32) && !defined(MI_SHARED_LIB) - mi_fls_key = FlsAlloc(&mi_fls_done); - #elif defined(MI_USE_PTHREADS) - 
mi_assert_internal(_mi_heap_default_key == (pthread_key_t)(-1)); - pthread_key_create(&_mi_heap_default_key, &mi_pthread_done); - #endif - _mi_heap_set_default_direct(&_mi_heap_main); + _mi_prim_thread_init_auto_done(); + _mi_heap_set_default_direct(&heap_main); } bool _mi_is_main_thread(void) { - return (_mi_heap_main.thread_id==0 || _mi_heap_main.thread_id == _mi_thread_id()); + return (tld_main.thread_id==0 || tld_main.thread_id == _mi_thread_id()); } -static _Atomic(size_t) thread_count = MI_ATOMIC_VAR_INIT(1); - -size_t _mi_current_thread_count(void) { - return mi_atomic_load_relaxed(&thread_count); -} // This is called from the `mi_malloc_generic` void mi_thread_init(void) mi_attr_noexcept { // ensure our process has started already mi_process_init(); - + // initialize the thread local default heap // (this will call `_mi_heap_set_default_direct` and thus set the // fiber/pthread key to a non-zero value, ensuring `_mi_thread_done` is called) - if (_mi_heap_init()) return; // returns true if already initialized + if (_mi_thread_heap_init()) return; // returns true if already initialized - _mi_stat_increase(&_mi_stats_main.threads, 1); - mi_atomic_increment_relaxed(&thread_count); + mi_subproc_stat_increase(_mi_subproc_main(), threads, 1); //_mi_verbose_message("thread init: 0x%zx\n", _mi_thread_id()); } void mi_thread_done(void) mi_attr_noexcept { - _mi_thread_done(mi_get_default_heap()); + _mi_thread_done(NULL); } -static void _mi_thread_done(mi_heap_t* heap) { - mi_atomic_decrement_relaxed(&thread_count); - _mi_stat_decrease(&_mi_stats_main.threads, 1); +void _mi_thread_done(mi_heap_t* heap) +{ + // calling with NULL implies using the default heap + if (heap == NULL) { + heap = mi_prim_get_default_heap(); + if (heap == NULL) return; + } + + // prevent re-entrancy through heap_done/heap_set_default_direct (issue #699) + if (!mi_heap_is_initialized(heap)) { + return; + } + + // adjust stats + mi_subproc_stat_decrease(_mi_subproc_main(), threads, 1); // check 
thread-id as on Windows shutdown with FLS the main (exit) thread may call this on thread-local heaps... - if (heap->thread_id != _mi_thread_id()) return; - + if (heap->tld->thread_id != _mi_prim_thread_id()) return; + // abandon the thread local heap - if (_mi_heap_done(heap)) return; // returns true if already ran + // note: we store the tld as we should avoid reading `thread_tld` at this point (to avoid reinitializing the thread local storage) + mi_tld_t* tld = heap->tld; + _mi_thread_heap_done(heap); // returns true if already ran + + // free thread local data + mi_tld_free(tld); } void _mi_heap_set_default_direct(mi_heap_t* heap) { mi_assert_internal(heap != NULL); #if defined(MI_TLS_SLOT) - mi_tls_slot_set(MI_TLS_SLOT,heap); + mi_prim_tls_slot_set(MI_TLS_SLOT,heap); #elif defined(MI_TLS_PTHREAD_SLOT_OFS) - *mi_tls_pthread_heap_slot() = heap; + *mi_prim_tls_pthread_heap_slot() = heap; #elif defined(MI_TLS_PTHREAD) // we use _mi_heap_default_key #else @@ -461,134 +601,133 @@ void _mi_heap_set_default_direct(mi_heap_t* heap) { // ensure the default heap is passed to `_mi_thread_done` // setting to a non-NULL value also ensures `mi_thread_done` is called. 
- #if defined(_WIN32) && defined(MI_SHARED_LIB) - // nothing to do as it is done in DllMain - #elif defined(_WIN32) && !defined(MI_SHARED_LIB) - mi_assert_internal(mi_fls_key != 0); - FlsSetValue(mi_fls_key, heap); - #elif defined(MI_USE_PTHREADS) - if (_mi_heap_default_key != (pthread_key_t)(-1)) { // can happen during recursive invocation on freeBSD - pthread_setspecific(_mi_heap_default_key, heap); - } - #endif + _mi_prim_thread_associate_default_heap(heap); } +void mi_thread_set_in_threadpool(void) mi_attr_noexcept { + mi_tld_t* tld = mi_tld(); + if (tld!=NULL) { + tld->is_in_threadpool = true; + } +} // -------------------------------------------------------- // Run functions on process init/done, and thread init/done // -------------------------------------------------------- -static void mi_cdecl mi_process_done(void); - static bool os_preloading = true; // true until this module is initialized -static bool mi_redirected = false; // true if malloc redirects to mi_malloc // Returns true if this module has not been initialized; Don't use C runtime routines until it returns false. 
-bool _mi_preloading(void) { +bool mi_decl_noinline _mi_preloading(void) { return os_preloading; } +// Returns true if mimalloc was redirected mi_decl_nodiscard bool mi_is_redirected(void) mi_attr_noexcept { - return mi_redirected; -} - -// Communicate with the redirection module on Windows -#if defined(_WIN32) && defined(MI_SHARED_LIB) && !defined(MI_WIN_NOREDIRECT) -#ifdef __cplusplus -extern "C" { -#endif -mi_decl_export void _mi_redirect_entry(DWORD reason) { - // called on redirection; careful as this may be called before DllMain - if (reason == DLL_PROCESS_ATTACH) { - mi_redirected = true; - } - else if (reason == DLL_PROCESS_DETACH) { - mi_redirected = false; - } - else if (reason == DLL_THREAD_DETACH) { - mi_thread_done(); - } -} -__declspec(dllimport) bool mi_cdecl mi_allocator_init(const char** message); -__declspec(dllimport) void mi_cdecl mi_allocator_done(void); -#ifdef __cplusplus + return _mi_is_redirected(); } -#endif -#else -static bool mi_allocator_init(const char** message) { - if (message != NULL) *message = NULL; - return true; -} -static void mi_allocator_done(void) { - // nothing to do -} -#endif -// Called once by the process loader -static void mi_process_load(void) { +// Called once by the process loader from `src/prim/prim.c` +void _mi_auto_process_init(void) { mi_heap_main_init(); - #if defined(MI_TLS_RECURSE_GUARD) + #if defined(__APPLE__) || defined(MI_TLS_RECURSE_GUARD) volatile mi_heap_t* dummy = _mi_heap_default; // access TLS to allocate it before setting tls_initialized to true; - MI_UNUSED(dummy); + if (dummy == NULL) return; // use dummy or otherwise the access may get optimized away (issue #697) #endif os_preloading = false; - #if !(defined(_WIN32) && defined(MI_SHARED_LIB)) // use Dll process detach (see below) instead of atexit (issue #521) - atexit(&mi_process_done); - #endif + mi_assert_internal(_mi_is_main_thread()); _mi_options_init(); + mi_process_setup_auto_thread_done(); mi_process_init(); - //mi_stats_reset();- - if 
(mi_redirected) _mi_verbose_message("malloc is redirected.\n"); + if (_mi_is_redirected()) _mi_verbose_message("malloc is redirected.\n"); // show message from the redirector (if present) const char* msg = NULL; - mi_allocator_init(&msg); + _mi_allocator_init(&msg); if (msg != NULL && (mi_option_is_enabled(mi_option_verbose) || mi_option_is_enabled(mi_option_show_errors))) { _mi_fputs(NULL,NULL,NULL,msg); } + + // reseed random + _mi_random_reinit_if_weak(&heap_main.random); } -#if defined(_WIN32) && (defined(_M_IX86) || defined(_M_X64)) -#include +// CPU features mi_decl_cache_align bool _mi_cpu_has_fsrm = false; +mi_decl_cache_align bool _mi_cpu_has_erms = false; +mi_decl_cache_align bool _mi_cpu_has_popcnt = false; + +#if (MI_ARCH_X64 || MI_ARCH_X86) +#if defined(__GNUC__) +#include +static bool mi_cpuid(uint32_t* regs4, uint32_t level) { + return (__get_cpuid(level, ®s4[0], ®s4[1], ®s4[2], ®s4[3]) == 1); +} + +#elif defined(_MSC_VER) +static bool mi_cpuid(uint32_t* regs4, uint32_t level) { + __cpuid((int32_t*)regs4, (int32_t)level); + return true; +} +#else +static bool mi_cpuid(uint32_t* regs4, uint32_t level) { + MI_UNUSED(regs4); MI_UNUSED(level); + return false; +} +#endif static void mi_detect_cpu_features(void) { - // FSRM for fast rep movsb support (AMD Zen3+ (~2020) or Intel Ice Lake+ (~2017)) - int32_t cpu_info[4]; - __cpuid(cpu_info, 7); - _mi_cpu_has_fsrm = ((cpu_info[3] & (1 << 4)) != 0); // bit 4 of EDX : see + // FSRM for fast short rep movsb/stosb support (AMD Zen3+ (~2020) or Intel Ice Lake+ (~2017)) + // EMRS for fast enhanced rep movsb/stosb support + uint32_t cpu_info[4]; + if (mi_cpuid(cpu_info, 7)) { + _mi_cpu_has_fsrm = ((cpu_info[3] & (1 << 4)) != 0); // bit 4 of EDX : see + _mi_cpu_has_erms = ((cpu_info[1] & (1 << 9)) != 0); // bit 9 of EBX : see + } + if (mi_cpuid(cpu_info, 1)) { + _mi_cpu_has_popcnt = ((cpu_info[2] & (1 << 23)) != 0); // bit 23 of ECX : see + } } + #else static void mi_detect_cpu_features(void) { - // nothing + #if 
MI_ARCH_ARM64 + _mi_cpu_has_popcnt = true; + #endif } #endif + // Initialize the process; called by thread_init or the process loader void mi_process_init(void) mi_attr_noexcept { // ensure we are called once - if (_mi_process_is_initialized) return; - _mi_verbose_message("process init: 0x%zx\n", _mi_thread_id()); + static mi_atomic_once_t process_init; + #if _MSC_VER < 1920 + mi_heap_main_init(); // vs2017 can dynamically re-initialize heap_main + #endif + if (!mi_atomic_once(&process_init)) return; _mi_process_is_initialized = true; - mi_process_setup_auto_thread_done(); + _mi_verbose_message("process init: 0x%zx\n", _mi_thread_id()); - mi_detect_cpu_features(); + _mi_stats_init(); _mi_os_init(); + _mi_page_map_init(); mi_heap_main_init(); - #if (MI_DEBUG) - _mi_verbose_message("debug level : %d\n", MI_DEBUG); - #endif - _mi_verbose_message("secure level: %d\n", MI_SECURE); + mi_tld_main_init(); + // the following two can potentially allocate (on freeBSD for locks and thread keys) + mi_subproc_main_init(); + mi_process_setup_auto_thread_done(); mi_thread_init(); - #if defined(_WIN32) && !defined(MI_SHARED_LIB) - // When building as a static lib the FLS cleanup happens to early for the main thread. + #if defined(_WIN32) && defined(MI_WIN_USE_FLS) + // On windows, when building as a static lib the FLS cleanup happens to early for the main thread. // To avoid this, set the FLS value for the main thread to NULL so the fls cleanup // will not call _mi_thread_done on the (still executing) main thread. See issue #508. 
- FlsSetValue(mi_fls_key, NULL); + _mi_prim_thread_associate_default_heap(NULL); #endif - mi_stats_reset(); // only call stat reset *after* thread init (or the heap tld == NULL) + // mi_stats_reset(); // only call stat reset *after* thread init (or the heap tld == NULL) + mi_track_init(); if (mi_option_is_enabled(mi_option_reserve_huge_os_pages)) { size_t pages = mi_option_get_clamp(mi_option_reserve_huge_os_pages, 0, 128*1024); @@ -598,17 +737,17 @@ void mi_process_init(void) mi_attr_noexcept { } else { mi_reserve_huge_os_pages_interleave(pages, 0, pages*500); } - } + } if (mi_option_is_enabled(mi_option_reserve_os_memory)) { long ksize = mi_option_get(mi_option_reserve_os_memory); if (ksize > 0) { - mi_reserve_os_memory((size_t)ksize*MI_KiB, true /* commit? */, true /* allow large pages? */); + mi_reserve_os_memory((size_t)ksize*MI_KiB, true, true); } } } -// Called when the process is done (through `at_exit`) -static void mi_cdecl mi_process_done(void) { +// Called when the process is done (cdecl as it is used with `at_exit` on some platforms) +void mi_cdecl mi_process_done(void) mi_attr_noexcept { // only shutdown if we were initialized if (!_mi_process_is_initialized) return; // ensure we are called once @@ -616,80 +755,43 @@ static void mi_cdecl mi_process_done(void) { if (process_done) return; process_done = true; - #if defined(_WIN32) && !defined(MI_SHARED_LIB) - FlsFree(mi_fls_key); // call thread-done on all threads (except the main thread) to prevent dangling callback pointer if statically linked with a DLL; Issue #208 - #endif - + // get the default heap so we don't need to acces thread locals anymore + mi_heap_t* heap = mi_prim_get_default_heap(); // use prim to not initialize any heap + mi_assert_internal(heap != NULL); + + // release any thread specific resources and ensure _mi_thread_done is called on all but the main thread + _mi_prim_thread_done_auto_done(); + + #ifndef MI_SKIP_COLLECT_ON_EXIT - #if (MI_DEBUG != 0) || !defined(MI_SHARED_LIB) + #if 
(MI_DEBUG || !defined(MI_SHARED_LIB)) // free all memory if possible on process exit. This is not needed for a stand-alone process // but should be done if mimalloc is statically linked into another shared library which // is repeatedly loaded/unloaded, see issue #281. - mi_collect(true /* force */ ); + mi_heap_collect(heap, true /* force */ ); #endif #endif + // Forcefully release all retained memory; this can be dangerous in general if overriding regular malloc/free + // since after process_done there might still be other code running that calls `free` (like at_exit routines, + // or C-runtime termination code. + if (mi_option_is_enabled(mi_option_destroy_on_exit)) { + mi_heap_collect(heap, true /* force */); + _mi_heap_unsafe_destroy_all(heap); // forcefully release all memory held by all heaps (of this thread only!) + _mi_arenas_unsafe_destroy_all(heap->tld); + _mi_page_map_unsafe_destroy(_mi_subproc_main()); + } + //_mi_page_map_unsafe_destroy(_mi_subproc_main()); + if (mi_option_is_enabled(mi_option_show_stats) || mi_option_is_enabled(mi_option_verbose)) { - mi_stats_print(NULL); + _mi_stats_print(&_mi_subproc_main()->stats, NULL, NULL); // use always main subproc at process exit to avoid dereferencing the heap (as it may be destroyed by now) } - mi_allocator_done(); - _mi_verbose_message("process done: 0x%zx\n", _mi_heap_main.thread_id); + _mi_allocator_done(); + _mi_verbose_message("process done: 0x%zx\n", tld_main.thread_id); os_preloading = true; // don't call the C runtime anymore } - - -#if defined(_WIN32) && defined(MI_SHARED_LIB) - // Windows DLL: easy to hook into process_init and thread_done - __declspec(dllexport) BOOL WINAPI DllMain(HINSTANCE inst, DWORD reason, LPVOID reserved) { - MI_UNUSED(reserved); - MI_UNUSED(inst); - if (reason==DLL_PROCESS_ATTACH) { - mi_process_load(); - } - else if (reason==DLL_PROCESS_DETACH) { - mi_process_done(); - } - else if (reason==DLL_THREAD_DETACH) { - if (!mi_is_redirected()) { - mi_thread_done(); - } - } - 
return TRUE; - } - -#elif defined(_MSC_VER) - // MSVC: use data section magic for static libraries - // See - static int _mi_process_init(void) { - mi_process_load(); - return 0; - } - typedef int(*_mi_crt_callback_t)(void); - #if defined(_M_X64) || defined(_M_ARM64) - __pragma(comment(linker, "/include:" "_mi_msvc_initu")) - #pragma section(".CRT$XIU", long, read) - #else - __pragma(comment(linker, "/include:" "__mi_msvc_initu")) - #endif - #pragma data_seg(".CRT$XIU") - mi_decl_externc _mi_crt_callback_t _mi_msvc_initu[] = { &_mi_process_init }; - #pragma data_seg() - -#elif defined(__cplusplus) - // C++: use static initialization to detect process start - static bool _mi_process_init(void) { - mi_process_load(); - return (_mi_heap_main.thread_id != 0); - } - static bool mi_initialized = _mi_process_init(); - -#elif defined(__GNUC__) || defined(__clang__) - // GCC,Clang: use the constructor attribute - static void __attribute__((constructor)) _mi_process_init(void) { - mi_process_load(); - } - -#else -#pragma message("define a way to call mi_process_load on your platform") -#endif +void mi_cdecl _mi_auto_process_done(void) mi_attr_noexcept { + if (_mi_option_get_fast(mi_option_destroy_on_exit)>1) return; + mi_process_done(); +} diff --git a/depends/mimalloc/src/libc.c b/depends/mimalloc/src/libc.c new file mode 100644 index 000000000000..b40d1204d3bb --- /dev/null +++ b/depends/mimalloc/src/libc.c @@ -0,0 +1,417 @@ +/* ---------------------------------------------------------------------------- +Copyright (c) 2018-2024, Microsoft Research, Daan Leijen +This is free software; you can redistribute it and/or modify it under the +terms of the MIT license. A copy of the license can be found in the file +"LICENSE" at the root of this distribution. 
+-----------------------------------------------------------------------------*/ + +// -------------------------------------------------------- +// This module defines various std libc functions to reduce +// the dependency on libc, and also prevent errors caused +// by some libc implementations when called before `main` +// executes (due to malloc redirection) +// -------------------------------------------------------- + +#include "mimalloc.h" +#include "mimalloc/internal.h" +#include "mimalloc/prim.h" // mi_prim_getenv + +char _mi_toupper(char c) { + if (c >= 'a' && c <= 'z') return (c - 'a' + 'A'); + else return c; +} + +int _mi_strnicmp(const char* s, const char* t, size_t n) { + if (n == 0) return 0; + for (; *s != 0 && *t != 0 && n > 0; s++, t++, n--) { + if (_mi_toupper(*s) != _mi_toupper(*t)) break; + } + return (n == 0 ? 0 : *s - *t); +} + +void _mi_strlcpy(char* dest, const char* src, size_t dest_size) { + if (dest==NULL || src==NULL || dest_size == 0) return; + // copy until end of src, or when dest is (almost) full + while (*src != 0 && dest_size > 1) { + *dest++ = *src++; + dest_size--; + } + // always zero terminate + *dest = 0; +} + +void _mi_strlcat(char* dest, const char* src, size_t dest_size) { + if (dest==NULL || src==NULL || dest_size == 0) return; + // find end of string in the dest buffer + while (*dest != 0 && dest_size > 1) { + dest++; + dest_size--; + } + // and catenate + _mi_strlcpy(dest, src, dest_size); +} + +size_t _mi_strlen(const char* s) { + if (s==NULL) return 0; + size_t len = 0; + while(s[len] != 0) { len++; } + return len; +} + +size_t _mi_strnlen(const char* s, size_t max_len) { + if (s==NULL) return 0; + size_t len = 0; + while(s[len] != 0 && len < max_len) { len++; } + return len; +} + +#ifdef MI_NO_GETENV +bool _mi_getenv(const char* name, char* result, size_t result_size) { + MI_UNUSED(name); + MI_UNUSED(result); + MI_UNUSED(result_size); + return false; +} +#else +bool _mi_getenv(const char* name, char* result, size_t 
result_size) { + if (name==NULL || result == NULL || result_size < 64) return false; + return _mi_prim_getenv(name,result,result_size); +} +#endif + +// -------------------------------------------------------- +// Define our own limited `_mi_vsnprintf` and `_mi_snprintf` +// This is mostly to avoid calling these when libc is not yet +// initialized (and to reduce dependencies) +// +// format: d i, p x u, s +// prec: z l ll L +// width: 10 +// align-left: - +// fill: 0 +// plus: + +// -------------------------------------------------------- + +static void mi_outc(char c, char** out, char* end) { + char* p = *out; + if (p >= end) return; + *p = c; + *out = p + 1; +} + +static void mi_outs(const char* s, char** out, char* end) { + if (s == NULL) return; + char* p = *out; + while (*s != 0 && p < end) { + *p++ = *s++; + } + *out = p; +} + +static void mi_out_fill(char fill, size_t len, char** out, char* end) { + char* p = *out; + for (size_t i = 0; i < len && p < end; i++) { + *p++ = fill; + } + *out = p; +} + +static void mi_out_alignright(char fill, char* start, size_t len, size_t extra, char* end) { + if (len == 0 || extra == 0) return; + if (start + len + extra >= end) return; + // move `len` characters to the right (in reverse since it can overlap) + for (size_t i = 1; i <= len; i++) { + start[len + extra - i] = start[len - i]; + } + // and fill the start + for (size_t i = 0; i < extra; i++) { + start[i] = fill; + } +} + + +static void mi_out_num(uintmax_t x, size_t base, char prefix, char** out, char* end) +{ + if (x == 0 || base == 0 || base > 16) { + if (prefix != 0) { mi_outc(prefix, out, end); } + mi_outc('0',out,end); + } + else { + // output digits in reverse + char* start = *out; + while (x > 0) { + char digit = (char)(x % base); + mi_outc((digit <= 9 ? 
'0' + digit : 'A' + digit - 10),out,end); + x = x / base; + } + if (prefix != 0) { + mi_outc(prefix, out, end); + } + size_t len = *out - start; + // and reverse in-place + for (size_t i = 0; i < (len / 2); i++) { + char c = start[len - i - 1]; + start[len - i - 1] = start[i]; + start[i] = c; + } + } +} + + +#define MI_NEXTC() c = *in; if (c==0) break; in++; + +int _mi_vsnprintf(char* buf, size_t bufsize, const char* fmt, va_list args) { + if (buf == NULL || bufsize == 0 || fmt == NULL) return 0; + buf[bufsize - 1] = 0; + char* const end = buf + (bufsize - 1); + const char* in = fmt; + char* out = buf; + while (true) { + if (out >= end) break; + char c; + MI_NEXTC(); + if (c != '%') { + if (c == '\\') { + MI_NEXTC(); + switch (c) { + case 'e': mi_outc('\x1B', &out, end); break; + case 't': mi_outc('\t', &out, end); break; + case 'n': mi_outc('\n', &out, end); break; + case 'r': mi_outc('\r', &out, end); break; + case '\\': mi_outc('\\', &out, end); break; + default: /* ignore */ break; + } + } + else if ((c >= ' ' && c <= '~') || c=='\n' || c=='\r' || c=='\t' || c=='\x1b') { // output visible ascii or standard control only + mi_outc(c, &out, end); + } + } + else { + MI_NEXTC(); + char fill = ' '; + size_t width = 0; + char numtype = 'd'; + char numplus = 0; + bool alignright = true; + if (c == '+' || c == ' ') { numplus = c; MI_NEXTC(); } + if (c == '-') { alignright = false; MI_NEXTC(); } + if (c == '0') { fill = '0'; MI_NEXTC(); } + if (c >= '1' && c <= '9') { + width = (c - '0'); MI_NEXTC(); + while (c >= '0' && c <= '9') { + width = (10 * width) + (c - '0'); MI_NEXTC(); + } + if (c == 0) break; // extra check due to while + } + if (c == 'z' || c == 't' || c == 'L') { numtype = c; MI_NEXTC(); } + else if (c == 'l') { + numtype = c; MI_NEXTC(); + if (c == 'l') { numtype = 'L'; MI_NEXTC(); } + } + + char* start = out; + if (c == '%') { + mi_outc('%', &out, end); + } + else if (c == 's') { + // string + const char* s = va_arg(args, const char*); + mi_outs(s, &out, 
end); + } + else if (c == 'p' || c == 'x' || c == 'u') { + // unsigned + uintmax_t x = 0; + if (c == 'x' || c == 'u') { + if (numtype == 'z') x = va_arg(args, size_t); + else if (numtype == 't') x = va_arg(args, uintptr_t); // unsigned ptrdiff_t + else if (numtype == 'L') x = va_arg(args, unsigned long long); + else if (numtype == 'l') x = va_arg(args, unsigned long); + else x = va_arg(args, unsigned int); + } + else if (c == 'p') { + x = va_arg(args, uintptr_t); + mi_outs("0x", &out, end); + start = out; + width = (width >= 2 ? width - 2 : 0); + } + if (width == 0 && (c == 'x' || c == 'p')) { + if (c == 'p') { width = 2 * (x <= UINT32_MAX ? 4 : ((x >> 16) <= UINT32_MAX ? 6 : sizeof(void*))); } + if (width == 0) { width = 2; } + fill = '0'; + } + mi_out_num(x, (c == 'x' || c == 'p' ? 16 : 10), numplus, &out, end); + } + else if (c == 'i' || c == 'd') { + // signed + intmax_t x = 0; + if (numtype == 'z') x = va_arg(args, intptr_t ); + else if (numtype == 't') x = va_arg(args, ptrdiff_t); + else if (numtype == 'L') x = va_arg(args, long long); + else if (numtype == 'l') x = va_arg(args, long); + else x = va_arg(args, int); + char pre = 0; + if (x < 0) { + pre = '-'; + if (x > INTMAX_MIN) { x = -x; } + } + else if (numplus != 0) { + pre = numplus; + } + mi_out_num((uintmax_t)x, 10, pre, &out, end); + } + else if (c >= ' ' && c <= '~') { + // unknown format + mi_outc('%', &out, end); + mi_outc(c, &out, end); + } + + // fill & align + mi_assert_internal(out <= end); + mi_assert_internal(out >= start); + const size_t len = out - start; + if (len < width) { + mi_out_fill(fill, width - len, &out, end); + if (alignright && out <= end) { + mi_out_alignright(fill, start, len, width - len, end); + } + } + } + } + mi_assert_internal(out <= end); + *out = 0; + return (int)(out - buf); +} + +int _mi_snprintf(char* buf, size_t buflen, const char* fmt, ...) 
{ + va_list args; + va_start(args, fmt); + const int written = _mi_vsnprintf(buf, buflen, fmt, args); + va_end(args); + return written; +} + + + +// -------------------------------------------------------- +// generic trailing and leading zero count, and popcount +// -------------------------------------------------------- + +#if !MI_HAS_FAST_BITSCAN + +static size_t mi_ctz_generic32(uint32_t x) { + // de Bruijn multiplication, see + static const uint8_t debruijn[32] = { + 0, 1, 28, 2, 29, 14, 24, 3, 30, 22, 20, 15, 25, 17, 4, 8, + 31, 27, 13, 23, 21, 19, 16, 7, 26, 12, 18, 6, 11, 5, 10, 9 + }; + if (x==0) return 32; + return debruijn[(uint32_t)((x & -(int32_t)x) * (uint32_t)(0x077CB531U)) >> 27]; +} + +static size_t mi_clz_generic32(uint32_t x) { + // de Bruijn multiplication, see + static const uint8_t debruijn[32] = { + 31, 22, 30, 21, 18, 10, 29, 2, 20, 17, 15, 13, 9, 6, 28, 1, + 23, 19, 11, 3, 16, 14, 7, 24, 12, 4, 8, 25, 5, 26, 27, 0 + }; + if (x==0) return 32; + x |= x >> 1; + x |= x >> 2; + x |= x >> 4; + x |= x >> 8; + x |= x >> 16; + return debruijn[(uint32_t)(x * (uint32_t)(0x07C4ACDDU)) >> 27]; +} + +size_t _mi_ctz_generic(size_t x) { + if (x==0) return MI_SIZE_BITS; + #if (MI_SIZE_BITS <= 32) + return mi_ctz_generic32((uint32_t)x); + #else + const uint32_t lo = (uint32_t)x; + if (lo != 0) { + return mi_ctz_generic32(lo); + } + else { + return (32 + mi_ctz_generic32((uint32_t)(x>>32))); + } + #endif +} + +size_t _mi_clz_generic(size_t x) { + if (x==0) return MI_SIZE_BITS; + #if (MI_SIZE_BITS <= 32) + return mi_clz_generic32((uint32_t)x); + #else + const uint32_t hi = (uint32_t)(x>>32); + if (hi != 0) { + return mi_clz_generic32(hi); + } + else { + return 32 + mi_clz_generic32((uint32_t)x); + } + #endif +} + +#endif // bit scan + + +#if MI_SIZE_SIZE == 4 +#define mi_mask_even_bits32 (0x55555555) +#define mi_mask_even_pairs32 (0x33333333) +#define mi_mask_even_nibbles32 (0x0F0F0F0F) + +// sum of all the bytes in `x` if it is guaranteed that the sum < 256! 
+static size_t mi_byte_sum32(uint32_t x) { + // perform `x * 0x01010101`: the highest byte contains the sum of all bytes. + x += (x << 8); + x += (x << 16); + return (size_t)(x >> 24); +} + +static size_t mi_popcount_generic32(uint32_t x) { + // first count each 2-bit group `a`, where: a==0b00 -> 00, a==0b01 -> 01, a==0b10 -> 01, a==0b11 -> 10 + // in other words, `a - (a>>1)`; to do this in parallel, we need to mask to prevent spilling a bit pair + // into the lower bit-pair: + x = x - ((x >> 1) & mi_mask_even_bits32); + // add the 2-bit pair results + x = (x & mi_mask_even_pairs32) + ((x >> 2) & mi_mask_even_pairs32); + // add the 4-bit nibble results + x = (x + (x >> 4)) & mi_mask_even_nibbles32; + // each byte now has a count of its bits, we can sum them now: + return mi_byte_sum32(x); +} + +mi_decl_noinline size_t _mi_popcount_generic(size_t x) { + if (x<=1) return x; + if (~x==0) return MI_SIZE_BITS; + return mi_popcount_generic32(x); +} + +#else +#define mi_mask_even_bits64 (0x5555555555555555) +#define mi_mask_even_pairs64 (0x3333333333333333) +#define mi_mask_even_nibbles64 (0x0F0F0F0F0F0F0F0F) + +// sum of all the bytes in `x` if it is guaranteed that the sum < 256! +static size_t mi_byte_sum64(uint64_t x) { + x += (x << 8); + x += (x << 16); + x += (x << 32); + return (size_t)(x >> 56); +} + +static size_t mi_popcount_generic64(uint64_t x) { + x = x - ((x >> 1) & mi_mask_even_bits64); + x = (x & mi_mask_even_pairs64) + ((x >> 2) & mi_mask_even_pairs64); + x = (x + (x >> 4)) & mi_mask_even_nibbles64; + return mi_byte_sum64(x); +} + +mi_decl_noinline size_t _mi_popcount_generic(size_t x) { + if (x<=1) return x; + if (~x==0) return MI_SIZE_BITS; + return mi_popcount_generic64(x); +} +#endif + diff --git a/depends/mimalloc/src/options.c b/depends/mimalloc/src/options.c index 367bc0d27784..540d9f36c271 100644 --- a/depends/mimalloc/src/options.c +++ b/depends/mimalloc/src/options.c @@ -5,19 +5,12 @@ terms of the MIT license. 
A copy of the license can be found in the file "LICENSE" at the root of this distribution. -----------------------------------------------------------------------------*/ #include "mimalloc.h" -#include "mimalloc-internal.h" -#include "mimalloc-atomic.h" - -#include -#include // strtol -#include // strncpy, strncat, strlen, strstr -#include // toupper -#include - -#ifdef _MSC_VER -#pragma warning(disable:4996) // strncpy, strncat -#endif +#include "mimalloc/internal.h" +#include "mimalloc/atomic.h" +#include "mimalloc/prim.h" // mi_prim_out_stderr +#include // stdin/stdout +#include // abort static long mi_max_error_count = 16; // stop outputting errors after this (use < 0 for no limit) static long mi_max_warning_count = 16; // stop outputting warnings after this (use < 0 for no limit) @@ -28,9 +21,6 @@ int mi_version(void) mi_attr_noexcept { return MI_MALLOC_VERSION; } -#ifdef _WIN32 -#include -#endif // -------------------------------------------------------- // Options @@ -38,89 +28,238 @@ int mi_version(void) mi_attr_noexcept { // concurrently initialized, but an initializing data race // is ok since they resolve to the same value. // -------------------------------------------------------- -typedef enum mi_init_e { - UNINIT, // not yet initialized - DEFAULTED, // not found in the environment, use default value - INITIALIZED // found in environment or set explicitly -} mi_init_t; - -typedef struct mi_option_desc_s { - long value; // the value - mi_init_t init; // is it initialized yet? 
(from the environment) - mi_option_t option; // for debugging: the option index should match the option - const char* name; // option name without `mimalloc_` prefix - const char* legacy_name; // potential legacy v1.x option name -} mi_option_desc_t; + #define MI_OPTION(opt) mi_option_##opt, #opt, NULL #define MI_OPTION_LEGACY(opt,legacy) mi_option_##opt, #opt, #legacy -static mi_option_desc_t options[_mi_option_last] = +// Some options can be set at build time for statically linked libraries +// (use `-DMI_EXTRA_CPPDEFS="opt1=val1;opt2=val2"`) +// +// This is useful if we cannot pass them as environment variables +// (and setting them programmatically would be too late) + +#ifndef MI_DEFAULT_VERBOSE +#define MI_DEFAULT_VERBOSE 0 +#endif + +#ifndef MI_DEFAULT_EAGER_COMMIT +#define MI_DEFAULT_EAGER_COMMIT 1 +#endif + +#ifndef MI_DEFAULT_ARENA_EAGER_COMMIT +#define MI_DEFAULT_ARENA_EAGER_COMMIT 2 +#endif + +// in KiB +#ifndef MI_DEFAULT_ARENA_RESERVE + #if (MI_INTPTR_SIZE>4) + #define MI_DEFAULT_ARENA_RESERVE 1024L*1024L + #else + #define MI_DEFAULT_ARENA_RESERVE 128L*1024L + #endif +#endif + +#ifndef MI_DEFAULT_DISALLOW_ARENA_ALLOC +#define MI_DEFAULT_DISALLOW_ARENA_ALLOC 0 +#endif + +#ifndef MI_DEFAULT_ALLOW_LARGE_OS_PAGES +#if defined(__linux__) && !defined(__ANDROID__) +#define MI_DEFAULT_ALLOW_LARGE_OS_PAGES 2 // enabled, but only use transparent huge pages through madvise +#else +#define MI_DEFAULT_ALLOW_LARGE_OS_PAGES 0 +#endif +#endif + +#ifndef MI_DEFAULT_RESERVE_HUGE_OS_PAGES +#define MI_DEFAULT_RESERVE_HUGE_OS_PAGES 0 +#endif + +#ifndef MI_DEFAULT_RESERVE_OS_MEMORY +#define MI_DEFAULT_RESERVE_OS_MEMORY 0 +#endif + +#ifndef MI_DEFAULT_GUARDED_SAMPLE_RATE +#if MI_GUARDED +#define MI_DEFAULT_GUARDED_SAMPLE_RATE 4000 +#else +#define MI_DEFAULT_GUARDED_SAMPLE_RATE 0 +#endif +#endif + +#ifndef MI_DEFAULT_PAGEMAP_COMMIT +#if defined(__APPLE__) // when overloading malloc, we still get mixed pointers sometimes on macOS; this avoids a bad access +#define 
MI_DEFAULT_PAGEMAP_COMMIT 1 +#else +#define MI_DEFAULT_PAGEMAP_COMMIT 0 +#endif +#endif + +#ifndef MI_DEFAULT_PAGE_MAX_RECLAIM +#define MI_DEFAULT_PAGE_MAX_RECLAIM (-1) // unlimited +#endif + +#ifndef MI_DEFAULT_PAGE_CROSS_THREAD_MAX_RECLAIM +#define MI_DEFAULT_PAGE_CROSS_THREAD_MAX_RECLAIM 32 +#endif + +// Static options +static mi_option_desc_t mi_options[_mi_option_last] = { // stable options - #if MI_DEBUG || defined(MI_SHOW_ERRORS) - { 1, UNINIT, MI_OPTION(show_errors) }, - #else - { 0, UNINIT, MI_OPTION(show_errors) }, - #endif - { 0, UNINIT, MI_OPTION(show_stats) }, - { 0, UNINIT, MI_OPTION(verbose) }, - - // Some of the following options are experimental and not all combinations are valid. Use with care. - { 1, UNINIT, MI_OPTION(eager_commit) }, // commit per segment directly (8MiB) (but see also `eager_commit_delay`) - { 0, UNINIT, MI_OPTION(deprecated_eager_region_commit) }, - { 0, UNINIT, MI_OPTION(deprecated_reset_decommits) }, - { 0, UNINIT, MI_OPTION(large_os_pages) }, // use large OS pages, use only with eager commit to prevent fragmentation of VMA's - { 0, UNINIT, MI_OPTION(reserve_huge_os_pages) }, // per 1GiB huge pages - { -1, UNINIT, MI_OPTION(reserve_huge_os_pages_at) }, // reserve huge pages at node N - { 0, UNINIT, MI_OPTION(reserve_os_memory) }, - { 0, UNINIT, MI_OPTION(deprecated_segment_cache) }, // cache N segments per thread - { 0, UNINIT, MI_OPTION(page_reset) }, // reset page memory on free - { 0, UNINIT, MI_OPTION_LEGACY(abandoned_page_decommit, abandoned_page_reset) },// decommit free page memory when a thread terminates - { 0, UNINIT, MI_OPTION(deprecated_segment_reset) }, - #if defined(__NetBSD__) - { 0, UNINIT, MI_OPTION(eager_commit_delay) }, // the first N segments per thread are not eagerly committed - #elif defined(_WIN32) - { 4, UNINIT, MI_OPTION(eager_commit_delay) }, // the first N segments per thread are not eagerly committed (but per page in the segment on demand) - #else - { 1, UNINIT, MI_OPTION(eager_commit_delay) }, // 
the first N segments per thread are not eagerly committed (but per page in the segment on demand) - #endif - { 25, UNINIT, MI_OPTION_LEGACY(decommit_delay, reset_delay) }, // page decommit delay in milli-seconds - { 0, UNINIT, MI_OPTION(use_numa_nodes) }, // 0 = use available numa nodes, otherwise use at most N nodes. - { 0, UNINIT, MI_OPTION(limit_os_alloc) }, // 1 = do not use OS memory for allocation (but only reserved arenas) - { 100, UNINIT, MI_OPTION(os_tag) }, // only apple specific for now but might serve more or less related purpose - { 16, UNINIT, MI_OPTION(max_errors) }, // maximum errors that are output - { 16, UNINIT, MI_OPTION(max_warnings) }, // maximum warnings that are output - { 8, UNINIT, MI_OPTION(max_segment_reclaim)},// max. number of segment reclaims from the abandoned segments per try. - { 1, UNINIT, MI_OPTION(allow_decommit) }, // decommit slices when no longer used (after decommit_delay milli-seconds) - { 500, UNINIT, MI_OPTION(segment_decommit_delay) }, // decommit delay in milli-seconds for freed segments - { 2, UNINIT, MI_OPTION(decommit_extend_delay) } +#if MI_DEBUG || defined(MI_SHOW_ERRORS) + { 1, MI_OPTION_UNINIT, MI_OPTION(show_errors) }, +#else + { 0, MI_OPTION_UNINIT, MI_OPTION(show_errors) }, +#endif + { 0, MI_OPTION_UNINIT, MI_OPTION(show_stats) }, + { MI_DEFAULT_VERBOSE, MI_OPTION_UNINIT, MI_OPTION(verbose) }, + + // some of the following options are experimental and not all combinations are allowed. + { MI_DEFAULT_EAGER_COMMIT, + MI_OPTION_UNINIT, MI_OPTION(eager_commit) }, // commit per segment directly (4MiB) (but see also `eager_commit_delay`) + { MI_DEFAULT_ARENA_EAGER_COMMIT, + MI_OPTION_UNINIT, MI_OPTION_LEGACY(arena_eager_commit,eager_region_commit) }, // eager commit arena's? 2 is used to enable this only on an OS that has overcommit (i.e. 
linux) + { 1, MI_OPTION_UNINIT, MI_OPTION_LEGACY(purge_decommits,reset_decommits) }, // purge decommits memory (instead of reset) (note: on linux this uses MADV_DONTNEED for decommit) + { MI_DEFAULT_ALLOW_LARGE_OS_PAGES, + MI_OPTION_UNINIT, MI_OPTION_LEGACY(allow_large_os_pages,large_os_pages) }, // use large OS pages, use only with eager commit to prevent fragmentation of VMA's + { MI_DEFAULT_RESERVE_HUGE_OS_PAGES, + MI_OPTION_UNINIT, MI_OPTION(reserve_huge_os_pages) }, // per 1GiB huge pages + {-1, MI_OPTION_UNINIT, MI_OPTION(reserve_huge_os_pages_at) }, // reserve huge pages at node N + { MI_DEFAULT_RESERVE_OS_MEMORY, + MI_OPTION_UNINIT, MI_OPTION(reserve_os_memory) }, // reserve N KiB OS memory in advance (use `option_get_size`) + { 0, MI_OPTION_UNINIT, MI_OPTION(deprecated_segment_cache) }, // cache N segments per thread + { 0, MI_OPTION_UNINIT, MI_OPTION(deprecated_page_reset) }, // reset page memory on free + { 0, MI_OPTION_UNINIT, MI_OPTION(abandoned_page_purge) }, // purge free page memory when a thread terminates + { 0, MI_OPTION_UNINIT, MI_OPTION(deprecated_segment_reset) }, // reset segment memory on free (needs eager commit) +#if defined(__NetBSD__) + { 0, MI_OPTION_UNINIT, MI_OPTION(eager_commit_delay) }, // the first N segments per thread are not eagerly committed +#else + { 1, MI_OPTION_UNINIT, MI_OPTION(eager_commit_delay) }, // the first N segments per thread are not eagerly committed (but per page in the segment on demand) +#endif + { 1000,MI_OPTION_UNINIT, MI_OPTION_LEGACY(purge_delay,reset_delay) }, // purge delay in milli-seconds + { 0, MI_OPTION_UNINIT, MI_OPTION(use_numa_nodes) }, // 0 = use available numa nodes, otherwise use at most N nodes. 
+ { 0, MI_OPTION_UNINIT, MI_OPTION_LEGACY(disallow_os_alloc,limit_os_alloc) }, // 1 = do not use OS memory for allocation (but only reserved arenas) + { 100, MI_OPTION_UNINIT, MI_OPTION(os_tag) }, // only apple specific for now but might serve more or less related purpose + { 32, MI_OPTION_UNINIT, MI_OPTION(max_errors) }, // maximum errors that are output + { 32, MI_OPTION_UNINIT, MI_OPTION(max_warnings) }, // maximum warnings that are output + { 10, MI_OPTION_UNINIT, MI_OPTION(deprecated_max_segment_reclaim)}, // max. percentage of the abandoned segments to be reclaimed per try. + { 0, MI_OPTION_UNINIT, MI_OPTION(destroy_on_exit)}, // release all OS memory on process exit; careful with dangling pointer or after-exit frees! + { MI_DEFAULT_ARENA_RESERVE, MI_OPTION_UNINIT, MI_OPTION(arena_reserve) }, // reserve memory N KiB at a time (=1GiB) (use `option_get_size`) + { 1, MI_OPTION_UNINIT, MI_OPTION(arena_purge_mult) }, // purge delay multiplier for arena's + { 1, MI_OPTION_UNINIT, MI_OPTION_LEGACY(deprecated_purge_extend_delay, decommit_extend_delay) }, + { MI_DEFAULT_DISALLOW_ARENA_ALLOC, MI_OPTION_UNINIT, MI_OPTION(disallow_arena_alloc) }, // 1 = do not use arena's for allocation (except if using specific arena id's) + { 400, MI_OPTION_UNINIT, MI_OPTION(retry_on_oom) }, // windows only: retry on out-of-memory for N milli seconds (=400), set to 0 to disable retries. +#if defined(MI_VISIT_ABANDONED) + { 1, MI_OPTION_INITIALIZED, MI_OPTION(visit_abandoned) }, // allow visiting heap blocks in abandoned segments; requires taking locks during reclaim. 
+#else + { 0, MI_OPTION_UNINIT, MI_OPTION(visit_abandoned) }, +#endif + { 0, MI_OPTION_UNINIT, MI_OPTION(guarded_min) }, // only used when building with MI_GUARDED: minimal rounded object size for guarded objects + { MI_GiB, MI_OPTION_UNINIT, MI_OPTION(guarded_max) }, // only used when building with MI_GUARDED: maximal rounded object size for guarded objects + { 0, MI_OPTION_UNINIT, MI_OPTION(guarded_precise) }, // disregard minimal alignment requirement to always place guarded blocks exactly in front of a guard page (=0) + { MI_DEFAULT_GUARDED_SAMPLE_RATE, + MI_OPTION_UNINIT, MI_OPTION(guarded_sample_rate)}, // 1 out of N allocations in the min/max range will be guarded (=4000) + { 0, MI_OPTION_UNINIT, MI_OPTION(guarded_sample_seed)}, + { 10000, MI_OPTION_UNINIT, MI_OPTION(generic_collect) }, // collect heaps every N (=10000) generic allocation calls + { 0, MI_OPTION_UNINIT, MI_OPTION_LEGACY(page_reclaim_on_free, abandoned_reclaim_on_free) },// reclaim abandoned (small) pages on a free: -1 = disable completely, 0 = only reclaim into the originating heap, 1 = reclaim on free across heaps + { 2, MI_OPTION_UNINIT, MI_OPTION(page_full_retain) }, // number of (small) pages to retain in the free page queues + { 4, MI_OPTION_UNINIT, MI_OPTION(page_max_candidates) }, // max search to find a best page candidate + { 0, MI_OPTION_UNINIT, MI_OPTION(max_vabits) }, // max virtual address space bits + { MI_DEFAULT_PAGEMAP_COMMIT, + MI_OPTION_UNINIT, MI_OPTION(pagemap_commit) }, // commit the full pagemap upfront? 
+ { 0, MI_OPTION_UNINIT, MI_OPTION(page_commit_on_demand) }, // commit pages on-demand (2 disables this only on overcommit systems (like Linux)) + { MI_DEFAULT_PAGE_MAX_RECLAIM, + MI_OPTION_UNINIT, MI_OPTION(page_max_reclaim) }, // don't reclaim (small) pages of the same originating heap if we already own N pages in that size class + { MI_DEFAULT_PAGE_CROSS_THREAD_MAX_RECLAIM, + MI_OPTION_UNINIT, MI_OPTION(page_cross_thread_max_reclaim) }, // don't reclaim (small) pages across threads if we already own N pages in that size class }; static void mi_option_init(mi_option_desc_t* desc); +static bool mi_option_has_size_in_kib(mi_option_t option) { + return (option == mi_option_reserve_os_memory || option == mi_option_arena_reserve); +} + void _mi_options_init(void) { - // called on process load; should not be called before the CRT is initialized! - // (e.g. do not call this from process_init as that may run before CRT initialization) + // called on process load mi_add_stderr_output(); // now it safe to use stderr for output for(int i = 0; i < _mi_option_last; i++ ) { mi_option_t option = (mi_option_t)i; long l = mi_option_get(option); MI_UNUSED(l); // initialize - if (option != mi_option_verbose) { - mi_option_desc_t* desc = &options[option]; - _mi_verbose_message("option '%s': %ld\n", desc->name, desc->value); - } } mi_max_error_count = mi_option_get(mi_option_max_errors); mi_max_warning_count = mi_option_get(mi_option_max_warnings); + #if MI_GUARDED + if (mi_option_get(mi_option_guarded_sample_rate) > 0) { + if (mi_option_is_enabled(mi_option_allow_large_os_pages)) { + mi_option_disable(mi_option_allow_large_os_pages); + _mi_warning_message("option 'allow_large_os_pages' is disabled to allow for guarded objects\n"); + } + } + #endif + if (mi_option_is_enabled(mi_option_verbose)) { mi_options_print(); } +} + +#define mi_stringifyx(str) #str // and stringify +#define mi_stringify(str) mi_stringifyx(str) // expand + +void mi_options_print(void) mi_attr_noexcept +{ + // 
show version + const int vermajor = MI_MALLOC_VERSION/100; + const int verminor = (MI_MALLOC_VERSION%100)/10; + const int verpatch = (MI_MALLOC_VERSION%10); + _mi_message("v%i.%i.%i%s%s (built on %s, %s)\n", vermajor, verminor, verpatch, + #if defined(MI_CMAKE_BUILD_TYPE) + ", " mi_stringify(MI_CMAKE_BUILD_TYPE) + #else + "" + #endif + , + #if defined(MI_GIT_DESCRIBE) + ", git " mi_stringify(MI_GIT_DESCRIBE) + #else + "" + #endif + , __DATE__, __TIME__); + + // show options + for (int i = 0; i < _mi_option_last; i++) { + mi_option_t option = (mi_option_t)i; + long l = mi_option_get(option); MI_UNUSED(l); // possibly initialize + mi_option_desc_t* desc = &mi_options[option]; + _mi_message("option '%s': %ld %s\n", desc->name, desc->value, (mi_option_has_size_in_kib(option) ? "KiB" : "")); + } + + // show build configuration + _mi_message("debug level : %d\n", MI_DEBUG ); + _mi_message("secure level: %d\n", MI_SECURE ); + _mi_message("mem tracking: %s\n", MI_TRACK_TOOL); + #if MI_GUARDED + _mi_message("guarded build: %s\n", mi_option_get(mi_option_guarded_sample_rate) != 0 ? 
"enabled" : "disabled"); + #endif + #if MI_TSAN + _mi_message("thread santizer enabled\n"); + #endif +} + +long _mi_option_get_fast(mi_option_t option) { + mi_assert(option >= 0 && option < _mi_option_last); + mi_option_desc_t* desc = &mi_options[option]; + mi_assert(desc->option == option); // index should match the option + //mi_assert(desc->init != MI_OPTION_UNINIT); + return desc->value; } + mi_decl_nodiscard long mi_option_get(mi_option_t option) { mi_assert(option >= 0 && option < _mi_option_last); if (option < 0 || option >= _mi_option_last) return 0; - mi_option_desc_t* desc = &options[option]; + mi_option_desc_t* desc = &mi_options[option]; mi_assert(desc->option == option); // index should match the option - if mi_unlikely(desc->init == UNINIT) { + if mi_unlikely(desc->init == MI_OPTION_UNINIT) { mi_option_init(desc); } return desc->value; @@ -131,20 +270,36 @@ mi_decl_nodiscard long mi_option_get_clamp(mi_option_t option, long min, long ma return (x < min ? min : (x > max ? max : x)); } +mi_decl_nodiscard size_t mi_option_get_size(mi_option_t option) { + const long x = mi_option_get(option); + size_t size = (x < 0 ? 0 : (size_t)x); + if (mi_option_has_size_in_kib(option)) { + size *= MI_KiB; + } + return size; +} + void mi_option_set(mi_option_t option, long value) { mi_assert(option >= 0 && option < _mi_option_last); if (option < 0 || option >= _mi_option_last) return; - mi_option_desc_t* desc = &options[option]; + mi_option_desc_t* desc = &mi_options[option]; mi_assert(desc->option == option); // index should match the option desc->value = value; - desc->init = INITIALIZED; + desc->init = MI_OPTION_INITIALIZED; + // ensure min/max range; be careful to not recurse. 
+ if (desc->option == mi_option_guarded_min && _mi_option_get_fast(mi_option_guarded_max) < value) { + mi_option_set(mi_option_guarded_max, value); + } + else if (desc->option == mi_option_guarded_max && _mi_option_get_fast(mi_option_guarded_min) > value) { + mi_option_set(mi_option_guarded_min, value); + } } void mi_option_set_default(mi_option_t option, long value) { mi_assert(option >= 0 && option < _mi_option_last); if (option < 0 || option >= _mi_option_last) return; - mi_option_desc_t* desc = &options[option]; - if (desc->init != INITIALIZED) { + mi_option_desc_t* desc = &mi_options[option]; + if (desc->init != MI_OPTION_INITIALIZED) { desc->value = value; } } @@ -169,28 +324,11 @@ void mi_option_disable(mi_option_t option) { mi_option_set_enabled(option,false); } - static void mi_cdecl mi_out_stderr(const char* msg, void* arg) { MI_UNUSED(arg); - if (msg == NULL) return; - #ifdef _WIN32 - // on windows with redirection, the C runtime cannot handle locale dependent output - // after the main thread closes so we use direct console output. - if (!_mi_preloading()) { - // _cputs(msg); // _cputs cannot be used at is aborts if it fails to lock the console - static HANDLE hcon = INVALID_HANDLE_VALUE; - if (hcon == INVALID_HANDLE_VALUE) { - hcon = GetStdHandle(STD_ERROR_HANDLE); - } - const size_t len = strlen(msg); - if (hcon != INVALID_HANDLE_VALUE && len > 0 && len < UINT32_MAX) { - DWORD written = 0; - WriteConsoleA(hcon, msg, (DWORD)len, &written, NULL); - } + if (msg != NULL && msg[0] != 0) { + _mi_prim_out_stderr(msg); } - #else - fputs(msg, stderr); - #endif } // Since an output function can be registered earliest in the `main` @@ -198,16 +336,16 @@ static void mi_cdecl mi_out_stderr(const char* msg, void* arg) { // an output function is registered it is called immediately with // the output up to that point. 
#ifndef MI_MAX_DELAY_OUTPUT -#define MI_MAX_DELAY_OUTPUT ((size_t)(32*1024)) +#define MI_MAX_DELAY_OUTPUT ((size_t)(16*1024)) #endif -static char out_buf[MI_MAX_DELAY_OUTPUT+1]; +static char mi_output_buffer[MI_MAX_DELAY_OUTPUT+1]; static _Atomic(size_t) out_len; static void mi_cdecl mi_out_buf(const char* msg, void* arg) { MI_UNUSED(arg); if (msg==NULL) return; if (mi_atomic_load_relaxed(&out_len)>=MI_MAX_DELAY_OUTPUT) return; - size_t n = strlen(msg); + size_t n = _mi_strlen(msg); if (n==0) return; // claim space size_t start = mi_atomic_add_acq_rel(&out_len, n); @@ -216,7 +354,8 @@ static void mi_cdecl mi_out_buf(const char* msg, void* arg) { if (start+n >= MI_MAX_DELAY_OUTPUT) { n = MI_MAX_DELAY_OUTPUT-start-1; } - _mi_memcpy(&out_buf[start], msg, n); + mi_assert_internal(start + n <= MI_MAX_DELAY_OUTPUT); + _mi_memcpy(&mi_output_buffer[start], msg, n); } static void mi_out_buf_flush(mi_output_fun* out, bool no_more_buf, void* arg) { @@ -225,10 +364,10 @@ static void mi_out_buf_flush(mi_output_fun* out, bool no_more_buf, void* arg) { size_t count = mi_atomic_add_acq_rel(&out_len, (no_more_buf ? 
MI_MAX_DELAY_OUTPUT : 1)); // and output the current contents if (count>MI_MAX_DELAY_OUTPUT) count = MI_MAX_DELAY_OUTPUT; - out_buf[count] = 0; - out(out_buf,arg); + mi_output_buffer[count] = 0; + out(mi_output_buffer,arg); if (!no_more_buf) { - out_buf[count] = '\n'; // if continue with the buffer, insert a newline + mi_output_buffer[count] = '\n'; // if continue with the buffer, insert a newline } } @@ -264,10 +403,12 @@ void mi_register_output(mi_output_fun* out, void* arg) mi_attr_noexcept { } // add stderr to the delayed output after the module is loaded -static void mi_add_stderr_output() { +static void mi_add_stderr_output(void) { mi_assert_internal(mi_out_default == NULL); - mi_out_buf_flush(&mi_out_stderr, false, NULL); // flush current contents to stderr - mi_out_default = &mi_out_buf_stderr; // and add stderr to the delayed output + if (mi_out_default==NULL) { + mi_out_buf_flush(&mi_out_stderr, false, NULL); // flush current contents to stderr + mi_out_default = &mi_out_buf_stderr; // and add stderr to the delayed output + } } // -------------------------------------------------------- @@ -280,11 +421,11 @@ static _Atomic(size_t) warning_count; // = 0; // when >= max_warning_count stop // inside the C runtime causes another message. // In some cases (like on macOS) the loader already allocates which // calls into mimalloc; if we then access thread locals (like `recurse`) -// this may crash as the access may call _tlv_bootstrap that tries to +// this may crash as the access may call _tlv_bootstrap that tries to // (recursively) invoke malloc again to allocate space for the thread local // variables on demand. This is why we use a _mi_preloading test on such // platforms. However, C code generator may move the initial thread local address -// load before the `if` and we therefore split it out in a separate funcion. +// load before the `if` and we therefore split it out in a separate function. 
static mi_decl_thread bool recurse = false; static mi_decl_noinline bool mi_recurse_enter_prim(void) { @@ -298,21 +439,21 @@ static mi_decl_noinline void mi_recurse_exit_prim(void) { } static bool mi_recurse_enter(void) { - #if defined(__APPLE__) || defined(MI_TLS_RECURSE_GUARD) - if (_mi_preloading()) return true; + #if defined(__APPLE__) || defined(__ANDROID__) || defined(MI_TLS_RECURSE_GUARD) + if (_mi_preloading()) return false; #endif return mi_recurse_enter_prim(); } static void mi_recurse_exit(void) { - #if defined(__APPLE__) || defined(MI_TLS_RECURSE_GUARD) + #if defined(__APPLE__) || defined(__ANDROID__) || defined(MI_TLS_RECURSE_GUARD) if (_mi_preloading()) return; #endif mi_recurse_exit_prim(); } void _mi_fputs(mi_output_fun* out, void* arg, const char* prefix, const char* message) { - if (out==NULL || (FILE*)out==stdout || (FILE*)out==stderr) { // TODO: use mi_out_stderr for stderr? + if (out==NULL || (void*)out==(void*)stdout || (void*)out==(void*)stderr) { // TODO: use mi_out_stderr for stderr? if (!mi_recurse_enter()) return; out = mi_out_get_default(&arg); if (prefix != NULL) out(prefix, arg); @@ -326,12 +467,12 @@ void _mi_fputs(mi_output_fun* out, void* arg, const char* prefix, const char* me } // Define our own limited `fprintf` that avoids memory allocation. -// We do this using `snprintf` with a limited buffer. +// We do this using `_mi_vsnprintf` with a limited buffer. static void mi_vfprintf( mi_output_fun* out, void* arg, const char* prefix, const char* fmt, va_list args ) { - char buf[512]; + char buf[992]; if (fmt==NULL) return; if (!mi_recurse_enter()) return; - vsnprintf(buf,sizeof(buf)-1,fmt,args); + _mi_vsnprintf(buf, sizeof(buf)-1, fmt, args); mi_recurse_exit(); _mi_fputs(out,arg,prefix,buf); } @@ -344,9 +485,9 @@ void _mi_fprintf( mi_output_fun* out, void* arg, const char* fmt, ... 
) { } static void mi_vfprintf_thread(mi_output_fun* out, void* arg, const char* prefix, const char* fmt, va_list args) { - if (prefix != NULL && strlen(prefix) <= 32 && !_mi_is_main_thread()) { + if (prefix != NULL && _mi_strnlen(prefix,33) <= 32 && !_mi_is_main_thread()) { char tprefix[64]; - snprintf(tprefix, sizeof(tprefix), "%sthread 0x%zx: ", prefix, _mi_thread_id()); + _mi_snprintf(tprefix, sizeof(tprefix), "%sthread 0x%tx: ", prefix, (uintptr_t)_mi_thread_id()); mi_vfprintf(out, arg, tprefix, fmt, args); } else { @@ -354,6 +495,20 @@ static void mi_vfprintf_thread(mi_output_fun* out, void* arg, const char* prefix } } +void _mi_raw_message(const char* fmt, ...) { + va_list args; + va_start(args, fmt); + mi_vfprintf(NULL, NULL, NULL, fmt, args); + va_end(args); +} + +void _mi_message(const char* fmt, ...) { + va_list args; + va_start(args, fmt); + mi_vfprintf_thread(NULL, NULL, "mimalloc: ", fmt, args); + va_end(args); +} + void _mi_trace_message(const char* fmt, ...) { if (mi_option_get(mi_option_verbose) <= 1) return; // only with verbose level 2 or higher va_list args; @@ -391,7 +546,7 @@ void _mi_warning_message(const char* fmt, ...) { #if MI_DEBUG -void _mi_assert_fail(const char* assertion, const char* fname, unsigned line, const char* func ) { +mi_decl_noreturn mi_decl_cold void _mi_assert_fail(const char* assertion, const char* fname, unsigned line, const char* func ) mi_attr_noexcept { _mi_fprintf(NULL, NULL, "mimalloc: assertion failed: at \"%s\":%u, %s\n assertion: \"%s\"\n", fname, line, (func==NULL?"":func), assertion); abort(); } @@ -406,7 +561,7 @@ static _Atomic(void*) mi_error_arg; // = NULL static void mi_error_default(int err) { MI_UNUSED(err); -#if (MI_DEBUG>0) +#if (MI_DEBUG>0) if (err==EFAULT) { #ifdef _MSC_VER __debugbreak(); @@ -450,178 +605,80 @@ void _mi_error_message(int err, const char* fmt, ...) 
{ // Initialize options by checking the environment // -------------------------------------------------------- -static void mi_strlcpy(char* dest, const char* src, size_t dest_size) { - if (dest==NULL || src==NULL || dest_size == 0) return; - // copy until end of src, or when dest is (almost) full - while (*src != 0 && dest_size > 1) { - *dest++ = *src++; - dest_size--; - } - // always zero terminate - *dest = 0; -} - -static void mi_strlcat(char* dest, const char* src, size_t dest_size) { - if (dest==NULL || src==NULL || dest_size == 0) return; - // find end of string in the dest buffer - while (*dest != 0 && dest_size > 1) { - dest++; - dest_size--; - } - // and catenate - mi_strlcpy(dest, src, dest_size); -} +// TODO: implement ourselves to reduce dependencies on the C runtime +#include // strtol +#include // strstr -#ifdef MI_NO_GETENV -static bool mi_getenv(const char* name, char* result, size_t result_size) { - MI_UNUSED(name); - MI_UNUSED(result); - MI_UNUSED(result_size); - return false; -} -#else -static inline int mi_strnicmp(const char* s, const char* t, size_t n) { - if (n==0) return 0; - for (; *s != 0 && *t != 0 && n > 0; s++, t++, n--) { - if (toupper(*s) != toupper(*t)) break; - } - return (n==0 ? 0 : *s - *t); -} -#if defined _WIN32 -// On Windows use GetEnvironmentVariable instead of getenv to work -// reliably even when this is invoked before the C runtime is initialized. -// i.e. when `_mi_preloading() == true`. -// Note: on windows, environment names are not case sensitive. -#include -static bool mi_getenv(const char* name, char* result, size_t result_size) { - result[0] = 0; - size_t len = GetEnvironmentVariableA(name, result, (DWORD)result_size); - return (len > 0 && len < result_size); -} -#elif !defined(MI_USE_ENVIRON) || (MI_USE_ENVIRON!=0) -// On Posix systemsr use `environ` to acces environment variables -// even before the C runtime is initialized. 
-#if defined(__APPLE__) && defined(__has_include) && __has_include() -#include -static char** mi_get_environ(void) { - return (*_NSGetEnviron()); -} -#else -extern char** environ; -static char** mi_get_environ(void) { - return environ; -} -#endif -static bool mi_getenv(const char* name, char* result, size_t result_size) { - if (name==NULL) return false; - const size_t len = strlen(name); - if (len == 0) return false; - char** env = mi_get_environ(); - if (env == NULL) return false; - // compare up to 256 entries - for (int i = 0; i < 256 && env[i] != NULL; i++) { - const char* s = env[i]; - if (mi_strnicmp(name, s, len) == 0 && s[len] == '=') { // case insensitive - // found it - mi_strlcpy(result, s + len + 1, result_size); - return true; - } - } - return false; -} -#else -// fallback: use standard C `getenv` but this cannot be used while initializing the C runtime -static bool mi_getenv(const char* name, char* result, size_t result_size) { - // cannot call getenv() when still initializing the C runtime. - if (_mi_preloading()) return false; - const char* s = getenv(name); - if (s == NULL) { - // we check the upper case name too. 
- char buf[64+1]; - size_t len = strlen(name); - if (len >= sizeof(buf)) len = sizeof(buf) - 1; - for (size_t i = 0; i < len; i++) { - buf[i] = toupper(name[i]); - } - buf[len] = 0; - s = getenv(buf); - } - if (s != NULL && strlen(s) < result_size) { - mi_strlcpy(result, s, result_size); - return true; - } - else { - return false; - } -} -#endif // !MI_USE_ENVIRON -#endif // !MI_NO_GETENV -static void mi_option_init(mi_option_desc_t* desc) { +static void mi_option_init(mi_option_desc_t* desc) { // Read option value from the environment - char s[64+1]; + char s[64 + 1]; char buf[64+1]; - mi_strlcpy(buf, "mimalloc_", sizeof(buf)); - mi_strlcat(buf, desc->name, sizeof(buf)); - bool found = mi_getenv(buf,s,sizeof(s)); + _mi_strlcpy(buf, "mimalloc_", sizeof(buf)); + _mi_strlcat(buf, desc->name, sizeof(buf)); + bool found = _mi_getenv(buf, s, sizeof(s)); if (!found && desc->legacy_name != NULL) { - mi_strlcpy(buf, "mimalloc_", sizeof(buf)); - mi_strlcat(buf, desc->legacy_name, sizeof(buf)); - found = mi_getenv(buf,s,sizeof(s)); + _mi_strlcpy(buf, "mimalloc_", sizeof(buf)); + _mi_strlcat(buf, desc->legacy_name, sizeof(buf)); + found = _mi_getenv(buf, s, sizeof(s)); if (found) { - _mi_warning_message("environment option \"mimalloc_%s\" is deprecated -- use \"mimalloc_%s\" instead.\n", desc->legacy_name, desc->name ); - } + _mi_warning_message("environment option \"mimalloc_%s\" is deprecated -- use \"mimalloc_%s\" instead.\n", desc->legacy_name, desc->name); + } } if (found) { - size_t len = strlen(s); - if (len >= sizeof(buf)) len = sizeof(buf) - 1; + size_t len = _mi_strnlen(s, sizeof(buf) - 1); for (size_t i = 0; i < len; i++) { - buf[i] = (char)toupper(s[i]); + buf[i] = _mi_toupper(s[i]); } buf[len] = 0; - if (buf[0]==0 || strstr("1;TRUE;YES;ON", buf) != NULL) { + if (buf[0] == 0 || strstr("1;TRUE;YES;ON", buf) != NULL) { desc->value = 1; - desc->init = INITIALIZED; + desc->init = MI_OPTION_INITIALIZED; } else if (strstr("0;FALSE;NO;OFF", buf) != NULL) { desc->value = 
0; - desc->init = INITIALIZED; + desc->init = MI_OPTION_INITIALIZED; } else { char* end = buf; long value = strtol(buf, &end, 10); - if (desc->option == mi_option_reserve_os_memory) { - // this option is interpreted in KiB to prevent overflow of `long` + if (mi_option_has_size_in_kib(desc->option)) { + // this option is interpreted in KiB to prevent overflow of `long` for large allocations + // (long is 32-bit on 64-bit windows, which allows for 4TiB max.) + size_t size = (value < 0 ? 0 : (size_t)value); + bool overflow = false; if (*end == 'K') { end++; } - else if (*end == 'M') { value *= MI_KiB; end++; } - else if (*end == 'G') { value *= MI_MiB; end++; } - else { value = (value + MI_KiB - 1) / MI_KiB; } - if (end[0] == 'I' && end[1] == 'B') { end += 2; } - else if (*end == 'B') { end++; } + else if (*end == 'M') { overflow = mi_mul_overflow(size,MI_KiB,&size); end++; } + else if (*end == 'G') { overflow = mi_mul_overflow(size,MI_MiB,&size); end++; } + else if (*end == 'T') { overflow = mi_mul_overflow(size,MI_GiB,&size); end++; } + else { size = (size + MI_KiB - 1) / MI_KiB; } + if (end[0] == 'I' && end[1] == 'B') { end += 2; } // KiB, MiB, GiB, TiB + else if (*end == 'B') { end++; } // Kb, Mb, Gb, Tb + if (overflow || size > MI_MAX_ALLOC_SIZE) { size = (MI_MAX_ALLOC_SIZE / MI_KiB); } + value = (size > LONG_MAX ? LONG_MAX : (long)size); } if (*end == 0) { - desc->value = value; - desc->init = INITIALIZED; + mi_option_set(desc->option, value); } else { // set `init` first to avoid recursion through _mi_warning_message on mimalloc_verbose. 
- desc->init = DEFAULTED; + desc->init = MI_OPTION_DEFAULTED; if (desc->option == mi_option_verbose && desc->value == 0) { // if the 'mimalloc_verbose' env var has a bogus value we'd never know // (since the value defaults to 'off') so in that case briefly enable verbose desc->value = 1; - _mi_warning_message("environment option mimalloc_%s has an invalid value.\n", desc->name ); + _mi_warning_message("environment option mimalloc_%s has an invalid value.\n", desc->name); desc->value = 0; } else { - _mi_warning_message("environment option mimalloc_%s has an invalid value.\n", desc->name ); + _mi_warning_message("environment option mimalloc_%s has an invalid value.\n", desc->name); } } } - mi_assert_internal(desc->init != UNINIT); + mi_assert_internal(desc->init != MI_OPTION_UNINIT); } else if (!_mi_preloading()) { - desc->init = DEFAULTED; + desc->init = MI_OPTION_DEFAULTED; } } diff --git a/depends/mimalloc/src/os.c b/depends/mimalloc/src/os.c index 6d72498730c9..0224bce62591 100644 --- a/depends/mimalloc/src/os.c +++ b/depends/mimalloc/src/os.c @@ -1,118 +1,80 @@ /* ---------------------------------------------------------------------------- -Copyright (c) 2018-2021, Microsoft Research, Daan Leijen +Copyright (c) 2018-2025, Microsoft Research, Daan Leijen This is free software; you can redistribute it and/or modify it under the terms of the MIT license. A copy of the license can be found in the file "LICENSE" at the root of this distribution. -----------------------------------------------------------------------------*/ -#ifndef _DEFAULT_SOURCE -#define _DEFAULT_SOURCE // ensure mmap flags are defined -#endif - -#if defined(__sun) -// illumos provides new mman.h api when any of these are defined -// otherwise the old api based on caddr_t which predates the void pointers one. -// stock solaris provides only the former, chose to atomically to discard those -// flags only here rather than project wide tough. 
-#undef _XOPEN_SOURCE -#undef _POSIX_C_SOURCE -#endif #include "mimalloc.h" -#include "mimalloc-internal.h" -#include "mimalloc-atomic.h" +#include "mimalloc/internal.h" +#include "mimalloc/atomic.h" +#include "mimalloc/prim.h" -#include // strerror - -#ifdef _MSC_VER -#pragma warning(disable:4996) // strerror +/* ----------------------------------------------------------- + Initialization. +----------------------------------------------------------- */ +#ifndef MI_DEFAULT_PHYSICAL_MEMORY_IN_KIB +#if MI_INTPTR_SIZE < 8 +#define MI_DEFAULT_PHYSICAL_MEMORY_IN_KIB 4*MI_MiB // 4 GiB +#else +#define MI_DEFAULT_PHYSICAL_MEMORY_IN_KIB 32*MI_MiB // 32 GiB #endif - -#if defined(__wasi__) -#define MI_USE_SBRK #endif -#if defined(_WIN32) -#include -#elif defined(__wasi__) -#include // sbrk +#if defined(__APPLE__) && defined(__aarch64__) +#define MI_PLATFORM_PAGE_SIZE 16*MI_KiB // 16 KiB +#elif defined(__EMSCRIPTEN__) +#define MI_PLATFORM_PAGE_SIZE 64*MI_KiB // 64 KiB #else -#include // mmap -#include // sysconf -#if defined(__linux__) -#include -#include -#if defined(__GLIBC__) -#include // linux mmap flags -#else -#include -#endif -#endif -#if defined(__APPLE__) -#include -#if !TARGET_IOS_IPHONE && !TARGET_IOS_SIMULATOR -#include -#endif -#endif -#if defined(__FreeBSD__) || defined(__DragonFly__) -#include -#if __FreeBSD_version >= 1200000 -#include -#include -#endif -#include -#endif +#define MI_PLATFORM_PAGE_SIZE 4*MI_KiB // 4 KiB #endif -/* ----------------------------------------------------------- - Initialization. - On windows initializes support for aligned allocation and - large OS pages (if MIMALLOC_LARGE_OS_PAGES is true). 
------------------------------------------------------------ */ -bool _mi_os_decommit(void* addr, size_t size, mi_stats_t* stats); -bool _mi_os_commit(void* addr, size_t size, bool* is_zero, mi_stats_t* tld_stats); +static mi_os_mem_config_t mi_os_mem_config = { + MI_PLATFORM_PAGE_SIZE, // page size + 0, // large page size (usually 2MiB) + MI_PLATFORM_PAGE_SIZE, // allocation granularity + MI_DEFAULT_PHYSICAL_MEMORY_IN_KIB, + MI_MAX_VABITS, // in `bits.h` + true, // has overcommit? (if true we use MAP_NORESERVE on mmap systems) + false, // can we partially free allocated blocks? (on mmap systems we can free anywhere in a mapped range, but on Windows we must free the entire span) + true // has virtual reserve? (if true we can reserve virtual address space without using commit or physical memory) +}; -static void* mi_align_up_ptr(void* p, size_t alignment) { - return (void*)_mi_align_up((uintptr_t)p, alignment); +bool _mi_os_has_overcommit(void) { + return mi_os_mem_config.has_overcommit; } -static void* mi_align_down_ptr(void* p, size_t alignment) { - return (void*)_mi_align_down((uintptr_t)p, alignment); +bool _mi_os_has_virtual_reserve(void) { + return mi_os_mem_config.has_virtual_reserve; } -// page size (initialized properly in `os_init`) -static size_t os_page_size = 4096; - -// minimal allocation granularity -static size_t os_alloc_granularity = 4096; - -// if non-zero, use large page allocation -static size_t large_os_page_size = 0; - -// is memory overcommit allowed? -// set dynamically in _mi_os_init (and if true we use MAP_NORESERVE) -static bool os_overcommit = true; - -bool _mi_os_has_overcommit(void) { - return os_overcommit; -} - // OS (small) page size size_t _mi_os_page_size(void) { - return os_page_size; + return mi_os_mem_config.page_size; } // if large OS pages are supported (2 or 4MiB), then return the size, otherwise return the small page size (4KiB) size_t _mi_os_large_page_size(void) { - return (large_os_page_size != 0 ? 
large_os_page_size : _mi_os_page_size()); + return (mi_os_mem_config.large_page_size != 0 ? mi_os_mem_config.large_page_size : _mi_os_page_size()); +} + +size_t _mi_os_guard_page_size(void) { + const size_t gsize = _mi_os_page_size(); + mi_assert(gsize <= (MI_ARENA_SLICE_SIZE/8)); + return gsize; +} + +size_t _mi_os_virtual_address_bits(void) { + const size_t vbits = mi_os_mem_config.virtual_address_bits; + mi_assert(vbits <= MI_MAX_VABITS); + return vbits; } -#if !defined(MI_USE_SBRK) && !defined(__wasi__) -static bool use_large_os_page(size_t size, size_t alignment) { +bool _mi_os_use_large_page(size_t size, size_t alignment) { // if we have access, check the size and alignment requirements - if (large_os_page_size == 0 || !mi_option_is_enabled(mi_option_large_os_pages)) return false; - return ((size % large_os_page_size) == 0 && (alignment % large_os_page_size) == 0); + if (mi_os_mem_config.large_page_size == 0 || !mi_option_is_enabled(mi_option_allow_large_os_pages)) return false; + return ((size % mi_os_mem_config.large_page_size) == 0 && (alignment % mi_os_mem_config.large_page_size) == 0); } -#endif // round to a good OS allocation size (bounded by max 12.5% waste) size_t _mi_os_good_alloc_size(size_t size) { @@ -126,575 +88,147 @@ size_t _mi_os_good_alloc_size(size_t size) { return _mi_align_up(size, align_size); } -#if defined(_WIN32) -// We use VirtualAlloc2 for aligned allocation, but it is only supported on Windows 10 and Windows Server 2016. -// So, we need to look it up dynamically to run on older systems. (use __stdcall for 32-bit compatibility) -// NtAllocateVirtualAllocEx is used for huge OS page allocation (1GiB) -// We define a minimal MEM_EXTENDED_PARAMETER ourselves in order to be able to compile with older SDK's. 
-typedef enum MI_MEM_EXTENDED_PARAMETER_TYPE_E { - MiMemExtendedParameterInvalidType = 0, - MiMemExtendedParameterAddressRequirements, - MiMemExtendedParameterNumaNode, - MiMemExtendedParameterPartitionHandle, - MiMemExtendedParameterUserPhysicalHandle, - MiMemExtendedParameterAttributeFlags, - MiMemExtendedParameterMax -} MI_MEM_EXTENDED_PARAMETER_TYPE; - -typedef struct DECLSPEC_ALIGN(8) MI_MEM_EXTENDED_PARAMETER_S { - struct { DWORD64 Type : 8; DWORD64 Reserved : 56; } Type; - union { DWORD64 ULong64; PVOID Pointer; SIZE_T Size; HANDLE Handle; DWORD ULong; } Arg; -} MI_MEM_EXTENDED_PARAMETER; - -typedef struct MI_MEM_ADDRESS_REQUIREMENTS_S { - PVOID LowestStartingAddress; - PVOID HighestEndingAddress; - SIZE_T Alignment; -} MI_MEM_ADDRESS_REQUIREMENTS; - -#define MI_MEM_EXTENDED_PARAMETER_NONPAGED_HUGE 0x00000010 - -#include -typedef PVOID (__stdcall *PVirtualAlloc2)(HANDLE, PVOID, SIZE_T, ULONG, ULONG, MI_MEM_EXTENDED_PARAMETER*, ULONG); -typedef NTSTATUS (__stdcall *PNtAllocateVirtualMemoryEx)(HANDLE, PVOID*, SIZE_T*, ULONG, ULONG, MI_MEM_EXTENDED_PARAMETER*, ULONG); -static PVirtualAlloc2 pVirtualAlloc2 = NULL; -static PNtAllocateVirtualMemoryEx pNtAllocateVirtualMemoryEx = NULL; - -// Similarly, GetNumaProcesorNodeEx is only supported since Windows 7 -typedef struct MI_PROCESSOR_NUMBER_S { WORD Group; BYTE Number; BYTE Reserved; } MI_PROCESSOR_NUMBER; - -typedef VOID (__stdcall *PGetCurrentProcessorNumberEx)(MI_PROCESSOR_NUMBER* ProcNumber); -typedef BOOL (__stdcall *PGetNumaProcessorNodeEx)(MI_PROCESSOR_NUMBER* Processor, PUSHORT NodeNumber); -typedef BOOL (__stdcall* PGetNumaNodeProcessorMaskEx)(USHORT Node, PGROUP_AFFINITY ProcessorMask); -static PGetCurrentProcessorNumberEx pGetCurrentProcessorNumberEx = NULL; -static PGetNumaProcessorNodeEx pGetNumaProcessorNodeEx = NULL; -static PGetNumaNodeProcessorMaskEx pGetNumaNodeProcessorMaskEx = NULL; - -static bool mi_win_enable_large_os_pages(void) -{ - if (large_os_page_size > 0) return true; - - // Try to 
see if large OS pages are supported - // To use large pages on Windows, we first need access permission - // Set "Lock pages in memory" permission in the group policy editor - // - unsigned long err = 0; - HANDLE token = NULL; - BOOL ok = OpenProcessToken(GetCurrentProcess(), TOKEN_ADJUST_PRIVILEGES | TOKEN_QUERY, &token); - if (ok) { - TOKEN_PRIVILEGES tp; - ok = LookupPrivilegeValue(NULL, TEXT("SeLockMemoryPrivilege"), &tp.Privileges[0].Luid); - if (ok) { - tp.PrivilegeCount = 1; - tp.Privileges[0].Attributes = SE_PRIVILEGE_ENABLED; - ok = AdjustTokenPrivileges(token, FALSE, &tp, 0, (PTOKEN_PRIVILEGES)NULL, 0); - if (ok) { - err = GetLastError(); - ok = (err == ERROR_SUCCESS); - if (ok) { - large_os_page_size = GetLargePageMinimum(); - } - } - } - CloseHandle(token); - } - if (!ok) { - if (err == 0) err = GetLastError(); - _mi_warning_message("cannot enable large OS page support, error %lu\n", err); - } - return (ok!=0); -} - -void _mi_os_init(void) -{ - os_overcommit = false; - // get the page size - SYSTEM_INFO si; - GetSystemInfo(&si); - if (si.dwPageSize > 0) os_page_size = si.dwPageSize; - if (si.dwAllocationGranularity > 0) os_alloc_granularity = si.dwAllocationGranularity; - // get the VirtualAlloc2 function - HINSTANCE hDll; - hDll = LoadLibrary(TEXT("kernelbase.dll")); - if (hDll != NULL) { - // use VirtualAlloc2FromApp if possible as it is available to Windows store apps - pVirtualAlloc2 = (PVirtualAlloc2)(void (*)(void))GetProcAddress(hDll, "VirtualAlloc2FromApp"); - if (pVirtualAlloc2==NULL) pVirtualAlloc2 = (PVirtualAlloc2)(void (*)(void))GetProcAddress(hDll, "VirtualAlloc2"); - FreeLibrary(hDll); - } - // NtAllocateVirtualMemoryEx is used for huge page allocation - hDll = LoadLibrary(TEXT("ntdll.dll")); - if (hDll != NULL) { - pNtAllocateVirtualMemoryEx = (PNtAllocateVirtualMemoryEx)(void (*)(void))GetProcAddress(hDll, "NtAllocateVirtualMemoryEx"); - FreeLibrary(hDll); - } - // Try to use Win7+ numa API - hDll = LoadLibrary(TEXT("kernel32.dll")); - 
if (hDll != NULL) { - pGetCurrentProcessorNumberEx = (PGetCurrentProcessorNumberEx)(void (*)(void))GetProcAddress(hDll, "GetCurrentProcessorNumberEx"); - pGetNumaProcessorNodeEx = (PGetNumaProcessorNodeEx)(void (*)(void))GetProcAddress(hDll, "GetNumaProcessorNodeEx"); - pGetNumaNodeProcessorMaskEx = (PGetNumaNodeProcessorMaskEx)(void (*)(void))GetProcAddress(hDll, "GetNumaNodeProcessorMaskEx"); - FreeLibrary(hDll); - } - if (mi_option_is_enabled(mi_option_large_os_pages) || mi_option_is_enabled(mi_option_reserve_huge_os_pages)) { - mi_win_enable_large_os_pages(); - } -} -#elif defined(__wasi__) -void _mi_os_init(void) { - os_overcommit = false; - os_page_size = 64*MI_KiB; // WebAssembly has a fixed page size: 64KiB - os_alloc_granularity = 16; -} - -#else // generic unix - -static void os_detect_overcommit(void) { -#if defined(__linux__) - int fd = open("/proc/sys/vm/overcommit_memory", O_RDONLY); - if (fd < 0) return; - char buf[32]; - ssize_t nread = read(fd, &buf, sizeof(buf)); - close(fd); - // - // 0: heuristic overcommit, 1: always overcommit, 2: never overcommit (ignore NORESERVE) - if (nread >= 1) { - os_overcommit = (buf[0] == '0' || buf[0] == '1'); - } -#elif defined(__FreeBSD__) - int val = 0; - size_t olen = sizeof(val); - if (sysctlbyname("vm.overcommit", &val, &olen, NULL, 0) == 0) { - os_overcommit = (val != 0); - } -#else - // default: overcommit is true -#endif -} - void _mi_os_init(void) { - // get the page size - long result = sysconf(_SC_PAGESIZE); - if (result > 0) { - os_page_size = (size_t)result; - os_alloc_granularity = os_page_size; - } - large_os_page_size = 2*MI_MiB; // TODO: can we query the OS for this? 
- os_detect_overcommit(); -} -#endif - - -#if defined(MADV_NORMAL) -static int mi_madvise(void* addr, size_t length, int advice) { - #if defined(__sun) - return madvise((caddr_t)addr, length, advice); // Solaris needs cast (issue #520) - #else - return madvise(addr, length, advice); - #endif + _mi_prim_mem_init(&mi_os_mem_config); } -#endif /* ----------------------------------------------------------- - aligned hinting + Util -------------------------------------------------------------- */ +bool _mi_os_decommit(void* addr, size_t size); +bool _mi_os_commit(void* addr, size_t size, bool* is_zero); -// On 64-bit systems, we can do efficient aligned allocation by using -// the 2TiB to 30TiB area to allocate those. -#if (MI_INTPTR_SIZE >= 8) -static mi_decl_cache_align _Atomic(uintptr_t)aligned_base; - -// Return a MI_SEGMENT_SIZE aligned address that is probably available. -// If this returns NULL, the OS will determine the address but on some OS's that may not be -// properly aligned which can be more costly as it needs to be adjusted afterwards. -// For a size > 1GiB this always returns NULL in order to guarantee good ASLR randomization; -// (otherwise an initial large allocation of say 2TiB has a 50% chance to include (known) addresses -// in the middle of the 2TiB - 6TiB address range (see issue #372)) - -#define MI_HINT_BASE ((uintptr_t)2 << 40) // 2TiB start -#define MI_HINT_AREA ((uintptr_t)4 << 40) // upto 6TiB (since before win8 there is "only" 8TiB available to processes) -#define MI_HINT_MAX ((uintptr_t)30 << 40) // wrap after 30TiB (area after 32TiB is used for huge OS pages) - -static void* mi_os_get_aligned_hint(size_t try_alignment, size_t size) -{ - if (try_alignment <= 1 || try_alignment > MI_SEGMENT_SIZE) return NULL; - size = _mi_align_up(size, MI_SEGMENT_SIZE); - if (size > 1*MI_GiB) return NULL; // guarantee the chance of fixed valid address is at most 1/(MI_HINT_AREA / 1<<30) = 1/4096. 
- #if (MI_SECURE>0) - size += MI_SEGMENT_SIZE; // put in `MI_SEGMENT_SIZE` virtual gaps between hinted blocks; this splits VLA's but increases guarded areas. - #endif - - uintptr_t hint = mi_atomic_add_acq_rel(&aligned_base, size); - if (hint == 0 || hint > MI_HINT_MAX) { // wrap or initialize - uintptr_t init = MI_HINT_BASE; - #if (MI_SECURE>0 || MI_DEBUG==0) // security: randomize start of aligned allocations unless in debug mode - uintptr_t r = _mi_heap_random_next(mi_get_default_heap()); - init = init + ((MI_SEGMENT_SIZE * ((r>>17) & 0xFFFFF)) % MI_HINT_AREA); // (randomly 20 bits)*4MiB == 0 to 4TiB - #endif - uintptr_t expected = hint + size; - mi_atomic_cas_strong_acq_rel(&aligned_base, &expected, init); - hint = mi_atomic_add_acq_rel(&aligned_base, size); // this may still give 0 or > MI_HINT_MAX but that is ok, it is a hint after all - } - if (hint%try_alignment != 0) return NULL; - return (void*)hint; -} -#else -static void* mi_os_get_aligned_hint(size_t try_alignment, size_t size) { +void* _mi_os_get_aligned_hint(size_t try_alignment, size_t size) { MI_UNUSED(try_alignment); MI_UNUSED(size); return NULL; } -#endif - -/* ----------------------------------------------------------- - Free memory --------------------------------------------------------------- */ - -static bool mi_os_mem_free(void* addr, size_t size, bool was_committed, mi_stats_t* stats) -{ - if (addr == NULL || size == 0) return true; // || _mi_os_is_huge_reserved(addr) - bool err = false; -#if defined(_WIN32) - DWORD errcode = 0; - err = (VirtualFree(addr, 0, MEM_RELEASE) == 0); - if (err) { errcode = GetLastError(); } - if (errcode == ERROR_INVALID_ADDRESS) { - // In mi_os_mem_alloc_aligned the fallback path may have returned a pointer inside - // the memory region returned by VirtualAlloc; in that case we need to free using - // the start of the region. 
- MEMORY_BASIC_INFORMATION info = { 0 }; - VirtualQuery(addr, &info, sizeof(info)); - if (info.AllocationBase < addr && ((uint8_t*)addr - (uint8_t*)info.AllocationBase) < (ptrdiff_t)MI_SEGMENT_SIZE) { - errcode = 0; - err = (VirtualFree(info.AllocationBase, 0, MEM_RELEASE) == 0); - if (err) { errcode = GetLastError(); } - } - } - if (errcode != 0) { - _mi_warning_message("unable to release OS memory: error code 0x%x, addr: %p, size: %zu\n", errcode, addr, size); - } -#elif defined(MI_USE_SBRK) || defined(__wasi__) - err = false; // sbrk heap cannot be shrunk -#else - err = (munmap(addr, size) == -1); - if (err) { - _mi_warning_message("unable to release OS memory: %s, addr: %p, size: %zu\n", strerror(errno), addr, size); - } -#endif - if (was_committed) { _mi_stat_decrease(&stats->committed, size); } - _mi_stat_decrease(&stats->reserved, size); - return !err; -} /* ----------------------------------------------------------- - Raw allocation on Windows (VirtualAlloc) --------------------------------------------------------------- */ - -#ifdef _WIN32 - -#define MEM_COMMIT_RESERVE (MEM_COMMIT|MEM_RESERVE) + Guard page allocation +----------------------------------------------------------- */ -static void* mi_win_virtual_allocx(void* addr, size_t size, size_t try_alignment, DWORD flags) { -#if (MI_INTPTR_SIZE >= 8) - // on 64-bit systems, try to use the virtual address area after 2TiB for 4MiB aligned allocations - if (addr == NULL) { - void* hint = mi_os_get_aligned_hint(try_alignment,size); - if (hint != NULL) { - void* p = VirtualAlloc(hint, size, flags, PAGE_READWRITE); - if (p != NULL) return p; - _mi_verbose_message("warning: unable to allocate hinted aligned OS memory (%zu bytes, error code: 0x%x, address: %p, alignment: %zu, flags: 0x%x)\n", size, GetLastError(), hint, try_alignment, flags); - // fall through on error - } - } -#endif - // on modern Windows try use VirtualAlloc2 for aligned allocation - if (try_alignment > 1 && (try_alignment % 
_mi_os_page_size()) == 0 && pVirtualAlloc2 != NULL) { - MI_MEM_ADDRESS_REQUIREMENTS reqs = { 0, 0, 0 }; - reqs.Alignment = try_alignment; - MI_MEM_EXTENDED_PARAMETER param = { {0, 0}, {0} }; - param.Type.Type = MiMemExtendedParameterAddressRequirements; - param.Arg.Pointer = &reqs; - void* p = (*pVirtualAlloc2)(GetCurrentProcess(), addr, size, flags, PAGE_READWRITE, ¶m, 1); - if (p != NULL) return p; - _mi_warning_message("unable to allocate aligned OS memory (%zu bytes, error code: 0x%x, address: %p, alignment: %zu, flags: 0x%x)\n", size, GetLastError(), addr, try_alignment, flags); - // fall through on error - } - // last resort - return VirtualAlloc(addr, size, flags, PAGE_READWRITE); +// In secure mode, return the size of a guard page, otherwise 0 +size_t _mi_os_secure_guard_page_size(void) { + #if MI_SECURE > 0 + return _mi_os_guard_page_size(); + #else + return 0; + #endif } -static void* mi_win_virtual_alloc(void* addr, size_t size, size_t try_alignment, DWORD flags, bool large_only, bool allow_large, bool* is_large) { - mi_assert_internal(!(large_only && !allow_large)); - static _Atomic(size_t) large_page_try_ok; // = 0; - void* p = NULL; - // Try to allocate large OS pages (2MiB) if allowed or required. - if ((large_only || use_large_os_page(size, try_alignment)) - && allow_large && (flags&MEM_COMMIT)!=0 && (flags&MEM_RESERVE)!=0) { - size_t try_ok = mi_atomic_load_acquire(&large_page_try_ok); - if (!large_only && try_ok > 0) { - // if a large page allocation fails, it seems the calls to VirtualAlloc get very expensive. - // therefore, once a large page allocation failed, we don't try again for `large_page_try_ok` times. - mi_atomic_cas_strong_acq_rel(&large_page_try_ok, &try_ok, try_ok - 1); +// In secure mode, try to decommit an area and output a warning if this fails. 
+bool _mi_os_secure_guard_page_set_at(void* addr, mi_memid_t memid) { + if (addr == NULL) return true; + #if MI_SECURE > 0 + bool ok = false; + if (!memid.is_pinned) { + mi_arena_t* const arena = mi_memid_arena(memid); + if (arena != NULL && arena->commit_fun != NULL) { + ok = (*(arena->commit_fun))(false /* decommit */, addr, _mi_os_secure_guard_page_size(), NULL, arena->commit_fun_arg); } else { - // large OS pages must always reserve and commit. - *is_large = true; - p = mi_win_virtual_allocx(addr, size, try_alignment, flags | MEM_LARGE_PAGES); - if (large_only) return p; - // fall back to non-large page allocation on error (`p == NULL`). - if (p == NULL) { - mi_atomic_store_release(&large_page_try_ok,10UL); // on error, don't try again for the next N allocations - } + ok = _mi_os_decommit(addr, _mi_os_secure_guard_page_size()); } } - // Fall back to regular page allocation - if (p == NULL) { - *is_large = ((flags&MEM_LARGE_PAGES) != 0); - p = mi_win_virtual_allocx(addr, size, try_alignment, flags); - } - if (p == NULL) { - _mi_warning_message("unable to allocate OS memory (%zu bytes, error code: 0x%x, address: %p, alignment: %zu, flags: 0x%x, large only: %d, allow large: %d)\n", size, GetLastError(), addr, try_alignment, flags, large_only, allow_large); + if (!ok) { + _mi_error_message(EINVAL, "secure level %d, but failed to commit guard page (at %p of size %zu)\n", MI_SECURE, addr, _mi_os_secure_guard_page_size()); } - return p; + return ok; + #else + MI_UNUSED(memid); + return true; + #endif } -/* ----------------------------------------------------------- - Raw allocation using `sbrk` or `wasm_memory_grow` --------------------------------------------------------------- */ - -#elif defined(MI_USE_SBRK) || defined(__wasi__) -#if defined(MI_USE_SBRK) - static void* mi_memory_grow( size_t size ) { - void* p = sbrk(size); - if (p == (void*)(-1)) return NULL; - #if !defined(__wasi__) // on wasi this is always zero initialized already (?) 
- memset(p,0,size); - #endif - return p; - } -#elif defined(__wasi__) - static void* mi_memory_grow( size_t size ) { - size_t base = (size > 0 ? __builtin_wasm_memory_grow(0,_mi_divide_up(size, _mi_os_page_size())) - : __builtin_wasm_memory_size(0)); - if (base == SIZE_MAX) return NULL; - return (void*)(base * _mi_os_page_size()); - } -#endif - -#if defined(MI_USE_PTHREADS) -static pthread_mutex_t mi_heap_grow_mutex = PTHREAD_MUTEX_INITIALIZER; -#endif +// In secure mode, try to decommit an area and output a warning if this fails. +bool _mi_os_secure_guard_page_set_before(void* addr, mi_memid_t memid) { + return _mi_os_secure_guard_page_set_at((uint8_t*)addr - _mi_os_secure_guard_page_size(), memid); +} -static void* mi_heap_grow(size_t size, size_t try_alignment) { - void* p = NULL; - if (try_alignment <= 1) { - // `sbrk` is not thread safe in general so try to protect it (we could skip this on WASM but leave it in for now) - #if defined(MI_USE_PTHREADS) - pthread_mutex_lock(&mi_heap_grow_mutex); - #endif - p = mi_memory_grow(size); - #if defined(MI_USE_PTHREADS) - pthread_mutex_unlock(&mi_heap_grow_mutex); - #endif - } - else { - void* base = NULL; - size_t alloc_size = 0; - // to allocate aligned use a lock to try to avoid thread interaction - // between getting the current size and actual allocation - // (also, `sbrk` is not thread safe in general) - #if defined(MI_USE_PTHREADS) - pthread_mutex_lock(&mi_heap_grow_mutex); - #endif - { - void* current = mi_memory_grow(0); // get current size - if (current != NULL) { - void* aligned_current = mi_align_up_ptr(current, try_alignment); // and align from there to minimize wasted space - alloc_size = _mi_align_up( ((uint8_t*)aligned_current - (uint8_t*)current) + size, _mi_os_page_size()); - base = mi_memory_grow(alloc_size); - } +// In secure mode, try to recommit an area +bool _mi_os_secure_guard_page_reset_at(void* addr, mi_memid_t memid) { + if (addr == NULL) return true; + #if MI_SECURE > 0 + if (!memid.is_pinned) 
{ + mi_arena_t* const arena = mi_memid_arena(memid); + if (arena != NULL && arena->commit_fun != NULL) { + return (*(arena->commit_fun))(true, addr, _mi_os_secure_guard_page_size(), NULL, arena->commit_fun_arg); } - #if defined(MI_USE_PTHREADS) - pthread_mutex_unlock(&mi_heap_grow_mutex); - #endif - if (base != NULL) { - p = mi_align_up_ptr(base, try_alignment); - if ((uint8_t*)p + size > (uint8_t*)base + alloc_size) { - // another thread used wasm_memory_grow/sbrk in-between and we do not have enough - // space after alignment. Give up (and waste the space as we cannot shrink :-( ) - // (in `mi_os_mem_alloc_aligned` this will fall back to overallocation to align) - p = NULL; - } + else { + return _mi_os_commit(addr, _mi_os_secure_guard_page_size(), NULL); } } - if (p == NULL) { - _mi_warning_message("unable to allocate sbrk/wasm_memory_grow OS memory (%zu bytes, %zu alignment)\n", size, try_alignment); - errno = ENOMEM; - return NULL; - } - mi_assert_internal( try_alignment == 0 || (uintptr_t)p % try_alignment == 0 ); - return p; + #else + MI_UNUSED(memid); + #endif + return true; +} + +// In secure mode, try to recommit an area +bool _mi_os_secure_guard_page_reset_before(void* addr, mi_memid_t memid) { + return _mi_os_secure_guard_page_reset_at((uint8_t*)addr - _mi_os_secure_guard_page_size(), memid); } + /* ----------------------------------------------------------- - Raw allocation on Unix's (mmap) + Free memory -------------------------------------------------------------- */ -#else -#define MI_OS_USE_MMAP -static void* mi_unix_mmapx(void* addr, size_t size, size_t try_alignment, int protect_flags, int flags, int fd) { - MI_UNUSED(try_alignment); - #if defined(MAP_ALIGNED) // BSD - if (addr == NULL && try_alignment > 1 && (try_alignment % _mi_os_page_size()) == 0) { - size_t n = mi_bsr(try_alignment); - if (((size_t)1 << n) == try_alignment && n >= 12 && n <= 30) { // alignment is a power of 2 and 4096 <= alignment <= 1GiB - flags |= MAP_ALIGNED(n); - void* p 
= mmap(addr, size, protect_flags, flags | MAP_ALIGNED(n), fd, 0); - if (p!=MAP_FAILED) return p; - // fall back to regular mmap - } - } - #elif defined(MAP_ALIGN) // Solaris - if (addr == NULL && try_alignment > 1 && (try_alignment % _mi_os_page_size()) == 0) { - void* p = mmap((void*)try_alignment, size, protect_flags, flags | MAP_ALIGN, fd, 0); // addr parameter is the required alignment - if (p!=MAP_FAILED) return p; - // fall back to regular mmap + +static void mi_os_free_huge_os_pages(void* p, size_t size, mi_subproc_t* subproc); + +static void mi_os_prim_free(void* addr, size_t size, size_t commit_size, mi_subproc_t* subproc) { + mi_assert_internal((size % _mi_os_page_size()) == 0); + if (addr == NULL) return; // || _mi_os_is_huge_reserved(addr) + int err = _mi_prim_free(addr, size); // allow size==0 (issue #1041) + if (err != 0) { + _mi_warning_message("unable to free OS memory (error: %d (0x%x), size: 0x%zx bytes, address: %p)\n", err, err, size, addr); } - #endif - #if (MI_INTPTR_SIZE >= 8) && !defined(MAP_ALIGNED) - // on 64-bit systems, use the virtual address area after 2TiB for 4MiB aligned allocations - if (addr == NULL) { - void* hint = mi_os_get_aligned_hint(try_alignment, size); - if (hint != NULL) { - void* p = mmap(hint, size, protect_flags, flags, fd, 0); - if (p!=MAP_FAILED) return p; - // fall back to regular mmap - } + if (subproc == NULL) { subproc = _mi_subproc(); } // from `mi_arenas_unsafe_destroy` we pass subproc_main explicitly as we can no longer use the heap pointer + if (commit_size > 0) { + mi_subproc_stat_decrease(subproc, committed, commit_size); } - #endif - // regular mmap - void* p = mmap(addr, size, protect_flags, flags, fd, 0); - if (p!=MAP_FAILED) return p; - // failed to allocate - return NULL; -} - -static int mi_unix_mmap_fd(void) { -#if defined(VM_MAKE_TAG) - // macOS: tracking anonymous page with a specific ID. 
(All up to 98 are taken officially but LLVM sanitizers had taken 99) - int os_tag = (int)mi_option_get(mi_option_os_tag); - if (os_tag < 100 || os_tag > 255) os_tag = 100; - return VM_MAKE_TAG(os_tag); -#else - return -1; -#endif + mi_subproc_stat_decrease(subproc, reserved, size); } -static void* mi_unix_mmap(void* addr, size_t size, size_t try_alignment, int protect_flags, bool large_only, bool allow_large, bool* is_large) { - void* p = NULL; - #if !defined(MAP_ANONYMOUS) - #define MAP_ANONYMOUS MAP_ANON - #endif - #if !defined(MAP_NORESERVE) - #define MAP_NORESERVE 0 - #endif - const int fd = mi_unix_mmap_fd(); - int flags = MAP_PRIVATE | MAP_ANONYMOUS; - if (_mi_os_has_overcommit()) { - flags |= MAP_NORESERVE; - } - #if defined(PROT_MAX) - protect_flags |= PROT_MAX(PROT_READ | PROT_WRITE); // BSD - #endif - // huge page allocation - if ((large_only || use_large_os_page(size, try_alignment)) && allow_large) { - static _Atomic(size_t) large_page_try_ok; // = 0; - size_t try_ok = mi_atomic_load_acquire(&large_page_try_ok); - if (!large_only && try_ok > 0) { - // If the OS is not configured for large OS pages, or the user does not have - // enough permission, the `mmap` will always fail (but it might also fail for other reasons). - // Therefore, once a large page allocation failed, we don't try again for `large_page_try_ok` times - // to avoid too many failing calls to mmap. 
- mi_atomic_cas_strong_acq_rel(&large_page_try_ok, &try_ok, try_ok - 1); - } - else { - int lflags = flags & ~MAP_NORESERVE; // using NORESERVE on huge pages seems to fail on Linux - int lfd = fd; - #ifdef MAP_ALIGNED_SUPER - lflags |= MAP_ALIGNED_SUPER; - #endif - #ifdef MAP_HUGETLB - lflags |= MAP_HUGETLB; - #endif - #ifdef MAP_HUGE_1GB - static bool mi_huge_pages_available = true; - if ((size % MI_GiB) == 0 && mi_huge_pages_available) { - lflags |= MAP_HUGE_1GB; - } - else - #endif - { - #ifdef MAP_HUGE_2MB - lflags |= MAP_HUGE_2MB; - #endif +void _mi_os_free_ex(void* addr, size_t size, bool still_committed, mi_memid_t memid, mi_subproc_t* subproc /* can be NULL */) { + if (mi_memkind_is_os(memid.memkind)) { + size_t csize = memid.mem.os.size; + if (csize==0) { csize = _mi_os_good_alloc_size(size); } + mi_assert_internal(csize >= size); + size_t commit_size = (still_committed ? csize : 0); + void* base = addr; + // different base? (due to alignment) + if (memid.mem.os.base != base) { + mi_assert(memid.mem.os.base <= addr); + base = memid.mem.os.base; + const size_t diff = (uint8_t*)addr - (uint8_t*)memid.mem.os.base; + if (memid.mem.os.size==0) { + csize += diff; } - #ifdef VM_FLAGS_SUPERPAGE_SIZE_2MB - lfd |= VM_FLAGS_SUPERPAGE_SIZE_2MB; - #endif - if (large_only || lflags != flags) { - // try large OS page allocation - *is_large = true; - p = mi_unix_mmapx(addr, size, try_alignment, protect_flags, lflags, lfd); - #ifdef MAP_HUGE_1GB - if (p == NULL && (lflags & MAP_HUGE_1GB) != 0) { - mi_huge_pages_available = false; // don't try huge 1GiB pages again - _mi_warning_message("unable to allocate huge (1GiB) page, trying large (2MiB) pages instead (error %i)\n", errno); - lflags = ((lflags & ~MAP_HUGE_1GB) | MAP_HUGE_2MB); - p = mi_unix_mmapx(addr, size, try_alignment, protect_flags, lflags, lfd); - } - #endif - if (large_only) return p; - if (p == NULL) { - mi_atomic_store_release(&large_page_try_ok, (size_t)8); // on error, don't try again for the next N 
allocations - } + if (still_committed) { + commit_size -= diff; // the (addr-base) part was already un-committed } } - } - // regular allocation - if (p == NULL) { - *is_large = false; - p = mi_unix_mmapx(addr, size, try_alignment, protect_flags, flags, fd); - if (p != NULL) { - #if defined(MADV_HUGEPAGE) - // Many Linux systems don't allow MAP_HUGETLB but they support instead - // transparent huge pages (THP). Generally, it is not required to call `madvise` with MADV_HUGE - // though since properly aligned allocations will already use large pages if available - // in that case -- in particular for our large regions (in `memory.c`). - // However, some systems only allow THP if called with explicit `madvise`, so - // when large OS pages are enabled for mimalloc, we call `madvise` anyways. - if (allow_large && use_large_os_page(size, try_alignment)) { - if (mi_madvise(p, size, MADV_HUGEPAGE) == 0) { - *is_large = true; // possibly - }; - } - #elif defined(__sun) - if (allow_large && use_large_os_page(size, try_alignment)) { - struct memcntl_mha cmd = {0}; - cmd.mha_pagesize = large_os_page_size; - cmd.mha_cmd = MHA_MAPSIZE_VA; - if (memcntl((caddr_t)p, size, MC_HAT_ADVISE, (caddr_t)&cmd, 0, 0) == 0) { - *is_large = true; - } - } - #endif + // free it + if (memid.memkind == MI_MEM_OS_HUGE) { + mi_assert(memid.is_pinned); + mi_os_free_huge_os_pages(base, csize, subproc); + } + else { + mi_os_prim_free(base, csize, (still_committed ? 
commit_size : 0), subproc); } } - if (p == NULL) { - _mi_warning_message("unable to allocate OS memory (%zu bytes, error code: %i, address: %p, large only: %d, allow large: %d)\n", size, errno, addr, large_only, allow_large); + else { + // nothing to do + mi_assert(memid.memkind < MI_MEM_OS); } - return p; } -#endif + +void _mi_os_free(void* p, size_t size, mi_memid_t memid) { + _mi_os_free_ex(p, size, true, memid, NULL); +} /* ----------------------------------------------------------- @@ -702,148 +236,223 @@ static void* mi_unix_mmap(void* addr, size_t size, size_t try_alignment, int pro -------------------------------------------------------------- */ // Note: the `try_alignment` is just a hint and the returned pointer is not guaranteed to be aligned. -static void* mi_os_mem_alloc(size_t size, size_t try_alignment, bool commit, bool allow_large, bool* is_large, mi_stats_t* stats) { +// Also `hint_addr` is a hint and may be ignored. +static void* mi_os_prim_alloc_at(void* hint_addr, size_t size, size_t try_alignment, bool commit, bool allow_large, bool* is_large, bool* is_zero) { mi_assert_internal(size > 0 && (size % _mi_os_page_size()) == 0); + mi_assert_internal(is_zero != NULL); + mi_assert_internal(is_large != NULL); if (size == 0) return NULL; - if (!commit) allow_large = false; - if (try_alignment == 0) try_alignment = 1; // avoid 0 to ensure there will be no divide by zero when aligning - + if (!commit) { allow_large = false; } + if (try_alignment == 0) { try_alignment = 1; } // avoid 0 to ensure there will be no divide by zero when aligning + *is_zero = false; void* p = NULL; - /* - if (commit && allow_large) { - p = _mi_os_try_alloc_from_huge_reserved(size, try_alignment); - if (p != NULL) { - *is_large = true; - return p; - } + int err = _mi_prim_alloc(hint_addr, size, try_alignment, commit, allow_large, is_large, is_zero, &p); + if (err != 0) { + _mi_warning_message("unable to allocate OS memory (error: %d (0x%x), addr: %p, size: 0x%zx bytes, align: 
0x%zx, commit: %d, allow large: %d)\n", err, err, hint_addr, size, try_alignment, commit, allow_large); } - */ - #if defined(_WIN32) - int flags = MEM_RESERVE; - if (commit) { flags |= MEM_COMMIT; } - p = mi_win_virtual_alloc(NULL, size, try_alignment, flags, false, allow_large, is_large); - #elif defined(MI_USE_SBRK) || defined(__wasi__) - MI_UNUSED(allow_large); - *is_large = false; - p = mi_heap_grow(size, try_alignment); - #else - int protect_flags = (commit ? (PROT_WRITE | PROT_READ) : PROT_NONE); - p = mi_unix_mmap(NULL, size, try_alignment, protect_flags, false, allow_large, is_large); - #endif - mi_stat_counter_increase(stats->mmap_calls, 1); + mi_os_stat_counter_increase(mmap_calls, 1); if (p != NULL) { - _mi_stat_increase(&stats->reserved, size); - if (commit) { _mi_stat_increase(&stats->committed, size); } + mi_os_stat_increase(reserved, size); + if (commit) { + mi_os_stat_increase(committed, size); + // seems needed for asan (or `mimalloc-test-api` fails) + #ifdef MI_TRACK_ASAN + if (*is_zero) { mi_track_mem_defined(p,size); } + else { mi_track_mem_undefined(p,size); } + #endif + } } return p; } +static void* mi_os_prim_alloc(size_t size, size_t try_alignment, bool commit, bool allow_large, bool* is_large, bool* is_zero) { + return mi_os_prim_alloc_at(NULL, size, try_alignment, commit, allow_large, is_large, is_zero); +} + // Primitive aligned allocation from the OS. // This function guarantees the allocated memory is aligned. 
-static void* mi_os_mem_alloc_aligned(size_t size, size_t alignment, bool commit, bool allow_large, bool* is_large, mi_stats_t* stats) { +static void* mi_os_prim_alloc_aligned(size_t size, size_t alignment, bool commit, bool allow_large, bool* is_large, bool* is_zero, void** base) { mi_assert_internal(alignment >= _mi_os_page_size() && ((alignment & (alignment - 1)) == 0)); mi_assert_internal(size > 0 && (size % _mi_os_page_size()) == 0); mi_assert_internal(is_large != NULL); + mi_assert_internal(is_zero != NULL); + mi_assert_internal(base != NULL); if (!commit) allow_large = false; if (!(alignment >= _mi_os_page_size() && ((alignment & (alignment - 1)) == 0))) return NULL; size = _mi_align_up(size, _mi_os_page_size()); - // try first with a hint (this will be aligned directly on Win 10+ or BSD) - void* p = mi_os_mem_alloc(size, alignment, commit, allow_large, is_large, stats); - if (p == NULL) return NULL; - - // if not aligned, free it, overallocate, and unmap around it - if (((uintptr_t)p % alignment != 0)) { - mi_os_mem_free(p, size, commit, stats); - _mi_warning_message("unable to allocate aligned OS memory directly, fall back to over-allocation (%zu bytes, address: %p, alignment: %zu, commit: %d)\n", size, p, alignment, commit); + // try a direct allocation if the alignment is below the default, or if larger than 1/8 fraction of the size. + const bool try_direct_alloc = (alignment <= mi_os_mem_config.alloc_granularity || alignment > size/8); + + void* p = NULL; + if (try_direct_alloc) { + p = mi_os_prim_alloc(size, alignment, commit, allow_large, is_large, is_zero); + } + + // aligned already? 
+ if (p != NULL && ((uintptr_t)p % alignment) == 0) { + *base = p; + } + else { + // if not aligned, free it, overallocate, and unmap around it + #if !MI_TRACK_ASAN + if (try_direct_alloc) { + _mi_warning_message("unable to allocate aligned OS memory directly, fall back to over-allocation (size: 0x%zx bytes, address: %p, alignment: 0x%zx, commit: %d)\n", size, p, alignment, commit); + } + #endif + if (p != NULL) { mi_os_prim_free(p, size, (commit ? size : 0), NULL); } if (size >= (SIZE_MAX - alignment)) return NULL; // overflow const size_t over_size = size + alignment; -#if _WIN32 - // over-allocate uncommitted (virtual) memory - p = mi_os_mem_alloc(over_size, 0 /*alignment*/, false /* commit? */, false /* allow_large */, is_large, stats); - if (p == NULL) return NULL; - - // set p to the aligned part in the full region - // note: this is dangerous on Windows as VirtualFree needs the actual region pointer - // but in mi_os_mem_free we handle this (hopefully exceptional) situation. - p = mi_align_up_ptr(p, alignment); - - // explicitly commit only the aligned part - if (commit) { - _mi_os_commit(p, size, NULL, stats); + if (!mi_os_mem_config.has_partial_free) { // win32 virtualAlloc cannot free parts of an allocated block + // over-allocate uncommitted (virtual) memory + p = mi_os_prim_alloc(over_size, 1 /*alignment*/, false /* commit? */, false /* allow_large */, is_large, is_zero); + if (p == NULL) return NULL; + + // set p to the aligned part in the full region + // note: on Windows VirtualFree needs the actual base pointer + // this is handled by having the `base` field in the memid. + *base = p; // remember the base + p = _mi_align_up_ptr(p, alignment); + + // explicitly commit only the aligned part + if (commit) { + if (!_mi_os_commit(p, size, NULL)) { + mi_os_prim_free(*base, over_size, 0, NULL); + return NULL; + } + } + } + else { // mmap can free inside an allocation + // overallocate... 
+ p = mi_os_prim_alloc(over_size, 1, commit, false, is_large, is_zero); + if (p == NULL) return NULL; + + // and selectively unmap parts around the over-allocated area. + void* aligned_p = _mi_align_up_ptr(p, alignment); + size_t pre_size = (uint8_t*)aligned_p - (uint8_t*)p; + size_t mid_size = _mi_align_up(size, _mi_os_page_size()); + size_t post_size = over_size - pre_size - mid_size; + mi_assert_internal(pre_size < over_size&& post_size < over_size&& mid_size >= size); + if (pre_size > 0) { mi_os_prim_free(p, pre_size, (commit ? pre_size : 0), NULL); } + if (post_size > 0) { mi_os_prim_free((uint8_t*)aligned_p + mid_size, post_size, (commit ? post_size : 0), NULL); } + // we can return the aligned pointer on `mmap` systems + p = aligned_p; + *base = aligned_p; // since we freed the pre part, `*base == p`. } -#else - // overallocate... - p = mi_os_mem_alloc(over_size, 1, commit, false, is_large, stats); - if (p == NULL) return NULL; - // and selectively unmap parts around the over-allocated area. 
(noop on sbrk) - void* aligned_p = mi_align_up_ptr(p, alignment); - size_t pre_size = (uint8_t*)aligned_p - (uint8_t*)p; - size_t mid_size = _mi_align_up(size, _mi_os_page_size()); - size_t post_size = over_size - pre_size - mid_size; - mi_assert_internal(pre_size < over_size && post_size < over_size && mid_size >= size); - if (pre_size > 0) mi_os_mem_free(p, pre_size, commit, stats); - if (post_size > 0) mi_os_mem_free((uint8_t*)aligned_p + mid_size, post_size, commit, stats); - // we can return the aligned pointer on `mmap` (and sbrk) systems - p = aligned_p; -#endif } - mi_assert_internal(p == NULL || (p != NULL && ((uintptr_t)p % alignment) == 0)); + mi_assert_internal(p == NULL || (p != NULL && *base != NULL && ((uintptr_t)p % alignment) == 0)); return p; } /* ----------------------------------------------------------- - OS API: alloc, free, alloc_aligned + OS API: alloc and alloc_aligned ----------------------------------------------------------- */ -void* _mi_os_alloc(size_t size, mi_stats_t* tld_stats) { - MI_UNUSED(tld_stats); - mi_stats_t* stats = &_mi_stats_main; +void* _mi_os_alloc(size_t size, mi_memid_t* memid) { + *memid = _mi_memid_none(); if (size == 0) return NULL; size = _mi_os_good_alloc_size(size); - bool is_large = false; - return mi_os_mem_alloc(size, 0, true, false, &is_large, stats); -} - -void _mi_os_free_ex(void* p, size_t size, bool was_committed, mi_stats_t* tld_stats) { - MI_UNUSED(tld_stats); - mi_stats_t* stats = &_mi_stats_main; - if (size == 0 || p == NULL) return; - size = _mi_os_good_alloc_size(size); - mi_os_mem_free(p, size, was_committed, stats); -} + bool os_is_large = false; + bool os_is_zero = false; + void* p = mi_os_prim_alloc(size, 0, true, false, &os_is_large, &os_is_zero); + if (p == NULL) return NULL; -void _mi_os_free(void* p, size_t size, mi_stats_t* stats) { - _mi_os_free_ex(p, size, true, stats); + *memid = _mi_memid_create_os(p, size, true, os_is_zero, os_is_large); + mi_assert_internal(memid->mem.os.size >= 
size); + mi_assert_internal(memid->initially_committed); + return p; } -void* _mi_os_alloc_aligned(size_t size, size_t alignment, bool commit, bool* large, mi_stats_t* tld_stats) +void* _mi_os_alloc_aligned(size_t size, size_t alignment, bool commit, bool allow_large, mi_memid_t* memid) { - MI_UNUSED(&mi_os_get_aligned_hint); // suppress unused warnings - MI_UNUSED(tld_stats); + MI_UNUSED(&_mi_os_get_aligned_hint); // suppress unused warnings + *memid = _mi_memid_none(); if (size == 0) return NULL; size = _mi_os_good_alloc_size(size); alignment = _mi_align_up(alignment, _mi_os_page_size()); - bool allow_large = false; - if (large != NULL) { - allow_large = *large; - *large = false; + + bool os_is_large = false; + bool os_is_zero = false; + void* os_base = NULL; + void* p = mi_os_prim_alloc_aligned(size, alignment, commit, allow_large, &os_is_large, &os_is_zero, &os_base ); + if (p == NULL) return NULL; + + *memid = _mi_memid_create_os(p, size, commit, os_is_zero, os_is_large); + memid->mem.os.base = os_base; + memid->mem.os.size += ((uint8_t*)p - (uint8_t*)os_base); // todo: return from prim_alloc_aligned? 
+ + mi_assert_internal(memid->mem.os.size >= size); + mi_assert_internal(_mi_is_aligned(p,alignment)); + if (commit) { mi_assert_internal(memid->initially_committed); } + return p; +} + + +mi_decl_nodiscard static void* mi_os_ensure_zero(void* p, size_t size, mi_memid_t* memid) { + if (p==NULL || size==0) return p; + // ensure committed + if (!memid->initially_committed) { + bool is_zero = false; + if (!_mi_os_commit(p, size, &is_zero)) { + _mi_os_free(p, size, *memid); + return NULL; + } + memid->initially_committed = true; } - return mi_os_mem_alloc_aligned(size, alignment, commit, allow_large, (large!=NULL?large:&allow_large), &_mi_stats_main /*tld->stats*/ ); + // ensure zero'd + if (memid->initially_zero) return p; + _mi_memzero_aligned(p,size); + memid->initially_zero = true; + return p; } +void* _mi_os_zalloc(size_t size, mi_memid_t* memid) { + void* p = _mi_os_alloc(size,memid); + return mi_os_ensure_zero(p, size, memid); +} +/* ----------------------------------------------------------- + OS aligned allocation with an offset. This is used + for large alignments > MI_BLOCK_ALIGNMENT_MAX. We use a large mimalloc + page where the object can be aligned at an offset from the start of the segment. + As we may need to overallocate, we need to free such pointers using `mi_free_aligned` + to use the actual start of the memory region. 
+----------------------------------------------------------- */ + +void* _mi_os_alloc_aligned_at_offset(size_t size, size_t alignment, size_t offset, bool commit, bool allow_large, mi_memid_t* memid) { + mi_assert(offset <= size); + mi_assert((alignment % _mi_os_page_size()) == 0); + *memid = _mi_memid_none(); + if (offset == 0) { + // regular aligned allocation + return _mi_os_alloc_aligned(size, alignment, commit, allow_large, memid); + } + else { + // overallocate to align at an offset + const size_t extra = _mi_align_up(offset, alignment) - offset; + const size_t oversize = size + extra; + void* const start = _mi_os_alloc_aligned(oversize, alignment, commit, allow_large, memid); + if (start == NULL) return NULL; + + void* const p = (uint8_t*)start + extra; + mi_assert(_mi_is_aligned((uint8_t*)p + offset, alignment)); + // decommit the overallocation at the start + if (commit && extra > _mi_os_page_size()) { + _mi_os_decommit(start, extra); + } + return p; + } +} /* ----------------------------------------------------------- OS memory API: reset, commit, decommit, protect, unprotect. ----------------------------------------------------------- */ - // OS page align within a given area, either conservative (pages inside the area only), // or not (straddling pages outside the area is possible) static void* mi_os_page_align_areax(bool conservative, void* addr, size_t size, size_t* newsize) { @@ -851,11 +460,11 @@ static void* mi_os_page_align_areax(bool conservative, void* addr, size_t size, if (newsize != NULL) *newsize = 0; if (size == 0 || addr == NULL) return NULL; - // page align conservatively within the range - void* start = (conservative ? mi_align_up_ptr(addr, _mi_os_page_size()) + // page align conservatively within the range, or liberally straddling pages outside the range + void* start = (conservative ? _mi_align_up_ptr(addr, _mi_os_page_size()) : mi_align_down_ptr(addr, _mi_os_page_size())); void* end = (conservative ? 
mi_align_down_ptr((uint8_t*)addr + size, _mi_os_page_size()) - : mi_align_up_ptr((uint8_t*)addr + size, _mi_os_page_size())); + : _mi_align_up_ptr((uint8_t*)addr + size, _mi_os_page_size())); ptrdiff_t diff = (uint8_t*)end - (uint8_t*)start; if (diff <= 0) return NULL; @@ -868,188 +477,132 @@ static void* mi_os_page_align_area_conservative(void* addr, size_t size, size_t* return mi_os_page_align_areax(true, addr, size, newsize); } -static void mi_mprotect_hint(int err) { -#if defined(MI_OS_USE_MMAP) && (MI_SECURE>=2) // guard page around every mimalloc page - if (err == ENOMEM) { - _mi_warning_message("the previous warning may have been caused by a low memory map limit.\n" - " On Linux this is controlled by the vm.max_map_count. For example:\n" - " > sudo sysctl -w vm.max_map_count=262144\n"); - } -#else - MI_UNUSED(err); -#endif -} - -// Commit/Decommit memory. -// Usually commit is aligned liberal, while decommit is aligned conservative. -// (but not for the reset version where we want commit to be conservative as well) -static bool mi_os_commitx(void* addr, size_t size, bool commit, bool conservative, bool* is_zero, mi_stats_t* stats) { - // page align in the range, commit liberally, decommit conservative +bool _mi_os_commit_ex(void* addr, size_t size, bool* is_zero, size_t stat_size) { if (is_zero != NULL) { *is_zero = false; } + mi_os_stat_counter_increase(commit_calls, 1); + + // page align range size_t csize; - void* start = mi_os_page_align_areax(conservative, addr, size, &csize); - if (csize == 0) return true; // || _mi_os_is_huge_reserved(addr)) - int err = 0; - if (commit) { - _mi_stat_increase(&stats->committed, size); // use size for precise commit vs. decommit - _mi_stat_counter_increase(&stats->commit_calls, 1); - } - else { - _mi_stat_decrease(&stats->committed, size); - } + void* start = mi_os_page_align_areax(false /* conservative? 
*/, addr, size, &csize); + if (csize == 0) return true; - #if defined(_WIN32) - if (commit) { - // *is_zero = true; // note: if the memory was already committed, the call succeeds but the memory is not zero'd - void* p = VirtualAlloc(start, csize, MEM_COMMIT, PAGE_READWRITE); - err = (p == start ? 0 : GetLastError()); - } - else { - BOOL ok = VirtualFree(start, csize, MEM_DECOMMIT); - err = (ok ? 0 : GetLastError()); - } - #elif defined(__wasi__) - // WebAssembly guests can't control memory protection - #elif 0 && defined(MAP_FIXED) && !defined(__APPLE__) - // Linux: disabled for now as mmap fixed seems much more expensive than MADV_DONTNEED (and splits VMA's?) - if (commit) { - // commit: just change the protection - err = mprotect(start, csize, (PROT_READ | PROT_WRITE)); - if (err != 0) { err = errno; } - } - else { - // decommit: use mmap with MAP_FIXED to discard the existing memory (and reduce rss) - const int fd = mi_unix_mmap_fd(); - void* p = mmap(start, csize, PROT_NONE, (MAP_FIXED | MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE), fd, 0); - if (p != start) { err = errno; } + // commit + bool os_is_zero = false; + int err = _mi_prim_commit(start, csize, &os_is_zero); + if (err != 0) { + _mi_warning_message("cannot commit OS memory (error: %d (0x%x), address: %p, size: 0x%zx bytes)\n", err, err, start, csize); + return false; } - #else - // Linux, macOSX and others. - if (commit) { - // commit: ensure we can access the area - err = mprotect(start, csize, (PROT_READ | PROT_WRITE)); - if (err != 0) { err = errno; } - } - else { - #if defined(MADV_DONTNEED) && MI_DEBUG == 0 && MI_SECURE == 0 - // decommit: use MADV_DONTNEED as it decreases rss immediately (unlike MADV_FREE) - // (on the other hand, MADV_FREE would be good enough.. 
it is just not reflected in the stats :-( ) - err = madvise(start, csize, MADV_DONTNEED); - #else - // decommit: just disable access (also used in debug and secure mode to trap on illegal access) - err = mprotect(start, csize, PROT_NONE); - if (err != 0) { err = errno; } - #endif - //#if defined(MADV_FREE_REUSE) - // while ((err = mi_madvise(start, csize, MADV_FREE_REUSE)) != 0 && errno == EAGAIN) { errno = 0; } - //#endif + if (os_is_zero && is_zero != NULL) { + *is_zero = true; + mi_assert_expensive(mi_mem_is_zero(start, csize)); } + // note: the following seems required for asan (otherwise `mimalloc-test-stress` fails) + #ifdef MI_TRACK_ASAN + if (os_is_zero) { mi_track_mem_defined(start,csize); } + else { mi_track_mem_undefined(start,csize); } #endif + mi_os_stat_increase(committed, stat_size); // use size for precise commit vs. decommit + return true; +} + +bool _mi_os_commit(void* addr, size_t size, bool* is_zero) { + return _mi_os_commit_ex(addr, size, is_zero, size); +} + +static bool mi_os_decommit_ex(void* addr, size_t size, bool* needs_recommit, size_t stat_size) { + mi_assert_internal(needs_recommit!=NULL); + mi_os_stat_decrease(committed, stat_size); + + // page align + size_t csize; + void* start = mi_os_page_align_area_conservative(addr, size, &csize); + if (csize == 0) return true; + + // decommit + *needs_recommit = true; + int err = _mi_prim_decommit(start,csize,needs_recommit); if (err != 0) { - _mi_warning_message("%s error: start: %p, csize: 0x%zx, err: %i\n", commit ? 
"commit" : "decommit", start, csize, err); - mi_mprotect_hint(err); + _mi_warning_message("cannot decommit OS memory (error: %d (0x%x), address: %p, size: 0x%zx bytes)\n", err, err, start, csize); } mi_assert_internal(err == 0); return (err == 0); } -bool _mi_os_commit(void* addr, size_t size, bool* is_zero, mi_stats_t* tld_stats) { - MI_UNUSED(tld_stats); - mi_stats_t* stats = &_mi_stats_main; - return mi_os_commitx(addr, size, true, false /* liberal */, is_zero, stats); +bool _mi_os_decommit(void* addr, size_t size) { + bool needs_recommit; + return mi_os_decommit_ex(addr, size, &needs_recommit, size); } -bool _mi_os_decommit(void* addr, size_t size, mi_stats_t* tld_stats) { - MI_UNUSED(tld_stats); - mi_stats_t* stats = &_mi_stats_main; - bool is_zero; - return mi_os_commitx(addr, size, false, true /* conservative */, &is_zero, stats); -} - -/* -static bool mi_os_commit_unreset(void* addr, size_t size, bool* is_zero, mi_stats_t* stats) { - return mi_os_commitx(addr, size, true, true // conservative - , is_zero, stats); -} -*/ // Signal to the OS that the address range is no longer in use // but may be used later again. This will release physical memory // pages and reduce swapping while keeping the memory committed. // We page align to a conservative area inside the range to reset. -static bool mi_os_resetx(void* addr, size_t size, bool reset, mi_stats_t* stats) { +bool _mi_os_reset(void* addr, size_t size) { // page align conservatively within the range size_t csize; void* start = mi_os_page_align_area_conservative(addr, size, &csize); if (csize == 0) return true; // || _mi_os_is_huge_reserved(addr) - if (reset) _mi_stat_increase(&stats->reset, csize); - else _mi_stat_decrease(&stats->reset, csize); - if (!reset) return true; // nothing to do on unreset! 
+ mi_os_stat_increase(reset, csize); + mi_os_stat_counter_increase(reset_calls, 1); - #if (MI_DEBUG>1) && !MI_TRACK_ENABLED - if (MI_SECURE==0) { - memset(start, 0, csize); // pretend it is eagerly reset - } + #if (MI_DEBUG>1) && !MI_SECURE && !MI_TRACK_ENABLED // && !MI_TSAN + memset(start, 0, csize); // pretend it is eagerly reset #endif -#if defined(_WIN32) - // Testing shows that for us (on `malloc-large`) MEM_RESET is 2x faster than DiscardVirtualMemory - void* p = VirtualAlloc(start, csize, MEM_RESET, PAGE_READWRITE); - mi_assert_internal(p == start); - #if 1 - if (p == start && start != NULL) { - VirtualUnlock(start,csize); // VirtualUnlock after MEM_RESET removes the memory from the working set - } - #endif - if (p != start) return false; -#else -#if defined(MADV_FREE) - static _Atomic(size_t) advice = MI_ATOMIC_VAR_INIT(MADV_FREE); - int oadvice = (int)mi_atomic_load_relaxed(&advice); - int err; - while ((err = mi_madvise(start, csize, oadvice)) != 0 && errno == EAGAIN) { errno = 0; }; - if (err != 0 && errno == EINVAL && oadvice == MADV_FREE) { - // if MADV_FREE is not supported, fall back to MADV_DONTNEED from now on - mi_atomic_store_release(&advice, (size_t)MADV_DONTNEED); - err = mi_madvise(start, csize, MADV_DONTNEED); - } -#elif defined(__wasi__) - int err = 0; -#else - int err = mi_madvise(start, csize, MADV_DONTNEED); -#endif + int err = _mi_prim_reset(start, csize); if (err != 0) { - _mi_warning_message("madvise reset error: start: %p, csize: 0x%zx, errno: %i\n", start, csize, errno); + _mi_warning_message("cannot reset OS memory (error: %d (0x%x), address: %p, size: 0x%zx bytes)\n", err, err, start, csize); } - //mi_assert(err == 0); - if (err != 0) return false; -#endif - return true; + return (err == 0); } -// Signal to the OS that the address range is no longer in use -// but may be used later again. This will release physical memory -// pages and reduce swapping while keeping the memory committed. 
-// We page align to a conservative area inside the range to reset. -bool _mi_os_reset(void* addr, size_t size, mi_stats_t* tld_stats) { - MI_UNUSED(tld_stats); - mi_stats_t* stats = &_mi_stats_main; - return mi_os_resetx(addr, size, true, stats); + +void _mi_os_reuse( void* addr, size_t size ) { + // page align conservatively within the range + size_t csize = 0; + void* const start = mi_os_page_align_area_conservative(addr, size, &csize); + if (csize == 0) return; + const int err = _mi_prim_reuse(start, csize); + if (err != 0) { + _mi_warning_message("cannot reuse OS memory (error: %d (0x%x), address: %p, size: 0x%zx bytes)\n", err, err, start, csize); + } } -/* -bool _mi_os_unreset(void* addr, size_t size, bool* is_zero, mi_stats_t* tld_stats) { - MI_UNUSED(tld_stats); - mi_stats_t* stats = &_mi_stats_main; - if (mi_option_is_enabled(mi_option_reset_decommits)) { - return mi_os_commit_unreset(addr, size, is_zero, stats); // re-commit it (conservatively!) +// either resets or decommits memory, returns true if the memory needs +// to be recommitted if it is to be re-used later on. +bool _mi_os_purge_ex(void* p, size_t size, bool allow_reset, size_t stat_size, mi_commit_fun_t* commit_fun, void* commit_fun_arg) +{ + if (mi_option_get(mi_option_purge_delay) < 0) return false; // is purging allowed? + mi_os_stat_counter_increase(purge_calls, 1); + mi_os_stat_increase(purged, size); + + if (commit_fun != NULL) { + bool decommitted = (*commit_fun)(false, p, size, NULL, commit_fun_arg); + return decommitted; // needs_recommit? + } + else if (mi_option_is_enabled(mi_option_purge_decommits) && // should decommit? 
+ !_mi_preloading()) // don't decommit during preloading (unsafe) + { + bool needs_recommit = true; + mi_os_decommit_ex(p, size, &needs_recommit, stat_size); + return needs_recommit; } else { - *is_zero = false; - return mi_os_resetx(addr, size, false, stats); + if (allow_reset) { // this can sometimes be not allowed if the range is not fully committed (on Windows, we cannot reset uncommitted memory) + _mi_os_reset(p, size); + } + return false; // needs no recommit } } -*/ + +// either resets or decommits memory, returns true if the memory needs +// to be recommitted if it is to be re-used later on. +bool _mi_os_purge(void* p, size_t size) { + return _mi_os_purge_ex(p, size, true, size, NULL, NULL); +} + // Protect a region in memory to be not accessible. static bool mi_os_protectx(void* addr, size_t size, bool protect) { @@ -1062,20 +615,9 @@ static bool mi_os_protectx(void* addr, size_t size, bool protect) { _mi_warning_message("cannot mprotect memory allocated in huge OS pages\n"); } */ - int err = 0; -#ifdef _WIN32 - DWORD oldprotect = 0; - BOOL ok = VirtualProtect(start, csize, protect ? PAGE_NOACCESS : PAGE_READWRITE, &oldprotect); - err = (ok ? 0 : GetLastError()); -#elif defined(__wasi__) - err = 0; -#else - err = mprotect(start, csize, protect ? PROT_NONE : (PROT_READ | PROT_WRITE)); - if (err != 0) { err = errno; } -#endif + int err = _mi_prim_protect(start,csize,protect); if (err != 0) { - _mi_warning_message("mprotect error: start: %p, csize: 0x%zx, err: %i\n", start, csize, err); - mi_mprotect_hint(err); + _mi_warning_message("cannot %s OS memory (error: %d (0x%x), address: %p, size: 0x%zx bytes)\n", (protect ? 
"protect" : "unprotect"), err, err, start, csize); } return (err == 0); } @@ -1090,115 +632,12 @@ bool _mi_os_unprotect(void* addr, size_t size) { -bool _mi_os_shrink(void* p, size_t oldsize, size_t newsize, mi_stats_t* stats) { - // page align conservatively within the range - mi_assert_internal(oldsize > newsize && p != NULL); - if (oldsize < newsize || p == NULL) return false; - if (oldsize == newsize) return true; - - // oldsize and newsize should be page aligned or we cannot shrink precisely - void* addr = (uint8_t*)p + newsize; - size_t size = 0; - void* start = mi_os_page_align_area_conservative(addr, oldsize - newsize, &size); - if (size == 0 || start != addr) return false; - -#ifdef _WIN32 - // we cannot shrink on windows, but we can decommit - return _mi_os_decommit(start, size, stats); -#else - return mi_os_mem_free(start, size, true, stats); -#endif -} - - /* ---------------------------------------------------------------------------- Support for allocating huge OS pages (1Gib) that are reserved up-front and possibly associated with a specific NUMA node. 
(use `numa_node>=0`) -----------------------------------------------------------------------------*/ #define MI_HUGE_OS_PAGE_SIZE (MI_GiB) -#if defined(_WIN32) && (MI_INTPTR_SIZE >= 8) -static void* mi_os_alloc_huge_os_pagesx(void* addr, size_t size, int numa_node) -{ - mi_assert_internal(size%MI_GiB == 0); - mi_assert_internal(addr != NULL); - const DWORD flags = MEM_LARGE_PAGES | MEM_COMMIT | MEM_RESERVE; - - mi_win_enable_large_os_pages(); - - MI_MEM_EXTENDED_PARAMETER params[3] = { {{0,0},{0}},{{0,0},{0}},{{0,0},{0}} }; - // on modern Windows try use NtAllocateVirtualMemoryEx for 1GiB huge pages - static bool mi_huge_pages_available = true; - if (pNtAllocateVirtualMemoryEx != NULL && mi_huge_pages_available) { - params[0].Type.Type = MiMemExtendedParameterAttributeFlags; - params[0].Arg.ULong64 = MI_MEM_EXTENDED_PARAMETER_NONPAGED_HUGE; - ULONG param_count = 1; - if (numa_node >= 0) { - param_count++; - params[1].Type.Type = MiMemExtendedParameterNumaNode; - params[1].Arg.ULong = (unsigned)numa_node; - } - SIZE_T psize = size; - void* base = addr; - NTSTATUS err = (*pNtAllocateVirtualMemoryEx)(GetCurrentProcess(), &base, &psize, flags, PAGE_READWRITE, params, param_count); - if (err == 0 && base != NULL) { - return base; - } - else { - // fall back to regular large pages - mi_huge_pages_available = false; // don't try further huge pages - _mi_warning_message("unable to allocate using huge (1GiB) pages, trying large (2MiB) pages instead (status 0x%lx)\n", err); - } - } - // on modern Windows try use VirtualAlloc2 for numa aware large OS page allocation - if (pVirtualAlloc2 != NULL && numa_node >= 0) { - params[0].Type.Type = MiMemExtendedParameterNumaNode; - params[0].Arg.ULong = (unsigned)numa_node; - return (*pVirtualAlloc2)(GetCurrentProcess(), addr, size, flags, PAGE_READWRITE, params, 1); - } - - // otherwise use regular virtual alloc on older windows - return VirtualAlloc(addr, size, flags, PAGE_READWRITE); -} - -#elif defined(MI_OS_USE_MMAP) && 
(MI_INTPTR_SIZE >= 8) && !defined(__HAIKU__) -#include -#ifndef MPOL_PREFERRED -#define MPOL_PREFERRED 1 -#endif -#if defined(SYS_mbind) -static long mi_os_mbind(void* start, unsigned long len, unsigned long mode, const unsigned long* nmask, unsigned long maxnode, unsigned flags) { - return syscall(SYS_mbind, start, len, mode, nmask, maxnode, flags); -} -#else -static long mi_os_mbind(void* start, unsigned long len, unsigned long mode, const unsigned long* nmask, unsigned long maxnode, unsigned flags) { - MI_UNUSED(start); MI_UNUSED(len); MI_UNUSED(mode); MI_UNUSED(nmask); MI_UNUSED(maxnode); MI_UNUSED(flags); - return 0; -} -#endif -static void* mi_os_alloc_huge_os_pagesx(void* addr, size_t size, int numa_node) { - mi_assert_internal(size%MI_GiB == 0); - bool is_large = true; - void* p = mi_unix_mmap(addr, size, MI_SEGMENT_SIZE, PROT_READ | PROT_WRITE, true, true, &is_large); - if (p == NULL) return NULL; - if (numa_node >= 0 && numa_node < 8*MI_INTPTR_SIZE) { // at most 64 nodes - unsigned long numa_mask = (1UL << numa_node); - // TODO: does `mbind` work correctly for huge OS pages? should we - // use `set_mempolicy` before calling mmap instead? 
- // see: - long err = mi_os_mbind(p, size, MPOL_PREFERRED, &numa_mask, 8*MI_INTPTR_SIZE, 0); - if (err != 0) { - _mi_warning_message("failed to bind huge (1GiB) pages to numa node %d: %s\n", numa_node, strerror(errno)); - } - } - return p; -} -#else -static void* mi_os_alloc_huge_os_pagesx(void* addr, size_t size, int numa_node) { - MI_UNUSED(addr); MI_UNUSED(size); MI_UNUSED(numa_node); - return NULL; -} -#endif #if (MI_INTPTR_SIZE >= 8) // To ensure proper alignment, use our own area for huge OS pages @@ -1216,15 +655,14 @@ static uint8_t* mi_os_claim_huge_pages(size_t pages, size_t* total_size) { start = huge_start; if (start == 0) { // Initialize the start address after the 32TiB area - start = ((uintptr_t)32 << 40); // 32TiB virtual start address -#if (MI_SECURE>0 || MI_DEBUG==0) // security: randomize start of huge pages unless in debug mode - uintptr_t r = _mi_heap_random_next(mi_get_default_heap()); + start = ((uintptr_t)8 << 40); // 8TiB virtual start address + #if (MI_SECURE>0 || MI_DEBUG==0) // security: randomize start of huge pages unless in debug mode + uintptr_t r = _mi_heap_random_next(mi_prim_get_default_heap()); start = start + ((uintptr_t)MI_HUGE_OS_PAGE_SIZE * ((r>>17) & 0x0FFF)); // (randomly 12bits)*1GiB == between 0 to 4TiB -#endif + #endif } end = start + size; - mi_assert_internal(end % MI_SEGMENT_SIZE == 0); - } while (!mi_atomic_cas_strong_acq_rel(&mi_huge_start, &huge_start, end)); + } while (!mi_atomic_cas_weak_acq_rel(&mi_huge_start, &huge_start, end)); if (total_size != NULL) *total_size = size; return (uint8_t*)start; @@ -1237,37 +675,47 @@ static uint8_t* mi_os_claim_huge_pages(size_t pages, size_t* total_size) { } #endif -// Allocate MI_SEGMENT_SIZE aligned huge pages -void* _mi_os_alloc_huge_os_pages(size_t pages, int numa_node, mi_msecs_t max_msecs, size_t* pages_reserved, size_t* psize) { +// Allocate MI_ARENA_SLICE_ALIGN aligned huge pages +void* _mi_os_alloc_huge_os_pages(size_t pages, int numa_node, mi_msecs_t max_msecs, 
size_t* pages_reserved, size_t* psize, mi_memid_t* memid) { + *memid = _mi_memid_none(); if (psize != NULL) *psize = 0; if (pages_reserved != NULL) *pages_reserved = 0; size_t size = 0; - uint8_t* start = mi_os_claim_huge_pages(pages, &size); + uint8_t* const start = mi_os_claim_huge_pages(pages, &size); if (start == NULL) return NULL; // or 32-bit systems // Allocate one page at the time but try to place them contiguously // We allocate one page at the time to be able to abort if it takes too long // or to at least allocate as many as available on the system. mi_msecs_t start_t = _mi_clock_start(); - size_t page; - for (page = 0; page < pages; page++) { + size_t page = 0; + bool all_zero = true; + while (page < pages) { // allocate a page + bool is_zero = false; void* addr = start + (page * MI_HUGE_OS_PAGE_SIZE); - void* p = mi_os_alloc_huge_os_pagesx(addr, MI_HUGE_OS_PAGE_SIZE, numa_node); + void* p = NULL; + int err = _mi_prim_alloc_huge_os_pages(addr, MI_HUGE_OS_PAGE_SIZE, numa_node, &is_zero, &p); + if (!is_zero) { all_zero = false; } + if (err != 0) { + _mi_warning_message("unable to allocate huge OS page (error: %d (0x%x), address: %p, size: %zx bytes)\n", err, err, addr, MI_HUGE_OS_PAGE_SIZE); + break; + } // Did we succeed at a contiguous address? 
if (p != addr) { // no success, issue a warning and break if (p != NULL) { - _mi_warning_message("could not allocate contiguous huge page %zu at %p\n", page, addr); - _mi_os_free(p, MI_HUGE_OS_PAGE_SIZE, &_mi_stats_main); + _mi_warning_message("could not allocate contiguous huge OS page %zu at %p\n", page, addr); + mi_os_prim_free(p, MI_HUGE_OS_PAGE_SIZE, MI_HUGE_OS_PAGE_SIZE, NULL); } break; } // success, record it - _mi_stat_increase(&_mi_stats_main.committed, MI_HUGE_OS_PAGE_SIZE); - _mi_stat_increase(&_mi_stats_main.reserved, MI_HUGE_OS_PAGE_SIZE); + page++; // increase before timeout check (see issue #711) + mi_os_stat_increase(committed, MI_HUGE_OS_PAGE_SIZE); + mi_os_stat_increase(reserved, MI_HUGE_OS_PAGE_SIZE); // check for timeout if (max_msecs > 0) { @@ -1279,7 +727,7 @@ void* _mi_os_alloc_huge_os_pages(size_t pages, int numa_node, mi_msecs_t max_mse } } if (elapsed > max_msecs) { - _mi_warning_message("huge page allocation timed out\n"); + _mi_warning_message("huge OS page allocation timed out (after allocating %zu page(s))\n", page); break; } } @@ -1287,157 +735,117 @@ void* _mi_os_alloc_huge_os_pages(size_t pages, int numa_node, mi_msecs_t max_mse mi_assert_internal(page*MI_HUGE_OS_PAGE_SIZE <= size); if (pages_reserved != NULL) { *pages_reserved = page; } if (psize != NULL) { *psize = page * MI_HUGE_OS_PAGE_SIZE; } + if (page != 0) { + mi_assert(start != NULL); + *memid = _mi_memid_create_os(start, size, true /* is committed */, all_zero, true /* is_large */); + memid->memkind = MI_MEM_OS_HUGE; + mi_assert(memid->is_pinned); + #ifdef MI_TRACK_ASAN + if (all_zero) { mi_track_mem_defined(start,size); } + #endif + } return (page == 0 ? NULL : start); } // free every huge page in a range individually (as we allocated per page) // note: needed with VirtualAlloc but could potentially be done in one go on mmap'd systems. 
-void _mi_os_free_huge_pages(void* p, size_t size, mi_stats_t* stats) { +static void mi_os_free_huge_os_pages(void* p, size_t size, mi_subproc_t* subproc) { if (p==NULL || size==0) return; uint8_t* base = (uint8_t*)p; while (size >= MI_HUGE_OS_PAGE_SIZE) { - _mi_os_free(base, MI_HUGE_OS_PAGE_SIZE, stats); + mi_os_prim_free(base, MI_HUGE_OS_PAGE_SIZE, MI_HUGE_OS_PAGE_SIZE, subproc); size -= MI_HUGE_OS_PAGE_SIZE; base += MI_HUGE_OS_PAGE_SIZE; } } + /* ---------------------------------------------------------------------------- Support NUMA aware allocation -----------------------------------------------------------------------------*/ -#ifdef _WIN32 -static size_t mi_os_numa_nodex(void) { - USHORT numa_node = 0; - if (pGetCurrentProcessorNumberEx != NULL && pGetNumaProcessorNodeEx != NULL) { - // Extended API is supported - MI_PROCESSOR_NUMBER pnum; - (*pGetCurrentProcessorNumberEx)(&pnum); - USHORT nnode = 0; - BOOL ok = (*pGetNumaProcessorNodeEx)(&pnum, &nnode); - if (ok) numa_node = nnode; - } - else { - // Vista or earlier, use older API that is limited to 64 processors. Issue #277 - DWORD pnum = GetCurrentProcessorNumber(); - UCHAR nnode = 0; - BOOL ok = GetNumaProcessorNode((UCHAR)pnum, &nnode); - if (ok) numa_node = nnode; - } - return numa_node; -} -static size_t mi_os_numa_node_countx(void) { - ULONG numa_max = 0; - GetNumaHighestNodeNumber(&numa_max); - // find the highest node number that has actual processors assigned to it. 
Issue #282 - while(numa_max > 0) { - if (pGetNumaNodeProcessorMaskEx != NULL) { - // Extended API is supported - GROUP_AFFINITY affinity; - if ((*pGetNumaNodeProcessorMaskEx)((USHORT)numa_max, &affinity)) { - if (affinity.Mask != 0) break; // found the maximum non-empty node - } +static _Atomic(size_t) mi_numa_node_count; // = 0 // cache the node count + +int _mi_os_numa_node_count(void) { + size_t count = mi_atomic_load_acquire(&mi_numa_node_count); + if mi_unlikely(count == 0) { + long ncount = mi_option_get(mi_option_use_numa_nodes); // given explicitly? + if (ncount > 0 && ncount < INT_MAX) { + count = (size_t)ncount; } else { - // Vista or earlier, use older API that is limited to 64 processors. - ULONGLONG mask; - if (GetNumaNodeProcessorMask((UCHAR)numa_max, &mask)) { - if (mask != 0) break; // found the maximum non-empty node - }; + const size_t n = _mi_prim_numa_node_count(); // or detect dynamically + if (n == 0 || n > INT_MAX) { count = 1; } + else { count = n; } } - // max node was invalid or had no processor assigned, try again - numa_max--; + mi_atomic_store_release(&mi_numa_node_count, count); // save it + _mi_verbose_message("using %zd numa regions\n", count); } - return ((size_t)numa_max + 1); + mi_assert_internal(count > 0 && count <= INT_MAX); + return (int)count; } -#elif defined(__linux__) -#include // getcpu -#include // access - -static size_t mi_os_numa_nodex(void) { -#ifdef SYS_getcpu - unsigned long node = 0; - unsigned long ncpu = 0; - long err = syscall(SYS_getcpu, &ncpu, &node, NULL); - if (err != 0) return 0; - return node; -#else - return 0; -#endif + +static int mi_os_numa_node_get(void) { + int numa_count = _mi_os_numa_node_count(); + if (numa_count<=1) return 0; // optimize on single numa node systems: always node 0 + // never more than the node count and >= 0 + const size_t n = _mi_prim_numa_node(); + int numa_node = (n < INT_MAX ? 
(int)n : 0); + if (numa_node >= numa_count) { numa_node = numa_node % numa_count; } + return numa_node; } -static size_t mi_os_numa_node_countx(void) { - char buf[128]; - unsigned node = 0; - for(node = 0; node < 256; node++) { - // enumerate node entries -- todo: it there a more efficient way to do this? (but ensure there is no allocation) - snprintf(buf, 127, "/sys/devices/system/node/node%u", node + 1); - if (access(buf,R_OK) != 0) break; + +int _mi_os_numa_node(void) { + if mi_likely(mi_atomic_load_relaxed(&mi_numa_node_count) == 1) { + return 0; } - return (node+1); -} -#elif defined(__FreeBSD__) && __FreeBSD_version >= 1200000 -static size_t mi_os_numa_nodex(void) { - domainset_t dom; - size_t node; - int policy; - if (cpuset_getdomain(CPU_LEVEL_CPUSET, CPU_WHICH_PID, -1, sizeof(dom), &dom, &policy) == -1) return 0ul; - for (node = 0; node < MAXMEMDOM; node++) { - if (DOMAINSET_ISSET(node, &dom)) return node; + else { + return mi_os_numa_node_get(); } - return 0ul; -} -static size_t mi_os_numa_node_countx(void) { - size_t ndomains = 0; - size_t len = sizeof(ndomains); - if (sysctlbyname("vm.ndomains", &ndomains, &len, NULL, 0) == -1) return 0ul; - return ndomains; } -#elif defined(__DragonFly__) -static size_t mi_os_numa_nodex(void) { - // TODO: DragonFly does not seem to provide any userland means to get this information. 
- return 0ul; -} -static size_t mi_os_numa_node_countx(void) { - size_t ncpus = 0, nvirtcoresperphys = 0; - size_t len = sizeof(size_t); - if (sysctlbyname("hw.ncpu", &ncpus, &len, NULL, 0) == -1) return 0ul; - if (sysctlbyname("hw.cpu_topology_ht_ids", &nvirtcoresperphys, &len, NULL, 0) == -1) return 0ul; - return nvirtcoresperphys * ncpus; + + +/* ---------------------------------------------------------------------------- + Public API +-----------------------------------------------------------------------------*/ +#if 0 +mi_decl_export void* mi_os_alloc(size_t size, bool commit, size_t* full_size) { + return mi_os_alloc_aligned(size, mi_os_mem_config.alloc_granularity, commit, NULL, full_size); +} + +static void* mi_os_alloc_aligned_ex(size_t size, size_t alignment, bool commit, bool allow_large, bool* is_committed, bool* is_pinned, void** base, size_t* full_size) { + mi_memid_t memid = _mi_memid_none(); + void* p = _mi_os_alloc_aligned(size, alignment, commit, allow_large, &memid); + if (p == NULL) return p; + if (is_committed != NULL) { *is_committed = memid.initially_committed; } + if (is_pinned != NULL) { *is_pinned = memid.is_pinned; } + if (base != NULL) { *base = memid.mem.os.base; } + if (full_size != NULL) { *full_size = memid.mem.os.size; } + if (!memid.initially_zero && memid.initially_committed) { + _mi_memzero_aligned(memid.mem.os.base, memid.mem.os.size); + } + return p; } -#else -static size_t mi_os_numa_nodex(void) { - return 0; + +mi_decl_export void* mi_os_alloc_aligned(size_t size, size_t alignment, bool commit, void** base, size_t* full_size) { + return mi_os_alloc_aligned_ex(size, alignment, commit, false, NULL, NULL, base, full_size); } -static size_t mi_os_numa_node_countx(void) { - return 1; + +mi_decl_export void* mi_os_alloc_aligned_allow_large(size_t size, size_t alignment, bool commit, bool* is_committed, bool* is_pinned, void** base, size_t* full_size) { + return mi_os_alloc_aligned_ex(size, alignment, commit, true, is_committed, 
is_pinned, base, full_size); } -#endif -_Atomic(size_t) _mi_numa_node_count; // = 0 // cache the node count +mi_decl_export void mi_os_free(void* p, size_t size) { + if (p==NULL || size == 0) return; + mi_memid_t memid = _mi_memid_create_os(p, size, true, false, false); + _mi_os_free(p, size, memid); +} -size_t _mi_os_numa_node_count_get(void) { - size_t count = mi_atomic_load_acquire(&_mi_numa_node_count); - if (count <= 0) { - long ncount = mi_option_get(mi_option_use_numa_nodes); // given explicitly? - if (ncount > 0) { - count = (size_t)ncount; - } - else { - count = mi_os_numa_node_countx(); // or detect dynamically - if (count == 0) count = 1; - } - mi_atomic_store_release(&_mi_numa_node_count, count); // save it - _mi_verbose_message("using %zd numa regions\n", count); - } - return count; +mi_decl_export void mi_os_commit(void* p, size_t size) { + _mi_os_commit(p, size, NULL); } -int _mi_os_numa_node_get(mi_os_tld_t* tld) { - MI_UNUSED(tld); - size_t numa_count = _mi_os_numa_node_count(); - if (numa_count<=1) return 0; // optimize on single numa node systems: always node 0 - // never more than the node count and >= 0 - size_t numa_node = mi_os_numa_nodex(); - if (numa_node >= numa_count) { numa_node = numa_node % numa_count; } - return (int)numa_node; +mi_decl_export void mi_os_decommit(void* p, size_t size) { + _mi_os_decommit(p, size); } +#endif diff --git a/depends/mimalloc/src/page-map.c b/depends/mimalloc/src/page-map.c new file mode 100644 index 000000000000..ce70495b27de --- /dev/null +++ b/depends/mimalloc/src/page-map.c @@ -0,0 +1,389 @@ +/*---------------------------------------------------------------------------- +Copyright (c) 2023-2024, Microsoft Research, Daan Leijen +This is free software; you can redistribute it and/or modify it under the +terms of the MIT license. A copy of the license can be found in the file +"LICENSE" at the root of this distribution. 
+-----------------------------------------------------------------------------*/ + +#include "mimalloc.h" +#include "mimalloc/internal.h" +#include "bitmap.h" + +static void mi_page_map_cannot_commit(void) { + _mi_error_message(EFAULT,"unable to commit memory for the page address map\n"); +} + +#if MI_PAGE_MAP_FLAT + +// The page-map contains a byte for each 64kb slice in the address space. +// For an address `a` where `ofs = _mi_page_map[a >> 16]`: +// 0 = unused +// 1 = the slice at `a & ~0xFFFF` is a mimalloc page. +// 1 < ofs <= 127 = the slice is part of a page, starting at `(((a>>16) - ofs - 1) << 16)`. +// +// 1 byte per slice => 1 TiB address space needs a 2^14 * 2^16 = 16 MiB page map. +// A full 256 TiB address space (48 bit) needs a 4 GiB page map. +// A full 4 GiB address space (32 bit) needs only a 64 KiB page map. + +mi_decl_cache_align uint8_t* _mi_page_map = NULL; +static void* mi_page_map_max_address = NULL; +static mi_memid_t mi_page_map_memid; + +#define MI_PAGE_MAP_ENTRIES_PER_COMMIT_BIT MI_ARENA_SLICE_SIZE +static mi_bitmap_t* mi_page_map_commit; // one bit per committed 64 KiB entries + +static void mi_page_map_ensure_committed(size_t idx, size_t slice_count); + +bool _mi_page_map_init(void) { + size_t vbits = (size_t)mi_option_get_clamp(mi_option_max_vabits, 0, MI_SIZE_BITS); + if (vbits == 0) { + vbits = _mi_os_virtual_address_bits(); + #if MI_ARCH_X64 // canonical address is limited to the first 128 TiB + if (vbits >= 48) { vbits = 47; } + #endif + } + + // Allocate the page map and commit bits + mi_page_map_max_address = (void*)(vbits >= MI_SIZE_BITS ? (SIZE_MAX - MI_ARENA_SLICE_SIZE + 1) : (MI_PU(1) << vbits)); + const size_t page_map_size = (MI_ZU(1) << (vbits - MI_ARENA_SLICE_SHIFT)); + const bool commit = (page_map_size <= 1*MI_MiB || mi_option_is_enabled(mi_option_pagemap_commit)); // _mi_os_has_overcommit(); // commit on-access on Linux systems? 
+ const size_t commit_bits = _mi_divide_up(page_map_size, MI_PAGE_MAP_ENTRIES_PER_COMMIT_BIT); + const size_t bitmap_size = (commit ? 0 : mi_bitmap_size(commit_bits, NULL)); + const size_t reserve_size = bitmap_size + page_map_size; + uint8_t* const base = (uint8_t*)_mi_os_alloc_aligned(reserve_size, 1, commit, true /* allow large */, &mi_page_map_memid); + if (base==NULL) { + _mi_error_message(ENOMEM, "unable to reserve virtual memory for the page map (%zu KiB)\n", page_map_size / MI_KiB); + return false; + } + if (mi_page_map_memid.initially_committed && !mi_page_map_memid.initially_zero) { + _mi_warning_message("internal: the page map was committed but not zero initialized!\n"); + _mi_memzero_aligned(base, reserve_size); + } + if (bitmap_size > 0) { + mi_page_map_commit = (mi_bitmap_t*)base; + if (!_mi_os_commit(mi_page_map_commit, bitmap_size, NULL)) { + mi_page_map_cannot_commit(); + return false; + } + mi_bitmap_init(mi_page_map_commit, commit_bits, true); + } + _mi_page_map = base + bitmap_size; + + // commit the first part so NULL pointers get resolved without an access violation + if (!commit) { + mi_page_map_ensure_committed(0, 1); + } + _mi_page_map[0] = 1; // so _mi_ptr_page(NULL) == NULL + mi_assert_internal(_mi_ptr_page(NULL)==NULL); + return true; +} + +void _mi_page_map_unsafe_destroy(mi_subproc_t* subproc) { + mi_assert_internal(subproc != NULL); + mi_assert_internal(_mi_page_map != NULL); + if (_mi_page_map == NULL) return; + _mi_os_free_ex(mi_page_map_memid.mem.os.base, mi_page_map_memid.mem.os.size, true, mi_page_map_memid, subproc); + _mi_page_map = NULL; + mi_page_map_commit = NULL; + mi_page_map_max_address = NULL; + mi_page_map_memid = _mi_memid_none(); +} + + +static void mi_page_map_ensure_committed(size_t idx, size_t slice_count) { + // is the page map area that contains the page address committed? + // we always set the commit bits so we can track what ranges are in-use. 
+ // we only actually commit if the map wasn't committed fully already. + if (mi_page_map_commit != NULL) { + const size_t commit_idx = idx / MI_PAGE_MAP_ENTRIES_PER_COMMIT_BIT; + const size_t commit_idx_hi = (idx + slice_count - 1) / MI_PAGE_MAP_ENTRIES_PER_COMMIT_BIT; + for (size_t i = commit_idx; i <= commit_idx_hi; i++) { // per bit to avoid crossing over bitmap chunks + if (mi_bitmap_is_clear(mi_page_map_commit, i)) { + // this may race, in which case we do multiple commits (which is ok) + bool is_zero; + uint8_t* const start = _mi_page_map + (i * MI_PAGE_MAP_ENTRIES_PER_COMMIT_BIT); + const size_t size = MI_PAGE_MAP_ENTRIES_PER_COMMIT_BIT; + if (!_mi_os_commit(start, size, &is_zero)) return; + if (!is_zero && !mi_page_map_memid.initially_zero) { _mi_memzero(start, size); } + mi_bitmap_set(mi_page_map_commit, i); + } + } + } + #if MI_DEBUG > 0 + _mi_page_map[idx] = 0; + _mi_page_map[idx+slice_count-1] = 0; + #endif +} + + +static size_t mi_page_map_get_idx(mi_page_t* page, uint8_t** page_start, size_t* slice_count) { + size_t page_size; + *page_start = mi_page_area(page, &page_size); + if (page_size > MI_LARGE_PAGE_SIZE) { page_size = MI_LARGE_PAGE_SIZE - MI_ARENA_SLICE_SIZE; } // furthest interior pointer + *slice_count = mi_slice_count_of_size(page_size) + (((uint8_t*)*page_start - (uint8_t*)page)/MI_ARENA_SLICE_SIZE); // add for large aligned blocks + return _mi_page_map_index(page); +} + +void _mi_page_map_register(mi_page_t* page) { + mi_assert_internal(page != NULL); + mi_assert_internal(_mi_is_aligned(page, MI_PAGE_ALIGN)); + mi_assert_internal(_mi_page_map != NULL); // should be initialized before multi-thread access! 
+ if mi_unlikely(_mi_page_map == NULL) { + if (!_mi_page_map_init()) return; + } + mi_assert(_mi_page_map!=NULL); + uint8_t* page_start; + size_t slice_count; + const size_t idx = mi_page_map_get_idx(page, &page_start, &slice_count); + + mi_page_map_ensure_committed(idx, slice_count); + + // set the offsets + for (size_t i = 0; i < slice_count; i++) { + mi_assert_internal(i < 128); + _mi_page_map[idx + i] = (uint8_t)(i+1); + } +} + +void _mi_page_map_unregister(mi_page_t* page) { + mi_assert_internal(_mi_page_map != NULL); + // get index and count + uint8_t* page_start; + size_t slice_count; + const size_t idx = mi_page_map_get_idx(page, &page_start, &slice_count); + // unset the offsets + _mi_memzero(_mi_page_map + idx, slice_count); +} + +void _mi_page_map_unregister_range(void* start, size_t size) { + const size_t slice_count = _mi_divide_up(size, MI_ARENA_SLICE_SIZE); + const uintptr_t index = _mi_page_map_index(start); + mi_page_map_ensure_committed(index, slice_count); // we commit the range in total; todo: scan the commit bits and clear only those ranges? 
+ _mi_memzero(&_mi_page_map[index], slice_count); +} + + +mi_page_t* _mi_safe_ptr_page(const void* p) { + if mi_unlikely(p >= mi_page_map_max_address) return NULL; + const uintptr_t idx = _mi_page_map_index(p); + if mi_unlikely(mi_page_map_commit != NULL && !mi_bitmap_is_set(mi_page_map_commit, idx/MI_PAGE_MAP_ENTRIES_PER_COMMIT_BIT)) return NULL; + const uintptr_t ofs = _mi_page_map[idx]; + if mi_unlikely(ofs == 0) return NULL; + return (mi_page_t*)((((uintptr_t)p >> MI_ARENA_SLICE_SHIFT) - ofs + 1) << MI_ARENA_SLICE_SHIFT); +} + +mi_decl_nodiscard mi_decl_export bool mi_is_in_heap_region(const void* p) mi_attr_noexcept { + return (_mi_safe_ptr_page(p) != NULL); +} + +#else + +// A 2-level page map +#define MI_PAGE_MAP_SUB_SIZE (MI_PAGE_MAP_SUB_COUNT * sizeof(mi_page_t*)) + +mi_decl_cache_align _Atomic(mi_page_t**)* _mi_page_map; +static size_t mi_page_map_count; +static void* mi_page_map_max_address; +static mi_memid_t mi_page_map_memid; + +// divide the main map in 64 (`MI_BFIELD_BITS`) parts commit those parts on demand +static _Atomic(mi_bfield_t) mi_page_map_commit; + +#define MI_PAGE_MAP_ENTRIES_PER_CBIT (MI_PAGE_MAP_COUNT / MI_BFIELD_BITS) + +static inline bool mi_page_map_is_committed(size_t idx, size_t* pbit_idx) { + mi_bfield_t commit = mi_atomic_load_relaxed(&mi_page_map_commit); + const size_t bit_idx = idx/MI_PAGE_MAP_ENTRIES_PER_CBIT; + mi_assert_internal(bit_idx < MI_BFIELD_BITS); + if (pbit_idx != NULL) { *pbit_idx = bit_idx; } + return ((commit & (MI_ZU(1) << bit_idx)) != 0); +} + +static mi_page_t** mi_page_map_ensure_committed(size_t idx) { + size_t bit_idx; + if mi_unlikely(!mi_page_map_is_committed(idx, &bit_idx)) { + uint8_t* start = (uint8_t*)&_mi_page_map[bit_idx * MI_PAGE_MAP_ENTRIES_PER_CBIT]; + if (!_mi_os_commit(start, MI_PAGE_MAP_ENTRIES_PER_CBIT * sizeof(mi_page_t**), NULL)) { + return NULL; + } + mi_atomic_or_acq_rel(&mi_page_map_commit, MI_ZU(1) << bit_idx); + } + return mi_atomic_load_ptr_acquire(mi_page_t*, &_mi_page_map[idx]); // 
_mi_page_map_at(idx); +} + +// initialize the page map +bool _mi_page_map_init(void) { + size_t vbits = (size_t)mi_option_get_clamp(mi_option_max_vabits, 0, MI_SIZE_BITS); + if (vbits == 0) { + vbits = _mi_os_virtual_address_bits(); + #if MI_ARCH_X64 // canonical address is limited to the first 128 TiB + if (vbits >= 48) { vbits = 47; } + #endif + } + + // Allocate the page map and commit bits + mi_assert(MI_MAX_VABITS >= vbits); + mi_page_map_max_address = (void*)(vbits >= MI_SIZE_BITS ? (SIZE_MAX - MI_ARENA_SLICE_SIZE + 1) : (MI_PU(1) << vbits)); + mi_page_map_count = (MI_ZU(1) << (vbits - MI_PAGE_MAP_SUB_SHIFT - MI_ARENA_SLICE_SHIFT)); + mi_assert(mi_page_map_count <= MI_PAGE_MAP_COUNT); + const size_t os_page_size = _mi_os_page_size(); + const size_t page_map_size = _mi_align_up( mi_page_map_count * sizeof(mi_page_t**), os_page_size); + const size_t submap_size = MI_PAGE_MAP_SUB_SIZE; + const size_t reserve_size = page_map_size + submap_size; + #if MI_SECURE + const bool commit = true; // the whole page map is valid and we can reliably check any pointer + #else + const bool commit = page_map_size <= 64*MI_KiB || + mi_option_is_enabled(mi_option_pagemap_commit) || _mi_os_has_overcommit(); + #endif + _mi_page_map = (_Atomic(mi_page_t**)*)_mi_os_alloc_aligned(reserve_size, 1, commit, true /* allow large */, &mi_page_map_memid); + if (_mi_page_map==NULL) { + _mi_error_message(ENOMEM, "unable to reserve virtual memory for the page map (%zu KiB)\n", page_map_size / MI_KiB); + return false; + } + if (mi_page_map_memid.initially_committed && !mi_page_map_memid.initially_zero) { + _mi_warning_message("internal: the page map was committed but not zero initialized!\n"); + _mi_memzero_aligned(_mi_page_map, page_map_size); + } + mi_atomic_store_release(&mi_page_map_commit, (mi_page_map_memid.initially_committed ? 
~MI_ZU(0) : MI_ZU(0))); + + // ensure there is a submap for the NULL address + mi_page_t** const sub0 = (mi_page_t**)((uint8_t*)_mi_page_map + page_map_size); // we reserved a submap part at the end already + if (!mi_page_map_memid.initially_committed) { + if (!_mi_os_commit(sub0, submap_size, NULL)) { // commit full submap (issue #1087) + mi_page_map_cannot_commit(); + return false; + } + } + if (!mi_page_map_memid.initially_zero) { // initialize low addresses with NULL + _mi_memzero_aligned(sub0, submap_size); + } + mi_page_map_ensure_committed(0); + mi_atomic_store_ptr_release(mi_page_t*, &_mi_page_map[0], sub0); + + mi_assert_internal(_mi_ptr_page(NULL)==NULL); + return true; +} + + +void _mi_page_map_unsafe_destroy(mi_subproc_t* subproc) { + mi_assert_internal(subproc != NULL); + mi_assert_internal(_mi_page_map != NULL); + if (_mi_page_map == NULL) return; + for (size_t idx = 1; idx < mi_page_map_count; idx++) { // skip entry 0 (as we allocate that submap at the end of the page_map) + // free all sub-maps + if (mi_page_map_is_committed(idx, NULL)) { + mi_page_t** sub = _mi_page_map_at(idx); + if (sub != NULL) { + mi_memid_t memid = _mi_memid_create_os(sub, MI_PAGE_MAP_SUB_SIZE, true, false, false); + _mi_os_free_ex(memid.mem.os.base, memid.mem.os.size, true, memid, subproc); + mi_atomic_store_ptr_release(mi_page_t*, &_mi_page_map[idx], NULL); + } + } + } + _mi_os_free_ex(_mi_page_map, mi_page_map_memid.mem.os.size, true, mi_page_map_memid, subproc); + _mi_page_map = NULL; + mi_page_map_count = 0; + mi_page_map_memid = _mi_memid_none(); + mi_page_map_max_address = NULL; + mi_atomic_store_release(&mi_page_map_commit, 0); +} + + +static mi_page_t** mi_page_map_ensure_submap_at(size_t idx) { + mi_page_t** sub = mi_page_map_ensure_committed(idx); + if mi_unlikely(sub == NULL) { + // sub map not yet allocated, alloc now + mi_memid_t memid; + mi_page_t** expect = sub; + const size_t submap_size = MI_PAGE_MAP_SUB_SIZE; + sub = (mi_page_t**)_mi_os_zalloc(submap_size, 
&memid); + if (sub == NULL) { + _mi_error_message(EFAULT, "internal error: unable to extend the page map\n"); + return NULL; + } + if (!mi_atomic_cas_ptr_strong_acq_rel(mi_page_t*, &_mi_page_map[idx], &expect, sub)) { + // another thread already allocated it.. free and continue + _mi_os_free(sub, submap_size, memid); + sub = expect; + mi_assert_internal(sub!=NULL); + } + } + return sub; +} + +static void mi_page_map_set_range(mi_page_t* page, size_t idx, size_t sub_idx, size_t slice_count) { + // is the page map area that contains the page address committed? + while (slice_count > 0) { + mi_page_t** sub = mi_page_map_ensure_submap_at(idx); + // set the offsets for the page + while (sub_idx < MI_PAGE_MAP_SUB_COUNT) { + sub[sub_idx] = page; + slice_count--; if (slice_count == 0) return; + sub_idx++; + } + idx++; // potentially wrap around to the next idx + sub_idx = 0; + } +} + +static size_t mi_page_map_get_idx(mi_page_t* page, size_t* sub_idx, size_t* slice_count) { + size_t page_size; + uint8_t* page_start = mi_page_area(page, &page_size); + if (page_size > MI_LARGE_PAGE_SIZE) { page_size = MI_LARGE_PAGE_SIZE - MI_ARENA_SLICE_SIZE; } // furthest interior pointer + *slice_count = mi_slice_count_of_size(page_size) + ((page_start - (uint8_t*)page)/MI_ARENA_SLICE_SIZE); // add for large aligned blocks + return _mi_page_map_index(page, sub_idx); +} + +void _mi_page_map_register(mi_page_t* page) { + mi_assert_internal(page != NULL); + mi_assert_internal(_mi_is_aligned(page, MI_PAGE_ALIGN)); + mi_assert_internal(_mi_page_map != NULL); // should be initialized before multi-thread access! 
+ if mi_unlikely(_mi_page_map == NULL) { + if (!_mi_page_map_init()) return; + } + mi_assert(_mi_page_map!=NULL); + size_t slice_count; + size_t sub_idx; + const size_t idx = mi_page_map_get_idx(page, &sub_idx, &slice_count); + mi_page_map_set_range(page, idx, sub_idx, slice_count); +} + +void _mi_page_map_unregister(mi_page_t* page) { + mi_assert_internal(_mi_page_map != NULL); + mi_assert_internal(page != NULL); + mi_assert_internal(_mi_is_aligned(page, MI_PAGE_ALIGN)); + if mi_unlikely(_mi_page_map == NULL) return; + // get index and count + size_t slice_count; + size_t sub_idx; + const size_t idx = mi_page_map_get_idx(page, &sub_idx, &slice_count); + // unset the offsets + mi_page_map_set_range(NULL, idx, sub_idx, slice_count); +} + +void _mi_page_map_unregister_range(void* start, size_t size) { + if mi_unlikely(_mi_page_map == NULL) return; + const size_t slice_count = _mi_divide_up(size, MI_ARENA_SLICE_SIZE); + size_t sub_idx; + const uintptr_t idx = _mi_page_map_index(start, &sub_idx); + mi_page_map_set_range(NULL, idx, sub_idx, slice_count); // todo: avoid committing if not already committed? 
+} + +// Return NULL for invalid pointers +mi_page_t* _mi_safe_ptr_page(const void* p) { + if (p==NULL) return NULL; + if mi_unlikely(p >= mi_page_map_max_address) return NULL; + size_t sub_idx; + const size_t idx = _mi_page_map_index(p,&sub_idx); + if mi_unlikely(!mi_page_map_is_committed(idx,NULL)) return NULL; + mi_page_t** const sub = _mi_page_map[idx]; + if mi_unlikely(sub==NULL) return NULL; + return sub[sub_idx]; +} + +mi_decl_nodiscard mi_decl_export bool mi_is_in_heap_region(const void* p) mi_attr_noexcept { + return (_mi_safe_ptr_page(p) != NULL); +} + +#endif diff --git a/depends/mimalloc/src/page-queue.c b/depends/mimalloc/src/page-queue.c index 92f933c2a0d7..91bb0ef9ddeb 100644 --- a/depends/mimalloc/src/page-queue.c +++ b/depends/mimalloc/src/page-queue.c @@ -1,5 +1,5 @@ /*---------------------------------------------------------------------------- -Copyright (c) 2018-2020, Microsoft Research, Daan Leijen +Copyright (c) 2018-2024, Microsoft Research, Daan Leijen This is free software; you can redistribute it and/or modify it under the terms of the MIT license. A copy of the license can be found in the file "LICENSE" at the root of this distribution. @@ -11,6 +11,10 @@ terms of the MIT license. A copy of the license can be found in the file #ifndef MI_IN_PAGE_C #error "this file should be included from 'page.c'" +// include to help an IDE +#include "mimalloc.h" +#include "mimalloc/internal.h" +#include "mimalloc/atomic.h" #endif /* ----------------------------------------------------------- @@ -34,15 +38,19 @@ terms of the MIT license. 
A copy of the license can be found in the file static inline bool mi_page_queue_is_huge(const mi_page_queue_t* pq) { - return (pq->block_size == (MI_MEDIUM_OBJ_SIZE_MAX+sizeof(uintptr_t))); + return (pq->block_size == (MI_LARGE_MAX_OBJ_SIZE+sizeof(uintptr_t))); } static inline bool mi_page_queue_is_full(const mi_page_queue_t* pq) { - return (pq->block_size == (MI_MEDIUM_OBJ_SIZE_MAX+(2*sizeof(uintptr_t)))); + return (pq->block_size == (MI_LARGE_MAX_OBJ_SIZE+(2*sizeof(uintptr_t)))); } static inline bool mi_page_queue_is_special(const mi_page_queue_t* pq) { - return (pq->block_size > MI_MEDIUM_OBJ_SIZE_MAX); + return (pq->block_size > MI_LARGE_MAX_OBJ_SIZE); +} + +static inline size_t mi_page_queue_count(const mi_page_queue_t* pq) { + return pq->count; } /* ----------------------------------------------------------- @@ -53,27 +61,23 @@ static inline bool mi_page_queue_is_special(const mi_page_queue_t* pq) { // Returns MI_BIN_HUGE if the size is too large. // We use `wsize` for the size in "machine word sizes", // i.e. byte size == `wsize*sizeof(void*)`. -static inline uint8_t mi_bin(size_t size) { +static mi_decl_noinline size_t mi_bin(size_t size) { size_t wsize = _mi_wsize_from_size(size); - uint8_t bin; - if (wsize <= 1) { - bin = 1; - } - #if defined(MI_ALIGN4W) - else if (wsize <= 4) { - bin = (uint8_t)((wsize+1)&~1); // round to double word sizes +#if defined(MI_ALIGN4W) + if mi_likely(wsize <= 4) { + return (wsize <= 1 ? 1 : (wsize+1)&~1); // round to double word sizes } - #elif defined(MI_ALIGN2W) - else if (wsize <= 8) { - bin = (uint8_t)((wsize+1)&~1); // round to double word sizes +#elif defined(MI_ALIGN2W) + if mi_likely(wsize <= 8) { + return (wsize <= 1 ? 1 : (wsize+1)&~1); // round to double word sizes } - #else - else if (wsize <= 8) { - bin = (uint8_t)wsize; +#else + if mi_likely(wsize <= 8) { + return (wsize == 0 ? 
1 : wsize); } - #endif - else if (wsize > MI_MEDIUM_OBJ_WSIZE_MAX) { - bin = MI_BIN_HUGE; +#endif + else if mi_unlikely(wsize > MI_LARGE_MAX_OBJ_WSIZE) { + return MI_BIN_HUGE; } else { #if defined(MI_ALIGN4W) @@ -81,15 +85,14 @@ static inline uint8_t mi_bin(size_t size) { #endif wsize--; // find the highest bit - uint8_t b = (uint8_t)mi_bsr(wsize); // note: wsize != 0 + const size_t b = (MI_SIZE_BITS - 1 - mi_clz(wsize)); // note: wsize != 0 // and use the top 3 bits to determine the bin (~12.5% worst internal fragmentation). // - adjust with 3 because we use do not round the first 8 sizes // which each get an exact bin - bin = ((b << 2) + (uint8_t)((wsize >> (b - 2)) & 0x03)) - 3; - mi_assert_internal(bin < MI_BIN_HUGE); + const size_t bin = ((b << 2) + ((wsize >> (b - 2)) & 0x03)) - 3; + mi_assert_internal(bin > 0 && bin < MI_BIN_HUGE); + return bin; } - mi_assert_internal(bin > 0 && bin <= MI_BIN_HUGE); - return bin; } @@ -98,21 +101,22 @@ static inline uint8_t mi_bin(size_t size) { Queue of pages with free blocks ----------------------------------------------------------- */ -uint8_t _mi_bin(size_t size) { +size_t _mi_bin(size_t size) { return mi_bin(size); } -size_t _mi_bin_size(uint8_t bin) { +size_t _mi_bin_size(size_t bin) { + mi_assert_internal(bin <= MI_BIN_HUGE); return _mi_heap_empty.pages[bin].block_size; } // Good size for allocation -size_t mi_good_size(size_t size) mi_attr_noexcept { - if (size <= MI_MEDIUM_OBJ_SIZE_MAX) { - return _mi_bin_size(mi_bin(size)); +mi_decl_nodiscard mi_decl_export size_t mi_good_size(size_t size) mi_attr_noexcept { + if (size <= MI_LARGE_MAX_OBJ_SIZE) { + return _mi_bin_size(mi_bin(size + MI_PADDING_SIZE)); } else { - return _mi_align_up(size,_mi_os_page_size()); + return _mi_align_up(size + MI_PADDING_SIZE,_mi_os_page_size()); } } @@ -137,21 +141,53 @@ static bool mi_heap_contains_queue(const mi_heap_t* heap, const mi_page_queue_t* } #endif -static mi_page_queue_t* mi_page_queue_of(const mi_page_t* page) { - uint8_t bin 
= (mi_page_is_in_full(page) ? MI_BIN_FULL : mi_bin(page->xblock_size)); - mi_heap_t* heap = mi_page_heap(page); - mi_assert_internal(heap != NULL && bin <= MI_BIN_FULL); - mi_page_queue_t* pq = &heap->pages[bin]; - mi_assert_internal(bin >= MI_BIN_HUGE || page->xblock_size == pq->block_size); - mi_assert_expensive(mi_page_queue_contains(pq, page)); - return pq; +bool _mi_page_queue_is_valid(mi_heap_t* heap, const mi_page_queue_t* pq) { + MI_UNUSED_RELEASE(heap); + if (pq==NULL) return false; + size_t count = 0; MI_UNUSED_RELEASE(count); + mi_page_t* prev = NULL; MI_UNUSED_RELEASE(prev); + for (mi_page_t* page = pq->first; page != NULL; page = page->next) { + mi_assert_internal(page->prev == prev); + if (mi_page_is_in_full(page)) { + mi_assert_internal(_mi_wsize_from_size(pq->block_size) == MI_LARGE_MAX_OBJ_WSIZE + 2); + } + else if (mi_page_is_huge(page)) { + mi_assert_internal(_mi_wsize_from_size(pq->block_size) == MI_LARGE_MAX_OBJ_WSIZE + 1); + } + else { + mi_assert_internal(mi_page_block_size(page) == pq->block_size); + } + mi_assert_internal(page->heap == heap); + if (page->next == NULL) { + mi_assert_internal(pq->last == page); + } + count++; + prev = page; + } + mi_assert_internal(pq->count == count); + return true; } -static mi_page_queue_t* mi_heap_page_queue_of(mi_heap_t* heap, const mi_page_t* page) { - uint8_t bin = (mi_page_is_in_full(page) ? MI_BIN_FULL : mi_bin(page->xblock_size)); +size_t _mi_page_bin(const mi_page_t* page) { + const size_t bin = (mi_page_is_in_full(page) ? MI_BIN_FULL : (mi_page_is_huge(page) ? 
MI_BIN_HUGE : mi_bin(mi_page_block_size(page)))); mi_assert_internal(bin <= MI_BIN_FULL); + return bin; +} + +static mi_page_queue_t* mi_heap_page_queue_of(mi_heap_t* heap, const mi_page_t* page) { + mi_assert_internal(heap!=NULL); + const size_t bin = _mi_page_bin(page); mi_page_queue_t* pq = &heap->pages[bin]; - mi_assert_internal(mi_page_is_in_full(page) || page->xblock_size == pq->block_size); + mi_assert_internal((mi_page_block_size(page) == pq->block_size) || + (mi_page_is_huge(page) && mi_page_queue_is_huge(pq)) || + (mi_page_is_in_full(page) && mi_page_queue_is_full(pq))); + return pq; +} + +static mi_page_queue_t* mi_page_queue_of(const mi_page_t* page) { + mi_heap_t* heap = mi_page_heap(page); + mi_page_queue_t* pq = mi_heap_page_queue_of(heap, page); + mi_assert_expensive(mi_page_queue_contains(pq, page)); return pq; } @@ -181,7 +217,7 @@ static inline void mi_heap_queue_first_update(mi_heap_t* heap, const mi_page_que } else { // find previous size; due to minimal alignment upto 3 previous bins may need to be skipped - uint8_t bin = mi_bin(size); + size_t bin = mi_bin(size); const mi_page_queue_t* prev = pq - 1; while( bin == mi_bin(prev->block_size) && prev > &heap->pages[0]) { prev--; @@ -206,9 +242,11 @@ static bool mi_page_queue_is_empty(mi_page_queue_t* queue) { static void mi_page_queue_remove(mi_page_queue_t* queue, mi_page_t* page) { mi_assert_internal(page != NULL); mi_assert_expensive(mi_page_queue_contains(queue, page)); - mi_assert_internal(page->xblock_size == queue->block_size || (page->xblock_size > MI_MEDIUM_OBJ_SIZE_MAX && mi_page_queue_is_huge(queue)) || (mi_page_is_in_full(page) && mi_page_queue_is_full(queue))); + mi_assert_internal(queue->count >= 1); + mi_assert_internal(mi_page_block_size(page) == queue->block_size || + (mi_page_is_huge(page) && mi_page_queue_is_huge(queue)) || + (mi_page_is_in_full(page) && mi_page_queue_is_full(queue))); mi_heap_t* heap = mi_page_heap(page); - if (page->prev != NULL) page->prev->next = 
page->next; if (page->next != NULL) page->next->prev = page->prev; if (page == queue->last) queue->last = page->prev; @@ -219,9 +257,9 @@ static void mi_page_queue_remove(mi_page_queue_t* queue, mi_page_t* page) { mi_heap_queue_first_update(heap,queue); } heap->page_count--; + queue->count--; page->next = NULL; page->prev = NULL; - // mi_atomic_store_ptr_release(mi_atomic_cast(void*, &page->heap), NULL); mi_page_set_in_full(page,false); } @@ -229,14 +267,15 @@ static void mi_page_queue_remove(mi_page_queue_t* queue, mi_page_t* page) { static void mi_page_queue_push(mi_heap_t* heap, mi_page_queue_t* queue, mi_page_t* page) { mi_assert_internal(mi_page_heap(page) == heap); mi_assert_internal(!mi_page_queue_contains(queue, page)); - - mi_assert_internal(_mi_page_segment(page)->kind != MI_SEGMENT_HUGE); - mi_assert_internal(page->xblock_size == queue->block_size || - (page->xblock_size > MI_MEDIUM_OBJ_SIZE_MAX) || + #if MI_HUGE_PAGE_ABANDON + mi_assert_internal(_mi_page_segment(page)->page_kind != MI_PAGE_HUGE); + #endif + mi_assert_internal(mi_page_block_size(page) == queue->block_size || + (mi_page_is_huge(page) && mi_page_queue_is_huge(queue)) || (mi_page_is_in_full(page) && mi_page_queue_is_full(queue))); mi_page_set_in_full(page, mi_page_queue_is_full(queue)); - // mi_atomic_store_ptr_release(mi_atomic_cast(void*, &page->heap), heap); + page->next = queue->first; page->prev = NULL; if (queue->first != NULL) { @@ -247,25 +286,67 @@ static void mi_page_queue_push(mi_heap_t* heap, mi_page_queue_t* queue, mi_page_ else { queue->first = queue->last = page; } + queue->count++; // update direct mi_heap_queue_first_update(heap, queue); heap->page_count++; } +static void mi_page_queue_push_at_end(mi_heap_t* heap, mi_page_queue_t* queue, mi_page_t* page) { + mi_assert_internal(mi_page_heap(page) == heap); + mi_assert_internal(!mi_page_queue_contains(queue, page)); -static void mi_page_queue_enqueue_from(mi_page_queue_t* to, mi_page_queue_t* from, mi_page_t* page) { + 
mi_assert_internal(mi_page_block_size(page) == queue->block_size || + (mi_page_is_huge(page) && mi_page_queue_is_huge(queue)) || + (mi_page_is_in_full(page) && mi_page_queue_is_full(queue))); + + mi_page_set_in_full(page, mi_page_queue_is_full(queue)); + + page->prev = queue->last; + page->next = NULL; + if (queue->last != NULL) { + mi_assert_internal(queue->last->next == NULL); + queue->last->next = page; + queue->last = page; + } + else { + queue->first = queue->last = page; + } + queue->count++; + + // update direct + if (queue->first == page) { + mi_heap_queue_first_update(heap, queue); + } + heap->page_count++; +} + +static void mi_page_queue_move_to_front(mi_heap_t* heap, mi_page_queue_t* queue, mi_page_t* page) { + mi_assert_internal(mi_page_heap(page) == heap); + mi_assert_internal(mi_page_queue_contains(queue, page)); + if (queue->first == page) return; + mi_page_queue_remove(queue, page); + mi_page_queue_push(heap, queue, page); + mi_assert_internal(queue->first == page); +} + +static void mi_page_queue_enqueue_from_ex(mi_page_queue_t* to, mi_page_queue_t* from, bool enqueue_at_end, mi_page_t* page) { mi_assert_internal(page != NULL); + mi_assert_internal(from->count >= 1); mi_assert_expensive(mi_page_queue_contains(from, page)); mi_assert_expensive(!mi_page_queue_contains(to, page)); - - mi_assert_internal((page->xblock_size == to->block_size && page->xblock_size == from->block_size) || - (page->xblock_size == to->block_size && mi_page_queue_is_full(from)) || - (page->xblock_size == from->block_size && mi_page_queue_is_full(to)) || - (page->xblock_size > MI_LARGE_OBJ_SIZE_MAX && mi_page_queue_is_huge(to)) || - (page->xblock_size > MI_LARGE_OBJ_SIZE_MAX && mi_page_queue_is_full(to))); + const size_t bsize = mi_page_block_size(page); + MI_UNUSED(bsize); + mi_assert_internal((bsize == to->block_size && bsize == from->block_size) || + (bsize == to->block_size && mi_page_queue_is_full(from)) || + (bsize == from->block_size && mi_page_queue_is_full(to)) || + 
(mi_page_is_huge(page) && mi_page_queue_is_huge(to)) || + (mi_page_is_huge(page) && mi_page_queue_is_full(to))); mi_heap_t* heap = mi_page_heap(page); + + // delete from `from` if (page->prev != NULL) page->prev->next = page->next; if (page->next != NULL) page->next->prev = page->prev; if (page == from->last) from->last = page->prev; @@ -275,23 +356,62 @@ static void mi_page_queue_enqueue_from(mi_page_queue_t* to, mi_page_queue_t* fro mi_assert_internal(mi_heap_contains_queue(heap, from)); mi_heap_queue_first_update(heap, from); } - - page->prev = to->last; - page->next = NULL; - if (to->last != NULL) { - mi_assert_internal(heap == mi_page_heap(to->last)); - to->last->next = page; - to->last = page; + from->count--; + + // insert into `to` + to->count++; + if (enqueue_at_end) { + // enqueue at the end + page->prev = to->last; + page->next = NULL; + if (to->last != NULL) { + mi_assert_internal(heap == mi_page_heap(to->last)); + to->last->next = page; + to->last = page; + } + else { + to->first = page; + to->last = page; + mi_heap_queue_first_update(heap, to); + } } else { - to->first = page; - to->last = page; - mi_heap_queue_first_update(heap, to); + if (to->first != NULL) { + // enqueue at 2nd place + mi_assert_internal(heap == mi_page_heap(to->first)); + mi_page_t* next = to->first->next; + page->prev = to->first; + page->next = next; + to->first->next = page; + if (next != NULL) { + next->prev = page; + } + else { + to->last = page; + } + } + else { + // enqueue at the head (singleton list) + page->prev = NULL; + page->next = NULL; + to->first = page; + to->last = page; + mi_heap_queue_first_update(heap, to); + } } mi_page_set_in_full(page, mi_page_queue_is_full(to)); } +static void mi_page_queue_enqueue_from(mi_page_queue_t* to, mi_page_queue_t* from, mi_page_t* page) { + mi_page_queue_enqueue_from_ex(to, from, true /* enqueue at the end */, page); +} + +static void mi_page_queue_enqueue_from_full(mi_page_queue_t* to, mi_page_queue_t* from, mi_page_t* page) { + 
// note: we could insert at the front to increase reuse, but it slows down certain benchmarks (like `alloc-test`) + mi_page_queue_enqueue_from_ex(to, from, true /* enqueue at the end of the `to` queue? */, page); +} + // Only called from `mi_heap_absorb`. size_t _mi_page_queue_append(mi_heap_t* heap, mi_page_queue_t* pq, mi_page_queue_t* append) { mi_assert_internal(mi_heap_contains_queue(heap,pq)); @@ -302,15 +422,10 @@ size_t _mi_page_queue_append(mi_heap_t* heap, mi_page_queue_t* pq, mi_page_queue // set append pages to new heap and count size_t count = 0; for (mi_page_t* page = append->first; page != NULL; page = page->next) { - // inline `mi_page_set_heap` to avoid wrong assertion during absorption; - // in this case it is ok to be delayed freeing since both "to" and "from" heap are still alive. - mi_atomic_store_release(&page->xheap, (uintptr_t)heap); - // set the flag to delayed free (not overriding NEVER_DELAYED_FREE) which has as a - // side effect that it spins until any DELAYED_FREEING is finished. This ensures - // that after appending only the new heap will be used for delayed free operations. 
- _mi_page_use_delayed_free(page, MI_USE_DELAYED_FREE, false); + mi_page_set_heap(page, heap); count++; } + mi_assert_internal(count == append->count); if (pq->last==NULL) { // take over afresh @@ -327,5 +442,7 @@ size_t _mi_page_queue_append(mi_heap_t* heap, mi_page_queue_t* pq, mi_page_queue append->first->prev = pq->last; pq->last = append->last; } + pq->count += append->count; + return count; } diff --git a/depends/mimalloc/src/page.c b/depends/mimalloc/src/page.c index 4b321156c6a4..b80b4463b354 100644 --- a/depends/mimalloc/src/page.c +++ b/depends/mimalloc/src/page.c @@ -1,5 +1,5 @@ /*---------------------------------------------------------------------------- -Copyright (c) 2018-2020, Microsoft Research, Daan Leijen +Copyright (c) 2018-2024, Microsoft Research, Daan Leijen This is free software; you can redistribute it and/or modify it under the terms of the MIT license. A copy of the license can be found in the file "LICENSE" at the root of this distribution. @@ -12,8 +12,8 @@ terms of the MIT license. 
A copy of the license can be found in the file ----------------------------------------------------------- */ #include "mimalloc.h" -#include "mimalloc-internal.h" -#include "mimalloc-atomic.h" +#include "mimalloc/internal.h" +#include "mimalloc/atomic.h" /* ----------------------------------------------------------- Definition of page queues for each block size @@ -36,14 +36,15 @@ static inline mi_block_t* mi_page_block_at(const mi_page_t* page, void* page_sta return (mi_block_t*)((uint8_t*)page_start + (i * block_size)); } -static void mi_page_init(mi_heap_t* heap, mi_page_t* page, size_t size, mi_tld_t* tld); -static void mi_page_extend_free(mi_heap_t* heap, mi_page_t* page, mi_tld_t* tld); +//static void mi_page_init(mi_heap_t* heap, mi_page_t* page, size_t size, mi_tld_t* tld); +static bool mi_page_extend_free(mi_heap_t* heap, mi_page_t* page); #if (MI_DEBUG>=3) static size_t mi_page_list_count(mi_page_t* page, mi_block_t* head) { + mi_assert_internal(_mi_ptr_page(page) == page); size_t count = 0; while (head != NULL) { - mi_assert_internal(page == _mi_ptr_page(head)); + mi_assert_internal((uint8_t*)head - (uint8_t*)page > (ptrdiff_t)MI_LARGE_PAGE_SIZE || page == _mi_ptr_page(head)); count++; head = mi_block_next(page, head); } @@ -59,32 +60,38 @@ static inline uint8_t* mi_page_area(const mi_page_t* page) { static bool mi_page_list_is_valid(mi_page_t* page, mi_block_t* p) { size_t psize; - uint8_t* page_area = _mi_page_start(_mi_page_segment(page), page, &psize); + uint8_t* page_area = mi_page_area(page, &psize); mi_block_t* start = (mi_block_t*)page_area; mi_block_t* end = (mi_block_t*)(page_area + psize); while(p != NULL) { if (p < start || p >= end) return false; p = mi_block_next(page, p); } +#if MI_DEBUG>3 // generally too expensive to check this + if (page->free_is_zero) { + const size_t ubsize = mi_page_usable_block_size(page); + for (mi_block_t* block = page->free; block != NULL; block = mi_block_next(page, block)) { + 
mi_assert_expensive(mi_mem_is_zero(block + 1, ubsize - sizeof(mi_block_t))); + } + } +#endif return true; } static bool mi_page_is_valid_init(mi_page_t* page) { - mi_assert_internal(page->xblock_size > 0); + mi_assert_internal(mi_page_block_size(page) > 0); mi_assert_internal(page->used <= page->capacity); mi_assert_internal(page->capacity <= page->reserved); - mi_segment_t* segment = _mi_page_segment(page); - uint8_t* start = _mi_page_start(segment,page,NULL); - mi_assert_internal(start == _mi_segment_page_start(segment,page,NULL)); - //const size_t bsize = mi_page_block_size(page); + // const size_t bsize = mi_page_block_size(page); + // uint8_t* start = mi_page_start(page); //mi_assert_internal(start + page->capacity*page->block_size == page->top); mi_assert_internal(mi_page_list_is_valid(page,page->free)); mi_assert_internal(mi_page_list_is_valid(page,page->local_free)); #if MI_DEBUG>3 // generally too expensive to check this - if (page->is_zero) { + if (page->free_is_zero) { const size_t ubsize = mi_page_usable_block_size(page); for(mi_block_t* block = page->free; block != NULL; block = mi_block_next(page,block)) { mi_assert_expensive(mi_mem_is_zero(block + 1, ubsize - sizeof(mi_block_t))); @@ -92,10 +99,12 @@ static bool mi_page_is_valid_init(mi_page_t* page) { } #endif + #if !MI_TRACK_ENABLED && !MI_TSAN mi_block_t* tfree = mi_page_thread_free(page); mi_assert_internal(mi_page_list_is_valid(page, tfree)); //size_t tfree_count = mi_page_list_count(page, tfree); //mi_assert_internal(tfree_count <= page->thread_freed + 1); + #endif size_t free_count = mi_page_list_count(page, page->free) + mi_page_list_count(page, page->local_free); mi_assert_internal(page->used + free_count == page->capacity); @@ -103,89 +112,45 @@ static bool mi_page_is_valid_init(mi_page_t* page) { return true; } +extern mi_decl_hidden bool _mi_process_is_initialized; // has mi_process_init been called? 
+ bool _mi_page_is_valid(mi_page_t* page) { mi_assert_internal(mi_page_is_valid_init(page)); #if MI_SECURE mi_assert_internal(page->keys[0] != 0); #endif - if (mi_page_heap(page)!=NULL) { - mi_segment_t* segment = _mi_page_segment(page); - - mi_assert_internal(!_mi_process_is_initialized || segment->thread_id==0 || segment->thread_id == mi_page_heap(page)->thread_id); - if (segment->kind != MI_SEGMENT_HUGE) { + if (!mi_page_is_abandoned(page)) { + //mi_assert_internal(!_mi_process_is_initialized); + { mi_page_queue_t* pq = mi_page_queue_of(page); mi_assert_internal(mi_page_queue_contains(pq, page)); - mi_assert_internal(pq->block_size==mi_page_block_size(page) || mi_page_block_size(page) > MI_MEDIUM_OBJ_SIZE_MAX || mi_page_is_in_full(page)); - mi_assert_internal(mi_heap_contains_queue(mi_page_heap(page),pq)); + mi_assert_internal(pq->block_size==mi_page_block_size(page) || mi_page_is_huge(page) || mi_page_is_in_full(page)); + // mi_assert_internal(mi_heap_contains_queue(mi_page_heap(page),pq)); } } return true; } #endif -void _mi_page_use_delayed_free(mi_page_t* page, mi_delayed_t delay, bool override_never) { - while (!_mi_page_try_use_delayed_free(page, delay, override_never)) { - mi_atomic_yield(); - } -} - -bool _mi_page_try_use_delayed_free(mi_page_t* page, mi_delayed_t delay, bool override_never) { - mi_thread_free_t tfreex; - mi_delayed_t old_delay; - mi_thread_free_t tfree; - size_t yield_count = 0; - do { - tfree = mi_atomic_load_acquire(&page->xthread_free); // note: must acquire as we can break/repeat this loop and not do a CAS; - tfreex = mi_tf_set_delayed(tfree, delay); - old_delay = mi_tf_delayed(tfree); - if mi_unlikely(old_delay == MI_DELAYED_FREEING) { - if (yield_count >= 4) return false; // give up after 4 tries - yield_count++; - mi_atomic_yield(); // delay until outstanding MI_DELAYED_FREEING are done. 
- // tfree = mi_tf_set_delayed(tfree, MI_NO_DELAYED_FREE); // will cause CAS to busy fail - } - else if (delay == old_delay) { - break; // avoid atomic operation if already equal - } - else if (!override_never && old_delay == MI_NEVER_DELAYED_FREE) { - break; // leave never-delayed flag set - } - } while ((old_delay == MI_DELAYED_FREEING) || - !mi_atomic_cas_weak_release(&page->xthread_free, &tfree, tfreex)); - - return true; // success -} /* ----------------------------------------------------------- Page collect the `local_free` and `thread_free` lists ----------------------------------------------------------- */ -// Collect the local `thread_free` list using an atomic exchange. -// Note: The exchange must be done atomically as this is used right after -// moving to the full list in `mi_page_collect_ex` and we need to -// ensure that there was no race where the page became unfull just before the move. -static void _mi_page_thread_free_collect(mi_page_t* page) +static void mi_page_thread_collect_to_local(mi_page_t* page, mi_block_t* head) { - mi_block_t* head; - mi_thread_free_t tfreex; - mi_thread_free_t tfree = mi_atomic_load_relaxed(&page->xthread_free); - do { - head = mi_tf_block(tfree); - tfreex = mi_tf_set_block(tfree,NULL); - } while (!mi_atomic_cas_weak_acq_rel(&page->xthread_free, &tfree, tfreex)); - - // return if the list is empty if (head == NULL) return; - // find the tail -- also to get a proper count (without data races) - uint32_t max_count = page->capacity; // cannot collect more than capacity - uint32_t count = 1; - mi_block_t* tail = head; + // find the last block in the list -- also to get a proper use count (without data races) + size_t max_count = page->capacity; // cannot collect more than capacity + size_t count = 1; + mi_block_t* last = head; mi_block_t* next; - while ((next = mi_block_next(page,tail)) != NULL && count <= max_count) { + while ((next = mi_block_next(page, last)) != NULL && count <= max_count) { count++; - tail = next; + 
last = next; } + // if `count > max_count` there was a memory corruption (possibly infinite list due to double multi-threaded free) if (count > max_count) { _mi_error_message(EFAULT, "corrupted thread-free list\n"); @@ -193,20 +158,37 @@ static void _mi_page_thread_free_collect(mi_page_t* page) } // and append the current local free list - mi_block_set_next(page,tail, page->local_free); + mi_block_set_next(page, last, page->local_free); page->local_free = head; // update counts now - page->used -= count; + mi_assert_internal(count <= UINT16_MAX); + page->used = page->used - (uint16_t)count; +} + +// Collect the local `thread_free` list using an atomic exchange. +static void mi_page_thread_free_collect(mi_page_t* page) +{ + // atomically capture the thread free list + mi_block_t* head; + mi_thread_free_t tfreex; + mi_thread_free_t tfree = mi_atomic_load_relaxed(&page->xthread_free); + do { + head = mi_tf_block(tfree); + if mi_likely(head == NULL) return; // return if the list is empty + tfreex = mi_tf_create(NULL,mi_tf_is_owned(tfree)); // set the thread free list to NULL + } while (!mi_atomic_cas_weak_acq_rel(&page->xthread_free, &tfree, tfreex)); // release is enough? 
+ mi_assert_internal(head != NULL); + + // and move it to the local list + mi_page_thread_collect_to_local(page, head); } void _mi_page_free_collect(mi_page_t* page, bool force) { mi_assert_internal(page!=NULL); // collect the thread free list - if (force || mi_page_thread_free(page) != NULL) { // quick test to avoid an atomic operation - _mi_page_thread_free_collect(page); - } + mi_page_thread_free_collect(page); // and the local free list if (page->local_free != NULL) { @@ -214,7 +196,7 @@ void _mi_page_free_collect(mi_page_t* page, bool force) { // usual case page->free = page->local_free; page->local_free = NULL; - page->is_zero = false; + page->free_is_zero = false; } else if (force) { // append -- only on shutdown (force) as this is a linear operation @@ -226,45 +208,121 @@ void _mi_page_free_collect(mi_page_t* page, bool force) { mi_block_set_next(page, tail, page->free); page->free = page->local_free; page->local_free = NULL; - page->is_zero = false; + page->free_is_zero = false; } } mi_assert_internal(!force || page->local_free == NULL); } +// Collect elements in the thread-free list starting at `head`. This is an optimized +// version of `_mi_page_free_collect` to be used from `free.c:_mi_free_collect_mt` that avoids atomic access to `xthread_free`. +// +// `head` must be in the `xthread_free` list. It will not collect `head` itself +// so the `used` count is not fully updated in general. However, if the `head` is +// the last remaining element, it will be collected and the used count will become `0` (so `mi_page_all_free` becomes true). 
+void _mi_page_free_collect_partly(mi_page_t* page, mi_block_t* head) { + if (head == NULL) return; + mi_block_t* next = mi_block_next(page,head); // we cannot collect the head element itself as `page->thread_free` may point to it (and we want to avoid atomic ops) + if (next != NULL) { + mi_block_set_next(page, head, NULL); + mi_page_thread_collect_to_local(page, next); + if (page->local_free != NULL && page->free == NULL) { + page->free = page->local_free; + page->local_free = NULL; + page->free_is_zero = false; + } + } + if (page->used == 1) { + // all elements are free'd since we skipped the `head` element itself + mi_assert_internal(mi_tf_block(mi_atomic_load_relaxed(&page->xthread_free)) == head); + mi_assert_internal(mi_block_next(page,head) == NULL); + _mi_page_free_collect(page, false); // collect the final element + } +} /* ----------------------------------------------------------- Page fresh and retire ----------------------------------------------------------- */ +/* // called from segments when reclaiming abandoned pages void _mi_page_reclaim(mi_heap_t* heap, mi_page_t* page) { + // mi_page_set_heap(page, heap); + // _mi_page_use_delayed_free(page, MI_USE_DELAYED_FREE, true); // override never (after heap is set) + _mi_page_free_collect(page, false); // ensure used count is up to date + mi_assert_expensive(mi_page_is_valid_init(page)); + // mi_assert_internal(mi_page_heap(page) == heap); + // mi_assert_internal(mi_page_thread_free_flag(page) != MI_NEVER_DELAYED_FREE); - mi_assert_internal(mi_page_heap(page) == heap); - mi_assert_internal(mi_page_thread_free_flag(page) != MI_NEVER_DELAYED_FREE); - mi_assert_internal(_mi_page_segment(page)->kind != MI_SEGMENT_HUGE); - mi_assert_internal(!page->is_reset); // TODO: push on full queue immediately if it is full? 
- mi_page_queue_t* pq = mi_page_queue(heap, mi_page_block_size(page)); + mi_page_queue_t* pq = mi_heap_page_queue_of(heap, page); mi_page_queue_push(heap, pq, page); mi_assert_expensive(_mi_page_is_valid(page)); } +*/ + +// called from `mi_free` on a reclaim, and fresh_alloc if we get an abandoned page +void _mi_heap_page_reclaim(mi_heap_t* heap, mi_page_t* page) +{ + mi_assert_internal(_mi_is_aligned(page, MI_PAGE_ALIGN)); + mi_assert_internal(_mi_ptr_page(page)==page); + mi_assert_internal(mi_page_is_owned(page)); + mi_assert_internal(mi_page_is_abandoned(page)); + + mi_page_set_heap(page,heap); + _mi_page_free_collect(page, false); // ensure used count is up to date + mi_page_queue_t* pq = mi_heap_page_queue_of(heap, page); + mi_page_queue_push_at_end(heap, pq, page); + mi_assert_expensive(_mi_page_is_valid(page)); +} + +void _mi_page_abandon(mi_page_t* page, mi_page_queue_t* pq) { + _mi_page_free_collect(page, false); // ensure used count is up to date + if (mi_page_all_free(page)) { + _mi_page_free(page, pq); + } + else { + mi_page_queue_remove(pq, page); + mi_heap_t* heap = page->heap; + mi_page_set_heap(page, NULL); + page->heap = heap; // dont set heap to NULL so we can reclaim_on_free within the same heap + _mi_arenas_page_abandon(page, heap->tld); + _mi_arenas_collect(false, false, heap->tld); // allow purging + } +} + // allocate a fresh page from a segment -static mi_page_t* mi_page_fresh_alloc(mi_heap_t* heap, mi_page_queue_t* pq, size_t block_size) { - mi_assert_internal(pq==NULL||mi_heap_contains_queue(heap, pq)); - mi_page_t* page = _mi_segment_page_alloc(heap, block_size, &heap->tld->segments, &heap->tld->os); +static mi_page_t* mi_page_fresh_alloc(mi_heap_t* heap, mi_page_queue_t* pq, size_t block_size, size_t page_alignment) { + #if !MI_HUGE_PAGE_ABANDON + mi_assert_internal(pq != NULL); + mi_assert_internal(mi_heap_contains_queue(heap, pq)); + mi_assert_internal(page_alignment > 0 || block_size > MI_LARGE_MAX_OBJ_SIZE || block_size == 
pq->block_size); + #endif + mi_page_t* page = _mi_arenas_page_alloc(heap, block_size, page_alignment); if (page == NULL) { - // this may be out-of-memory, or an abandoned page was reclaimed (and in our queue) + // out-of-memory return NULL; } - mi_assert_internal(pq==NULL || _mi_page_segment(page)->kind != MI_SEGMENT_HUGE); - mi_page_init(heap, page, block_size, heap->tld); - mi_heap_stat_increase(heap, pages, 1); - if (pq!=NULL) mi_page_queue_push(heap, pq, page); // huge pages use pq==NULL + if (mi_page_is_abandoned(page)) { + _mi_heap_page_reclaim(heap, page); + if (!mi_page_immediate_available(page)) { + if (mi_page_is_expandable(page)) { + mi_page_extend_free(heap, page); + } + else { + mi_assert(false); // should not happen? + return NULL; + } + } + } + else if (pq != NULL) { + mi_page_queue_push(heap, pq, page); + } + mi_assert_internal(pq!=NULL || mi_page_block_size(page) >= block_size); mi_assert_expensive(_mi_page_is_valid(page)); return page; } @@ -272,58 +330,24 @@ static mi_page_t* mi_page_fresh_alloc(mi_heap_t* heap, mi_page_queue_t* pq, size // Get a fresh page to use static mi_page_t* mi_page_fresh(mi_heap_t* heap, mi_page_queue_t* pq) { mi_assert_internal(mi_heap_contains_queue(heap, pq)); - mi_page_t* page = mi_page_fresh_alloc(heap, pq, pq->block_size); + mi_page_t* page = mi_page_fresh_alloc(heap, pq, pq->block_size, 0); if (page==NULL) return NULL; mi_assert_internal(pq->block_size==mi_page_block_size(page)); - mi_assert_internal(pq==mi_page_queue(heap, mi_page_block_size(page))); + mi_assert_internal(pq==mi_heap_page_queue_of(heap, page)); return page; } -/* ----------------------------------------------------------- - Do any delayed frees - (put there by other threads if they deallocated in a full page) ------------------------------------------------------------ */ -void _mi_heap_delayed_free_all(mi_heap_t* heap) { - while (!_mi_heap_delayed_free_partial(heap)) { - mi_atomic_yield(); - } -} - -// returns true if all delayed frees were 
processed -bool _mi_heap_delayed_free_partial(mi_heap_t* heap) { - // take over the list (note: no atomic exchange since it is often NULL) - mi_block_t* block = mi_atomic_load_ptr_relaxed(mi_block_t, &heap->thread_delayed_free); - while (block != NULL && !mi_atomic_cas_ptr_weak_acq_rel(mi_block_t, &heap->thread_delayed_free, &block, NULL)) { /* nothing */ }; - bool all_freed = true; - - // and free them all - while(block != NULL) { - mi_block_t* next = mi_block_nextx(heap,block, heap->keys); - // use internal free instead of regular one to keep stats etc correct - if (!_mi_free_delayed_block(block)) { - // we might already start delayed freeing while another thread has not yet - // reset the delayed_freeing flag; in that case delay it further by reinserting the current block - // into the delayed free list - all_freed = false; - mi_block_t* dfree = mi_atomic_load_ptr_relaxed(mi_block_t, &heap->thread_delayed_free); - do { - mi_block_set_nextx(heap, block, dfree, heap->keys); - } while (!mi_atomic_cas_ptr_weak_release(mi_block_t,&heap->thread_delayed_free, &dfree, block)); - } - block = next; - } - return all_freed; -} /* ----------------------------------------------------------- Unfull, abandon, free and retire ----------------------------------------------------------- */ -// Move a page from the full list back to a regular list +// Move a page from the full list back to a regular list (called from thread-local mi_free) void _mi_page_unfull(mi_page_t* page) { mi_assert_internal(page != NULL); mi_assert_expensive(_mi_page_is_valid(page)); mi_assert_internal(mi_page_is_in_full(page)); + mi_assert_internal(!mi_page_heap(page)->allow_page_abandon); if (!mi_page_is_in_full(page)) return; mi_heap_t* heap = mi_page_heap(page); @@ -331,7 +355,7 @@ void _mi_page_unfull(mi_page_t* page) { mi_page_set_in_full(page, false); // to get the right queue mi_page_queue_t* pq = mi_heap_page_queue_of(heap, page); mi_page_set_in_full(page, true); - mi_page_queue_enqueue_from(pq, 
pqfull, page); + mi_page_queue_enqueue_from_full(pq, pqfull, page); } static void mi_page_to_full(mi_page_t* page, mi_page_queue_t* pq) { @@ -339,71 +363,43 @@ static void mi_page_to_full(mi_page_t* page, mi_page_queue_t* pq) { mi_assert_internal(!mi_page_immediate_available(page)); mi_assert_internal(!mi_page_is_in_full(page)); - if (mi_page_is_in_full(page)) return; - mi_page_queue_enqueue_from(&mi_page_heap(page)->pages[MI_BIN_FULL], pq, page); - _mi_page_free_collect(page,false); // try to collect right away in case another thread freed just before MI_USE_DELAYED_FREE was set -} - - -// Abandon a page with used blocks at the end of a thread. -// Note: only call if it is ensured that no references exist from -// the `page->heap->thread_delayed_free` into this page. -// Currently only called through `mi_heap_collect_ex` which ensures this. -void _mi_page_abandon(mi_page_t* page, mi_page_queue_t* pq) { - mi_assert_internal(page != NULL); - mi_assert_expensive(_mi_page_is_valid(page)); - mi_assert_internal(pq == mi_page_queue_of(page)); - mi_assert_internal(mi_page_heap(page) != NULL); - - mi_heap_t* pheap = mi_page_heap(page); - - // remove from our page list - mi_segments_tld_t* segments_tld = &pheap->tld->segments; - mi_page_queue_remove(pq, page); - - // page is no longer associated with our heap - mi_assert_internal(mi_page_thread_free_flag(page)==MI_NEVER_DELAYED_FREE); - mi_page_set_heap(page, NULL); - -#if MI_DEBUG>1 - // check there are no references left.. 
- for (mi_block_t* block = (mi_block_t*)pheap->thread_delayed_free; block != NULL; block = mi_block_nextx(pheap, block, pheap->keys)) { - mi_assert_internal(_mi_ptr_page(block) != page); + mi_heap_t* heap = mi_page_heap(page); + if (heap->allow_page_abandon) { + // abandon full pages (this is the usual case in order to allow for sharing of memory between heaps) + _mi_page_abandon(page, pq); + } + else if (!mi_page_is_in_full(page)) { + // put full pages in a heap local queue (this is for heaps that cannot abandon, for example, if the heap can be destroyed) + mi_page_queue_enqueue_from(&mi_page_heap(page)->pages[MI_BIN_FULL], pq, page); + _mi_page_free_collect(page, false); // try to collect right away in case another thread freed just before MI_USE_DELAYED_FREE was set } -#endif - - // and abandon it - mi_assert_internal(mi_page_heap(page) == NULL); - _mi_segment_page_abandon(page,segments_tld); } // Free a page with no more free blocks -void _mi_page_free(mi_page_t* page, mi_page_queue_t* pq, bool force) { +void _mi_page_free(mi_page_t* page, mi_page_queue_t* pq) { mi_assert_internal(page != NULL); mi_assert_expensive(_mi_page_is_valid(page)); mi_assert_internal(pq == mi_page_queue_of(page)); mi_assert_internal(mi_page_all_free(page)); - mi_assert_internal(mi_page_thread_free_flag(page)!=MI_DELAYED_FREEING); + // mi_assert_internal(mi_page_thread_free_flag(page)!=MI_DELAYED_FREEING); // no more aligned blocks in here mi_page_set_has_aligned(page, false); - mi_heap_t* heap = mi_page_heap(page); - // remove from the page list // (no need to do _mi_heap_delayed_free first as all blocks are already free) - mi_segments_tld_t* segments_tld = &heap->tld->segments; mi_page_queue_remove(pq, page); // and free it + mi_tld_t* const tld = page->heap->tld; mi_page_set_heap(page,NULL); - _mi_segment_page_free(page, force, segments_tld); + _mi_arenas_page_free(page,tld); + _mi_arenas_collect(false, false, tld); // allow purging } -// Retire parameters -#define MI_MAX_RETIRE_SIZE 
MI_MEDIUM_OBJ_SIZE_MAX -#define MI_RETIRE_CYCLES (8) +#define MI_MAX_RETIRE_SIZE MI_LARGE_OBJ_SIZE_MAX // should be less than size for MI_BIN_HUGE +#define MI_RETIRE_CYCLES (16) // Retire a page with no more used blocks // Important to not retire too quickly though as new @@ -415,7 +411,7 @@ void _mi_page_retire(mi_page_t* page) mi_attr_noexcept { mi_assert_internal(page != NULL); mi_assert_expensive(_mi_page_is_valid(page)); mi_assert_internal(mi_page_all_free(page)); - + mi_page_set_has_aligned(page, false); // don't retire too often.. @@ -425,21 +421,26 @@ void _mi_page_retire(mi_page_t* page) mi_attr_noexcept { // how to check this efficiently though... // for now, we don't retire if it is the only page left of this size class. mi_page_queue_t* pq = mi_page_queue_of(page); - if mi_likely(page->xblock_size <= MI_MAX_RETIRE_SIZE && !mi_page_is_in_full(page)) { + #if MI_RETIRE_CYCLES > 0 + const size_t bsize = mi_page_block_size(page); + if mi_likely( /* bsize < MI_MAX_RETIRE_SIZE && */ !mi_page_queue_is_special(pq)) { // not full or huge queue? if (pq->last==page && pq->first==page) { // the only page in the queue? - mi_stat_counter_increase(_mi_stats_main.page_no_retire,1); - page->retire_expire = 1 + (page->xblock_size <= MI_SMALL_OBJ_SIZE_MAX ? MI_RETIRE_CYCLES : MI_RETIRE_CYCLES/4); mi_heap_t* heap = mi_page_heap(page); + #if MI_STAT>0 + mi_heap_stat_counter_increase(heap, pages_retire, 1); + #endif + page->retire_expire = (bsize <= MI_SMALL_MAX_OBJ_SIZE ? 
MI_RETIRE_CYCLES : MI_RETIRE_CYCLES/4); mi_assert_internal(pq >= heap->pages); const size_t index = pq - heap->pages; mi_assert_internal(index < MI_BIN_FULL && index < MI_BIN_HUGE); if (index < heap->page_retired_min) heap->page_retired_min = index; if (index > heap->page_retired_max) heap->page_retired_max = index; mi_assert_internal(mi_page_all_free(page)); - return; // dont't free after all + return; // don't free after all } } - _mi_page_free(page, pq, false); + #endif + _mi_page_free(page, pq); } // free retired pages: we don't need to look at the entire queues @@ -454,7 +455,7 @@ void _mi_heap_collect_retired(mi_heap_t* heap, bool force) { if (mi_page_all_free(page)) { page->retire_expire--; if (force || page->retire_expire == 0) { - _mi_page_free(pq->first, pq, force); + _mi_page_free(pq->first, pq); } else { // keep retired, update min/max @@ -471,6 +472,29 @@ void _mi_heap_collect_retired(mi_heap_t* heap, bool force) { heap->page_retired_max = max; } +/* +static void mi_heap_collect_full_pages(mi_heap_t* heap) { + // note: normally full pages get immediately abandoned and the full queue is always empty + // this path is only used if abandoning is disabled due to a destroy-able heap or options + // set by the user. + mi_page_queue_t* pq = &heap->pages[MI_BIN_FULL]; + for (mi_page_t* page = pq->first; page != NULL; ) { + mi_page_t* next = page->next; // get next in case we free the page + _mi_page_free_collect(page, false); // register concurrent free's + // no longer full? + if (!mi_page_is_full(page)) { + if (mi_page_all_free(page)) { + _mi_page_free(page, pq); + } + else { + _mi_page_unfull(page); + } + } + page = next; + } +} +*/ + /* ----------------------------------------------------------- Initialize the initial free list in a page. 
@@ -484,13 +508,13 @@ void _mi_heap_collect_retired(mi_heap_t* heap, bool force) { static void mi_page_free_list_extend_secure(mi_heap_t* const heap, mi_page_t* const page, const size_t bsize, const size_t extend, mi_stats_t* const stats) { MI_UNUSED(stats); - #if (MI_SECURE<=2) + #if (MI_SECURE<3) mi_assert_internal(page->free == NULL); mi_assert_internal(page->local_free == NULL); #endif mi_assert_internal(page->capacity + extend <= page->reserved); mi_assert_internal(bsize == mi_page_block_size(page)); - void* const page_area = _mi_page_start(_mi_page_segment(page), page, NULL); + void* const page_area = mi_page_start(page); // initialize a randomized free list // set up `slice_count` slices to alternate between @@ -542,13 +566,13 @@ static void mi_page_free_list_extend_secure(mi_heap_t* const heap, mi_page_t* co static mi_decl_noinline void mi_page_free_list_extend( mi_page_t* const page, const size_t bsize, const size_t extend, mi_stats_t* const stats) { MI_UNUSED(stats); - #if (MI_SECURE <= 2) + #if (MI_SECURE<3) mi_assert_internal(page->free == NULL); mi_assert_internal(page->local_free == NULL); #endif mi_assert_internal(page->capacity + extend <= page->reserved); mi_assert_internal(bsize == mi_page_block_size(page)); - void* const page_area = _mi_page_start(_mi_page_segment(page), page, NULL ); + void* const page_area = mi_page_start(page); mi_block_t* const start = mi_page_block_at(page, page_area, bsize, page->capacity); @@ -570,7 +594,7 @@ static mi_decl_noinline void mi_page_free_list_extend( mi_page_t* const page, co ----------------------------------------------------------- */ #define MI_MAX_EXTEND_SIZE (4*1024) // heuristic, one OS page seems to work well. 
-#if (MI_SECURE>0) +#if (MI_SECURE>=3) #define MI_MIN_EXTEND (8*MI_SECURE) // extend at least by this many #else #define MI_MIN_EXTEND (1) @@ -581,29 +605,31 @@ static mi_decl_noinline void mi_page_free_list_extend( mi_page_t* const page, co // Note: we also experimented with "bump" allocation on the first // allocations but this did not speed up any benchmark (due to an // extra test in malloc? or cache effects?) -static void mi_page_extend_free(mi_heap_t* heap, mi_page_t* page, mi_tld_t* tld) { - MI_UNUSED(tld); +static bool mi_page_extend_free(mi_heap_t* heap, mi_page_t* page) { mi_assert_expensive(mi_page_is_valid_init(page)); - #if (MI_SECURE<=2) + #if (MI_SECURE<3) mi_assert(page->free == NULL); mi_assert(page->local_free == NULL); - if (page->free != NULL) return; + if (page->free != NULL) return true; #endif - if (page->capacity >= page->reserved) return; + if (page->capacity >= page->reserved) return true; size_t page_size; - _mi_page_start(_mi_page_segment(page), page, &page_size); - mi_stat_counter_increase(tld->stats.pages_extended, 1); + //uint8_t* page_start = + mi_page_area(page, &page_size); + #if MI_STAT>0 + mi_heap_stat_counter_increase(heap, pages_extended, 1); + #endif // calculate the extend count - const size_t bsize = (page->xblock_size < MI_HUGE_BLOCK_SIZE ? page->xblock_size : page_size); - size_t extend = page->reserved - page->capacity; + const size_t bsize = mi_page_block_size(page); + size_t extend = (size_t)page->reserved - page->capacity; mi_assert_internal(extend > 0); - size_t max_extend = (bsize >= MI_MAX_EXTEND_SIZE ? MI_MIN_EXTEND : MI_MAX_EXTEND_SIZE/(uint32_t)bsize); + size_t max_extend = (bsize >= MI_MAX_EXTEND_SIZE ? MI_MIN_EXTEND : MI_MAX_EXTEND_SIZE/bsize); if (max_extend < MI_MIN_EXTEND) { max_extend = MI_MIN_EXTEND; } mi_assert_internal(max_extend > 0); - + if (extend > max_extend) { // ensure we don't touch memory beyond the page to reduce page commit. // the `lean` benchmark tests this. 
Going from 1 to 8 increases rss by 50%. @@ -613,70 +639,76 @@ static void mi_page_extend_free(mi_heap_t* heap, mi_page_t* page, mi_tld_t* tld) mi_assert_internal(extend > 0 && extend + page->capacity <= page->reserved); mi_assert_internal(extend < (1UL<<16)); + // commit on demand? + if (page->slice_committed > 0) { + const size_t needed_size = (page->capacity + extend)*bsize; + const size_t needed_commit = _mi_align_up( mi_page_slice_offset_of(page, needed_size), MI_PAGE_MIN_COMMIT_SIZE ); + if (needed_commit > page->slice_committed) { + mi_assert_internal(((needed_commit - page->slice_committed) % _mi_os_page_size()) == 0); + if (!_mi_os_commit(mi_page_slice_start(page) + page->slice_committed, needed_commit - page->slice_committed, NULL)) { + return false; + } + page->slice_committed = needed_commit; + } + } + // and append the extend the free list - if (extend < MI_MIN_SLICES || MI_SECURE==0) { //!mi_option_is_enabled(mi_option_secure)) { - mi_page_free_list_extend(page, bsize, extend, &tld->stats ); + if (extend < MI_MIN_SLICES || MI_SECURE<3) { //!mi_option_is_enabled(mi_option_secure)) { + mi_page_free_list_extend(page, bsize, extend, &heap->tld->stats ); } else { - mi_page_free_list_extend_secure(heap, page, bsize, extend, &tld->stats); + mi_page_free_list_extend_secure(heap, page, bsize, extend, &heap->tld->stats); } // enable the new free list page->capacity += (uint16_t)extend; - mi_stat_increase(tld->stats.page_committed, extend * bsize); - - // extension into zero initialized memory preserves the zero'd free list - if (!page->is_zero_init) { - page->is_zero = false; - } + #if MI_STAT>0 + mi_heap_stat_increase(heap, page_committed, extend * bsize); + #endif mi_assert_expensive(mi_page_is_valid_init(page)); + return true; } -// Initialize a fresh page -static void mi_page_init(mi_heap_t* heap, mi_page_t* page, size_t block_size, mi_tld_t* tld) { +// Initialize a fresh page (that is already partially initialized) +mi_decl_nodiscard bool 
_mi_page_init(mi_heap_t* heap, mi_page_t* page) { mi_assert(page != NULL); - mi_segment_t* segment = _mi_page_segment(page); - mi_assert(segment != NULL); - mi_assert_internal(block_size > 0); - // set fields mi_page_set_heap(page, heap); - page->xblock_size = (block_size < MI_HUGE_BLOCK_SIZE ? (uint32_t)block_size : MI_HUGE_BLOCK_SIZE); // initialize before _mi_segment_page_start + size_t page_size; - const void* page_start = _mi_segment_page_start(segment, page, &page_size); - MI_UNUSED(page_start); + uint8_t* page_start = mi_page_area(page, &page_size); MI_UNUSED(page_start); mi_track_mem_noaccess(page_start,page_size); - mi_assert_internal(mi_page_block_size(page) <= page_size); - mi_assert_internal(page_size <= page->slice_count*MI_SEGMENT_SLICE_SIZE); - mi_assert_internal(page_size / block_size < (1L<<16)); - page->reserved = (uint16_t)(page_size / block_size); - #ifdef MI_ENCODE_FREELIST + mi_assert_internal(page_size / mi_page_block_size(page) < (1L<<16)); + mi_assert_internal(page->reserved > 0); + #if (MI_PADDING || MI_ENCODE_FREELIST) page->keys[0] = _mi_heap_random_next(heap); page->keys[1] = _mi_heap_random_next(heap); #endif - #if MI_DEBUG > 0 - page->is_zero = false; // ensure in debug mode we initialize with MI_DEBUG_UNINIT, see issue #501 - #else - page->is_zero = page->is_zero_init; + #if MI_DEBUG>2 + if (page->memid.initially_zero) { + mi_track_mem_defined(page->page_start, mi_page_committed(page)); + mi_assert_expensive(mi_mem_is_zero(page_start, mi_page_committed(page))); + } #endif - mi_assert_internal(page->is_committed); - mi_assert_internal(!page->is_reset); mi_assert_internal(page->capacity == 0); mi_assert_internal(page->free == NULL); mi_assert_internal(page->used == 0); - mi_assert_internal(page->xthread_free == 0); + mi_assert_internal(mi_page_is_owned(page)); + mi_assert_internal(page->xthread_free == 1); mi_assert_internal(page->next == NULL); mi_assert_internal(page->prev == NULL); mi_assert_internal(page->retire_expire == 0); 
mi_assert_internal(!mi_page_has_aligned(page)); - #if (MI_ENCODE_FREELIST) + #if (MI_PADDING || MI_ENCODE_FREELIST) mi_assert_internal(page->keys[0] != 0); mi_assert_internal(page->keys[1] != 0); #endif + mi_assert_internal(page->block_size_shift == 0 || (mi_page_block_size(page) == ((size_t)1 << page->block_size_shift))); mi_assert_expensive(mi_page_is_valid_init(page)); // initialize an initial free list - mi_page_extend_free(heap,page,tld); + if (!mi_page_extend_free(heap,page)) return false; mi_assert(mi_page_immediate_available(page)); + return true; } @@ -685,81 +717,139 @@ static void mi_page_init(mi_heap_t* heap, mi_page_t* page, size_t block_size, mi -------------------------------------------------------------*/ // Find a page with free blocks of `page->block_size`. -static mi_page_t* mi_page_queue_find_free_ex(mi_heap_t* heap, mi_page_queue_t* pq, bool first_try) +static mi_decl_noinline mi_page_t* mi_page_queue_find_free_ex(mi_heap_t* heap, mi_page_queue_t* pq, bool first_try) { // search through the pages in "next fit" order size_t count = 0; + long candidate_limit = 0; // we reset this on the first candidate to limit the search + long page_full_retain = (pq->block_size > MI_SMALL_MAX_OBJ_SIZE ? 0 : heap->page_full_retain); // only retain small pages + mi_page_t* page_candidate = NULL; // a page with free space mi_page_t* page = pq->first; + while (page != NULL) { - mi_page_t* next = page->next; // remember next + mi_page_t* next = page->next; // remember next (as this page can move to another queue) count++; + candidate_limit--; - // 0. collect freed blocks by us and other threads - _mi_page_free_collect(page, false); + // search up to N pages for a best candidate - // 1. if the page contains free blocks, we are done - if (mi_page_immediate_available(page)) { - break; // pick this one + // is the local free list non-empty? 
+ bool immediate_available = mi_page_immediate_available(page); + if (!immediate_available) { + // collect freed blocks by us and other threads to we get a proper use count + _mi_page_free_collect(page, false); + immediate_available = mi_page_immediate_available(page); } - // 2. Try to extend - if (page->capacity < page->reserved) { - mi_page_extend_free(heap, page, heap->tld); - mi_assert_internal(mi_page_immediate_available(page)); - break; + // if the page is completely full, move it to the `mi_pages_full` + // queue so we don't visit long-lived pages too often. + if (!immediate_available && !mi_page_is_expandable(page)) { + page_full_retain--; + if (page_full_retain < 0) { + mi_assert_internal(!mi_page_is_in_full(page) && !mi_page_immediate_available(page)); + mi_page_to_full(page, pq); + } + } + else { + // the page has free space, make it a candidate + // we prefer non-expandable pages with high usage as candidates (to reduce commit, and increase chances of free-ing up pages) + if (page_candidate == NULL) { + page_candidate = page; + candidate_limit = _mi_option_get_fast(mi_option_page_max_candidates); + } + else if (mi_page_all_free(page_candidate)) { + _mi_page_free(page_candidate, pq); + page_candidate = page; + } + // prefer to reuse fuller pages (in the hope the less used page gets freed) + else if (page->used >= page_candidate->used && !mi_page_is_mostly_used(page)) { // && !mi_page_is_expandable(page)) { + page_candidate = page; + } + // if we find a non-expandable candidate, or searched for N pages, return with the best candidate + if (immediate_available || candidate_limit <= 0) { + mi_assert_internal(page_candidate!=NULL); + break; + } + } + + #if 0 + // first-fit algorithm without candidates + // If the page contains free blocks, we are done + if (mi_page_immediate_available(page) || mi_page_is_expandable(page)) { + break; // pick this one } - // 3. 
If the page is completely full, move it to the `mi_pages_full` + // If the page is completely full, move it to the `mi_pages_full` // queue so we don't visit long-lived pages too often. mi_assert_internal(!mi_page_is_in_full(page) && !mi_page_immediate_available(page)); mi_page_to_full(page, pq); + #endif page = next; } // for each page - mi_heap_stat_counter_increase(heap, searches, count); + mi_heap_stat_counter_increase(heap, page_searches, count); + + // set the page to the best candidate + if (page_candidate != NULL) { + page = page_candidate; + } + if (page != NULL) { + if (!mi_page_immediate_available(page)) { + mi_assert_internal(mi_page_is_expandable(page)); + if (!mi_page_extend_free(heap, page)) { + page = NULL; // failed to extend + } + } + mi_assert_internal(page == NULL || mi_page_immediate_available(page)); + } if (page == NULL) { - _mi_heap_collect_retired(heap, false); // perhaps make a page available? + _mi_heap_collect_retired(heap, false); // perhaps make a page available page = mi_page_fresh(heap, pq); + mi_assert_internal(page == NULL || mi_page_immediate_available(page)); if (page == NULL && first_try) { // out-of-memory _or_ an abandoned page with free blocks was reclaimed, try once again - page = mi_page_queue_find_free_ex(heap, pq, false); + page = mi_page_queue_find_free_ex(heap, pq, false); + mi_assert_internal(page == NULL || mi_page_immediate_available(page)); } } else { - mi_assert(pq->first == page); + mi_assert_internal(page == NULL || mi_page_immediate_available(page)); + // move the page to the front of the queue + mi_page_queue_move_to_front(heap, pq, page); page->retire_expire = 0; + // _mi_heap_collect_retired(heap, false); // update retire counts; note: increases rss on MemoryLoad bench so don't do this } mi_assert_internal(page == NULL || mi_page_immediate_available(page)); + + return page; } // Find a page with free blocks of `size`. 
-static inline mi_page_t* mi_find_free_page(mi_heap_t* heap, size_t size) { - mi_page_queue_t* pq = mi_page_queue(heap,size); +static mi_page_t* mi_find_free_page(mi_heap_t* heap, mi_page_queue_t* pq) { + // mi_page_queue_t* pq = mi_page_queue(heap, size); + mi_assert_internal(!mi_page_queue_is_huge(pq)); + + // check the first page: we even do this with candidate search or otherwise we re-search every time mi_page_t* page = pq->first; - if (page != NULL) { - #if (MI_SECURE>=3) // in secure mode, we extend half the time to increase randomness + if mi_likely(page != NULL && mi_page_immediate_available(page)) { + #if (MI_SECURE>=3) // in secure mode, we extend half the time to increase randomness if (page->capacity < page->reserved && ((_mi_heap_random_next(heap) & 1) == 1)) { - mi_page_extend_free(heap, page, heap->tld); + mi_page_extend_free(heap, page); mi_assert_internal(mi_page_immediate_available(page)); } - else - #endif - { - _mi_page_free_collect(page,false); - } - - if (mi_page_immediate_available(page)) { - page->retire_expire = 0; - return page; // fast path - } + #endif + page->retire_expire = 0; + return page; // fast path + } + else { + return mi_page_queue_find_free_ex(heap, pq, true); } - return mi_page_queue_find_free_ex(heap, pq, true); } @@ -792,40 +882,30 @@ void mi_register_deferred_free(mi_deferred_free_fun* fn, void* arg) mi_attr_noex General allocation ----------------------------------------------------------- */ -// Large and huge page allocation. -// Huge pages are allocated directly without being in a queue. -// Because huge pages contain just one block, and the segment contains -// just that page, we always treat them as abandoned and any thread -// that frees the block can free the whole page and segment directly. 
-static mi_page_t* mi_large_huge_page_alloc(mi_heap_t* heap, size_t size) { - size_t block_size = _mi_os_good_alloc_size(size); - mi_assert_internal(mi_bin(block_size) == MI_BIN_HUGE); - bool is_huge = (block_size > MI_LARGE_OBJ_SIZE_MAX); - mi_page_queue_t* pq = (is_huge ? NULL : mi_page_queue(heap, block_size)); - mi_page_t* page = mi_page_fresh_alloc(heap, pq, block_size); +// Huge pages contain just one block, and the segment contains just that page. +// Huge pages are also use if the requested alignment is very large (> MI_BLOCK_ALIGNMENT_MAX) +// so their size is not always `> MI_LARGE_OBJ_SIZE_MAX`. +static mi_page_t* mi_huge_page_alloc(mi_heap_t* heap, size_t size, size_t page_alignment, mi_page_queue_t* pq) { + const size_t block_size = _mi_os_good_alloc_size(size); + // mi_assert_internal(mi_bin(block_size) == MI_BIN_HUGE || page_alignment > 0); + #if MI_HUGE_PAGE_ABANDON + #error todo. + #else + // mi_page_queue_t* pq = mi_page_queue(heap, MI_LARGE_MAX_OBJ_SIZE+1); // always in the huge queue regardless of the block size + mi_assert_internal(mi_page_queue_is_huge(pq)); + #endif + mi_page_t* page = mi_page_fresh_alloc(heap, pq, block_size, page_alignment); if (page != NULL) { + mi_assert_internal(mi_page_block_size(page) >= size); mi_assert_internal(mi_page_immediate_available(page)); - - if (pq == NULL) { - // huge pages are directly abandoned - mi_assert_internal(_mi_page_segment(page)->kind == MI_SEGMENT_HUGE); - mi_assert_internal(_mi_page_segment(page)->used==1); - mi_assert_internal(_mi_page_segment(page)->thread_id==0); // abandoned, not in the huge queue - mi_page_set_heap(page, NULL); - } - else { - mi_assert_internal(_mi_page_segment(page)->kind != MI_SEGMENT_HUGE); - } - - const size_t bsize = mi_page_usable_block_size(page); // note: not `mi_page_block_size` to account for padding - if (bsize <= MI_LARGE_OBJ_SIZE_MAX) { - mi_heap_stat_increase(heap, large, bsize); - mi_heap_stat_counter_increase(heap, large_count, 1); - } - else { - 
mi_heap_stat_increase(heap, huge, bsize); - mi_heap_stat_counter_increase(heap, huge_count, 1); - } + mi_assert_internal(mi_page_is_huge(page)); + mi_assert_internal(mi_page_is_singleton(page)); + #if MI_HUGE_PAGE_ABANDON + mi_assert_internal(mi_page_is_abandoned(page)); + mi_page_set_heap(page, NULL); + #endif + mi_heap_stat_increase(heap, malloc_huge, mi_page_block_size(page)); + mi_heap_stat_counter_increase(heap, malloc_huge_count, 1); } return page; } @@ -833,70 +913,90 @@ static mi_page_t* mi_large_huge_page_alloc(mi_heap_t* heap, size_t size) { // Allocate a page // Note: in debug mode the size includes MI_PADDING_SIZE and might have overflowed. -static mi_page_t* mi_find_page(mi_heap_t* heap, size_t size) mi_attr_noexcept { +static mi_page_t* mi_find_page(mi_heap_t* heap, size_t size, size_t huge_alignment) mi_attr_noexcept { + const size_t req_size = size - MI_PADDING_SIZE; // correct for padding_size in case of an overflow on `size` + if mi_unlikely(req_size > MI_MAX_ALLOC_SIZE) { + _mi_error_message(EOVERFLOW, "allocation request is too large (%zu bytes)\n", req_size); + return NULL; + } + mi_page_queue_t* pq = mi_page_queue(heap, (huge_alignment > 0 ? MI_LARGE_MAX_OBJ_SIZE+1 : size)); // huge allocation? 
- const size_t req_size = size - MI_PADDING_SIZE; // correct for padding_size in case of an overflow on `size` - if mi_unlikely(req_size > (MI_MEDIUM_OBJ_SIZE_MAX - MI_PADDING_SIZE)) { - if mi_unlikely(req_size > PTRDIFF_MAX) { // we don't allocate more than PTRDIFF_MAX (see ) - _mi_error_message(EOVERFLOW, "allocation request is too large (%zu bytes)\n", req_size); - return NULL; - } - else { - return mi_large_huge_page_alloc(heap,size); - } + if mi_unlikely(mi_page_queue_is_huge(pq) || req_size > MI_MAX_ALLOC_SIZE) { + return mi_huge_page_alloc(heap,size,huge_alignment,pq); } else { // otherwise find a page with free blocks in our size segregated queues + #if MI_PADDING mi_assert_internal(size >= MI_PADDING_SIZE); - return mi_find_free_page(heap, size); + #endif + return mi_find_free_page(heap, pq); } } + // Generic allocation routine if the fast path (`alloc.c:mi_page_malloc`) does not succeed. // Note: in debug mode the size includes MI_PADDING_SIZE and might have overflowed. -void* _mi_malloc_generic(mi_heap_t* heap, size_t size, bool zero) mi_attr_noexcept +// The `huge_alignment` is normally 0 but is set to a multiple of MI_SLICE_SIZE for +// very large requested alignments in which case we use a huge singleton page. 
+void* _mi_malloc_generic(mi_heap_t* heap, size_t size, bool zero, size_t huge_alignment) mi_attr_noexcept { mi_assert_internal(heap != NULL); // initialize if necessary if mi_unlikely(!mi_heap_is_initialized(heap)) { - mi_thread_init(); // calls `_mi_heap_init` in turn - heap = mi_get_default_heap(); + heap = mi_heap_get_default(); // calls mi_thread_init if mi_unlikely(!mi_heap_is_initialized(heap)) { return NULL; } } mi_assert_internal(mi_heap_is_initialized(heap)); - // call potential deferred free routines - _mi_deferred_free(heap, false); - - // free delayed frees from other threads (but skip contended ones) - _mi_heap_delayed_free_partial(heap); + // do administrative tasks every N generic mallocs + if mi_unlikely(++heap->generic_count >= 1000) { + heap->generic_collect_count += heap->generic_count; + heap->generic_count = 0; + // call potential deferred free routines + _mi_deferred_free(heap, false); + + // collect every once in a while (10000 by default) + const long generic_collect = mi_option_get_clamp(mi_option_generic_collect, 1, 1000000L); + if (heap->generic_collect_count >= generic_collect) { + heap->generic_collect_count = 0; + mi_heap_collect(heap, false /* force? */); + } + } // find (or allocate) a page of the right size - mi_page_t* page = mi_find_page(heap, size); + mi_page_t* page = mi_find_page(heap, size, huge_alignment); if mi_unlikely(page == NULL) { // first time out of memory, try to collect and retry the allocation once more - mi_heap_collect(heap, true /* force */); - page = mi_find_page(heap, size); + mi_heap_collect(heap, true /* force? 
*/); + page = mi_find_page(heap, size, huge_alignment); } if mi_unlikely(page == NULL) { // out of memory - const size_t req_size = size - MI_PADDING_SIZE; // correct for padding_size in case of an overflow on `size` + const size_t req_size = size - MI_PADDING_SIZE; // correct for padding_size in case of an overflow on `size` _mi_error_message(ENOMEM, "unable to allocate memory (%zu bytes)\n", req_size); return NULL; } mi_assert_internal(mi_page_immediate_available(page)); mi_assert_internal(mi_page_block_size(page) >= size); + mi_assert_internal(_mi_is_aligned(page, MI_PAGE_ALIGN)); + mi_assert_internal(_mi_ptr_page(page)==page); // and try again, this time succeeding! (i.e. this should never recurse through _mi_page_malloc) - if mi_unlikely(zero && page->xblock_size == 0) { + void* p; + if mi_unlikely(zero && mi_page_is_huge(page)) { // note: we cannot call _mi_page_malloc with zeroing for huge blocks; we zero it afterwards in that case. - void* p = _mi_page_malloc(heap, page, size, false); + p = _mi_page_malloc(heap, page, size); mi_assert_internal(p != NULL); _mi_memzero_aligned(p, mi_page_usable_block_size(page)); - return p; } else { - return _mi_page_malloc(heap, page, size, zero); + p = _mi_page_malloc_zero(heap, page, size, zero); + mi_assert_internal(p != NULL); + } + // move singleton pages to the full queue + if (page->reserved == page->used) { + mi_page_to_full(page, mi_page_queue_of(page)); } + return p; } diff --git a/depends/mimalloc/src/prim/emscripten/prim.c b/depends/mimalloc/src/prim/emscripten/prim.c new file mode 100644 index 000000000000..9ddec5df5415 --- /dev/null +++ b/depends/mimalloc/src/prim/emscripten/prim.c @@ -0,0 +1,252 @@ +/* ---------------------------------------------------------------------------- +Copyright (c) 2018-2025, Microsoft Research, Daan Leijen, Alon Zakai +This is free software; you can redistribute it and/or modify it under the +terms of the MIT license. 
A copy of the license can be found in the file +"LICENSE" at the root of this distribution. +-----------------------------------------------------------------------------*/ + +// This file is included in `src/prim/prim.c` + +#include "mimalloc.h" +#include "mimalloc/internal.h" +#include "mimalloc/atomic.h" +#include "mimalloc/prim.h" + +// Design +// ====== +// +// mimalloc is built on top of emmalloc. emmalloc is a minimal allocator on top +// of sbrk. The reason for having three layers here is that we want mimalloc to +// be able to allocate and release system memory properly, the same way it would +// when using VirtualAlloc on Windows or mmap on POSIX, and sbrk is too limited. +// Specifically, sbrk can only go up and down, and not "skip" over regions, and +// so we end up either never freeing memory to the system, or we can get stuck +// with holes. +// +// Atm wasm generally does *not* free memory back the system: once grown, we do +// not shrink back down (https://github.com/WebAssembly/design/issues/1397). +// However, that is expected to improve +// (https://github.com/WebAssembly/memory-control/blob/main/proposals/memory-control/Overview.md) +// and so we do not want to bake those limitations in here. +// +// Even without that issue, we want our system allocator to handle holes, that +// is, it should merge freed regions and allow allocating new content there of +// the full size, etc., so that we do not waste space. That means that the +// system allocator really does need to handle the general problem of allocating +// and freeing variable-sized chunks of memory in a random order, like malloc/ +// free do. And so it makes sense to layer mimalloc on top of such an +// implementation. +// +// emmalloc makes sense for the lower level because it is small and simple while +// still fully handling merging of holes etc. 
It is not the most efficient +// allocator, but our assumption is that mimalloc needs to be fast while the +// system allocator underneath it is called much less frequently. +// + +//--------------------------------------------- +// init +//--------------------------------------------- + +void _mi_prim_mem_init( mi_os_mem_config_t* config) { + config->page_size = 64*MI_KiB; // WebAssembly has a fixed page size: 64KiB + config->alloc_granularity = 16; + config->has_overcommit = false; + config->has_partial_free = false; + config->has_virtual_reserve = false; +} + +extern void emmalloc_free(void*); + +int _mi_prim_free(void* addr, size_t size) { + if (size==0) return 0; + emmalloc_free(addr); + return 0; +} + + +//--------------------------------------------- +// Allocation +//--------------------------------------------- + +extern void* emmalloc_memalign(size_t alignment, size_t size); + +// Note: the `try_alignment` is just a hint and the returned pointer is not guaranteed to be aligned. +int _mi_prim_alloc(void* hint_addr, size_t size, size_t try_alignment, bool commit, bool allow_large, bool* is_large, bool* is_zero, void** addr) { + MI_UNUSED(try_alignment); MI_UNUSED(allow_large); MI_UNUSED(commit); MI_UNUSED(hint_addr); + *is_large = false; + // TODO: Track the highest address ever seen; first uses of it are zeroes. + // That assumes no one else uses sbrk but us (they could go up, + // scribble, and then down), but we could assert on that perhaps. + *is_zero = false; + // emmalloc has a minimum alignment size. 
+ #define MIN_EMMALLOC_ALIGN 8 + if (try_alignment < MIN_EMMALLOC_ALIGN) { + try_alignment = MIN_EMMALLOC_ALIGN; + } + void* p = emmalloc_memalign(try_alignment, size); + *addr = p; + if (p == 0) { + return ENOMEM; + } + return 0; +} + + +//--------------------------------------------- +// Commit/Reset +//--------------------------------------------- + +int _mi_prim_commit(void* addr, size_t size, bool* is_zero) { + MI_UNUSED(addr); MI_UNUSED(size); + // See TODO above. + *is_zero = false; + return 0; +} + +int _mi_prim_decommit(void* addr, size_t size, bool* needs_recommit) { + MI_UNUSED(addr); MI_UNUSED(size); + *needs_recommit = false; + return 0; +} + +int _mi_prim_reset(void* addr, size_t size) { + MI_UNUSED(addr); MI_UNUSED(size); + return 0; +} + +int _mi_prim_reuse(void* addr, size_t size) { + MI_UNUSED(addr); MI_UNUSED(size); + return 0; +} + +int _mi_prim_protect(void* addr, size_t size, bool protect) { + MI_UNUSED(addr); MI_UNUSED(size); MI_UNUSED(protect); + return 0; +} + + +//--------------------------------------------- +// Huge pages and NUMA nodes +//--------------------------------------------- + +int _mi_prim_alloc_huge_os_pages(void* hint_addr, size_t size, int numa_node, bool* is_zero, void** addr) { + MI_UNUSED(hint_addr); MI_UNUSED(size); MI_UNUSED(numa_node); + *is_zero = true; + *addr = NULL; + return ENOSYS; +} + +size_t _mi_prim_numa_node(void) { + return 0; +} + +size_t _mi_prim_numa_node_count(void) { + return 1; +} + + +//---------------------------------------------------------------- +// Clock +//---------------------------------------------------------------- + +#include + +mi_msecs_t _mi_prim_clock_now(void) { + return emscripten_date_now(); +} + + +//---------------------------------------------------------------- +// Process info +//---------------------------------------------------------------- + +void _mi_prim_process_info(mi_process_info_t* pinfo) +{ + // use defaults + MI_UNUSED(pinfo); +} + + 
+//---------------------------------------------------------------- +// Output +//---------------------------------------------------------------- + +#include + +void _mi_prim_out_stderr( const char* msg) { + emscripten_console_error(msg); +} + + +//---------------------------------------------------------------- +// Environment +//---------------------------------------------------------------- + +bool _mi_prim_getenv(const char* name, char* result, size_t result_size) { + // For code size reasons, do not support environ customization for now. + MI_UNUSED(name); + MI_UNUSED(result); + MI_UNUSED(result_size); + return false; +} + + +//---------------------------------------------------------------- +// Random +//---------------------------------------------------------------- + +bool _mi_prim_random_buf(void* buf, size_t buf_len) { + int err = getentropy(buf, buf_len); + return !err; +} + + +//---------------------------------------------------------------- +// Thread init/done +//---------------------------------------------------------------- + +#if defined(MI_USE_PTHREADS) + +// use pthread local storage keys to detect thread ending +// (and used with MI_TLS_PTHREADS for the default heap) +pthread_key_t _mi_heap_default_key = (pthread_key_t)(-1); + +static void mi_pthread_done(void* value) { + if (value!=NULL) { + _mi_thread_done((mi_heap_t*)value); + } +} + +void _mi_prim_thread_init_auto_done(void) { + mi_assert_internal(_mi_heap_default_key == (pthread_key_t)(-1)); + pthread_key_create(&_mi_heap_default_key, &mi_pthread_done); +} + +void _mi_prim_thread_done_auto_done(void) { + // nothing to do +} + +void _mi_prim_thread_associate_default_heap(mi_heap_t* heap) { + if (_mi_heap_default_key != (pthread_key_t)(-1)) { // can happen during recursive invocation on freeBSD + pthread_setspecific(_mi_heap_default_key, heap); + } +} + +#else + +void _mi_prim_thread_init_auto_done(void) { + // nothing +} + +void _mi_prim_thread_done_auto_done(void) { + // nothing +} + 
+void _mi_prim_thread_associate_default_heap(mi_heap_t* heap) { + MI_UNUSED(heap); +} +#endif + +bool _mi_prim_thread_is_in_threadpool(void) { + return false; +} diff --git a/depends/mimalloc/src/alloc-override-osx.c b/depends/mimalloc/src/prim/osx/alloc-override-zone.c similarity index 93% rename from depends/mimalloc/src/alloc-override-osx.c rename to depends/mimalloc/src/prim/osx/alloc-override-zone.c index ba2313a2a06a..a8f5fbc68268 100644 --- a/depends/mimalloc/src/alloc-override-osx.c +++ b/depends/mimalloc/src/prim/osx/alloc-override-zone.c @@ -6,7 +6,7 @@ terms of the MIT license. A copy of the license can be found in the file -----------------------------------------------------------------------------*/ #include "mimalloc.h" -#include "mimalloc-internal.h" +#include "mimalloc/internal.h" #if defined(MI_MALLOC_OVERRIDE) @@ -19,8 +19,8 @@ terms of the MIT license. A copy of the license can be found in the file This is done through the malloc zone interface. It seems to be most robust in combination with interposing though or otherwise we may get zone errors as there are could - be allocations done by the time we take over the - zone. + be allocations done by the time we take over the + zone. 
------------------------------------------------------ */ #include @@ -64,7 +64,8 @@ static void* zone_valloc(malloc_zone_t* zone, size_t size) { static void zone_free(malloc_zone_t* zone, void* p) { MI_UNUSED(zone); - mi_cfree(p); + // mi_cfree(p); // checked free as `zone_free` may be called with invalid pointers + mi_free(p); // with the page_map and pagemap_commit=1 we can use the regular free } static void* zone_realloc(malloc_zone_t* zone, void* p, size_t newsize) { @@ -195,7 +196,7 @@ static malloc_introspection_t mi_introspect = { .log = &intro_log, .force_lock = &intro_force_lock, .force_unlock = &intro_force_unlock, -#if defined(MAC_OS_X_VERSION_10_6) && (MAC_OS_X_VERSION_MAX_ALLOWED >= MAC_OS_X_VERSION_10_6) +#if defined(MAC_OS_X_VERSION_10_6) && (MAC_OS_X_VERSION_MAX_ALLOWED >= MAC_OS_X_VERSION_10_6) && !defined(__ppc__) .statistics = &intro_statistics, .zone_locked = &intro_zone_locked, #endif @@ -215,8 +216,8 @@ static malloc_zone_t mi_malloc_zone = { .zone_name = "mimalloc", .batch_malloc = &zone_batch_malloc, .batch_free = &zone_batch_free, - .introspect = &mi_introspect, -#if defined(MAC_OS_X_VERSION_10_6) && (MAC_OS_X_VERSION_MAX_ALLOWED >= MAC_OS_X_VERSION_10_6) + .introspect = &mi_introspect, +#if defined(MAC_OS_X_VERSION_10_6) && (MAC_OS_X_VERSION_MAX_ALLOWED >= MAC_OS_X_VERSION_10_6) && !defined(__ppc__) #if defined(MAC_OS_X_VERSION_10_14) && (MAC_OS_X_VERSION_MAX_ALLOWED >= MAC_OS_X_VERSION_10_14) .version = 10, #else @@ -225,7 +226,9 @@ static malloc_zone_t mi_malloc_zone = { // switch to version 9+ on OSX 10.6 to support memalign. 
.memalign = &zone_memalign, .free_definite_size = &zone_free_definite_size, + #if defined(MAC_OS_X_VERSION_10_7) && (MAC_OS_X_VERSION_MAX_ALLOWED >= MAC_OS_X_VERSION_10_7) .pressure_relief = &zone_pressure_relief, + #endif #if defined(MAC_OS_X_VERSION_10_14) && (MAC_OS_X_VERSION_MAX_ALLOWED >= MAC_OS_X_VERSION_10_14) .claimed_address = &zone_claimed_address, #endif @@ -242,7 +245,7 @@ static malloc_zone_t mi_malloc_zone = { #if defined(MI_OSX_INTERPOSE) && defined(MI_SHARED_LIB_EXPORT) // ------------------------------------------------------ -// Override malloc_xxx and malloc_zone_xxx api's to use only +// Override malloc_xxx and malloc_zone_xxx api's to use only // our mimalloc zone. Since even the loader uses malloc // on macOS, this ensures that all allocations go through // mimalloc (as all calls are interposed). @@ -254,7 +257,7 @@ static malloc_zone_t mi_malloc_zone = { static inline malloc_zone_t* mi_get_default_zone(void) { static bool init; - if mi_unlikely(!init) { + if mi_unlikely(!init) { init = true; malloc_zone_register(&mi_malloc_zone); // by calling register we avoid a zone error on free (see ) } @@ -272,7 +275,7 @@ static malloc_zone_t* mi_malloc_create_zone(vm_size_t size, unsigned flags) { return mi_get_default_zone(); } -static malloc_zone_t* mi_malloc_default_zone (void) { +static malloc_zone_t* mi_malloc_default_zone (void) { return mi_get_default_zone(); } @@ -292,11 +295,11 @@ static kern_return_t mi_malloc_get_all_zones (task_t task, memory_reader_t mr, v return KERN_SUCCESS; } -static const char* mi_malloc_get_zone_name(malloc_zone_t* zone) { +static const char* mi_malloc_get_zone_name(malloc_zone_t* zone) { return (zone == NULL ? 
mi_malloc_zone.zone_name : zone->zone_name); } -static void mi_malloc_set_zone_name(malloc_zone_t* zone, const char* name) { +static void mi_malloc_set_zone_name(malloc_zone_t* zone, const char* name) { MI_UNUSED(zone); MI_UNUSED(name); } @@ -306,7 +309,7 @@ static int mi_malloc_jumpstart(uintptr_t cookie) { } static void mi__malloc_fork_prepare(void) { - // nothing + // nothing } static void mi__malloc_fork_parent(void) { // nothing @@ -367,13 +370,13 @@ __attribute__((used)) static const struct mi_interpose_s _mi_zone_interposes[] MI_INTERPOSE_MI(malloc_destroy_zone), MI_INTERPOSE_MI(malloc_get_all_zones), MI_INTERPOSE_MI(malloc_get_zone_name), - MI_INTERPOSE_MI(malloc_jumpstart), + MI_INTERPOSE_MI(malloc_jumpstart), MI_INTERPOSE_MI(malloc_printf), MI_INTERPOSE_MI(malloc_set_zone_name), MI_INTERPOSE_MI(_malloc_fork_child), MI_INTERPOSE_MI(_malloc_fork_parent), MI_INTERPOSE_MI(_malloc_fork_prepare), - + MI_INTERPOSE_ZONE(zone_batch_free), MI_INTERPOSE_ZONE(zone_batch_malloc), MI_INTERPOSE_ZONE(zone_calloc), @@ -416,11 +419,12 @@ static inline malloc_zone_t* mi_get_default_zone(void) } #if defined(__clang__) -__attribute__((constructor(0))) +__attribute__((constructor(101))) // highest priority #else -__attribute__((constructor)) // seems not supported by g++-11 on the M1 +__attribute__((constructor)) // priority level is not supported by gcc #endif -static void _mi_macos_override_malloc() { +__attribute__((used)) +static void _mi_macos_override_malloc(void) { malloc_zone_t* purgeable_zone = NULL; #if defined(MAC_OS_X_VERSION_10_6) && (MAC_OS_X_VERSION_MAX_ALLOWED >= MAC_OS_X_VERSION_10_6) diff --git a/depends/mimalloc/src/prim/osx/prim.c b/depends/mimalloc/src/prim/osx/prim.c new file mode 100644 index 000000000000..8a2f4e8aa473 --- /dev/null +++ b/depends/mimalloc/src/prim/osx/prim.c @@ -0,0 +1,9 @@ +/* ---------------------------------------------------------------------------- +Copyright (c) 2018-2023, Microsoft Research, Daan Leijen +This is free software; you 
can redistribute it and/or modify it under the +terms of the MIT license. A copy of the license can be found in the file +"LICENSE" at the root of this distribution. +-----------------------------------------------------------------------------*/ + +// We use the unix/prim.c with the mmap API on macOSX +#include "../unix/prim.c" diff --git a/depends/mimalloc/src/prim/prim.c b/depends/mimalloc/src/prim/prim.c new file mode 100644 index 000000000000..1864a3afcc1f --- /dev/null +++ b/depends/mimalloc/src/prim/prim.c @@ -0,0 +1,76 @@ +/* ---------------------------------------------------------------------------- +Copyright (c) 2018-2023, Microsoft Research, Daan Leijen +This is free software; you can redistribute it and/or modify it under the +terms of the MIT license. A copy of the license can be found in the file +"LICENSE" at the root of this distribution. +-----------------------------------------------------------------------------*/ + +// Select the implementation of the primitives +// depending on the OS. + +#if defined(_WIN32) +#include "windows/prim.c" // VirtualAlloc (Windows) + +#elif defined(__APPLE__) +#include "osx/prim.c" // macOSX (actually defers to mmap in unix/prim.c) + +#elif defined(__EMSCRIPTEN__) +#include "emscripten/prim.c" // emmalloc_*, + pthread support + +#elif defined(__wasi__) +#define MI_USE_SBRK +#include "wasi/prim.c" // memory-grow or sbrk (Wasm) + +#else +#include "unix/prim.c" // mmap() (Linux, macOSX, BSD, Illumnos, Haiku, DragonFly, etc.) 
+ +#endif + +// Generic process initialization +#ifndef MI_PRIM_HAS_PROCESS_ATTACH +#if defined(__GNUC__) || defined(__clang__) + // gcc,clang: use the constructor/destructor attribute + // which for both seem to run before regular constructors/destructors + #if defined(__clang__) + #define mi_attr_constructor __attribute__((constructor(101))) + #define mi_attr_destructor __attribute__((destructor(101))) + #else + #define mi_attr_constructor __attribute__((constructor)) + #define mi_attr_destructor __attribute__((destructor)) + #endif + static void mi_attr_constructor mi_process_attach(void) { + _mi_auto_process_init(); + } + static void mi_attr_destructor mi_process_detach(void) { + _mi_auto_process_done(); + } +#elif defined(__cplusplus) + // C++: use static initialization to detect process start/end + // This is not guaranteed to be first/last but the best we can generally do? + struct mi_init_done_t { + mi_init_done_t() { + _mi_auto_process_init(); + } + ~mi_init_done_t() { + _mi_auto_process_done(); + } + }; + static mi_init_done_t mi_init_done; + #else + #pragma message("define a way to call _mi_auto_process_init/done on your platform") +#endif +#endif + +// Generic allocator init/done callback +#ifndef MI_PRIM_HAS_ALLOCATOR_INIT +bool _mi_is_redirected(void) { + return false; +} +bool _mi_allocator_init(const char** message) { + if (message != NULL) { *message = NULL; } + return true; +} +void _mi_allocator_done(void) { + // nothing to do +} +#endif diff --git a/depends/mimalloc/src/prim/readme.md b/depends/mimalloc/src/prim/readme.md new file mode 100644 index 000000000000..380dd3a71784 --- /dev/null +++ b/depends/mimalloc/src/prim/readme.md @@ -0,0 +1,9 @@ +## Portability Primitives + +This is the portability layer where all primitives needed from the OS are defined. + +- `include/mimalloc/prim.h`: primitive portability API definition. 
+- `prim.c`: Selects one of `unix/prim.c`, `wasi/prim.c`, or `windows/prim.c` depending on the host platform + (and on macOS, `osx/prim.c` defers to `unix/prim.c`). + +Note: still work in progress, there may still be places in the sources that still depend on OS ifdef's. \ No newline at end of file diff --git a/depends/mimalloc/src/prim/unix/prim.c b/depends/mimalloc/src/prim/unix/prim.c new file mode 100644 index 000000000000..a25be2512ce1 --- /dev/null +++ b/depends/mimalloc/src/prim/unix/prim.c @@ -0,0 +1,973 @@ +/* ---------------------------------------------------------------------------- +Copyright (c) 2018-2025, Microsoft Research, Daan Leijen +This is free software; you can redistribute it and/or modify it under the +terms of the MIT license. A copy of the license can be found in the file +"LICENSE" at the root of this distribution. +-----------------------------------------------------------------------------*/ + +// This file is included in `src/prim/prim.c` + +#ifndef _DEFAULT_SOURCE +#define _DEFAULT_SOURCE // ensure mmap flags and syscall are defined +#endif + +#if defined(__sun) +// illumos provides new mman.h api when any of these are defined +// otherwise the old api based on caddr_t which predates the void pointers one. +// stock solaris provides only the former, chose to atomically to discard those +// flags only here rather than project wide tough. 
+#undef _XOPEN_SOURCE +#undef _POSIX_C_SOURCE +#endif + +#include "mimalloc.h" +#include "mimalloc/internal.h" +#include "mimalloc/prim.h" + +#include // mmap +#include // sysconf +#include // open, close, read, access +#include // getenv, arc4random_buf + +#if defined(__linux__) + #include + #include // THP disable, PR_SET_VMA + #include // sysinfo + #if defined(__GLIBC__) && !defined(PR_SET_VMA) + #include + #endif + #if defined(__GLIBC__) + #include // linux mmap flags + #else + #include + #endif +#elif defined(__APPLE__) + #include + #include + #if !defined(TARGET_OS_OSX) || TARGET_OS_OSX // see issue #879, used to be (!TARGET_IOS_IPHONE && !TARGET_IOS_SIMULATOR) + #include // VM_MAKE_TAG, VM_FLAGS_SUPERPAGE_SIZE_2MB, etc. + #endif + #if !defined(MAC_OS_X_VERSION_10_7) + #define MAC_OS_X_VERSION_10_7 1070 + #endif + #include +#elif defined(__FreeBSD__) || defined(__DragonFly__) + #include + #if __FreeBSD_version >= 1200000 + #include + #include + #endif + #include +#endif + +#if (defined(__linux__) && !defined(__ANDROID__)) || defined(__FreeBSD__) + #define MI_HAS_SYSCALL_H + #include +#endif + +#if !defined(MADV_DONTNEED) && defined(POSIX_MADV_DONTNEED) // QNX +#define MADV_DONTNEED POSIX_MADV_DONTNEED +#endif +#if !defined(MADV_FREE) && defined(POSIX_MADV_FREE) // QNX +#define MADV_FREE POSIX_MADV_FREE +#endif + +#define MI_UNIX_LARGE_PAGE_SIZE (2*MI_MiB) // TODO: can we query the OS for this? + +//------------------------------------------------------------------------------------ +// Use syscalls for some primitives to allow for libraries that override open/read/close etc. +// and do allocation themselves; using syscalls prevents recursion when mimalloc is +// still initializing (issue #713) +// Declare inline to avoid unused function warnings. 
+//------------------------------------------------------------------------------------ + +#if defined(MI_HAS_SYSCALL_H) && defined(SYS_open) && defined(SYS_close) && defined(SYS_read) && defined(SYS_access) + +static inline int mi_prim_open(const char* fpath, int open_flags) { + return syscall(SYS_open,fpath,open_flags,0); +} +static inline ssize_t mi_prim_read(int fd, void* buf, size_t bufsize) { + return syscall(SYS_read,fd,buf,bufsize); +} +static inline int mi_prim_close(int fd) { + return syscall(SYS_close,fd); +} +static inline int mi_prim_access(const char *fpath, int mode) { + return syscall(SYS_access,fpath,mode); +} + +#else + +static inline int mi_prim_open(const char* fpath, int open_flags) { + return open(fpath,open_flags); +} +static inline ssize_t mi_prim_read(int fd, void* buf, size_t bufsize) { + return read(fd,buf,bufsize); +} +static inline int mi_prim_close(int fd) { + return close(fd); +} +static inline int mi_prim_access(const char *fpath, int mode) { + return access(fpath,mode); +} + +#endif + + + +//--------------------------------------------- +// init +//--------------------------------------------- + +static bool unix_detect_overcommit(void) { + bool os_overcommit = true; + #if defined(__linux__) + int fd = mi_prim_open("/proc/sys/vm/overcommit_memory", O_RDONLY); + if (fd >= 0) { + char buf[32]; + ssize_t nread = mi_prim_read(fd, &buf, sizeof(buf)); + mi_prim_close(fd); + // + // 0: heuristic overcommit, 1: always overcommit, 2: never overcommit (ignore NORESERVE) + if (nread >= 1) { + os_overcommit = (buf[0] == '0' || buf[0] == '1'); + } + } + #elif defined(__FreeBSD__) + int val = 0; + size_t olen = sizeof(val); + if (sysctlbyname("vm.overcommit", &val, &olen, NULL, 0) == 0) { + os_overcommit = (val != 0); + } + #else + // default: overcommit is true + #endif + return os_overcommit; +} + +// try to detect the physical memory dynamically (if possible) +static void unix_detect_physical_memory( size_t page_size, size_t* 
physical_memory_in_kib ) { + #if defined(CTL_HW) && (defined(HW_PHYSMEM64) || defined(HW_MEMSIZE)) // freeBSD, macOS + MI_UNUSED(page_size); + int64_t physical_memory = 0; + size_t length = sizeof(int64_t); + #if defined(HW_PHYSMEM64) + int mib[2] = { CTL_HW, HW_PHYSMEM64 }; + #else + int mib[2] = { CTL_HW, HW_MEMSIZE }; + #endif + const int err = sysctl(mib, 2, &physical_memory, &length, NULL, 0); + if (err==0 && physical_memory > 0) { + const int64_t phys_in_kib = physical_memory / MI_KiB; + if (phys_in_kib > 0 && (uint64_t)phys_in_kib <= SIZE_MAX) { + *physical_memory_in_kib = (size_t)phys_in_kib; + } + } + #elif defined(__linux__) + MI_UNUSED(page_size); + struct sysinfo info; _mi_memzero_var(info); + const int err = sysinfo(&info); + if (err==0 && info.totalram > 0 && info.totalram <= SIZE_MAX) { + *physical_memory_in_kib = (size_t)info.totalram / MI_KiB; + } + #elif defined(_SC_PHYS_PAGES) // do not use by default as it might cause allocation (by using `fopen` to parse /proc/meminfo) (issue #1100) + const long pphys = sysconf(_SC_PHYS_PAGES); + const size_t psize_in_kib = page_size / MI_KiB; + if (psize_in_kib > 0 && pphys > 0 && (unsigned long)pphys <= SIZE_MAX && (size_t)pphys <= (SIZE_MAX/psize_in_kib)) { + *physical_memory_in_kib = (size_t)pphys * psize_in_kib; + } + #endif +} + +void _mi_prim_mem_init( mi_os_mem_config_t* config ) +{ + long psize = sysconf(_SC_PAGESIZE); + if (psize > 0 && (unsigned long)psize < SIZE_MAX) { + config->page_size = (size_t)psize; + config->alloc_granularity = (size_t)psize; + unix_detect_physical_memory(config->page_size, &config->physical_memory_in_kib); + } + config->large_page_size = MI_UNIX_LARGE_PAGE_SIZE; + config->has_overcommit = unix_detect_overcommit(); + config->has_partial_free = true; // mmap can free in parts + config->has_virtual_reserve = true; // todo: check if this true for NetBSD? (for anonymous mmap with PROT_NONE) + + // disable transparent huge pages for this process? 
+ #if (defined(__linux__) || defined(__ANDROID__)) && defined(PR_GET_THP_DISABLE) + #if defined(MI_NO_THP) + if (true) + #else + if (!mi_option_is_enabled(mi_option_allow_large_os_pages)) // disable THP also if large OS pages are not allowed in the options + #endif + { + int val = 0; + if (prctl(PR_GET_THP_DISABLE, &val, 0, 0, 0) != 0) { + // Most likely since distros often come with always/madvise settings. + val = 1; + // Disabling only for mimalloc process rather than touching system wide settings + (void)prctl(PR_SET_THP_DISABLE, &val, 0, 0, 0); + } + } + #endif +} + + +//--------------------------------------------- +// free +//--------------------------------------------- + +int _mi_prim_free(void* addr, size_t size ) { + if (size==0) return 0; + bool err = (munmap(addr, size) == -1); + return (err ? errno : 0); +} + + +//--------------------------------------------- +// mmap +//--------------------------------------------- + +static int unix_madvise(void* addr, size_t size, int advice) { + #if defined(__sun) + int res = madvise((caddr_t)addr, size, advice); // Solaris needs cast (issue #520) + #elif defined(__QNX__) + int res = posix_madvise(addr, size, advice); + #else + int res = madvise(addr, size, advice); + #endif + return (res==0 ? 
0 : errno); +} + +static void* unix_mmap_prim(void* addr, size_t size, int protect_flags, int flags, int fd) { + void* p = mmap(addr, size, protect_flags, flags, fd, 0 /* offset */); + #if defined(__linux__) && defined(PR_SET_VMA) + if (p!=MAP_FAILED && p!=NULL) { + prctl(PR_SET_VMA, PR_SET_VMA_ANON_NAME, p, size, "mimalloc"); + } + #endif + return p; +} + +static void* unix_mmap_prim_aligned(void* addr, size_t size, size_t try_alignment, int protect_flags, int flags, int fd) { + MI_UNUSED(try_alignment); + void* p = NULL; + #if defined(MAP_ALIGNED) // BSD + if (addr == NULL && try_alignment > 1 && (try_alignment % _mi_os_page_size()) == 0) { + size_t n = 0; + mi_bsr(try_alignment, &n); + if (((size_t)1 << n) == try_alignment && n >= 12 && n <= 30) { // alignment is a power of 2 and 4096 <= alignment <= 1GiB + p = unix_mmap_prim(addr, size, protect_flags, flags | MAP_ALIGNED(n), fd); + if (p==MAP_FAILED || !_mi_is_aligned(p,try_alignment)) { + int err = errno; + _mi_trace_message("unable to directly request aligned OS memory (error: %d (0x%x), size: 0x%zx bytes, alignment: 0x%zx, hint address: %p)\n", err, err, size, try_alignment, addr); + } + if (p!=MAP_FAILED) return p; + // fall back to regular mmap + } + } + #elif defined(MAP_ALIGN) // Solaris + if (addr == NULL && try_alignment > 1 && (try_alignment % _mi_os_page_size()) == 0) { + p = unix_mmap_prim((void*)try_alignment, size, protect_flags, flags | MAP_ALIGN, fd); // addr parameter is the required alignment + if (p!=MAP_FAILED) return p; + // fall back to regular mmap + } + #endif + #if (MI_INTPTR_SIZE >= 8) && !defined(MAP_ALIGNED) + // on 64-bit systems, use the virtual address area after 2TiB for 4MiB aligned allocations + if (addr == NULL) { + void* hint = _mi_os_get_aligned_hint(try_alignment, size); + if (hint != NULL) { + p = unix_mmap_prim(hint, size, protect_flags, flags, fd); + if (p==MAP_FAILED || !_mi_is_aligned(p,try_alignment)) { + #if MI_TRACK_ENABLED // asan sometimes does not instrument 
errno correctly? + int err = 0; + #else + int err = errno; + #endif + _mi_trace_message("unable to directly request hinted aligned OS memory (error: %d (0x%x), size: 0x%zx bytes, alignment: 0x%zx, hint address: %p)\n", err, err, size, try_alignment, hint); + } + if (p!=MAP_FAILED) return p; + // fall back to regular mmap + } + } + #endif + // regular mmap + p = unix_mmap_prim(addr, size, protect_flags, flags, fd); + if (p!=MAP_FAILED) return p; + // failed to allocate + return NULL; +} + +static int unix_mmap_fd(void) { + #if defined(VM_MAKE_TAG) + // macOS: tracking anonymous page with a specific ID. (All up to 98 are taken officially but LLVM sanitizers had taken 99) + int os_tag = (int)mi_option_get(mi_option_os_tag); + if (os_tag < 100 || os_tag > 255) { os_tag = 254; } + return VM_MAKE_TAG(os_tag); + #else + return -1; + #endif +} + +static void* unix_mmap(void* addr, size_t size, size_t try_alignment, int protect_flags, bool large_only, bool allow_large, bool* is_large) { + #if !defined(MAP_ANONYMOUS) + #define MAP_ANONYMOUS MAP_ANON + #endif + #if !defined(MAP_NORESERVE) + #define MAP_NORESERVE 0 + #endif + void* p = NULL; + const int fd = unix_mmap_fd(); + int flags = MAP_PRIVATE | MAP_ANONYMOUS; + if (_mi_os_has_overcommit()) { + flags |= MAP_NORESERVE; + } + #if defined(PROT_MAX) + protect_flags |= PROT_MAX(PROT_READ | PROT_WRITE); // BSD + #endif + // huge page allocation + if (allow_large && (large_only || (_mi_os_use_large_page(size, try_alignment) && mi_option_get(mi_option_allow_large_os_pages) == 1))) { + static _Atomic(size_t) large_page_try_ok; // = 0; + size_t try_ok = mi_atomic_load_acquire(&large_page_try_ok); + if (!large_only && try_ok > 0) { + // If the OS is not configured for large OS pages, or the user does not have + // enough permission, the `mmap` will always fail (but it might also fail for other reasons). 
+ // Therefore, once a large page allocation failed, we don't try again for `large_page_try_ok` times + // to avoid too many failing calls to mmap. + mi_atomic_cas_strong_acq_rel(&large_page_try_ok, &try_ok, try_ok - 1); + } + else { + int lflags = flags & ~MAP_NORESERVE; // using NORESERVE on huge pages seems to fail on Linux + int lfd = fd; + #ifdef MAP_ALIGNED_SUPER + lflags |= MAP_ALIGNED_SUPER; + #endif + #ifdef MAP_HUGETLB + lflags |= MAP_HUGETLB; + #endif + #ifdef MAP_HUGE_1GB + static bool mi_huge_pages_available = true; + if (large_only && (size % MI_GiB) == 0 && mi_huge_pages_available) { + lflags |= MAP_HUGE_1GB; + } + else + #endif + { + #ifdef MAP_HUGE_2MB + lflags |= MAP_HUGE_2MB; + #endif + } + #ifdef VM_FLAGS_SUPERPAGE_SIZE_2MB + lfd |= VM_FLAGS_SUPERPAGE_SIZE_2MB; + #endif + if (large_only || lflags != flags) { + // try large OS page allocation + *is_large = true; + p = unix_mmap_prim_aligned(addr, size, try_alignment, protect_flags, lflags, lfd); + #ifdef MAP_HUGE_1GB + if (p == NULL && (lflags & MAP_HUGE_1GB) == MAP_HUGE_1GB) { + mi_huge_pages_available = false; // don't try huge 1GiB pages again + if (large_only) { + _mi_warning_message("unable to allocate huge (1GiB) page, trying large (2MiB) pages instead (errno: %i)\n", errno); + } + lflags = ((lflags & ~MAP_HUGE_1GB) | MAP_HUGE_2MB); + p = unix_mmap_prim_aligned(addr, size, try_alignment, protect_flags, lflags, lfd); + } + #endif + if (large_only) return p; + if (p == NULL) { + mi_atomic_store_release(&large_page_try_ok, (size_t)8); // on error, don't try again for the next N allocations + } + } + } + } + // regular allocation + if (p == NULL) { + *is_large = false; + p = unix_mmap_prim_aligned(addr, size, try_alignment, protect_flags, flags, fd); + if (p != NULL) { + #if defined(MADV_HUGEPAGE) + // Many Linux systems don't allow MAP_HUGETLB but they support instead + // transparent huge pages (THP). 
Generally, it is not required to call `madvise` with MADV_HUGE + // though since properly aligned allocations will already use large pages if available + // in that case -- in particular for our large regions (in `memory.c`). + // However, some systems only allow THP if called with explicit `madvise`, so + // when large OS pages are enabled for mimalloc, we call `madvise` anyways. + if (allow_large && _mi_os_use_large_page(size, try_alignment)) { + if (unix_madvise(p, size, MADV_HUGEPAGE) == 0) { + // *is_large = true; // possibly + }; + } + #elif defined(__sun) + if (allow_large && _mi_os_use_large_page(size, try_alignment)) { + struct memcntl_mha cmd = {0}; + cmd.mha_pagesize = _mi_os_large_page_size(); + cmd.mha_cmd = MHA_MAPSIZE_VA; + if (memcntl((caddr_t)p, size, MC_HAT_ADVISE, (caddr_t)&cmd, 0, 0) == 0) { + // *is_large = true; // possibly + } + } + #endif + } + } + return p; +} + +// Note: the `try_alignment` is just a hint and the returned pointer is not guaranteed to be aligned. +int _mi_prim_alloc(void* hint_addr, size_t size, size_t try_alignment, bool commit, bool allow_large, bool* is_large, bool* is_zero, void** addr) { + mi_assert_internal(size > 0 && (size % _mi_os_page_size()) == 0); + mi_assert_internal(commit || !allow_large); + mi_assert_internal(try_alignment > 0); + if (hint_addr == NULL && size >= 8*MI_UNIX_LARGE_PAGE_SIZE && try_alignment > 1 && _mi_is_power_of_two(try_alignment) && try_alignment < MI_UNIX_LARGE_PAGE_SIZE) { + try_alignment = MI_UNIX_LARGE_PAGE_SIZE; // try to align along large page size for larger allocations + } + + *is_zero = true; + int protect_flags = (commit ? (PROT_WRITE | PROT_READ) : PROT_NONE); + *addr = unix_mmap(hint_addr, size, try_alignment, protect_flags, false, allow_large, is_large); + return (*addr != NULL ? 
0 : errno); +} + + +//--------------------------------------------- +// Commit/Reset +//--------------------------------------------- + +static void unix_mprotect_hint(int err) { + #if defined(__linux__) && (MI_SECURE>=2) // guard page around every mimalloc page + if (err == ENOMEM) { + _mi_warning_message("The next warning may be caused by a low memory map limit.\n" + " On Linux this is controlled by the vm.max_map_count -- maybe increase it?\n" + " For example: sudo sysctl -w vm.max_map_count=262144\n"); + } + #else + MI_UNUSED(err); + #endif +} + + + + + +int _mi_prim_commit(void* start, size_t size, bool* is_zero) { + // commit: ensure we can access the area + // note: we may think that *is_zero can be true since the memory + // was either from mmap PROT_NONE, or from decommit MADV_DONTNEED, but + // we sometimes call commit on a range with still partially committed + // memory and `mprotect` does not zero the range. + *is_zero = false; + int err = mprotect(start, size, (PROT_READ | PROT_WRITE)); + if (err != 0) { + err = errno; + unix_mprotect_hint(err); + } + return err; +} + +int _mi_prim_reuse(void* start, size_t size) { + MI_UNUSED(start); MI_UNUSED(size); + #if defined(__APPLE__) && defined(MADV_FREE_REUSE) + return unix_madvise(start, size, MADV_FREE_REUSE); + #endif + return 0; +} + +int _mi_prim_decommit(void* start, size_t size, bool* needs_recommit) { + int err = 0; + #if defined(__APPLE__) && defined(MADV_FREE_REUSABLE) + // decommit on macOS: use MADV_FREE_REUSABLE as it does immediate rss accounting (issue #1097) + err = unix_madvise(start, size, MADV_FREE_REUSABLE); + if (err) { err = unix_madvise(start, size, MADV_DONTNEED); } + #else + // decommit: use MADV_DONTNEED as it decreases rss immediately (unlike MADV_FREE) + err = unix_madvise(start, size, MADV_DONTNEED); + #endif + #if !MI_DEBUG && MI_SECURE<=2 + *needs_recommit = false; + #else + *needs_recommit = true; + mprotect(start, size, PROT_NONE); + #endif + /* + // decommit: use mmap with 
MAP_FIXED and PROT_NONE to discard the existing memory (and reduce rss) + *needs_recommit = true; + const int fd = unix_mmap_fd(); + void* p = mmap(start, size, PROT_NONE, (MAP_FIXED | MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE), fd, 0); + if (p != start) { err = errno; } + */ + return err; +} + +int _mi_prim_reset(void* start, size_t size) { + int err = 0; + + // on macOS can use MADV_FREE_REUSABLE (but we disable this for now as it seems slower) + #if 0 && defined(__APPLE__) && defined(MADV_FREE_REUSABLE) + err = unix_madvise(start, size, MADV_FREE_REUSABLE); + if (err==0) return 0; + // fall through + #endif + + #if defined(MADV_FREE) + // Otherwise, we try to use `MADV_FREE` as that is the fastest. A drawback though is that it + // will not reduce the `rss` stats in tools like `top` even though the memory is available + // to other processes. With the default `MIMALLOC_PURGE_DECOMMITS=1` we ensure that by + // default `MADV_DONTNEED` is used though. + static _Atomic(size_t) advice = MI_ATOMIC_VAR_INIT(MADV_FREE); + int oadvice = (int)mi_atomic_load_relaxed(&advice); + while ((err = unix_madvise(start, size, oadvice)) != 0 && errno == EAGAIN) { errno = 0; }; + if (err != 0 && errno == EINVAL && oadvice == MADV_FREE) { + // if MADV_FREE is not supported, fall back to MADV_DONTNEED from now on + mi_atomic_store_release(&advice, (size_t)MADV_DONTNEED); + err = unix_madvise(start, size, MADV_DONTNEED); + } + #else + err = unix_madvise(start, size, MADV_DONTNEED); + #endif + return err; +} + +int _mi_prim_protect(void* start, size_t size, bool protect) { + int err = mprotect(start, size, protect ? 
PROT_NONE : (PROT_READ | PROT_WRITE)); + if (err != 0) { err = errno; } + unix_mprotect_hint(err); + return err; +} + + + +//--------------------------------------------- +// Huge page allocation +//--------------------------------------------- + +#if (MI_INTPTR_SIZE >= 8) && !defined(__HAIKU__) && !defined(__CYGWIN__) + +#ifndef MPOL_PREFERRED +#define MPOL_PREFERRED 1 +#endif + +#if defined(MI_HAS_SYSCALL_H) && defined(SYS_mbind) +static long mi_prim_mbind(void* start, unsigned long len, unsigned long mode, const unsigned long* nmask, unsigned long maxnode, unsigned flags) { + return syscall(SYS_mbind, start, len, mode, nmask, maxnode, flags); +} +#else +static long mi_prim_mbind(void* start, unsigned long len, unsigned long mode, const unsigned long* nmask, unsigned long maxnode, unsigned flags) { + MI_UNUSED(start); MI_UNUSED(len); MI_UNUSED(mode); MI_UNUSED(nmask); MI_UNUSED(maxnode); MI_UNUSED(flags); + return 0; +} +#endif + +int _mi_prim_alloc_huge_os_pages(void* hint_addr, size_t size, int numa_node, bool* is_zero, void** addr) { + bool is_large = true; + *is_zero = true; + *addr = unix_mmap(hint_addr, size, MI_ARENA_SLICE_ALIGN, PROT_READ | PROT_WRITE, true, true, &is_large); + if (*addr != NULL && numa_node >= 0 && numa_node < 8*MI_INTPTR_SIZE) { // at most 64 nodes + unsigned long numa_mask = (1UL << numa_node); + // TODO: does `mbind` work correctly for huge OS pages? should we + // use `set_mempolicy` before calling mmap instead? + // see: + long err = mi_prim_mbind(*addr, size, MPOL_PREFERRED, &numa_mask, 8*MI_INTPTR_SIZE, 0); + if (err != 0) { + err = errno; + _mi_warning_message("failed to bind huge (1GiB) pages to numa node %d (error: %d (0x%x))\n", numa_node, err, err); + } + } + return (*addr != NULL ? 
0 : errno); +} + +#else + +int _mi_prim_alloc_huge_os_pages(void* hint_addr, size_t size, int numa_node, bool* is_zero, void** addr) { + MI_UNUSED(hint_addr); MI_UNUSED(size); MI_UNUSED(numa_node); + *is_zero = false; + *addr = NULL; + return ENOMEM; +} + +#endif + +//--------------------------------------------- +// NUMA nodes +//--------------------------------------------- + +#if defined(__linux__) + +size_t _mi_prim_numa_node(void) { + #if defined(MI_HAS_SYSCALL_H) && defined(SYS_getcpu) + unsigned long node = 0; + unsigned long ncpu = 0; + long err = syscall(SYS_getcpu, &ncpu, &node, NULL); + if (err != 0) return 0; + return node; + #else + return 0; + #endif +} + +size_t _mi_prim_numa_node_count(void) { + char buf[128]; + unsigned node = 0; + for(node = 0; node < 256; node++) { + // enumerate node entries -- todo: it there a more efficient way to do this? (but ensure there is no allocation) + _mi_snprintf(buf, 127, "/sys/devices/system/node/node%u", node + 1); + if (mi_prim_access(buf,R_OK) != 0) break; + } + return (node+1); +} + +#elif defined(__FreeBSD__) && __FreeBSD_version >= 1200000 + +size_t _mi_prim_numa_node(void) { + domainset_t dom; + size_t node; + int policy; + if (cpuset_getdomain(CPU_LEVEL_CPUSET, CPU_WHICH_PID, -1, sizeof(dom), &dom, &policy) == -1) return 0ul; + for (node = 0; node < MAXMEMDOM; node++) { + if (DOMAINSET_ISSET(node, &dom)) return node; + } + return 0ul; +} + +size_t _mi_prim_numa_node_count(void) { + size_t ndomains = 0; + size_t len = sizeof(ndomains); + if (sysctlbyname("vm.ndomains", &ndomains, &len, NULL, 0) == -1) return 0ul; + return ndomains; +} + +#elif defined(__DragonFly__) + +size_t _mi_prim_numa_node(void) { + // TODO: DragonFly does not seem to provide any userland means to get this information. 
+ return 0ul; +} + +size_t _mi_prim_numa_node_count(void) { + size_t ncpus = 0, nvirtcoresperphys = 0; + size_t len = sizeof(size_t); + if (sysctlbyname("hw.ncpu", &ncpus, &len, NULL, 0) == -1) return 0ul; + if (sysctlbyname("hw.cpu_topology_ht_ids", &nvirtcoresperphys, &len, NULL, 0) == -1) return 0ul; + return nvirtcoresperphys * ncpus; +} + +#else + +size_t _mi_prim_numa_node(void) { + return 0; +} + +size_t _mi_prim_numa_node_count(void) { + return 1; +} + +#endif + +// ---------------------------------------------------------------- +// Clock +// ---------------------------------------------------------------- + +#include + +#if defined(CLOCK_REALTIME) || defined(CLOCK_MONOTONIC) + +mi_msecs_t _mi_prim_clock_now(void) { + struct timespec t; + #ifdef CLOCK_MONOTONIC + clock_gettime(CLOCK_MONOTONIC, &t); + #else + clock_gettime(CLOCK_REALTIME, &t); + #endif + return ((mi_msecs_t)t.tv_sec * 1000) + ((mi_msecs_t)t.tv_nsec / 1000000); +} + +#else + +// low resolution timer +mi_msecs_t _mi_prim_clock_now(void) { + #if !defined(CLOCKS_PER_SEC) || (CLOCKS_PER_SEC == 1000) || (CLOCKS_PER_SEC == 0) + return (mi_msecs_t)clock(); + #elif (CLOCKS_PER_SEC < 1000) + return (mi_msecs_t)clock() * (1000 / (mi_msecs_t)CLOCKS_PER_SEC); + #else + return (mi_msecs_t)clock() / ((mi_msecs_t)CLOCKS_PER_SEC / 1000); + #endif +} + +#endif + + + + +//---------------------------------------------------------------- +// Process info +//---------------------------------------------------------------- + +#if defined(__unix__) || defined(__unix) || defined(unix) || defined(__APPLE__) || defined(__HAIKU__) +#include +#include +#include + +#if defined(__APPLE__) +#include +#endif + +#if defined(__HAIKU__) +#include +#endif + +static mi_msecs_t timeval_secs(const struct timeval* tv) { + return ((mi_msecs_t)tv->tv_sec * 1000L) + ((mi_msecs_t)tv->tv_usec / 1000L); +} + +void _mi_prim_process_info(mi_process_info_t* pinfo) +{ + struct rusage rusage; + getrusage(RUSAGE_SELF, &rusage); + pinfo->utime 
= timeval_secs(&rusage.ru_utime); + pinfo->stime = timeval_secs(&rusage.ru_stime); +#if !defined(__HAIKU__) + pinfo->page_faults = rusage.ru_majflt; +#endif +#if defined(__HAIKU__) + // Haiku does not have (yet?) a way to + // get these stats per process + thread_info tid; + area_info mem; + ssize_t c; + get_thread_info(find_thread(0), &tid); + while (get_next_area_info(tid.team, &c, &mem) == B_OK) { + pinfo->peak_rss += mem.ram_size; + } + pinfo->page_faults = 0; +#elif defined(__APPLE__) + pinfo->peak_rss = rusage.ru_maxrss; // macos reports in bytes + #ifdef MACH_TASK_BASIC_INFO + struct mach_task_basic_info info; + mach_msg_type_number_t infoCount = MACH_TASK_BASIC_INFO_COUNT; + if (task_info(mach_task_self(), MACH_TASK_BASIC_INFO, (task_info_t)&info, &infoCount) == KERN_SUCCESS) { + pinfo->current_rss = (size_t)info.resident_size; + } + #else + struct task_basic_info info; + mach_msg_type_number_t infoCount = TASK_BASIC_INFO_COUNT; + if (task_info(mach_task_self(), TASK_BASIC_INFO, (task_info_t)&info, &infoCount) == KERN_SUCCESS) { + pinfo->current_rss = (size_t)info.resident_size; + } + #endif +#else + pinfo->peak_rss = rusage.ru_maxrss * 1024; // Linux/BSD report in KiB +#endif + // use defaults for commit +} + +#else + +#ifndef __wasi__ +// WebAssembly instances are not processes +#pragma message("define a way to get process info") +#endif + +void _mi_prim_process_info(mi_process_info_t* pinfo) +{ + // use defaults + MI_UNUSED(pinfo); +} + +#endif + + +//---------------------------------------------------------------- +// Output +//---------------------------------------------------------------- + +void _mi_prim_out_stderr( const char* msg ) { + fputs(msg,stderr); +} + + +//---------------------------------------------------------------- +// Environment +//---------------------------------------------------------------- + +#if !defined(MI_USE_ENVIRON) || (MI_USE_ENVIRON!=0) +// On Posix systemsr use `environ` to access environment variables +// even before 
the C runtime is initialized. +#if defined(__APPLE__) && defined(__has_include) && __has_include() +#include +static char** mi_get_environ(void) { + return (*_NSGetEnviron()); +} +#else +extern char** environ; +static char** mi_get_environ(void) { + return environ; +} +#endif +bool _mi_prim_getenv(const char* name, char* result, size_t result_size) { + if (name==NULL) return false; + const size_t len = _mi_strlen(name); + if (len == 0) return false; + char** env = mi_get_environ(); + if (env == NULL) return false; + // compare up to 10000 entries + for (int i = 0; i < 10000 && env[i] != NULL; i++) { + const char* s = env[i]; + if (_mi_strnicmp(name, s, len) == 0 && s[len] == '=') { // case insensitive + // found it + _mi_strlcpy(result, s + len + 1, result_size); + return true; + } + } + return false; +} +#else +// fallback: use standard C `getenv` but this cannot be used while initializing the C runtime +bool _mi_prim_getenv(const char* name, char* result, size_t result_size) { + // cannot call getenv() when still initializing the C runtime. + if (_mi_preloading()) return false; + const char* s = getenv(name); + if (s == NULL) { + // we check the upper case name too. + char buf[64+1]; + size_t len = _mi_strnlen(name,sizeof(buf)-1); + for (size_t i = 0; i < len; i++) { + buf[i] = _mi_toupper(name[i]); + } + buf[len] = 0; + s = getenv(buf); + } + if (s == NULL || _mi_strnlen(s,result_size) >= result_size) return false; + _mi_strlcpy(result, s, result_size); + return true; +} +#endif // !MI_USE_ENVIRON + + +//---------------------------------------------------------------- +// Random +//---------------------------------------------------------------- + +#if defined(__APPLE__) && defined(MAC_OS_X_VERSION_10_15) && (MAC_OS_X_VERSION_MIN_REQUIRED >= MAC_OS_X_VERSION_10_15) +#include +#include + +bool _mi_prim_random_buf(void* buf, size_t buf_len) { + // We prefer CCRandomGenerateBytes as it returns an error code while arc4random_buf + // may fail silently on macOS. 
See PR #390, and + return (CCRandomGenerateBytes(buf, buf_len) == kCCSuccess); +} + +#elif defined(__ANDROID__) || defined(__DragonFly__) || \ + defined(__FreeBSD__) || defined(__NetBSD__) || defined(__OpenBSD__) || \ + defined(__sun) || \ + (defined(__APPLE__) && (MAC_OS_X_VERSION_MIN_REQUIRED >= MAC_OS_X_VERSION_10_7)) + +bool _mi_prim_random_buf(void* buf, size_t buf_len) { + arc4random_buf(buf, buf_len); + return true; +} + +#elif defined(__APPLE__) || defined(__linux__) || defined(__HAIKU__) // also for old apple versions < 10.7 (issue #829) + +#include +#include +#include + +bool _mi_prim_random_buf(void* buf, size_t buf_len) { + // Modern Linux provides `getrandom` but different distributions either use `sys/random.h` or `linux/random.h` + // and for the latter the actual `getrandom` call is not always defined. + // (see ) + // We therefore use a syscall directly and fall back dynamically to /dev/urandom when needed. + #if defined(MI_HAS_SYSCALL_H) && defined(SYS_getrandom) + #ifndef GRND_NONBLOCK + #define GRND_NONBLOCK (1) + #endif + static _Atomic(uintptr_t) no_getrandom; // = 0 + if (mi_atomic_load_acquire(&no_getrandom)==0) { + ssize_t ret = syscall(SYS_getrandom, buf, buf_len, GRND_NONBLOCK); + if (ret >= 0) return (buf_len == (size_t)ret); + if (errno != ENOSYS) return false; + mi_atomic_store_release(&no_getrandom, (uintptr_t)1); // don't call again, and fall back to /dev/urandom + } + #endif + int flags = O_RDONLY; + #if defined(O_CLOEXEC) + flags |= O_CLOEXEC; + #endif + int fd = mi_prim_open("/dev/urandom", flags); + if (fd < 0) return false; + size_t count = 0; + while(count < buf_len) { + ssize_t ret = mi_prim_read(fd, (char*)buf + count, buf_len - count); + if (ret<=0) { + if (errno!=EAGAIN && errno!=EINTR) break; + } + else { + count += ret; + } + } + mi_prim_close(fd); + return (count==buf_len); +} + +#else + +bool _mi_prim_random_buf(void* buf, size_t buf_len) { + return false; +} + +#endif + + 
+//---------------------------------------------------------------- +// Thread init/done +//---------------------------------------------------------------- + +#if defined(MI_USE_PTHREADS) + +// use pthread local storage keys to detect thread ending +// (and used with MI_TLS_PTHREADS for the default heap) +pthread_key_t _mi_heap_default_key = (pthread_key_t)(-1); + +static void mi_pthread_done(void* value) { + if (value!=NULL) { + _mi_thread_done((mi_heap_t*)value); + } +} + +void _mi_prim_thread_init_auto_done(void) { + mi_assert_internal(_mi_heap_default_key == (pthread_key_t)(-1)); + pthread_key_create(&_mi_heap_default_key, &mi_pthread_done); +} + +void _mi_prim_thread_done_auto_done(void) { + if (_mi_heap_default_key != (pthread_key_t)(-1)) { // do not leak the key, see issue #809 + pthread_key_delete(_mi_heap_default_key); + } +} + +void _mi_prim_thread_associate_default_heap(mi_heap_t* heap) { + if (_mi_heap_default_key != (pthread_key_t)(-1)) { // can happen during recursive invocation on freeBSD + pthread_setspecific(_mi_heap_default_key, heap); + } +} + +#else + +void _mi_prim_thread_init_auto_done(void) { + // nothing +} + +void _mi_prim_thread_done_auto_done(void) { + // nothing +} + +void _mi_prim_thread_associate_default_heap(mi_heap_t* heap) { + MI_UNUSED(heap); +} + +#endif + +bool _mi_prim_thread_is_in_threadpool(void) { + return false; +} diff --git a/depends/mimalloc/src/prim/wasi/prim.c b/depends/mimalloc/src/prim/wasi/prim.c new file mode 100644 index 000000000000..1855a7ab3ccf --- /dev/null +++ b/depends/mimalloc/src/prim/wasi/prim.c @@ -0,0 +1,288 @@ +/* ---------------------------------------------------------------------------- +Copyright (c) 2018-2023, Microsoft Research, Daan Leijen +This is free software; you can redistribute it and/or modify it under the +terms of the MIT license. A copy of the license can be found in the file +"LICENSE" at the root of this distribution. 
+-----------------------------------------------------------------------------*/ + +// This file is included in `src/prim/prim.c` + +#include "mimalloc.h" +#include "mimalloc/internal.h" +#include "mimalloc/prim.h" + +#include // fputs +#include // getenv + +//--------------------------------------------- +// Initialize +//--------------------------------------------- + +void _mi_prim_mem_init( mi_os_mem_config_t* config ) { + config->page_size = 64*MI_KiB; // WebAssembly has a fixed page size: 64KiB + config->alloc_granularity = 16; + config->has_overcommit = false; + config->has_partial_free = false; + config->has_virtual_reserve = false; +} + +//--------------------------------------------- +// Free +//--------------------------------------------- + +int _mi_prim_free(void* addr, size_t size ) { + MI_UNUSED(addr); MI_UNUSED(size); + // wasi heap cannot be shrunk + return 0; +} + + +//--------------------------------------------- +// Allocation: sbrk or memory_grow +//--------------------------------------------- + +#if defined(MI_USE_SBRK) + #include // for sbrk + + static void* mi_memory_grow( size_t size ) { + void* p = sbrk(size); + if (p == (void*)(-1)) return NULL; + #if !defined(__wasi__) // on wasi this is always zero initialized already (?) + memset(p,0,size); + #endif + return p; + } +#elif defined(__wasi__) + static void* mi_memory_grow( size_t size ) { + size_t base = (size > 0 ? 
__builtin_wasm_memory_grow(0,_mi_divide_up(size, _mi_os_page_size())) + : __builtin_wasm_memory_size(0)); + if (base == SIZE_MAX) return NULL; + return (void*)(base * _mi_os_page_size()); + } +#endif + +#if defined(MI_USE_PTHREADS) +static pthread_mutex_t mi_heap_grow_mutex = PTHREAD_MUTEX_INITIALIZER; +#endif + +static void* mi_prim_mem_grow(size_t size, size_t try_alignment) { + void* p = NULL; + if (try_alignment <= 1) { + // `sbrk` is not thread safe in general so try to protect it (we could skip this on WASM but leave it in for now) + #if defined(MI_USE_PTHREADS) + pthread_mutex_lock(&mi_heap_grow_mutex); + #endif + p = mi_memory_grow(size); + #if defined(MI_USE_PTHREADS) + pthread_mutex_unlock(&mi_heap_grow_mutex); + #endif + } + else { + void* base = NULL; + size_t alloc_size = 0; + // to allocate aligned use a lock to try to avoid thread interaction + // between getting the current size and actual allocation + // (also, `sbrk` is not thread safe in general) + #if defined(MI_USE_PTHREADS) + pthread_mutex_lock(&mi_heap_grow_mutex); + #endif + { + void* current = mi_memory_grow(0); // get current size + if (current != NULL) { + void* aligned_current = _mi_align_up_ptr(current, try_alignment); // and align from there to minimize wasted space + alloc_size = _mi_align_up( ((uint8_t*)aligned_current - (uint8_t*)current) + size, _mi_os_page_size()); + base = mi_memory_grow(alloc_size); + } + } + #if defined(MI_USE_PTHREADS) + pthread_mutex_unlock(&mi_heap_grow_mutex); + #endif + if (base != NULL) { + p = _mi_align_up_ptr(base, try_alignment); + if ((uint8_t*)p + size > (uint8_t*)base + alloc_size) { + // another thread used wasm_memory_grow/sbrk in-between and we do not have enough + // space after alignment. 
Give up (and waste the space as we cannot shrink :-( ) + // (in `mi_os_mem_alloc_aligned` this will fall back to overallocation to align) + p = NULL; + } + } + } + /* + if (p == NULL) { + _mi_warning_message("unable to allocate sbrk/wasm_memory_grow OS memory (%zu bytes, %zu alignment)\n", size, try_alignment); + errno = ENOMEM; + return NULL; + } + */ + mi_assert_internal( p == NULL || try_alignment == 0 || (uintptr_t)p % try_alignment == 0 ); + return p; +} + +// Note: the `try_alignment` is just a hint and the returned pointer is not guaranteed to be aligned. +int _mi_prim_alloc(void* hint_addr, size_t size, size_t try_alignment, bool commit, bool allow_large, bool* is_large, bool* is_zero, void** addr) { + MI_UNUSED(allow_large); MI_UNUSED(commit); MI_UNUSED(hint_addr); + *is_large = false; + *is_zero = false; + *addr = mi_prim_mem_grow(size, try_alignment); + return (*addr != NULL ? 0 : ENOMEM); +} + + +//--------------------------------------------- +// Commit/Reset/Protect +//--------------------------------------------- + +int _mi_prim_commit(void* addr, size_t size, bool* is_zero) { + MI_UNUSED(addr); MI_UNUSED(size); + *is_zero = false; + return 0; +} + +int _mi_prim_decommit(void* addr, size_t size, bool* needs_recommit) { + MI_UNUSED(addr); MI_UNUSED(size); + *needs_recommit = false; + return 0; +} + +int _mi_prim_reset(void* addr, size_t size) { + MI_UNUSED(addr); MI_UNUSED(size); + return 0; +} + +int _mi_prim_reuse(void* addr, size_t size) { + MI_UNUSED(addr); MI_UNUSED(size); + return 0; +} + +int _mi_prim_protect(void* addr, size_t size, bool protect) { + MI_UNUSED(addr); MI_UNUSED(size); MI_UNUSED(protect); + return 0; +} + + +//--------------------------------------------- +// Huge pages and NUMA nodes +//--------------------------------------------- + +int _mi_prim_alloc_huge_os_pages(void* hint_addr, size_t size, int numa_node, bool* is_zero, void** addr) { + MI_UNUSED(hint_addr); MI_UNUSED(size); MI_UNUSED(numa_node); + *is_zero = true; + 
*addr = NULL; + return ENOSYS; +} + +size_t _mi_prim_numa_node(void) { + return 0; +} + +size_t _mi_prim_numa_node_count(void) { + return 1; +} + + +//---------------------------------------------------------------- +// Clock +//---------------------------------------------------------------- + +#include + +#if defined(CLOCK_REALTIME) || defined(CLOCK_MONOTONIC) + +mi_msecs_t _mi_prim_clock_now(void) { + struct timespec t; + #ifdef CLOCK_MONOTONIC + clock_gettime(CLOCK_MONOTONIC, &t); + #else + clock_gettime(CLOCK_REALTIME, &t); + #endif + return ((mi_msecs_t)t.tv_sec * 1000) + ((mi_msecs_t)t.tv_nsec / 1000000); +} + +#else + +// low resolution timer +mi_msecs_t _mi_prim_clock_now(void) { + #if !defined(CLOCKS_PER_SEC) || (CLOCKS_PER_SEC == 1000) || (CLOCKS_PER_SEC == 0) + return (mi_msecs_t)clock(); + #elif (CLOCKS_PER_SEC < 1000) + return (mi_msecs_t)clock() * (1000 / (mi_msecs_t)CLOCKS_PER_SEC); + #else + return (mi_msecs_t)clock() / ((mi_msecs_t)CLOCKS_PER_SEC / 1000); + #endif +} + +#endif + + +//---------------------------------------------------------------- +// Process info +//---------------------------------------------------------------- + +void _mi_prim_process_info(mi_process_info_t* pinfo) +{ + // use defaults + MI_UNUSED(pinfo); +} + + +//---------------------------------------------------------------- +// Output +//---------------------------------------------------------------- + +void _mi_prim_out_stderr( const char* msg ) { + fputs(msg,stderr); +} + + +//---------------------------------------------------------------- +// Environment +//---------------------------------------------------------------- + +bool _mi_prim_getenv(const char* name, char* result, size_t result_size) { + // cannot call getenv() when still initializing the C runtime. + if (_mi_preloading()) return false; + const char* s = getenv(name); + if (s == NULL) { + // we check the upper case name too. 
+ char buf[64+1]; + size_t len = _mi_strnlen(name,sizeof(buf)-1); + for (size_t i = 0; i < len; i++) { + buf[i] = _mi_toupper(name[i]); + } + buf[len] = 0; + s = getenv(buf); + } + if (s == NULL || _mi_strnlen(s,result_size) >= result_size) return false; + _mi_strlcpy(result, s, result_size); + return true; +} + + +//---------------------------------------------------------------- +// Random +//---------------------------------------------------------------- + +bool _mi_prim_random_buf(void* buf, size_t buf_len) { + return false; +} + + +//---------------------------------------------------------------- +// Thread init/done +//---------------------------------------------------------------- + +void _mi_prim_thread_init_auto_done(void) { + // nothing +} + +void _mi_prim_thread_done_auto_done(void) { + // nothing +} + +void _mi_prim_thread_associate_default_heap(mi_heap_t* heap) { + MI_UNUSED(heap); +} + +bool _mi_prim_thread_is_in_threadpool(void) { + return false; +} diff --git a/depends/mimalloc/src/prim/windows/etw-mimalloc.wprp b/depends/mimalloc/src/prim/windows/etw-mimalloc.wprp new file mode 100644 index 000000000000..b00cd7adf228 --- /dev/null +++ b/depends/mimalloc/src/prim/windows/etw-mimalloc.wprp @@ -0,0 +1,61 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/depends/mimalloc/src/prim/windows/etw.h b/depends/mimalloc/src/prim/windows/etw.h new file mode 100644 index 000000000000..4e0a092a10f4 --- /dev/null +++ b/depends/mimalloc/src/prim/windows/etw.h @@ -0,0 +1,905 @@ +//**********************************************************************` +//* This is an include file generated by Message Compiler. *` +//* *` +//* Copyright (c) Microsoft Corporation. All Rights Reserved. 
*` +//**********************************************************************` +#pragma once + +//***************************************************************************** +// +// Notes on the ETW event code generated by MC: +// +// - Structures and arrays of structures are treated as an opaque binary blob. +// The caller is responsible for packing the data for the structure into a +// single region of memory, with no padding between values. The macro will +// have an extra parameter for the length of the blob. +// - Arrays of nul-terminated strings must be packed by the caller into a +// single binary blob containing the correct number of strings, with a nul +// after each string. The size of the blob is specified in characters, and +// includes the final nul. +// - Arrays of SID are treated as a single binary blob. The caller is +// responsible for packing the SID values into a single region of memory with +// no padding. +// - The length attribute on the data element in the manifest is significant +// for values with intype win:UnicodeString, win:AnsiString, or win:Binary. +// The length attribute must be specified for win:Binary, and is optional for +// win:UnicodeString and win:AnsiString (if no length is given, the strings +// are assumed to be nul-terminated). For win:UnicodeString, the length is +// measured in characters, not bytes. +// - For an array of win:UnicodeString, win:AnsiString, or win:Binary, the +// length attribute applies to every value in the array, so every value in +// the array must have the same length. The values in the array are provided +// to the macro via a single pointer -- the caller is responsible for packing +// all of the values into a single region of memory with no padding between +// values. +// - Values of type win:CountedUnicodeString, win:CountedAnsiString, and +// win:CountedBinary can be generated and collected on Vista or later. +// However, they may not decode properly without the Windows 10 2018 Fall +// Update. 
+// - Arrays of type win:CountedUnicodeString, win:CountedAnsiString, and +// win:CountedBinary must be packed by the caller into a single region of +// memory. The format for each item is a UINT16 byte-count followed by that +// many bytes of data. When providing the array to the generated macro, you +// must provide the total size of the packed array data, including the UINT16 +// sizes for each item. In the case of win:CountedUnicodeString, the data +// size is specified in WCHAR (16-bit) units. In the case of +// win:CountedAnsiString and win:CountedBinary, the data size is specified in +// bytes. +// +//***************************************************************************** + +#include +#include +#include + +#ifndef ETW_INLINE + #ifdef _ETW_KM_ + // In kernel mode, save stack space by never inlining templates. + #define ETW_INLINE DECLSPEC_NOINLINE __inline + #else + // In user mode, save code size by inlining templates as appropriate. + #define ETW_INLINE __inline + #endif +#endif // ETW_INLINE + +#if defined(__cplusplus) +extern "C" { +#endif + +// +// MCGEN_DISABLE_PROVIDER_CODE_GENERATION macro: +// Define this macro to have the compiler skip the generated functions in this +// header. +// +#ifndef MCGEN_DISABLE_PROVIDER_CODE_GENERATION + +// +// MCGEN_USE_KERNEL_MODE_APIS macro: +// Controls whether the generated code uses kernel-mode or user-mode APIs. +// - Set to 0 to use Windows user-mode APIs such as EventRegister. +// - Set to 1 to use Windows kernel-mode APIs such as EtwRegister. +// Default is based on whether the _ETW_KM_ macro is defined (i.e. by wdm.h). +// Note that the APIs can also be overridden directly, e.g. by setting the +// MCGEN_EVENTWRITETRANSFER or MCGEN_EVENTREGISTER macros. 
+// +#ifndef MCGEN_USE_KERNEL_MODE_APIS + #ifdef _ETW_KM_ + #define MCGEN_USE_KERNEL_MODE_APIS 1 + #else + #define MCGEN_USE_KERNEL_MODE_APIS 0 + #endif +#endif // MCGEN_USE_KERNEL_MODE_APIS + +// +// MCGEN_HAVE_EVENTSETINFORMATION macro: +// Controls how McGenEventSetInformation uses the EventSetInformation API. +// - Set to 0 to disable the use of EventSetInformation +// (McGenEventSetInformation will always return an error). +// - Set to 1 to directly invoke MCGEN_EVENTSETINFORMATION. +// - Set to 2 to to locate EventSetInformation at runtime via GetProcAddress +// (user-mode) or MmGetSystemRoutineAddress (kernel-mode). +// Default is determined as follows: +// - If MCGEN_EVENTSETINFORMATION has been customized, set to 1 +// (i.e. use MCGEN_EVENTSETINFORMATION). +// - Else if the target OS version has EventSetInformation, set to 1 +// (i.e. use MCGEN_EVENTSETINFORMATION). +// - Else set to 2 (i.e. try to dynamically locate EventSetInformation). +// Note that an McGenEventSetInformation function will only be generated if one +// or more provider in a manifest has provider traits. +// +#ifndef MCGEN_HAVE_EVENTSETINFORMATION + #ifdef MCGEN_EVENTSETINFORMATION // if MCGEN_EVENTSETINFORMATION has been customized, + #define MCGEN_HAVE_EVENTSETINFORMATION 1 // directly invoke MCGEN_EVENTSETINFORMATION(...). + #elif MCGEN_USE_KERNEL_MODE_APIS // else if using kernel-mode APIs, + #if NTDDI_VERSION >= 0x06040000 // if target OS is Windows 10 or later, + #define MCGEN_HAVE_EVENTSETINFORMATION 1 // directly invoke MCGEN_EVENTSETINFORMATION(...). + #else // else + #define MCGEN_HAVE_EVENTSETINFORMATION 2 // find "EtwSetInformation" via MmGetSystemRoutineAddress. + #endif // else (using user-mode APIs) + #else // if target OS and SDK is Windows 8 or later, + #if WINVER >= 0x0602 && defined(EVENT_FILTER_TYPE_SCHEMATIZED) + #define MCGEN_HAVE_EVENTSETINFORMATION 1 // directly invoke MCGEN_EVENTSETINFORMATION(...). 
+ #else // else + #define MCGEN_HAVE_EVENTSETINFORMATION 2 // find "EventSetInformation" via GetModuleHandleExW/GetProcAddress. + #endif + #endif +#endif // MCGEN_HAVE_EVENTSETINFORMATION + +// +// MCGEN Override Macros +// +// The following override macros may be defined before including this header +// to control the APIs used by this header: +// +// - MCGEN_EVENTREGISTER +// - MCGEN_EVENTUNREGISTER +// - MCGEN_EVENTSETINFORMATION +// - MCGEN_EVENTWRITETRANSFER +// +// If the the macro is undefined, the MC implementation will default to the +// corresponding ETW APIs. For example, if the MCGEN_EVENTREGISTER macro is +// undefined, the EventRegister[MyProviderName] macro will use EventRegister +// in user mode and will use EtwRegister in kernel mode. +// +// To prevent issues from conflicting definitions of these macros, the value +// of the override macro will be used as a suffix in certain internal function +// names. Because of this, the override macros must follow certain rules: +// +// - The macro must be defined before any MC-generated header is included and +// must not be undefined or redefined after any MC-generated header is +// included. Different translation units (i.e. different .c or .cpp files) +// may set the macros to different values, but within a translation unit +// (within a single .c or .cpp file), the macro must be set once and not +// changed. +// - The override must be an object-like macro, not a function-like macro +// (i.e. the override macro must not have a parameter list). +// - The override macro's value must be a simple identifier, i.e. must be +// something that starts with a letter or '_' and contains only letters, +// numbers, and '_' characters. +// - If the override macro's value is the name of a second object-like macro, +// the second object-like macro must follow the same rules. 
(The override +// macro's value can also be the name of a function-like macro, in which +// case the function-like macro does not need to follow the same rules.) +// +// For example, the following will cause compile errors: +// +// #define MCGEN_EVENTWRITETRANSFER MyNamespace::MyClass::MyFunction // Value has non-identifier characters (colon). +// #define MCGEN_EVENTWRITETRANSFER GetEventWriteFunctionPointer(7) // Value has non-identifier characters (parentheses). +// #define MCGEN_EVENTWRITETRANSFER(h,e,a,r,c,d) EventWrite(h,e,c,d) // Override is defined as a function-like macro. +// #define MY_OBJECT_LIKE_MACRO MyNamespace::MyClass::MyEventWriteFunction +// #define MCGEN_EVENTWRITETRANSFER MY_OBJECT_LIKE_MACRO // Evaluates to something with non-identifier characters (colon). +// +// The following would be ok: +// +// #define MCGEN_EVENTWRITETRANSFER MyEventWriteFunction1 // OK, suffix will be "MyEventWriteFunction1". +// #define MY_OBJECT_LIKE_MACRO MyEventWriteFunction2 +// #define MCGEN_EVENTWRITETRANSFER MY_OBJECT_LIKE_MACRO // OK, suffix will be "MyEventWriteFunction2". +// #define MY_FUNCTION_LIKE_MACRO(h,e,a,r,c,d) MyNamespace::MyClass::MyEventWriteFunction3(h,e,c,d) +// #define MCGEN_EVENTWRITETRANSFER MY_FUNCTION_LIKE_MACRO // OK, suffix will be "MY_FUNCTION_LIKE_MACRO". 
+// +#ifndef MCGEN_EVENTREGISTER + #if MCGEN_USE_KERNEL_MODE_APIS + #define MCGEN_EVENTREGISTER EtwRegister + #else + #define MCGEN_EVENTREGISTER EventRegister + #endif +#endif // MCGEN_EVENTREGISTER +#ifndef MCGEN_EVENTUNREGISTER + #if MCGEN_USE_KERNEL_MODE_APIS + #define MCGEN_EVENTUNREGISTER EtwUnregister + #else + #define MCGEN_EVENTUNREGISTER EventUnregister + #endif +#endif // MCGEN_EVENTUNREGISTER +#ifndef MCGEN_EVENTSETINFORMATION + #if MCGEN_USE_KERNEL_MODE_APIS + #define MCGEN_EVENTSETINFORMATION EtwSetInformation + #else + #define MCGEN_EVENTSETINFORMATION EventSetInformation + #endif +#endif // MCGEN_EVENTSETINFORMATION +#ifndef MCGEN_EVENTWRITETRANSFER + #if MCGEN_USE_KERNEL_MODE_APIS + #define MCGEN_EVENTWRITETRANSFER EtwWriteTransfer + #else + #define MCGEN_EVENTWRITETRANSFER EventWriteTransfer + #endif +#endif // MCGEN_EVENTWRITETRANSFER + +// +// MCGEN_EVENT_ENABLED macro: +// Override to control how the EventWrite[EventName] macros determine whether +// an event is enabled. The default behavior is for EventWrite[EventName] to +// use the EventEnabled[EventName] macros. +// +#ifndef MCGEN_EVENT_ENABLED +#define MCGEN_EVENT_ENABLED(EventName) EventEnabled##EventName() +#endif + +// +// MCGEN_EVENT_ENABLED_FORCONTEXT macro: +// Override to control how the EventWrite[EventName]_ForContext macros +// determine whether an event is enabled. The default behavior is for +// EventWrite[EventName]_ForContext to use the +// EventEnabled[EventName]_ForContext macros. +// +#ifndef MCGEN_EVENT_ENABLED_FORCONTEXT +#define MCGEN_EVENT_ENABLED_FORCONTEXT(pContext, EventName) EventEnabled##EventName##_ForContext(pContext) +#endif + +// +// MCGEN_ENABLE_CHECK macro: +// Determines whether the specified event would be considered as enabled +// based on the state of the specified context. Slightly faster than calling +// McGenEventEnabled directly. 
+// +#ifndef MCGEN_ENABLE_CHECK +#define MCGEN_ENABLE_CHECK(Context, Descriptor) (Context.IsEnabled && McGenEventEnabled(&Context, &Descriptor)) +#endif + +#if !defined(MCGEN_TRACE_CONTEXT_DEF) +#define MCGEN_TRACE_CONTEXT_DEF +// This structure is for use by MC-generated code and should not be used directly. +typedef struct _MCGEN_TRACE_CONTEXT +{ + TRACEHANDLE RegistrationHandle; + TRACEHANDLE Logger; // Used as pointer to provider traits. + ULONGLONG MatchAnyKeyword; + ULONGLONG MatchAllKeyword; + ULONG Flags; + ULONG IsEnabled; + UCHAR Level; + UCHAR Reserve; + USHORT EnableBitsCount; + PULONG EnableBitMask; + const ULONGLONG* EnableKeyWords; + const UCHAR* EnableLevel; +} MCGEN_TRACE_CONTEXT, *PMCGEN_TRACE_CONTEXT; +#endif // MCGEN_TRACE_CONTEXT_DEF + +#if !defined(MCGEN_LEVEL_KEYWORD_ENABLED_DEF) +#define MCGEN_LEVEL_KEYWORD_ENABLED_DEF +// +// Determines whether an event with a given Level and Keyword would be +// considered as enabled based on the state of the specified context. +// Note that you may want to use MCGEN_ENABLE_CHECK instead of calling this +// function directly. +// +FORCEINLINE +BOOLEAN +McGenLevelKeywordEnabled( + _In_ PMCGEN_TRACE_CONTEXT EnableInfo, + _In_ UCHAR Level, + _In_ ULONGLONG Keyword + ) +{ + // + // Check if the event Level is lower than the level at which + // the channel is enabled. + // If the event Level is 0 or the channel is enabled at level 0, + // all levels are enabled. + // + + if ((Level <= EnableInfo->Level) || // This also covers the case of Level == 0. 
+ (EnableInfo->Level == 0)) { + + // + // Check if Keyword is enabled + // + + if ((Keyword == (ULONGLONG)0) || + ((Keyword & EnableInfo->MatchAnyKeyword) && + ((Keyword & EnableInfo->MatchAllKeyword) == EnableInfo->MatchAllKeyword))) { + return TRUE; + } + } + + return FALSE; +} +#endif // MCGEN_LEVEL_KEYWORD_ENABLED_DEF + +#if !defined(MCGEN_EVENT_ENABLED_DEF) +#define MCGEN_EVENT_ENABLED_DEF +// +// Determines whether the specified event would be considered as enabled based +// on the state of the specified context. Note that you may want to use +// MCGEN_ENABLE_CHECK instead of calling this function directly. +// +FORCEINLINE +BOOLEAN +McGenEventEnabled( + _In_ PMCGEN_TRACE_CONTEXT EnableInfo, + _In_ PCEVENT_DESCRIPTOR EventDescriptor + ) +{ + return McGenLevelKeywordEnabled(EnableInfo, EventDescriptor->Level, EventDescriptor->Keyword); +} +#endif // MCGEN_EVENT_ENABLED_DEF + +#if !defined(MCGEN_CONTROL_CALLBACK) +#define MCGEN_CONTROL_CALLBACK + +// This function is for use by MC-generated code and should not be used directly. +DECLSPEC_NOINLINE __inline +VOID +__stdcall +McGenControlCallbackV2( + _In_ LPCGUID SourceId, + _In_ ULONG ControlCode, + _In_ UCHAR Level, + _In_ ULONGLONG MatchAnyKeyword, + _In_ ULONGLONG MatchAllKeyword, + _In_opt_ PEVENT_FILTER_DESCRIPTOR FilterData, + _Inout_opt_ PVOID CallbackContext + ) +/*++ + +Routine Description: + + This is the notification callback for Windows Vista and later. + +Arguments: + + SourceId - The GUID that identifies the session that enabled the provider. + + ControlCode - The parameter indicates whether the provider + is being enabled or disabled. + + Level - The level at which the event is enabled. + + MatchAnyKeyword - The bitmask of keywords that the provider uses to + determine the category of events that it writes. + + MatchAllKeyword - This bitmask additionally restricts the category + of events that the provider writes. + + FilterData - The provider-defined data. 
+ + CallbackContext - The context of the callback that is defined when the provider + called EtwRegister to register itself. + +Remarks: + + ETW calls this function to notify provider of enable/disable + +--*/ +{ + PMCGEN_TRACE_CONTEXT Ctx = (PMCGEN_TRACE_CONTEXT)CallbackContext; + ULONG Ix; +#ifndef MCGEN_PRIVATE_ENABLE_CALLBACK_V2 + UNREFERENCED_PARAMETER(SourceId); + UNREFERENCED_PARAMETER(FilterData); +#endif + + if (Ctx == NULL) { + return; + } + + switch (ControlCode) { + + case EVENT_CONTROL_CODE_ENABLE_PROVIDER: + Ctx->Level = Level; + Ctx->MatchAnyKeyword = MatchAnyKeyword; + Ctx->MatchAllKeyword = MatchAllKeyword; + Ctx->IsEnabled = EVENT_CONTROL_CODE_ENABLE_PROVIDER; + + for (Ix = 0; Ix < Ctx->EnableBitsCount; Ix += 1) { + if (McGenLevelKeywordEnabled(Ctx, Ctx->EnableLevel[Ix], Ctx->EnableKeyWords[Ix]) != FALSE) { + Ctx->EnableBitMask[Ix >> 5] |= (1 << (Ix % 32)); + } else { + Ctx->EnableBitMask[Ix >> 5] &= ~(1 << (Ix % 32)); + } + } + break; + + case EVENT_CONTROL_CODE_DISABLE_PROVIDER: + Ctx->IsEnabled = EVENT_CONTROL_CODE_DISABLE_PROVIDER; + Ctx->Level = 0; + Ctx->MatchAnyKeyword = 0; + Ctx->MatchAllKeyword = 0; + if (Ctx->EnableBitsCount > 0) { +#pragma warning(suppress: 26451) // Arithmetic overflow cannot occur, no matter the value of EnableBitCount + RtlZeroMemory(Ctx->EnableBitMask, (((Ctx->EnableBitsCount - 1) / 32) + 1) * sizeof(ULONG)); + } + break; + + default: + break; + } + +#ifdef MCGEN_PRIVATE_ENABLE_CALLBACK_V2 + // + // Call user defined callback + // + MCGEN_PRIVATE_ENABLE_CALLBACK_V2( + SourceId, + ControlCode, + Level, + MatchAnyKeyword, + MatchAllKeyword, + FilterData, + CallbackContext + ); +#endif // MCGEN_PRIVATE_ENABLE_CALLBACK_V2 + + return; +} + +#endif // MCGEN_CONTROL_CALLBACK + +#ifndef _mcgen_PENABLECALLBACK + #if MCGEN_USE_KERNEL_MODE_APIS + #define _mcgen_PENABLECALLBACK PETWENABLECALLBACK + #else + #define _mcgen_PENABLECALLBACK PENABLECALLBACK + #endif +#endif // _mcgen_PENABLECALLBACK + +#if !defined(_mcgen_PASTE2) 
+// This macro is for use by MC-generated code and should not be used directly. +#define _mcgen_PASTE2(a, b) _mcgen_PASTE2_imp(a, b) +#define _mcgen_PASTE2_imp(a, b) a##b +#endif // _mcgen_PASTE2 + +#if !defined(_mcgen_PASTE3) +// This macro is for use by MC-generated code and should not be used directly. +#define _mcgen_PASTE3(a, b, c) _mcgen_PASTE3_imp(a, b, c) +#define _mcgen_PASTE3_imp(a, b, c) a##b##_##c +#endif // _mcgen_PASTE3 + +// +// Macro validation +// + +// Validate MCGEN_EVENTREGISTER: + +// Trigger an error if MCGEN_EVENTREGISTER is not an unqualified (simple) identifier: +struct _mcgen_PASTE2(MCGEN_EVENTREGISTER_definition_must_be_an_unqualified_identifier_, MCGEN_EVENTREGISTER); + +// Trigger an error if MCGEN_EVENTREGISTER is redefined: +typedef struct _mcgen_PASTE2(MCGEN_EVENTREGISTER_definition_must_be_an_unqualified_identifier_, MCGEN_EVENTREGISTER) + MCGEN_EVENTREGISTER_must_not_be_redefined_between_headers; + +// Trigger an error if MCGEN_EVENTREGISTER is defined as a function-like macro: +typedef void MCGEN_EVENTREGISTER_must_not_be_a_functionLike_macro_MCGEN_EVENTREGISTER; +typedef int _mcgen_PASTE2(MCGEN_EVENTREGISTER_must_not_be_a_functionLike_macro_, MCGEN_EVENTREGISTER); + +// Validate MCGEN_EVENTUNREGISTER: + +// Trigger an error if MCGEN_EVENTUNREGISTER is not an unqualified (simple) identifier: +struct _mcgen_PASTE2(MCGEN_EVENTUNREGISTER_definition_must_be_an_unqualified_identifier_, MCGEN_EVENTUNREGISTER); + +// Trigger an error if MCGEN_EVENTUNREGISTER is redefined: +typedef struct _mcgen_PASTE2(MCGEN_EVENTUNREGISTER_definition_must_be_an_unqualified_identifier_, MCGEN_EVENTUNREGISTER) + MCGEN_EVENTUNREGISTER_must_not_be_redefined_between_headers; + +// Trigger an error if MCGEN_EVENTUNREGISTER is defined as a function-like macro: +typedef void MCGEN_EVENTUNREGISTER_must_not_be_a_functionLike_macro_MCGEN_EVENTUNREGISTER; +typedef int _mcgen_PASTE2(MCGEN_EVENTUNREGISTER_must_not_be_a_functionLike_macro_, MCGEN_EVENTUNREGISTER); + 
+// Validate MCGEN_EVENTSETINFORMATION: + +// Trigger an error if MCGEN_EVENTSETINFORMATION is not an unqualified (simple) identifier: +struct _mcgen_PASTE2(MCGEN_EVENTSETINFORMATION_definition_must_be_an_unqualified_identifier_, MCGEN_EVENTSETINFORMATION); + +// Trigger an error if MCGEN_EVENTSETINFORMATION is redefined: +typedef struct _mcgen_PASTE2(MCGEN_EVENTSETINFORMATION_definition_must_be_an_unqualified_identifier_, MCGEN_EVENTSETINFORMATION) + MCGEN_EVENTSETINFORMATION_must_not_be_redefined_between_headers; + +// Trigger an error if MCGEN_EVENTSETINFORMATION is defined as a function-like macro: +typedef void MCGEN_EVENTSETINFORMATION_must_not_be_a_functionLike_macro_MCGEN_EVENTSETINFORMATION; +typedef int _mcgen_PASTE2(MCGEN_EVENTSETINFORMATION_must_not_be_a_functionLike_macro_, MCGEN_EVENTSETINFORMATION); + +// Validate MCGEN_EVENTWRITETRANSFER: + +// Trigger an error if MCGEN_EVENTWRITETRANSFER is not an unqualified (simple) identifier: +struct _mcgen_PASTE2(MCGEN_EVENTWRITETRANSFER_definition_must_be_an_unqualified_identifier_, MCGEN_EVENTWRITETRANSFER); + +// Trigger an error if MCGEN_EVENTWRITETRANSFER is redefined: +typedef struct _mcgen_PASTE2(MCGEN_EVENTWRITETRANSFER_definition_must_be_an_unqualified_identifier_, MCGEN_EVENTWRITETRANSFER) + MCGEN_EVENTWRITETRANSFER_must_not_be_redefined_between_headers;; + +// Trigger an error if MCGEN_EVENTWRITETRANSFER is defined as a function-like macro: +typedef void MCGEN_EVENTWRITETRANSFER_must_not_be_a_functionLike_macro_MCGEN_EVENTWRITETRANSFER; +typedef int _mcgen_PASTE2(MCGEN_EVENTWRITETRANSFER_must_not_be_a_functionLike_macro_, MCGEN_EVENTWRITETRANSFER); + +#ifndef McGenEventWrite_def +#define McGenEventWrite_def + +// This macro is for use by MC-generated code and should not be used directly. +#define McGenEventWrite _mcgen_PASTE2(McGenEventWrite_, MCGEN_EVENTWRITETRANSFER) + +// This function is for use by MC-generated code and should not be used directly. 
+DECLSPEC_NOINLINE __inline +ULONG __stdcall +McGenEventWrite( + _In_ PMCGEN_TRACE_CONTEXT Context, + _In_ PCEVENT_DESCRIPTOR Descriptor, + _In_opt_ LPCGUID ActivityId, + _In_range_(1, 128) ULONG EventDataCount, + _Pre_cap_(EventDataCount) EVENT_DATA_DESCRIPTOR* EventData + ) +{ + const USHORT UNALIGNED* Traits; + + // Some customized MCGEN_EVENTWRITETRANSFER macros might ignore ActivityId. + UNREFERENCED_PARAMETER(ActivityId); + + Traits = (const USHORT UNALIGNED*)(UINT_PTR)Context->Logger; + + if (Traits == NULL) { + EventData[0].Ptr = 0; + EventData[0].Size = 0; + EventData[0].Reserved = 0; + } else { + EventData[0].Ptr = (ULONG_PTR)Traits; + EventData[0].Size = *Traits; + EventData[0].Reserved = 2; // EVENT_DATA_DESCRIPTOR_TYPE_PROVIDER_METADATA + } + + return MCGEN_EVENTWRITETRANSFER( + Context->RegistrationHandle, + Descriptor, + ActivityId, + NULL, + EventDataCount, + EventData); +} +#endif // McGenEventWrite_def + +#if !defined(McGenEventRegisterUnregister) +#define McGenEventRegisterUnregister + +// This macro is for use by MC-generated code and should not be used directly. +#define McGenEventRegister _mcgen_PASTE2(McGenEventRegister_, MCGEN_EVENTREGISTER) + +#pragma warning(push) +#pragma warning(disable:6103) +// This function is for use by MC-generated code and should not be used directly. +DECLSPEC_NOINLINE __inline +ULONG __stdcall +McGenEventRegister( + _In_ LPCGUID ProviderId, + _In_opt_ _mcgen_PENABLECALLBACK EnableCallback, + _In_opt_ PVOID CallbackContext, + _Inout_ PREGHANDLE RegHandle + ) +/*++ + +Routine Description: + + This function registers the provider with ETW. + +Arguments: + + ProviderId - Provider ID to register with ETW. + + EnableCallback - Callback to be used. + + CallbackContext - Context for the callback. + + RegHandle - Pointer to registration handle. + +Remarks: + + Should not be called if the provider is already registered (i.e. should not + be called if *RegHandle != 0). 
Repeatedly registering a provider is a bug + and may indicate a race condition. However, for compatibility with previous + behavior, this function will return SUCCESS in this case. + +--*/ +{ + ULONG Error; + + if (*RegHandle != 0) + { + Error = 0; // ERROR_SUCCESS + } + else + { + Error = MCGEN_EVENTREGISTER(ProviderId, EnableCallback, CallbackContext, RegHandle); + } + + return Error; +} +#pragma warning(pop) + +// This macro is for use by MC-generated code and should not be used directly. +#define McGenEventUnregister _mcgen_PASTE2(McGenEventUnregister_, MCGEN_EVENTUNREGISTER) + +// This function is for use by MC-generated code and should not be used directly. +DECLSPEC_NOINLINE __inline +ULONG __stdcall +McGenEventUnregister(_Inout_ PREGHANDLE RegHandle) +/*++ + +Routine Description: + + Unregister from ETW and set *RegHandle = 0. + +Arguments: + + RegHandle - the pointer to the provider registration handle + +Remarks: + + If provider has not been registered (i.e. if *RegHandle == 0), + return SUCCESS. It is safe to call McGenEventUnregister even if the + call to McGenEventRegister returned an error. + +--*/ +{ + ULONG Error; + + if(*RegHandle == 0) + { + Error = 0; // ERROR_SUCCESS + } + else + { + Error = MCGEN_EVENTUNREGISTER(*RegHandle); + *RegHandle = (REGHANDLE)0; + } + + return Error; +} + +#endif // McGenEventRegisterUnregister + +#ifndef _mcgen_EVENT_BIT_SET + #if defined(_M_IX86) || defined(_M_X64) + // This macro is for use by MC-generated code and should not be used directly. + #define _mcgen_EVENT_BIT_SET(EnableBits, BitPosition) ((((const unsigned char*)EnableBits)[BitPosition >> 3] & (1u << (BitPosition & 7))) != 0) + #else // CPU type + // This macro is for use by MC-generated code and should not be used directly. 
+ #define _mcgen_EVENT_BIT_SET(EnableBits, BitPosition) ((EnableBits[BitPosition >> 5] & (1u << (BitPosition & 31))) != 0) + #endif // CPU type +#endif // _mcgen_EVENT_BIT_SET + +#endif // MCGEN_DISABLE_PROVIDER_CODE_GENERATION + +//+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ +// Provider "microsoft-windows-mimalloc" event count 2 +//+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ + +// Provider GUID = 138f4dbb-ee04-4899-aa0a-572ad4475779 +EXTERN_C __declspec(selectany) const GUID ETW_MI_Provider = {0x138f4dbb, 0xee04, 0x4899, {0xaa, 0x0a, 0x57, 0x2a, 0xd4, 0x47, 0x57, 0x79}}; + +#ifndef ETW_MI_Provider_Traits +#define ETW_MI_Provider_Traits NULL +#endif // ETW_MI_Provider_Traits + +// +// Event Descriptors +// +EXTERN_C __declspec(selectany) const EVENT_DESCRIPTOR ETW_MI_ALLOC = {0x64, 0x1, 0x0, 0x4, 0x0, 0x0, 0x0}; +#define ETW_MI_ALLOC_value 0x64 +EXTERN_C __declspec(selectany) const EVENT_DESCRIPTOR ETW_MI_FREE = {0x65, 0x1, 0x0, 0x4, 0x0, 0x0, 0x0}; +#define ETW_MI_FREE_value 0x65 + +// +// MCGEN_DISABLE_PROVIDER_CODE_GENERATION macro: +// Define this macro to have the compiler skip the generated functions in this +// header. +// +#ifndef MCGEN_DISABLE_PROVIDER_CODE_GENERATION + +// +// Event Enablement Bits +// These variables are for use by MC-generated code and should not be used directly. 
+// +EXTERN_C __declspec(selectany) DECLSPEC_CACHEALIGN ULONG microsoft_windows_mimallocEnableBits[1]; +EXTERN_C __declspec(selectany) const ULONGLONG microsoft_windows_mimallocKeywords[1] = {0x0}; +EXTERN_C __declspec(selectany) const unsigned char microsoft_windows_mimallocLevels[1] = {4}; + +// +// Provider context +// +EXTERN_C __declspec(selectany) MCGEN_TRACE_CONTEXT ETW_MI_Provider_Context = {0, (ULONG_PTR)ETW_MI_Provider_Traits, 0, 0, 0, 0, 0, 0, 1, microsoft_windows_mimallocEnableBits, microsoft_windows_mimallocKeywords, microsoft_windows_mimallocLevels}; + +// +// Provider REGHANDLE +// +#define microsoft_windows_mimallocHandle (ETW_MI_Provider_Context.RegistrationHandle) + +// +// This macro is set to 0, indicating that the EventWrite[Name] macros do not +// have an Activity parameter. This is controlled by the -km and -um options. +// +#define ETW_MI_Provider_EventWriteActivity 0 + +// +// Register with ETW using the control GUID specified in the manifest. +// Invoke this macro during module initialization (i.e. program startup, +// DLL process attach, or driver load) to initialize the provider. +// Note that if this function returns an error, the error means that +// will not work, but no action needs to be taken -- even if EventRegister +// returns an error, it is generally safe to use EventWrite and +// EventUnregister macros (they will be no-ops if EventRegister failed). +// +#ifndef EventRegistermicrosoft_windows_mimalloc +#define EventRegistermicrosoft_windows_mimalloc() McGenEventRegister(&ETW_MI_Provider, McGenControlCallbackV2, &ETW_MI_Provider_Context, µsoft_windows_mimallocHandle) +#endif + +// +// Register with ETW using a specific control GUID (i.e. a GUID other than what +// is specified in the manifest). Advanced scenarios only. 
+// +#ifndef EventRegisterByGuidmicrosoft_windows_mimalloc +#define EventRegisterByGuidmicrosoft_windows_mimalloc(Guid) McGenEventRegister(&(Guid), McGenControlCallbackV2, &ETW_MI_Provider_Context, µsoft_windows_mimallocHandle) +#endif + +// +// Unregister with ETW and close the provider. +// Invoke this macro during module shutdown (i.e. program exit, DLL process +// detach, or driver unload) to unregister the provider. +// Note that you MUST call EventUnregister before DLL or driver unload +// (not optional): failure to unregister a provider before DLL or driver unload +// will result in crashes. +// +#ifndef EventUnregistermicrosoft_windows_mimalloc +#define EventUnregistermicrosoft_windows_mimalloc() McGenEventUnregister(µsoft_windows_mimallocHandle) +#endif + +// +// MCGEN_ENABLE_FORCONTEXT_CODE_GENERATION macro: +// Define this macro to enable support for caller-allocated provider context. +// +#ifdef MCGEN_ENABLE_FORCONTEXT_CODE_GENERATION + +// +// Advanced scenarios: Caller-allocated provider context. +// Use when multiple differently-configured provider handles are needed, +// e.g. for container-aware drivers, one context per container. +// +// Usage: +// +// - Caller enables the feature before including this header, e.g. +// #define MCGEN_ENABLE_FORCONTEXT_CODE_GENERATION 1 +// - Caller allocates memory, e.g. pContext = malloc(sizeof(McGenContext_microsoft_windows_mimalloc)); +// - Caller registers the provider, e.g. EventRegistermicrosoft_windows_mimalloc_ForContext(pContext); +// - Caller writes events, e.g. EventWriteMyEvent_ForContext(pContext, ...); +// - Caller unregisters, e.g. EventUnregistermicrosoft_windows_mimalloc_ForContext(pContext); +// - Caller frees memory, e.g. free(pContext); +// + +typedef struct tagMcGenContext_microsoft_windows_mimalloc { + // The fields of this structure are subject to change and should + // not be accessed directly. To access the provider's REGHANDLE, + // use microsoft_windows_mimallocHandle_ForContext(pContext). 
+ MCGEN_TRACE_CONTEXT Context; + ULONG EnableBits[1]; +} McGenContext_microsoft_windows_mimalloc; + +#define EventRegistermicrosoft_windows_mimalloc_ForContext(pContext) _mcgen_PASTE2(_mcgen_RegisterForContext_microsoft_windows_mimalloc_, MCGEN_EVENTREGISTER)(&ETW_MI_Provider, pContext) +#define EventRegisterByGuidmicrosoft_windows_mimalloc_ForContext(Guid, pContext) _mcgen_PASTE2(_mcgen_RegisterForContext_microsoft_windows_mimalloc_, MCGEN_EVENTREGISTER)(&(Guid), pContext) +#define EventUnregistermicrosoft_windows_mimalloc_ForContext(pContext) McGenEventUnregister(&(pContext)->Context.RegistrationHandle) + +// +// Provider REGHANDLE for caller-allocated context. +// +#define microsoft_windows_mimallocHandle_ForContext(pContext) ((pContext)->Context.RegistrationHandle) + +// This function is for use by MC-generated code and should not be used directly. +// Initialize and register the caller-allocated context. +__inline +ULONG __stdcall +_mcgen_PASTE2(_mcgen_RegisterForContext_microsoft_windows_mimalloc_, MCGEN_EVENTREGISTER)( + _In_ LPCGUID pProviderId, + _Out_ McGenContext_microsoft_windows_mimalloc* pContext) +{ + RtlZeroMemory(pContext, sizeof(*pContext)); + pContext->Context.Logger = (ULONG_PTR)ETW_MI_Provider_Traits; + pContext->Context.EnableBitsCount = 1; + pContext->Context.EnableBitMask = pContext->EnableBits; + pContext->Context.EnableKeyWords = microsoft_windows_mimallocKeywords; + pContext->Context.EnableLevel = microsoft_windows_mimallocLevels; + return McGenEventRegister( + pProviderId, + McGenControlCallbackV2, + &pContext->Context, + &pContext->Context.RegistrationHandle); +} + +// This function is for use by MC-generated code and should not be used directly. +// Trigger a compile error if called with the wrong parameter type. 
+FORCEINLINE +_Ret_ McGenContext_microsoft_windows_mimalloc* +_mcgen_CheckContextType_microsoft_windows_mimalloc(_In_ McGenContext_microsoft_windows_mimalloc* pContext) +{ + return pContext; +} + +#endif // MCGEN_ENABLE_FORCONTEXT_CODE_GENERATION + +// +// Enablement check macro for event "ETW_MI_ALLOC" +// +#define EventEnabledETW_MI_ALLOC() _mcgen_EVENT_BIT_SET(microsoft_windows_mimallocEnableBits, 0) +#define EventEnabledETW_MI_ALLOC_ForContext(pContext) _mcgen_EVENT_BIT_SET(_mcgen_CheckContextType_microsoft_windows_mimalloc(pContext)->EnableBits, 0) + +// +// Event write macros for event "ETW_MI_ALLOC" +// +#define EventWriteETW_MI_ALLOC(Address, Size) \ + MCGEN_EVENT_ENABLED(ETW_MI_ALLOC) \ + ? _mcgen_TEMPLATE_FOR_ETW_MI_ALLOC(&ETW_MI_Provider_Context, &ETW_MI_ALLOC, Address, Size) : 0 +#define EventWriteETW_MI_ALLOC_AssumeEnabled(Address, Size) \ + _mcgen_TEMPLATE_FOR_ETW_MI_ALLOC(&ETW_MI_Provider_Context, &ETW_MI_ALLOC, Address, Size) +#define EventWriteETW_MI_ALLOC_ForContext(pContext, Address, Size) \ + MCGEN_EVENT_ENABLED_FORCONTEXT(pContext, ETW_MI_ALLOC) \ + ? _mcgen_TEMPLATE_FOR_ETW_MI_ALLOC(&(pContext)->Context, &ETW_MI_ALLOC, Address, Size) : 0 +#define EventWriteETW_MI_ALLOC_ForContextAssumeEnabled(pContext, Address, Size) \ + _mcgen_TEMPLATE_FOR_ETW_MI_ALLOC(&_mcgen_CheckContextType_microsoft_windows_mimalloc(pContext)->Context, &ETW_MI_ALLOC, Address, Size) + +// This macro is for use by MC-generated code and should not be used directly. 
+#define _mcgen_TEMPLATE_FOR_ETW_MI_ALLOC _mcgen_PASTE2(McTemplateU0xx_, MCGEN_EVENTWRITETRANSFER) + +// +// Enablement check macro for event "ETW_MI_FREE" +// +#define EventEnabledETW_MI_FREE() _mcgen_EVENT_BIT_SET(microsoft_windows_mimallocEnableBits, 0) +#define EventEnabledETW_MI_FREE_ForContext(pContext) _mcgen_EVENT_BIT_SET(_mcgen_CheckContextType_microsoft_windows_mimalloc(pContext)->EnableBits, 0) + +// +// Event write macros for event "ETW_MI_FREE" +// +#define EventWriteETW_MI_FREE(Address, Size) \ + MCGEN_EVENT_ENABLED(ETW_MI_FREE) \ + ? _mcgen_TEMPLATE_FOR_ETW_MI_FREE(&ETW_MI_Provider_Context, &ETW_MI_FREE, Address, Size) : 0 +#define EventWriteETW_MI_FREE_AssumeEnabled(Address, Size) \ + _mcgen_TEMPLATE_FOR_ETW_MI_FREE(&ETW_MI_Provider_Context, &ETW_MI_FREE, Address, Size) +#define EventWriteETW_MI_FREE_ForContext(pContext, Address, Size) \ + MCGEN_EVENT_ENABLED_FORCONTEXT(pContext, ETW_MI_FREE) \ + ? _mcgen_TEMPLATE_FOR_ETW_MI_FREE(&(pContext)->Context, &ETW_MI_FREE, Address, Size) : 0 +#define EventWriteETW_MI_FREE_ForContextAssumeEnabled(pContext, Address, Size) \ + _mcgen_TEMPLATE_FOR_ETW_MI_FREE(&_mcgen_CheckContextType_microsoft_windows_mimalloc(pContext)->Context, &ETW_MI_FREE, Address, Size) + +// This macro is for use by MC-generated code and should not be used directly. +#define _mcgen_TEMPLATE_FOR_ETW_MI_FREE _mcgen_PASTE2(McTemplateU0xx_, MCGEN_EVENTWRITETRANSFER) + +#endif // MCGEN_DISABLE_PROVIDER_CODE_GENERATION + +// +// MCGEN_DISABLE_PROVIDER_CODE_GENERATION macro: +// Define this macro to have the compiler skip the generated functions in this +// header. +// +#ifndef MCGEN_DISABLE_PROVIDER_CODE_GENERATION + +// +// Template Functions +// + +// +// Function for template "ETW_CUSTOM_HEAP_ALLOC_DATA" (and possibly others). +// This function is for use by MC-generated code and should not be used directly. 
+// +#ifndef McTemplateU0xx_def +#define McTemplateU0xx_def +ETW_INLINE +ULONG +_mcgen_PASTE2(McTemplateU0xx_, MCGEN_EVENTWRITETRANSFER)( + _In_ PMCGEN_TRACE_CONTEXT Context, + _In_ PCEVENT_DESCRIPTOR Descriptor, + _In_ const unsigned __int64 _Arg0, + _In_ const unsigned __int64 _Arg1 + ) +{ +#define McTemplateU0xx_ARGCOUNT 2 + + EVENT_DATA_DESCRIPTOR EventData[McTemplateU0xx_ARGCOUNT + 1]; + + EventDataDescCreate(&EventData[1],&_Arg0, sizeof(const unsigned __int64) ); + + EventDataDescCreate(&EventData[2],&_Arg1, sizeof(const unsigned __int64) ); + + return McGenEventWrite(Context, Descriptor, NULL, McTemplateU0xx_ARGCOUNT + 1, EventData); +} +#endif // McTemplateU0xx_def + +#endif // MCGEN_DISABLE_PROVIDER_CODE_GENERATION + +#if defined(__cplusplus) +} +#endif diff --git a/depends/mimalloc/src/prim/windows/etw.man b/depends/mimalloc/src/prim/windows/etw.man new file mode 100644 index 0000000000000000000000000000000000000000..cfd1f8a9eaacd50af63f1e28f9540aa88c20f90c GIT binary patch literal 3926 zcmeH~T~8B16o${WiT`2c+NGc<*i;EYh$d8xl<0*CS-Mb~vPP(R>hsQY*nU!q z$b})3?##}d?{nTW+uy%xwr*doYaNU1!Vc}sa%7F%g+hVAmL$hwL?4dodnmuAKhUXm0)X9|WHj>XRahh@~SWDIkbduX;B#u6^Q>@U= zDO8U+KXa0*th&%f*z^Udh4ol@uE=Q&`emUsh_CA`FOXgI{i-`XZ9C#bR1yBm=PJ*p z9kVN$J6O;h;8HY>p)RnhY8A#Hb?z)_!y(Iaen(I)@-9CrSSp(;_Jn9I*$S&ATjP1? 
zVxB>pV@LVsy;^jZr7r$HNAm06TcUiI`l@~F$Mt$E%Shfd3O+h1vFhR9a8yQZ@wpne zr3bI-p=VEdo{)#uWxSVJeYQF|-5tnq>~f+CP~A0&{v=(up=ngEDl>5!$EDwPRaNjW zXj|wbG$OwmwaW-hMvBK%pbm3wpic71O^dzbxT%*13+SP9h- zI~rA5hapTVnk|qmiIVYy{__+x9f7OV4j3`g4;{{8_SWnLBSu2PUc%~`t%Ae^>J`SS zdtZg-r<0xAH*_ALtK;Nv(d9nbKK1jK=Ld)I(jQrKhBjgToR#Wm8{0a}@6ZuEO(lvG=y=Jh{M!4!-$(E)!vYUrf47 znf4W$e&QFOovV_$>J%X*KN>oXI@jt%BJms>IU}I$<7Hr{PChchs%+mx{% zt(fa_PM4r63?1h#YOk~;byc5`>%q>sLHA1gohNq{qOREhxu<y~`}YVhGe0?R_ceQ8vt^BpSNp6kErj_0hUNFyWQ1IOZ|GEgWBPwYFLgFu Oo!*uqEBu%Ae18C_t1cS= literal 0 HcmV?d00001 diff --git a/depends/mimalloc/src/prim/windows/prim.c b/depends/mimalloc/src/prim/windows/prim.c new file mode 100644 index 000000000000..6ac32cea09e4 --- /dev/null +++ b/depends/mimalloc/src/prim/windows/prim.c @@ -0,0 +1,915 @@ +/* ---------------------------------------------------------------------------- +Copyright (c) 2018-2023, Microsoft Research, Daan Leijen +This is free software; you can redistribute it and/or modify it under the +terms of the MIT license. A copy of the license can be found in the file +"LICENSE" at the root of this distribution. +-----------------------------------------------------------------------------*/ + +// This file is included in `src/prim/prim.c` + +#include "mimalloc.h" +#include "mimalloc/internal.h" +#include "mimalloc/prim.h" +#include // fputs, stderr + +// xbox has no console IO +#if !defined(WINAPI_FAMILY_PARTITION) || WINAPI_FAMILY_PARTITION(WINAPI_PARTITION_APP | WINAPI_PARTITION_SYSTEM) +#define MI_HAS_CONSOLE_IO +#endif + +//--------------------------------------------- +// Dynamically bind Windows API points for portability +//--------------------------------------------- + +#if defined(_MSC_VER) +#pragma warning(disable:4996) // don't use GetVersionExW +#endif + +static DWORD win_major_version = 6; +static DWORD win_minor_version = 0; + +// We use VirtualAlloc2 for aligned allocation, but it is only supported on Windows 10 and Windows Server 2016. 
+// So, we need to look it up dynamically to run on older systems. (use __stdcall for 32-bit compatibility) +// NtAllocateVirtualAllocEx is used for huge OS page allocation (1GiB) +// We define a minimal MEM_EXTENDED_PARAMETER ourselves in order to be able to compile with older SDK's. +typedef enum MI_MEM_EXTENDED_PARAMETER_TYPE_E { + MiMemExtendedParameterInvalidType = 0, + MiMemExtendedParameterAddressRequirements, + MiMemExtendedParameterNumaNode, + MiMemExtendedParameterPartitionHandle, + MiMemExtendedParameterUserPhysicalHandle, + MiMemExtendedParameterAttributeFlags, + MiMemExtendedParameterMax +} MI_MEM_EXTENDED_PARAMETER_TYPE; + +typedef struct DECLSPEC_ALIGN(8) MI_MEM_EXTENDED_PARAMETER_S { + struct { DWORD64 Type : 8; DWORD64 Reserved : 56; } Type; + union { DWORD64 ULong64; PVOID Pointer; SIZE_T Size; HANDLE Handle; DWORD ULong; } Arg; +} MI_MEM_EXTENDED_PARAMETER; + +typedef struct MI_MEM_ADDRESS_REQUIREMENTS_S { + PVOID LowestStartingAddress; + PVOID HighestEndingAddress; + SIZE_T Alignment; +} MI_MEM_ADDRESS_REQUIREMENTS; + +#define MI_MEM_EXTENDED_PARAMETER_NONPAGED_HUGE 0x00000010 + +#include +typedef PVOID (__stdcall *PVirtualAlloc2)(HANDLE, PVOID, SIZE_T, ULONG, ULONG, MI_MEM_EXTENDED_PARAMETER*, ULONG); +typedef LONG (__stdcall *PNtAllocateVirtualMemoryEx)(HANDLE, PVOID*, SIZE_T*, ULONG, ULONG, MI_MEM_EXTENDED_PARAMETER*, ULONG); // avoid NTSTATUS as it is not defined on xbox (pr #1084) +static PVirtualAlloc2 pVirtualAlloc2 = NULL; +static PNtAllocateVirtualMemoryEx pNtAllocateVirtualMemoryEx = NULL; + +// Similarly, GetNumaProcessorNodeEx is only supported since Windows 7 (and GetNumaNodeProcessorMask is not supported on xbox) +typedef struct MI_PROCESSOR_NUMBER_S { WORD Group; BYTE Number; BYTE Reserved; } MI_PROCESSOR_NUMBER; + +typedef VOID (__stdcall *PGetCurrentProcessorNumberEx)(MI_PROCESSOR_NUMBER* ProcNumber); +typedef BOOL (__stdcall *PGetNumaProcessorNodeEx)(MI_PROCESSOR_NUMBER* Processor, PUSHORT NodeNumber); +typedef BOOL (__stdcall* 
PGetNumaNodeProcessorMaskEx)(USHORT Node, PGROUP_AFFINITY ProcessorMask); +typedef BOOL (__stdcall *PGetNumaProcessorNode)(UCHAR Processor, PUCHAR NodeNumber); +typedef BOOL (__stdcall* PGetNumaNodeProcessorMask)(UCHAR Node, PULONGLONG ProcessorMask); +typedef BOOL (__stdcall* PGetNumaHighestNodeNumber)(PULONG Node); +static PGetCurrentProcessorNumberEx pGetCurrentProcessorNumberEx = NULL; +static PGetNumaProcessorNodeEx pGetNumaProcessorNodeEx = NULL; +static PGetNumaNodeProcessorMaskEx pGetNumaNodeProcessorMaskEx = NULL; +static PGetNumaProcessorNode pGetNumaProcessorNode = NULL; +static PGetNumaNodeProcessorMask pGetNumaNodeProcessorMask = NULL; +static PGetNumaHighestNodeNumber pGetNumaHighestNodeNumber = NULL; + +// Not available on xbox +typedef SIZE_T(__stdcall* PGetLargePageMinimum)(VOID); +static PGetLargePageMinimum pGetLargePageMinimum = NULL; + +// Available after Windows XP +typedef BOOL (__stdcall *PGetPhysicallyInstalledSystemMemory)( PULONGLONG TotalMemoryInKilobytes ); +typedef BOOL (__stdcall* PGetVersionExW)(LPOSVERSIONINFOW lpVersionInformation); + + +//--------------------------------------------- +// Enable large page support dynamically (if possible) +//--------------------------------------------- + +static bool win_enable_large_os_pages(size_t* large_page_size) +{ + static bool large_initialized = false; + if (large_initialized) return (_mi_os_large_page_size() > 0); + large_initialized = true; + if (pGetLargePageMinimum==NULL) return false; // no large page support (xbox etc.) 
+ + // Try to see if large OS pages are supported + // To use large pages on Windows, we first need access permission + // Set "Lock pages in memory" permission in the group policy editor + // + unsigned long err = 0; + HANDLE token = NULL; + BOOL ok = OpenProcessToken(GetCurrentProcess(), TOKEN_ADJUST_PRIVILEGES | TOKEN_QUERY, &token); + if (ok) { + TOKEN_PRIVILEGES tp; + ok = LookupPrivilegeValue(NULL, TEXT("SeLockMemoryPrivilege"), &tp.Privileges[0].Luid); + if (ok) { + tp.PrivilegeCount = 1; + tp.Privileges[0].Attributes = SE_PRIVILEGE_ENABLED; + ok = AdjustTokenPrivileges(token, FALSE, &tp, 0, (PTOKEN_PRIVILEGES)NULL, 0); + if (ok) { + err = GetLastError(); + ok = (err == ERROR_SUCCESS); + if (ok && large_page_size != NULL && pGetLargePageMinimum != NULL) { + *large_page_size = (*pGetLargePageMinimum)(); + } + } + } + CloseHandle(token); + } + if (!ok) { + if (err == 0) err = GetLastError(); + _mi_warning_message("cannot enable large OS page support, error %lu\n", err); + } + return (ok!=0); +} + + +//--------------------------------------------- +// Initialize +//--------------------------------------------- + +static DWORD win_allocation_granularity = 64*MI_KiB; + +void _mi_prim_mem_init( mi_os_mem_config_t* config ) +{ + config->has_overcommit = false; + config->has_partial_free = false; + config->has_virtual_reserve = true; + + // get the page size + SYSTEM_INFO si; _mi_memzero_var(si); + GetSystemInfo(&si); + if (si.dwPageSize > 0) { config->page_size = si.dwPageSize; } + if (si.dwAllocationGranularity > 0) { + config->alloc_granularity = si.dwAllocationGranularity; + win_allocation_granularity = si.dwAllocationGranularity; + } + // get virtual address bits + if ((uintptr_t)si.lpMaximumApplicationAddress > 0) { + const size_t vbits = MI_SIZE_BITS - mi_clz((uintptr_t)si.lpMaximumApplicationAddress); + config->virtual_address_bits = vbits; + } + + // get the VirtualAlloc2 function + HINSTANCE hDll = LoadLibrary(TEXT("kernelbase.dll")); + if (hDll != NULL) { 
+ // use VirtualAlloc2FromApp if possible as it is available to Windows store apps + pVirtualAlloc2 = (PVirtualAlloc2)(void (*)(void))GetProcAddress(hDll, "VirtualAlloc2FromApp"); + if (pVirtualAlloc2==NULL) pVirtualAlloc2 = (PVirtualAlloc2)(void (*)(void))GetProcAddress(hDll, "VirtualAlloc2"); + FreeLibrary(hDll); + } + // NtAllocateVirtualMemoryEx is used for huge page allocation + hDll = LoadLibrary(TEXT("ntdll.dll")); + if (hDll != NULL) { + pNtAllocateVirtualMemoryEx = (PNtAllocateVirtualMemoryEx)(void (*)(void))GetProcAddress(hDll, "NtAllocateVirtualMemoryEx"); + FreeLibrary(hDll); + } + // Try to use Win7+ numa API + hDll = LoadLibrary(TEXT("kernel32.dll")); + if (hDll != NULL) { + pGetCurrentProcessorNumberEx = (PGetCurrentProcessorNumberEx)(void (*)(void))GetProcAddress(hDll, "GetCurrentProcessorNumberEx"); + pGetNumaProcessorNodeEx = (PGetNumaProcessorNodeEx)(void (*)(void))GetProcAddress(hDll, "GetNumaProcessorNodeEx"); + pGetNumaNodeProcessorMaskEx = (PGetNumaNodeProcessorMaskEx)(void (*)(void))GetProcAddress(hDll, "GetNumaNodeProcessorMaskEx"); + pGetNumaProcessorNode = (PGetNumaProcessorNode)(void (*)(void))GetProcAddress(hDll, "GetNumaProcessorNode"); + pGetNumaNodeProcessorMask = (PGetNumaNodeProcessorMask)(void (*)(void))GetProcAddress(hDll, "GetNumaNodeProcessorMask"); + pGetNumaHighestNodeNumber = (PGetNumaHighestNodeNumber)(void (*)(void))GetProcAddress(hDll, "GetNumaHighestNodeNumber"); + pGetLargePageMinimum = (PGetLargePageMinimum)(void (*)(void))GetProcAddress(hDll, "GetLargePageMinimum"); + // Get physical memory (not available on XP, so check dynamically) + PGetPhysicallyInstalledSystemMemory pGetPhysicallyInstalledSystemMemory = (PGetPhysicallyInstalledSystemMemory)(void (*)(void))GetProcAddress(hDll,"GetPhysicallyInstalledSystemMemory"); + if (pGetPhysicallyInstalledSystemMemory != NULL) { + ULONGLONG memInKiB = 0; + if ((*pGetPhysicallyInstalledSystemMemory)(&memInKiB)) { + if (memInKiB > 0 && memInKiB <= SIZE_MAX) { + 
config->physical_memory_in_kib = (size_t)memInKiB; + } + } + } + // Get Windows version + PGetVersionExW pGetVersionExW = (PGetVersionExW)(void (*)(void))GetProcAddress(hDll, "GetVersionExW"); + if (pGetVersionExW != NULL) { + OSVERSIONINFOW version; _mi_memzero_var(version); + version.dwOSVersionInfoSize = sizeof(version); + if ((*pGetVersionExW)(&version)) { + win_major_version = version.dwMajorVersion; + win_minor_version = version.dwMinorVersion; + } + } + FreeLibrary(hDll); + } + // Enable large/huge OS page support? + if (mi_option_is_enabled(mi_option_allow_large_os_pages) || mi_option_is_enabled(mi_option_reserve_huge_os_pages)) { + win_enable_large_os_pages(&config->large_page_size); + } +} + + +//--------------------------------------------- +// Free +//--------------------------------------------- + +int _mi_prim_free(void* addr, size_t size ) { + MI_UNUSED(size); + DWORD errcode = 0; + bool err = (VirtualFree(addr, 0, MEM_RELEASE) == 0); + if (err) { errcode = GetLastError(); } + if (errcode == ERROR_INVALID_ADDRESS) { + // In mi_os_mem_alloc_aligned the fallback path may have returned a pointer inside + // the memory region returned by VirtualAlloc; in that case we need to free using + // the start of the region. 
+ MEMORY_BASIC_INFORMATION info; _mi_memzero_var(info); + VirtualQuery(addr, &info, sizeof(info)); + if (info.AllocationBase < addr && ((uint8_t*)addr - (uint8_t*)info.AllocationBase) < (ptrdiff_t)(4*MI_MiB)) { + errcode = 0; + err = (VirtualFree(info.AllocationBase, 0, MEM_RELEASE) == 0); + if (err) { errcode = GetLastError(); } + } + } + return (int)errcode; +} + + +//--------------------------------------------- +// VirtualAlloc +//--------------------------------------------- + +static void* win_virtual_alloc_prim_once(void* addr, size_t size, size_t try_alignment, DWORD flags) { + #if (MI_INTPTR_SIZE >= 8) + // on 64-bit systems, try to use the virtual address area after 2TiB for 4MiB aligned allocations + if (addr == NULL) { + void* hint = _mi_os_get_aligned_hint(try_alignment,size); + if (hint != NULL) { + void* p = VirtualAlloc(hint, size, flags, PAGE_READWRITE); + if (p != NULL) return p; + _mi_verbose_message("warning: unable to allocate hinted aligned OS memory (%zu bytes, error code: 0x%x, address: %p, alignment: %zu, flags: 0x%x)\n", size, GetLastError(), hint, try_alignment, flags); + // fall through on error + } + } + #endif + // on modern Windows try use VirtualAlloc2 for aligned allocation + if (addr == NULL && try_alignment > win_allocation_granularity && (try_alignment % _mi_os_page_size()) == 0 && pVirtualAlloc2 != NULL) { + MI_MEM_ADDRESS_REQUIREMENTS reqs = { 0, 0, 0 }; + reqs.Alignment = try_alignment; + MI_MEM_EXTENDED_PARAMETER param = { {0, 0}, {0} }; + param.Type.Type = MiMemExtendedParameterAddressRequirements; + param.Arg.Pointer = &reqs; + void* p = (*pVirtualAlloc2)(GetCurrentProcess(), addr, size, flags, PAGE_READWRITE, ¶m, 1); + if (p != NULL) return p; + _mi_warning_message("unable to allocate aligned OS memory (0x%zx bytes, error code: 0x%x, address: %p, alignment: 0x%zx, flags: 0x%x)\n", size, GetLastError(), addr, try_alignment, flags); + // fall through on error + } + // last resort + return VirtualAlloc(addr, size, flags, 
PAGE_READWRITE); +} + +static bool win_is_out_of_memory_error(DWORD err) { + switch (err) { + case ERROR_COMMITMENT_MINIMUM: + case ERROR_COMMITMENT_LIMIT: + case ERROR_PAGEFILE_QUOTA: + case ERROR_NOT_ENOUGH_MEMORY: + return true; + default: + return false; + } +} + +static void* win_virtual_alloc_prim(void* addr, size_t size, size_t try_alignment, DWORD flags) { + long max_retry_msecs = mi_option_get_clamp(mi_option_retry_on_oom, 0, 2000); // at most 2 seconds + if (max_retry_msecs == 1) { max_retry_msecs = 100; } // if one sets the option to "true" + for (long tries = 1; tries <= 10; tries++) { // try at most 10 times (=2200ms) + void* p = win_virtual_alloc_prim_once(addr, size, try_alignment, flags); + if (p != NULL) { + // success, return the address + return p; + } + else if (max_retry_msecs > 0 && (try_alignment <= 8*MI_MiB) && + (flags&MEM_COMMIT) != 0 && (flags&MEM_LARGE_PAGES) == 0 && + win_is_out_of_memory_error(GetLastError())) { + // if committing regular memory and being out-of-memory, + // keep trying for a bit in case memory frees up after all. See issue #894 + _mi_warning_message("out-of-memory on OS allocation, try again... (attempt %lu, 0x%zx bytes, error code: 0x%x, address: %p, alignment: 0x%zx, flags: 0x%x)\n", tries, size, GetLastError(), addr, try_alignment, flags); + long sleep_msecs = tries*40; // increasing waits + if (sleep_msecs > max_retry_msecs) { sleep_msecs = max_retry_msecs; } + max_retry_msecs -= sleep_msecs; + Sleep(sleep_msecs); + } + else { + // otherwise return with an error + break; + } + } + return NULL; +} + +static void* win_virtual_alloc(void* addr, size_t size, size_t try_alignment, DWORD flags, bool large_only, bool allow_large, bool* is_large) { + mi_assert_internal(!(large_only && !allow_large)); + static _Atomic(size_t) large_page_try_ok; // = 0; + void* p = NULL; + // Try to allocate large OS pages (2MiB) if allowed or required. 
+ if ((large_only || _mi_os_use_large_page(size, try_alignment)) + && allow_large && (flags&MEM_COMMIT)!=0 && (flags&MEM_RESERVE)!=0) { + size_t try_ok = mi_atomic_load_acquire(&large_page_try_ok); + if (!large_only && try_ok > 0) { + // if a large page allocation fails, it seems the calls to VirtualAlloc get very expensive. + // therefore, once a large page allocation failed, we don't try again for `large_page_try_ok` times. + mi_atomic_cas_strong_acq_rel(&large_page_try_ok, &try_ok, try_ok - 1); + } + else { + // large OS pages must always reserve and commit. + *is_large = true; + p = win_virtual_alloc_prim(addr, size, try_alignment, flags | MEM_LARGE_PAGES); + if (large_only) return p; + // fall back to non-large page allocation on error (`p == NULL`). + if (p == NULL) { + mi_atomic_store_release(&large_page_try_ok,10UL); // on error, don't try again for the next N allocations + } + } + } + // Fall back to regular page allocation + if (p == NULL) { + *is_large = ((flags&MEM_LARGE_PAGES) != 0); + p = win_virtual_alloc_prim(addr, size, try_alignment, flags); + } + //if (p == NULL) { _mi_warning_message("unable to allocate OS memory (%zu bytes, error code: 0x%x, address: %p, alignment: %zu, flags: 0x%x, large only: %d, allow large: %d)\n", size, GetLastError(), addr, try_alignment, flags, large_only, allow_large); } + return p; +} + +int _mi_prim_alloc(void* hint_addr, size_t size, size_t try_alignment, bool commit, bool allow_large, bool* is_large, bool* is_zero, void** addr) { + mi_assert_internal(size > 0 && (size % _mi_os_page_size()) == 0); + mi_assert_internal(commit || !allow_large); + mi_assert_internal(try_alignment > 0); + *is_zero = true; + int flags = MEM_RESERVE; + if (commit) { flags |= MEM_COMMIT; } + *addr = win_virtual_alloc(hint_addr, size, try_alignment, flags, false, allow_large, is_large); + return (*addr != NULL ? 
0 : (int)GetLastError()); +} + + +//--------------------------------------------- +// Commit/Reset/Protect +//--------------------------------------------- +#ifdef _MSC_VER +#pragma warning(disable:6250) // suppress warning calling VirtualFree without MEM_RELEASE (for decommit) +#endif + +int _mi_prim_commit(void* addr, size_t size, bool* is_zero) { + *is_zero = false; + /* + // zero'ing only happens on an initial commit... but checking upfront seems expensive.. + _MEMORY_BASIC_INFORMATION meminfo; _mi_memzero_var(meminfo); + if (VirtualQuery(addr, &meminfo, size) > 0) { + if ((meminfo.State & MEM_COMMIT) == 0) { + *is_zero = true; + } + } + */ + // commit + void* p = VirtualAlloc(addr, size, MEM_COMMIT, PAGE_READWRITE); + if (p == NULL) return (int)GetLastError(); + return 0; +} + +int _mi_prim_decommit(void* addr, size_t size, bool* needs_recommit) { + BOOL ok = VirtualFree(addr, size, MEM_DECOMMIT); + *needs_recommit = true; // for safety, assume always decommitted even in the case of an error. + return (ok ? 0 : (int)GetLastError()); +} + +int _mi_prim_reset(void* addr, size_t size) { + void* p = VirtualAlloc(addr, size, MEM_RESET, PAGE_READWRITE); + mi_assert_internal(p == addr); + #if 0 + if (p != NULL) { + VirtualUnlock(addr,size); // VirtualUnlock after MEM_RESET removes the memory directly from the working set + } + #endif + return (p != NULL ? 0 : (int)GetLastError()); +} + +int _mi_prim_reuse(void* addr, size_t size) { + MI_UNUSED(addr); MI_UNUSED(size); + return 0; +} + +int _mi_prim_protect(void* addr, size_t size, bool protect) { + DWORD oldprotect = 0; + BOOL ok = VirtualProtect(addr, size, protect ? PAGE_NOACCESS : PAGE_READWRITE, &oldprotect); + return (ok ? 
0 : (int)GetLastError()); +} + + +//--------------------------------------------- +// Huge page allocation +//--------------------------------------------- + +static void* _mi_prim_alloc_huge_os_pagesx(void* hint_addr, size_t size, int numa_node) +{ + const DWORD flags = MEM_LARGE_PAGES | MEM_COMMIT | MEM_RESERVE; + + win_enable_large_os_pages(NULL); + + MI_MEM_EXTENDED_PARAMETER params[3] = { {{0,0},{0}},{{0,0},{0}},{{0,0},{0}} }; + // on modern Windows try use NtAllocateVirtualMemoryEx for 1GiB huge pages + static bool mi_huge_pages_available = true; + if (pNtAllocateVirtualMemoryEx != NULL && mi_huge_pages_available) { + params[0].Type.Type = MiMemExtendedParameterAttributeFlags; + params[0].Arg.ULong64 = MI_MEM_EXTENDED_PARAMETER_NONPAGED_HUGE; + ULONG param_count = 1; + if (numa_node >= 0) { + param_count++; + params[1].Type.Type = MiMemExtendedParameterNumaNode; + params[1].Arg.ULong = (unsigned)numa_node; + } + SIZE_T psize = size; + void* base = hint_addr; + LONG err = (*pNtAllocateVirtualMemoryEx)(GetCurrentProcess(), &base, &psize, flags, PAGE_READWRITE, params, param_count); + if (err == 0 && base != NULL) { + return base; + } + else { + // fall back to regular large pages + mi_huge_pages_available = false; // don't try further huge pages + _mi_warning_message("unable to allocate using huge (1GiB) pages, trying large (2MiB) pages instead (status 0x%lx)\n", err); + } + } + // on modern Windows try use VirtualAlloc2 for numa aware large OS page allocation + if (pVirtualAlloc2 != NULL && numa_node >= 0) { + params[0].Type.Type = MiMemExtendedParameterNumaNode; + params[0].Arg.ULong = (unsigned)numa_node; + return (*pVirtualAlloc2)(GetCurrentProcess(), hint_addr, size, flags, PAGE_READWRITE, params, 1); + } + + // otherwise use regular virtual alloc on older windows + return VirtualAlloc(hint_addr, size, flags, PAGE_READWRITE); +} + +int _mi_prim_alloc_huge_os_pages(void* hint_addr, size_t size, int numa_node, bool* is_zero, void** addr) { + *is_zero = true; 
+ *addr = _mi_prim_alloc_huge_os_pagesx(hint_addr,size,numa_node); + return (*addr != NULL ? 0 : (int)GetLastError()); +} + + +//--------------------------------------------- +// Numa nodes +//--------------------------------------------- + +size_t _mi_prim_numa_node(void) { + USHORT numa_node = 0; + if (pGetCurrentProcessorNumberEx != NULL && pGetNumaProcessorNodeEx != NULL) { + // Extended API is supported + MI_PROCESSOR_NUMBER pnum; + (*pGetCurrentProcessorNumberEx)(&pnum); + USHORT nnode = 0; + BOOL ok = (*pGetNumaProcessorNodeEx)(&pnum, &nnode); + if (ok) { numa_node = nnode; } + } + else if (pGetNumaProcessorNode != NULL) { + // Vista or earlier, use older API that is limited to 64 processors. Issue #277 + DWORD pnum = GetCurrentProcessorNumber(); + UCHAR nnode = 0; + BOOL ok = pGetNumaProcessorNode((UCHAR)pnum, &nnode); + if (ok) { numa_node = nnode; } + } + return numa_node; +} + +size_t _mi_prim_numa_node_count(void) { + ULONG numa_max = 0; + if (pGetNumaHighestNodeNumber!=NULL) { + (*pGetNumaHighestNodeNumber)(&numa_max); + } + // find the highest node number that has actual processors assigned to it. Issue #282 + while (numa_max > 0) { + if (pGetNumaNodeProcessorMaskEx != NULL) { + // Extended API is supported + GROUP_AFFINITY affinity; + if ((*pGetNumaNodeProcessorMaskEx)((USHORT)numa_max, &affinity)) { + if (affinity.Mask != 0) break; // found the maximum non-empty node + } + } + else { + // Vista or earlier, use older API that is limited to 64 processors. 
+ ULONGLONG mask; + if (pGetNumaNodeProcessorMask != NULL) { + if ((*pGetNumaNodeProcessorMask)((UCHAR)numa_max, &mask)) { + if (mask != 0) break; // found the maximum non-empty node + } + }; + } + // max node was invalid or had no processor assigned, try again + numa_max--; + } + return ((size_t)numa_max + 1); +} + + +//---------------------------------------------------------------- +// Clock +//---------------------------------------------------------------- + +static mi_msecs_t mi_to_msecs(LARGE_INTEGER t) { + static LARGE_INTEGER mfreq; // = 0 + if (mfreq.QuadPart == 0LL) { + LARGE_INTEGER f; + QueryPerformanceFrequency(&f); + mfreq.QuadPart = f.QuadPart/1000LL; + if (mfreq.QuadPart == 0) mfreq.QuadPart = 1; + } + return (mi_msecs_t)(t.QuadPart / mfreq.QuadPart); +} + +mi_msecs_t _mi_prim_clock_now(void) { + LARGE_INTEGER t; + QueryPerformanceCounter(&t); + return mi_to_msecs(t); +} + + +//---------------------------------------------------------------- +// Process Info +//---------------------------------------------------------------- + +#include + +static mi_msecs_t filetime_msecs(const FILETIME* ftime) { + ULARGE_INTEGER i; + i.LowPart = ftime->dwLowDateTime; + i.HighPart = ftime->dwHighDateTime; + mi_msecs_t msecs = (i.QuadPart / 10000); // FILETIME is in 100 nano seconds + return msecs; +} + +typedef BOOL (WINAPI *PGetProcessMemoryInfo)(HANDLE, PPROCESS_MEMORY_COUNTERS, DWORD); +static PGetProcessMemoryInfo pGetProcessMemoryInfo = NULL; + +void _mi_prim_process_info(mi_process_info_t* pinfo) +{ + FILETIME ct; + FILETIME ut; + FILETIME st; + FILETIME et; + GetProcessTimes(GetCurrentProcess(), &ct, &et, &st, &ut); + pinfo->utime = filetime_msecs(&ut); + pinfo->stime = filetime_msecs(&st); + + // load psapi on demand + if (pGetProcessMemoryInfo == NULL) { + HINSTANCE hDll = LoadLibrary(TEXT("psapi.dll")); + if (hDll != NULL) { + pGetProcessMemoryInfo = (PGetProcessMemoryInfo)(void (*)(void))GetProcAddress(hDll, "GetProcessMemoryInfo"); + } + } + + // get 
process info + PROCESS_MEMORY_COUNTERS info; _mi_memzero_var(info); + if (pGetProcessMemoryInfo != NULL) { + pGetProcessMemoryInfo(GetCurrentProcess(), &info, sizeof(info)); + } + pinfo->current_rss = (size_t)info.WorkingSetSize; + pinfo->peak_rss = (size_t)info.PeakWorkingSetSize; + pinfo->current_commit = (size_t)info.PagefileUsage; + pinfo->peak_commit = (size_t)info.PeakPagefileUsage; + pinfo->page_faults = (size_t)info.PageFaultCount; +} + +//---------------------------------------------------------------- +// Output +//---------------------------------------------------------------- + +void _mi_prim_out_stderr( const char* msg ) +{ + // on windows with redirection, the C runtime cannot handle locale dependent output + // after the main thread closes so we use direct console output. + if (!_mi_preloading()) { + // _cputs(msg); // _cputs cannot be used as it aborts when failing to lock the console + static HANDLE hcon = INVALID_HANDLE_VALUE; + static bool hconIsConsole = false; + if (hcon == INVALID_HANDLE_VALUE) { + hcon = GetStdHandle(STD_ERROR_HANDLE); + #ifdef MI_HAS_CONSOLE_IO + CONSOLE_SCREEN_BUFFER_INFO sbi; + hconIsConsole = ((hcon != INVALID_HANDLE_VALUE) && GetConsoleScreenBufferInfo(hcon, &sbi)); + #endif + } + const size_t len = _mi_strlen(msg); + if (len > 0 && len < UINT32_MAX) { + DWORD written = 0; + if (hconIsConsole) { + #ifdef MI_HAS_CONSOLE_IO + WriteConsoleA(hcon, msg, (DWORD)len, &written, NULL); + #endif + } + else if (hcon != INVALID_HANDLE_VALUE) { + // use direct write if stderr was redirected + WriteFile(hcon, msg, (DWORD)len, &written, NULL); + } + else { + // finally fall back to fputs after all + fputs(msg, stderr); + } + } + } +} + + +//---------------------------------------------------------------- +// Environment +//---------------------------------------------------------------- + +// On Windows use GetEnvironmentVariable instead of getenv to work +// reliably even when this is invoked before the C runtime is initialized. 
+// i.e. when `_mi_preloading() == true`. +// Note: on windows, environment names are not case sensitive. +bool _mi_prim_getenv(const char* name, char* result, size_t result_size) { + result[0] = 0; + size_t len = GetEnvironmentVariableA(name, result, (DWORD)result_size); + return (len > 0 && len < result_size); +} + + +//---------------------------------------------------------------- +// Random +//---------------------------------------------------------------- + +#if defined(MI_USE_RTLGENRANDOM) // || defined(__cplusplus) +// We prefer to use BCryptGenRandom instead of (the unofficial) RtlGenRandom but when using +// dynamic overriding, we observed it can raise an exception when compiled with C++, and +// sometimes deadlocks when also running under the VS debugger. +// In contrast, issue #623 implies that on Windows Server 2019 we need to use BCryptGenRandom. +// To be continued.. +#pragma comment (lib,"advapi32.lib") +#define RtlGenRandom SystemFunction036 +mi_decl_externc BOOLEAN NTAPI RtlGenRandom(PVOID RandomBuffer, ULONG RandomBufferLength); + +bool _mi_prim_random_buf(void* buf, size_t buf_len) { + return (RtlGenRandom(buf, (ULONG)buf_len) != 0); +} + +#else + +#ifndef BCRYPT_USE_SYSTEM_PREFERRED_RNG +#define BCRYPT_USE_SYSTEM_PREFERRED_RNG 0x00000002 +#endif + +typedef LONG (NTAPI *PBCryptGenRandom)(HANDLE, PUCHAR, ULONG, ULONG); +static PBCryptGenRandom pBCryptGenRandom = NULL; + +bool _mi_prim_random_buf(void* buf, size_t buf_len) { + if (pBCryptGenRandom == NULL) { + HINSTANCE hDll = LoadLibrary(TEXT("bcrypt.dll")); + if (hDll != NULL) { + pBCryptGenRandom = (PBCryptGenRandom)(void (*)(void))GetProcAddress(hDll, "BCryptGenRandom"); + } + if (pBCryptGenRandom == NULL) return false; + } + return (pBCryptGenRandom(NULL, (PUCHAR)buf, (ULONG)buf_len, BCRYPT_USE_SYSTEM_PREFERRED_RNG) >= 0); +} + +#endif // MI_USE_RTLGENRANDOM + + + +//---------------------------------------------------------------- +// Process & Thread Init/Done 
+//---------------------------------------------------------------- + +#if MI_WIN_USE_FIXED_TLS==1 +mi_decl_cache_align size_t _mi_win_tls_offset = 0; +#endif + +//static void mi_debug_out(const char* s) { +// HANDLE h = GetStdHandle(STD_ERROR_HANDLE); +// WriteConsole(h, s, (DWORD)_mi_strlen(s), NULL, NULL); +//} + +static void mi_win_tls_init(DWORD reason) { + if (reason==DLL_PROCESS_ATTACH || reason==DLL_THREAD_ATTACH) { + #if MI_WIN_USE_FIXED_TLS==1 // we must allocate a TLS slot dynamically + if (_mi_win_tls_offset == 0 && reason == DLL_PROCESS_ATTACH) { + const DWORD tls_slot = TlsAlloc(); // usually returns slot 1 + if (tls_slot == TLS_OUT_OF_INDEXES) { + _mi_error_message(EFAULT, "unable to allocate the a TLS slot (rebuild without MI_WIN_USE_FIXED_TLS?)\n"); + } + _mi_win_tls_offset = (size_t)tls_slot * sizeof(void*); + } + #endif + #if MI_HAS_TLS_SLOT >= 2 // we must initialize the TLS slot before any allocation + if (mi_prim_get_default_heap() == NULL) { + _mi_heap_set_default_direct((mi_heap_t*)&_mi_heap_empty); + #if MI_DEBUG && MI_WIN_USE_FIXED_TLS==1 + void* const p = TlsGetValue((DWORD)(_mi_win_tls_offset / sizeof(void*))); + mi_assert_internal(p == (void*)&_mi_heap_empty); + #endif + } + #endif + } +} + +static void NTAPI mi_win_main(PVOID module, DWORD reason, LPVOID reserved) { + MI_UNUSED(reserved); + MI_UNUSED(module); + mi_win_tls_init(reason); + if (reason==DLL_PROCESS_ATTACH) { + _mi_auto_process_init(); + } + else if (reason==DLL_PROCESS_DETACH) { + _mi_auto_process_done(); + } + else if (reason==DLL_THREAD_DETACH && !_mi_is_redirected()) { + _mi_thread_done(NULL); + } +} + + +#if defined(MI_SHARED_LIB) + #define MI_PRIM_HAS_PROCESS_ATTACH 1 + + // Windows DLL: easy to hook into process_init and thread_done + BOOL WINAPI DllMain(HINSTANCE inst, DWORD reason, LPVOID reserved) { + mi_win_main((PVOID)inst,reason,reserved); + return TRUE; + } + + // nothing to do since `_mi_thread_done` is handled through the DLL_THREAD_DETACH event. 
+ void _mi_prim_thread_init_auto_done(void) { } + void _mi_prim_thread_done_auto_done(void) { } + void _mi_prim_thread_associate_default_heap(mi_heap_t* heap) { + MI_UNUSED(heap); + } + +#elif !defined(MI_WIN_USE_FLS) + #define MI_PRIM_HAS_PROCESS_ATTACH 1 + + static void NTAPI mi_win_main_attach(PVOID module, DWORD reason, LPVOID reserved) { + if (reason == DLL_PROCESS_ATTACH || reason == DLL_THREAD_ATTACH) { + mi_win_main(module, reason, reserved); + } + } + static void NTAPI mi_win_main_detach(PVOID module, DWORD reason, LPVOID reserved) { + if (reason == DLL_PROCESS_DETACH || reason == DLL_THREAD_DETACH) { + mi_win_main(module, reason, reserved); + } + } + + // Set up TLS callbacks in a statically linked library by using special data sections. + // See + // We use 2 entries to ensure we call attach events before constructors + // are called, and detach events after destructors are called. + #if defined(__cplusplus) + extern "C" { + #endif + + #if defined(_WIN64) + #pragma comment(linker, "/INCLUDE:_tls_used") + #pragma comment(linker, "/INCLUDE:_mi_tls_callback_pre") + #pragma comment(linker, "/INCLUDE:_mi_tls_callback_post") + #pragma const_seg(".CRT$XLB") + extern const PIMAGE_TLS_CALLBACK _mi_tls_callback_pre[]; + const PIMAGE_TLS_CALLBACK _mi_tls_callback_pre[] = { &mi_win_main_attach }; + #pragma const_seg() + #pragma const_seg(".CRT$XLY") + extern const PIMAGE_TLS_CALLBACK _mi_tls_callback_post[]; + const PIMAGE_TLS_CALLBACK _mi_tls_callback_post[] = { &mi_win_main_detach }; + #pragma const_seg() + #else + #pragma comment(linker, "/INCLUDE:__tls_used") + #pragma comment(linker, "/INCLUDE:__mi_tls_callback_pre") + #pragma comment(linker, "/INCLUDE:__mi_tls_callback_post") + #pragma data_seg(".CRT$XLB") + PIMAGE_TLS_CALLBACK _mi_tls_callback_pre[] = { &mi_win_main_attach }; + #pragma data_seg() + #pragma data_seg(".CRT$XLY") + PIMAGE_TLS_CALLBACK _mi_tls_callback_post[] = { &mi_win_main_detach }; + #pragma data_seg() + #endif + + #if defined(__cplusplus) + 
} + #endif + + // nothing to do since `_mi_thread_done` is handled through the DLL_THREAD_DETACH event. + void _mi_prim_thread_init_auto_done(void) { } + void _mi_prim_thread_done_auto_done(void) { } + void _mi_prim_thread_associate_default_heap(mi_heap_t* heap) { + MI_UNUSED(heap); + } + +#else // deprecated: statically linked, use fiber api + + #if defined(_MSC_VER) // on clang/gcc use the constructor attribute (in `src/prim/prim.c`) + // MSVC: use data section magic for static libraries + // See + #define MI_PRIM_HAS_PROCESS_ATTACH 1 + + static int mi_process_attach(void) { + mi_win_main(NULL,DLL_PROCESS_ATTACH,NULL); + atexit(&_mi_auto_process_done); + return 0; + } + typedef int(*mi_crt_callback_t)(void); + #if defined(_WIN64) + #pragma comment(linker, "/INCLUDE:_mi_tls_callback") + #pragma section(".CRT$XIU", long, read) + #else + #pragma comment(linker, "/INCLUDE:__mi_tls_callback") + #endif + #pragma data_seg(".CRT$XIU") + mi_decl_externc mi_crt_callback_t _mi_tls_callback[] = { &mi_process_attach }; + #pragma data_seg() + #endif + + // use the fiber api for calling `_mi_thread_done`. 
+ #include + #if (_WIN32_WINNT < 0x600) // before Windows Vista + WINBASEAPI DWORD WINAPI FlsAlloc( _In_opt_ PFLS_CALLBACK_FUNCTION lpCallback ); + WINBASEAPI PVOID WINAPI FlsGetValue( _In_ DWORD dwFlsIndex ); + WINBASEAPI BOOL WINAPI FlsSetValue( _In_ DWORD dwFlsIndex, _In_opt_ PVOID lpFlsData ); + WINBASEAPI BOOL WINAPI FlsFree(_In_ DWORD dwFlsIndex); + #endif + + static DWORD mi_fls_key = (DWORD)(-1); + + static void NTAPI mi_fls_done(PVOID value) { + mi_heap_t* heap = (mi_heap_t*)value; + if (heap != NULL) { + _mi_thread_done(heap); + FlsSetValue(mi_fls_key, NULL); // prevent recursion as _mi_thread_done may set it back to the main heap, issue #672 + } + } + + void _mi_prim_thread_init_auto_done(void) { + mi_fls_key = FlsAlloc(&mi_fls_done); + } + + void _mi_prim_thread_done_auto_done(void) { + // call thread-done on all threads (except the main thread) to prevent + // dangling callback pointer if statically linked with a DLL; Issue #208 + FlsFree(mi_fls_key); + } + + void _mi_prim_thread_associate_default_heap(mi_heap_t* heap) { + mi_assert_internal(mi_fls_key != (DWORD)(-1)); + FlsSetValue(mi_fls_key, heap); + } +#endif + +// ---------------------------------------------------- +// Communicate with the redirection module on Windows +// ---------------------------------------------------- +#if defined(MI_SHARED_LIB) && !defined(MI_WIN_NOREDIRECT) + #define MI_PRIM_HAS_ALLOCATOR_INIT 1 + + static bool mi_redirected = false; // true if malloc redirects to mi_malloc + + bool _mi_is_redirected(void) { + return mi_redirected; + } + + #ifdef __cplusplus + extern "C" { + #endif + mi_decl_export void _mi_redirect_entry(DWORD reason) { + // called on redirection; careful as this may be called before DllMain + mi_win_tls_init(reason); + if (reason == DLL_PROCESS_ATTACH) { + mi_redirected = true; + } + else if (reason == DLL_PROCESS_DETACH) { + mi_redirected = false; + } + else if (reason == DLL_THREAD_DETACH) { + _mi_thread_done(NULL); + } + } + __declspec(dllimport) 
bool mi_cdecl mi_allocator_init(const char** message); + __declspec(dllimport) void mi_cdecl mi_allocator_done(void); + #ifdef __cplusplus + } + #endif + bool _mi_allocator_init(const char** message) { + return mi_allocator_init(message); + } + void _mi_allocator_done(void) { + mi_allocator_done(); + } +#endif + +bool _mi_prim_thread_is_in_threadpool(void) { + #if (MI_ARCH_X64 || MI_ARCH_X86 || MI_ARCH_ARM64) + if (win_major_version >= 6) { + // check if this thread belongs to a windows threadpool + // see: + struct _TEB* const teb = NtCurrentTeb(); + void* const pool_data = *((void**)((uint8_t*)teb + (MI_SIZE_BITS == 32 ? 0x0F90 : 0x1778))); + return (pool_data != NULL); + } + #endif + return false; +} diff --git a/depends/mimalloc/src/prim/windows/readme.md b/depends/mimalloc/src/prim/windows/readme.md new file mode 100644 index 000000000000..217c3d174db4 --- /dev/null +++ b/depends/mimalloc/src/prim/windows/readme.md @@ -0,0 +1,17 @@ +## Primitives: + +- `prim.c` contains Windows primitives for OS allocation. + +## Event Tracing for Windows (ETW) + +- `etw.h` is generated from `etw.man` which contains the manifest for mimalloc events. + (100 is an allocation, 101 is for a free) + +- `etw-mimalloc.wprp` is a profile for the Windows Performance Recorder (WPR). + In an admin prompt, you can use: + ``` + > wpr -start src\prim\windows\etw-mimalloc.wprp -filemode + > + > wpr -stop test.etl + ``` + and then open `test.etl` in the Windows Performance Analyzer (WPA). \ No newline at end of file diff --git a/depends/mimalloc/src/random.c b/depends/mimalloc/src/random.c index a5f5e6b829c3..990e4894f3be 100644 --- a/depends/mimalloc/src/random.c +++ b/depends/mimalloc/src/random.c @@ -4,14 +4,9 @@ This is free software; you can redistribute it and/or modify it under the terms of the MIT license. A copy of the license can be found in the file "LICENSE" at the root of this distribution. 
-----------------------------------------------------------------------------*/ -#ifndef _DEFAULT_SOURCE -#define _DEFAULT_SOURCE // for syscall() on Linux -#endif - #include "mimalloc.h" -#include "mimalloc-internal.h" - -#include // memset +#include "mimalloc/internal.h" +#include "mimalloc/prim.h" // _mi_prim_random_buf /* ---------------------------------------------------------------------------- We use our own PRNG to keep predictable performance of random number generation @@ -37,15 +32,11 @@ The implementation uses regular C code which compiles very well on modern compil (gcc x64 has no register spills, and clang 6+ uses SSE instructions) -----------------------------------------------------------------------------*/ -static inline uint32_t rotl(uint32_t x, uint32_t shift) { - return (x << shift) | (x >> (32 - shift)); -} - static inline void qround(uint32_t x[16], size_t a, size_t b, size_t c, size_t d) { - x[a] += x[b]; x[d] = rotl(x[d] ^ x[a], 16); - x[c] += x[d]; x[b] = rotl(x[b] ^ x[c], 12); - x[a] += x[b]; x[d] = rotl(x[d] ^ x[a], 8); - x[c] += x[d]; x[b] = rotl(x[b] ^ x[c], 7); + x[a] += x[b]; x[d] = mi_rotl32(x[d] ^ x[a], 16); + x[c] += x[d]; x[b] = mi_rotl32(x[b] ^ x[c], 12); + x[a] += x[b]; x[d] = mi_rotl32(x[d] ^ x[a], 8); + x[c] += x[d]; x[b] = mi_rotl32(x[b] ^ x[c], 7); } static void chacha_block(mi_random_ctx_t* ctx) @@ -103,7 +94,7 @@ static void chacha_init(mi_random_ctx_t* ctx, const uint8_t key[32], uint64_t no // since we only use chacha for randomness (and not encryption) we // do not _need_ to read 32-bit values as little endian but we do anyways // just for being compatible :-) - memset(ctx, 0, sizeof(*ctx)); + _mi_memzero(ctx, sizeof(*ctx)); for (size_t i = 0; i < 4; i++) { const uint8_t* sigma = (uint8_t*)"expand 32-byte k"; ctx->input[i] = read32(sigma,i); @@ -118,7 +109,7 @@ static void chacha_init(mi_random_ctx_t* ctx, const uint8_t key[32], uint64_t no } static void chacha_split(mi_random_ctx_t* ctx, uint64_t nonce, 
mi_random_ctx_t* ctx_new) { - memset(ctx_new, 0, sizeof(*ctx_new)); + _mi_memzero(ctx_new, sizeof(*ctx_new)); _mi_memcpy(ctx_new->input, ctx->input, sizeof(ctx_new->input)); ctx_new->input[12] = 0; ctx_new->input[13] = 0; @@ -147,179 +138,72 @@ void _mi_random_split(mi_random_ctx_t* ctx, mi_random_ctx_t* ctx_new) { uintptr_t _mi_random_next(mi_random_ctx_t* ctx) { mi_assert_internal(mi_random_is_initialized(ctx)); - #if MI_INTPTR_SIZE <= 4 - return chacha_next32(ctx); - #elif MI_INTPTR_SIZE == 8 - return (((uintptr_t)chacha_next32(ctx) << 32) | chacha_next32(ctx)); - #else - # error "define mi_random_next for this platform" - #endif + uintptr_t r; + do { + #if MI_INTPTR_SIZE <= 4 + r = chacha_next32(ctx); + #elif MI_INTPTR_SIZE == 8 + r = (((uintptr_t)chacha_next32(ctx) << 32) | chacha_next32(ctx)); + #else + # error "define mi_random_next for this platform" + #endif + } while (r==0); + return r; } /* ---------------------------------------------------------------------------- -To initialize a fresh random context we rely on the OS: -- Windows : BCryptGenRandom (or RtlGenRandom) -- macOS : CCRandomGenerateBytes, arc4random_buf -- bsd,wasi : arc4random_buf -- Linux : getrandom,/dev/urandom +To initialize a fresh random context. If we cannot get good randomness, we fall back to weak randomness based on a timer and ASLR. -----------------------------------------------------------------------------*/ -#if defined(_WIN32) - -#if defined(MI_USE_RTLGENRANDOM) || defined(__cplusplus) -// We prefer to use BCryptGenRandom instead of (the unofficial) RtlGenRandom but when using -// dynamic overriding, we observed it can raise an exception when compiled with C++, and -// sometimes deadlocks when also running under the VS debugger. -// In contrast, issue #623 implies that on Windows Server 2019 we need to use BCryptGenRandom. -// To be continued.. 
-#pragma comment (lib,"advapi32.lib") -#define RtlGenRandom SystemFunction036 -#ifdef __cplusplus -extern "C" { -#endif -BOOLEAN NTAPI RtlGenRandom(PVOID RandomBuffer, ULONG RandomBufferLength); -#ifdef __cplusplus -} -#endif -static bool os_random_buf(void* buf, size_t buf_len) { - return (RtlGenRandom(buf, (ULONG)buf_len) != 0); -} -#else -#pragma comment (lib,"bcrypt.lib") -#include -static bool os_random_buf(void* buf, size_t buf_len) { - return (BCryptGenRandom(NULL, (PUCHAR)buf, (ULONG)buf_len, BCRYPT_USE_SYSTEM_PREFERRED_RNG) >= 0); -} -#endif - -#elif defined(__APPLE__) -#include -#if defined(MAC_OS_X_VERSION_10_10) && MAC_OS_X_VERSION_MAX_ALLOWED >= MAC_OS_X_VERSION_10_10 -#include -#include -#endif -static bool os_random_buf(void* buf, size_t buf_len) { - #if defined(MAC_OS_X_VERSION_10_15) && MAC_OS_X_VERSION_MAX_ALLOWED >= MAC_OS_X_VERSION_10_15 - // We prefere CCRandomGenerateBytes as it returns an error code while arc4random_buf - // may fail silently on macOS. See PR #390, and - return (CCRandomGenerateBytes(buf, buf_len) == kCCSuccess); - #else - // fall back on older macOS - arc4random_buf(buf, buf_len); - return true; - #endif -} - -#elif defined(__ANDROID__) || defined(__DragonFly__) || \ - defined(__FreeBSD__) || defined(__NetBSD__) || defined(__OpenBSD__) || \ - defined(__sun) // todo: what to use with __wasi__? -#include -static bool os_random_buf(void* buf, size_t buf_len) { - arc4random_buf(buf, buf_len); - return true; -} -#elif defined(__linux__) || defined(__HAIKU__) -#if defined(__linux__) -#include -#endif -#include -#include -#include -#include -#include -static bool os_random_buf(void* buf, size_t buf_len) { - // Modern Linux provides `getrandom` but different distributions either use `sys/random.h` or `linux/random.h` - // and for the latter the actual `getrandom` call is not always defined. - // (see ) - // We therefore use a syscall directly and fall back dynamically to /dev/urandom when needed. 
-#ifdef SYS_getrandom - #ifndef GRND_NONBLOCK - #define GRND_NONBLOCK (1) - #endif - static _Atomic(uintptr_t) no_getrandom; // = 0 - if (mi_atomic_load_acquire(&no_getrandom)==0) { - ssize_t ret = syscall(SYS_getrandom, buf, buf_len, GRND_NONBLOCK); - if (ret >= 0) return (buf_len == (size_t)ret); - if (errno != ENOSYS) return false; - mi_atomic_store_release(&no_getrandom, 1UL); // don't call again, and fall back to /dev/urandom - } -#endif - int flags = O_RDONLY; - #if defined(O_CLOEXEC) - flags |= O_CLOEXEC; - #endif - int fd = open("/dev/urandom", flags, 0); - if (fd < 0) return false; - size_t count = 0; - while(count < buf_len) { - ssize_t ret = read(fd, (char*)buf + count, buf_len - count); - if (ret<=0) { - if (errno!=EAGAIN && errno!=EINTR) break; - } - else { - count += ret; - } - } - close(fd); - return (count==buf_len); -} -#else -static bool os_random_buf(void* buf, size_t buf_len) { - return false; -} -#endif - -#if defined(_WIN32) -#include -#elif defined(__APPLE__) -#include -#else -#include -#endif - uintptr_t _mi_os_random_weak(uintptr_t extra_seed) { uintptr_t x = (uintptr_t)&_mi_os_random_weak ^ extra_seed; // ASLR makes the address random - - #if defined(_WIN32) - LARGE_INTEGER pcount; - QueryPerformanceCounter(&pcount); - x ^= (uintptr_t)(pcount.QuadPart); - #elif defined(__APPLE__) - x ^= (uintptr_t)mach_absolute_time(); - #else - struct timespec time; - clock_gettime(CLOCK_MONOTONIC, &time); - x ^= (uintptr_t)time.tv_sec; - x ^= (uintptr_t)time.tv_nsec; - #endif + x ^= _mi_prim_clock_now(); // and do a few randomization steps uintptr_t max = ((x ^ (x >> 17)) & 0x0F) + 1; - for (uintptr_t i = 0; i < max; i++) { + for (uintptr_t i = 0; i < max || x==0; i++, x++) { x = _mi_random_shuffle(x); } mi_assert_internal(x != 0); return x; } -void _mi_random_init(mi_random_ctx_t* ctx) { +static void mi_random_init_ex(mi_random_ctx_t* ctx, bool use_weak) { uint8_t key[32]; - if (!os_random_buf(key, sizeof(key))) { + if (use_weak || 
!_mi_prim_random_buf(key, sizeof(key))) { // if we fail to get random data from the OS, we fall back to a // weak random source based on the current time #if !defined(__wasi__) - _mi_warning_message("unable to use secure randomness\n"); + if (!use_weak) { _mi_warning_message("unable to use secure randomness\n"); } #endif uintptr_t x = _mi_os_random_weak(0); - for (size_t i = 0; i < 8; i++) { // key is eight 32-bit words. + for (size_t i = 0; i < 8; i++, x++) { // key is eight 32-bit words. x = _mi_random_shuffle(x); ((uint32_t*)key)[i] = (uint32_t)x; } + ctx->weak = true; + } + else { + ctx->weak = false; } chacha_init(ctx, key, (uintptr_t)ctx /*nonce*/ ); } +void _mi_random_init(mi_random_ctx_t* ctx) { + mi_random_init_ex(ctx, false); +} + +void _mi_random_init_weak(mi_random_ctx_t * ctx) { + mi_random_init_ex(ctx, true); +} + +void _mi_random_reinit_if_weak(mi_random_ctx_t * ctx) { + if (ctx->weak) { + _mi_random_init(ctx); + } +} + /* -------------------------------------------------------- test vectors from ----------------------------------------------------------- */ diff --git a/depends/mimalloc/src/region.c b/depends/mimalloc/src/region.c deleted file mode 100644 index 8b04387dfe08..000000000000 --- a/depends/mimalloc/src/region.c +++ /dev/null @@ -1,506 +0,0 @@ -/* ---------------------------------------------------------------------------- -Copyright (c) 2019-2020, Microsoft Research, Daan Leijen -This is free software; you can redistribute it and/or modify it under the -terms of the MIT license. A copy of the license can be found in the file -"LICENSE" at the root of this distribution. ------------------------------------------------------------------------------*/ - -/* ---------------------------------------------------------------------------- -This implements a layer between the raw OS memory (VirtualAlloc/mmap/sbrk/..) -and the segment and huge object allocation by mimalloc. 
There may be multiple -implementations of this (one could be the identity going directly to the OS, -another could be a simple cache etc), but the current one uses large "regions". -In contrast to the rest of mimalloc, the "regions" are shared between threads and -need to be accessed using atomic operations. -We need this memory layer between the raw OS calls because of: -1. on `sbrk` like systems (like WebAssembly) we need our own memory maps in order - to reuse memory effectively. -2. It turns out that for large objects, between 1MiB and 32MiB (?), the cost of - an OS allocation/free is still (much) too expensive relative to the accesses - in that object :-( (`malloc-large` tests this). This means we need a cheaper - way to reuse memory. -3. This layer allows for NUMA aware allocation. - -Possible issues: -- (2) can potentially be addressed too with a small cache per thread which is much - simpler. Generally though that requires shrinking of huge pages, and may overuse - memory per thread. (and is not compatible with `sbrk`). -- Since the current regions are per-process, we need atomic operations to - claim blocks which may be contended -- In the worst case, we need to search the whole region map (16KiB for 256GiB) - linearly. At what point will direct OS calls be faster? Is there a way to - do this better without adding too much complexity? 
------------------------------------------------------------------------------*/ -#include "mimalloc.h" -#include "mimalloc-internal.h" -#include "mimalloc-atomic.h" - -#include // memset - -#include "bitmap.h" - -// Internal raw OS interface -size_t _mi_os_large_page_size(void); -bool _mi_os_protect(void* addr, size_t size); -bool _mi_os_unprotect(void* addr, size_t size); -bool _mi_os_commit(void* p, size_t size, bool* is_zero, mi_stats_t* stats); -bool _mi_os_decommit(void* p, size_t size, mi_stats_t* stats); -bool _mi_os_reset(void* p, size_t size, mi_stats_t* stats); -bool _mi_os_unreset(void* p, size_t size, bool* is_zero, mi_stats_t* stats); - -// arena.c -mi_arena_id_t _mi_arena_id_none(void); -void _mi_arena_free(void* p, size_t size, size_t memid, bool all_committed, mi_stats_t* stats); -void* _mi_arena_alloc(size_t size, bool* commit, bool* large, bool* is_pinned, bool* is_zero, mi_arena_id_t req_arena_id, size_t* memid, mi_os_tld_t* tld); -void* _mi_arena_alloc_aligned(size_t size, size_t alignment, bool* commit, bool* large, bool* is_pinned, bool* is_zero, mi_arena_id_t req_arena_id, size_t* memid, mi_os_tld_t* tld); - - - -// Constants -#if (MI_INTPTR_SIZE==8) -#define MI_HEAP_REGION_MAX_SIZE (256 * MI_GiB) // 64KiB for the region map -#elif (MI_INTPTR_SIZE==4) -#define MI_HEAP_REGION_MAX_SIZE (3 * MI_GiB) // ~ KiB for the region map -#else -#error "define the maximum heap space allowed for regions on this platform" -#endif - -#define MI_SEGMENT_ALIGN MI_SEGMENT_SIZE - -#define MI_REGION_MAX_BLOCKS MI_BITMAP_FIELD_BITS -#define MI_REGION_SIZE (MI_SEGMENT_SIZE * MI_BITMAP_FIELD_BITS) // 256MiB (64MiB on 32 bits) -#define MI_REGION_MAX (MI_HEAP_REGION_MAX_SIZE / MI_REGION_SIZE) // 1024 (48 on 32 bits) -#define MI_REGION_MAX_OBJ_BLOCKS (MI_REGION_MAX_BLOCKS/4) // 64MiB -#define MI_REGION_MAX_OBJ_SIZE (MI_REGION_MAX_OBJ_BLOCKS*MI_SEGMENT_SIZE) - -// Region info -typedef union mi_region_info_u { - size_t value; - struct { - bool valid; // initialized? 
- bool is_large:1; // allocated in fixed large/huge OS pages - bool is_pinned:1; // pinned memory cannot be decommitted - short numa_node; // the associated NUMA node (where -1 means no associated node) - } x; -} mi_region_info_t; - - -// A region owns a chunk of REGION_SIZE (256MiB) (virtual) memory with -// a bit map with one bit per MI_SEGMENT_SIZE (4MiB) block. -typedef struct mem_region_s { - _Atomic(size_t) info; // mi_region_info_t.value - _Atomic(void*) start; // start of the memory area - mi_bitmap_field_t in_use; // bit per in-use block - mi_bitmap_field_t dirty; // track if non-zero per block - mi_bitmap_field_t commit; // track if committed per block - mi_bitmap_field_t reset; // track if reset per block - _Atomic(size_t) arena_memid; // if allocated from a (huge page) arena - _Atomic(size_t) padding; // round to 8 fields (needs to be atomic for msvc, see issue #508) -} mem_region_t; - -// The region map -static mem_region_t regions[MI_REGION_MAX]; - -// Allocated regions -static _Atomic(size_t) regions_count; // = 0; - - -/* ---------------------------------------------------------------------------- -Utility functions ------------------------------------------------------------------------------*/ - -// Blocks (of 4MiB) needed for the given size. -static size_t mi_region_block_count(size_t size) { - return _mi_divide_up(size, MI_SEGMENT_SIZE); -} - -/* -// Return a rounded commit/reset size such that we don't fragment large OS pages into small ones. -static size_t mi_good_commit_size(size_t size) { - if (size > (SIZE_MAX - _mi_os_large_page_size())) return size; - return _mi_align_up(size, _mi_os_large_page_size()); -} -*/ - -// Return if a pointer points into a region reserved by us. 
-mi_decl_nodiscard bool mi_is_in_heap_region(const void* p) mi_attr_noexcept { - if (p==NULL) return false; - size_t count = mi_atomic_load_relaxed(®ions_count); - for (size_t i = 0; i < count; i++) { - uint8_t* start = (uint8_t*)mi_atomic_load_ptr_relaxed(uint8_t, ®ions[i].start); - if (start != NULL && (uint8_t*)p >= start && (uint8_t*)p < start + MI_REGION_SIZE) return true; - } - return false; -} - - -static void* mi_region_blocks_start(const mem_region_t* region, mi_bitmap_index_t bit_idx) { - uint8_t* start = (uint8_t*)mi_atomic_load_ptr_acquire(uint8_t, &((mem_region_t*)region)->start); - mi_assert_internal(start != NULL); - return (start + (bit_idx * MI_SEGMENT_SIZE)); -} - -static size_t mi_memid_create(mem_region_t* region, mi_bitmap_index_t bit_idx) { - mi_assert_internal(bit_idx < MI_BITMAP_FIELD_BITS); - size_t idx = region - regions; - mi_assert_internal(®ions[idx] == region); - return (idx*MI_BITMAP_FIELD_BITS + bit_idx)<<1; -} - -static size_t mi_memid_create_from_arena(size_t arena_memid) { - return (arena_memid << 1) | 1; -} - - -static bool mi_memid_is_arena(size_t id, mem_region_t** region, mi_bitmap_index_t* bit_idx, size_t* arena_memid) { - if ((id&1)==1) { - if (arena_memid != NULL) *arena_memid = (id>>1); - return true; - } - else { - size_t idx = (id >> 1) / MI_BITMAP_FIELD_BITS; - *bit_idx = (mi_bitmap_index_t)(id>>1) % MI_BITMAP_FIELD_BITS; - *region = ®ions[idx]; - return false; - } -} - - -/* ---------------------------------------------------------------------------- - Allocate a region is allocated from the OS (or an arena) ------------------------------------------------------------------------------*/ - -static bool mi_region_try_alloc_os(size_t blocks, bool commit, bool allow_large, mem_region_t** region, mi_bitmap_index_t* bit_idx, mi_os_tld_t* tld) -{ - // not out of regions yet? 
- if (mi_atomic_load_relaxed(®ions_count) >= MI_REGION_MAX - 1) return false; - - // try to allocate a fresh region from the OS - bool region_commit = (commit && mi_option_is_enabled(mi_option_eager_region_commit)); - bool region_large = (commit && allow_large); - bool is_zero = false; - bool is_pinned = false; - size_t arena_memid = 0; - void* const start = _mi_arena_alloc_aligned(MI_REGION_SIZE, MI_SEGMENT_ALIGN, ®ion_commit, ®ion_large, &is_pinned, &is_zero, _mi_arena_id_none(), & arena_memid, tld); - if (start == NULL) return false; - mi_assert_internal(!(region_large && !allow_large)); - mi_assert_internal(!region_large || region_commit); - - // claim a fresh slot - const size_t idx = mi_atomic_increment_acq_rel(®ions_count); - if (idx >= MI_REGION_MAX) { - mi_atomic_decrement_acq_rel(®ions_count); - _mi_arena_free(start, MI_REGION_SIZE, arena_memid, region_commit, tld->stats); - _mi_warning_message("maximum regions used: %zu GiB (perhaps recompile with a larger setting for MI_HEAP_REGION_MAX_SIZE)", _mi_divide_up(MI_HEAP_REGION_MAX_SIZE, MI_GiB)); - return false; - } - - // allocated, initialize and claim the initial blocks - mem_region_t* r = ®ions[idx]; - r->arena_memid = arena_memid; - mi_atomic_store_release(&r->in_use, (size_t)0); - mi_atomic_store_release(&r->dirty, (is_zero ? 0 : MI_BITMAP_FIELD_FULL)); - mi_atomic_store_release(&r->commit, (region_commit ? 
MI_BITMAP_FIELD_FULL : 0)); - mi_atomic_store_release(&r->reset, (size_t)0); - *bit_idx = 0; - _mi_bitmap_claim(&r->in_use, 1, blocks, *bit_idx, NULL); - mi_atomic_store_ptr_release(void,&r->start, start); - - // and share it - mi_region_info_t info; - info.value = 0; // initialize the full union to zero - info.x.valid = true; - info.x.is_large = region_large; - info.x.is_pinned = is_pinned; - info.x.numa_node = (short)_mi_os_numa_node(tld); - mi_atomic_store_release(&r->info, info.value); // now make it available to others - *region = r; - return true; -} - -/* ---------------------------------------------------------------------------- - Try to claim blocks in suitable regions ------------------------------------------------------------------------------*/ - -static bool mi_region_is_suitable(const mem_region_t* region, int numa_node, bool allow_large ) { - // initialized at all? - mi_region_info_t info; - info.value = mi_atomic_load_relaxed(&((mem_region_t*)region)->info); - if (info.value==0) return false; - - // numa correct - if (numa_node >= 0) { // use negative numa node to always succeed - int rnode = info.x.numa_node; - if (rnode >= 0 && rnode != numa_node) return false; - } - - // check allow-large - if (!allow_large && info.x.is_large) return false; - - return true; -} - - -static bool mi_region_try_claim(int numa_node, size_t blocks, bool allow_large, mem_region_t** region, mi_bitmap_index_t* bit_idx, mi_os_tld_t* tld) -{ - // try all regions for a free slot - const size_t count = mi_atomic_load_relaxed(®ions_count); // monotonic, so ok to be relaxed - size_t idx = tld->region_idx; // Or start at 0 to reuse low addresses? 
Starting at 0 seems to increase latency though - for (size_t visited = 0; visited < count; visited++, idx++) { - if (idx >= count) idx = 0; // wrap around - mem_region_t* r = ®ions[idx]; - // if this region suits our demand (numa node matches, large OS page matches) - if (mi_region_is_suitable(r, numa_node, allow_large)) { - // then try to atomically claim a segment(s) in this region - if (_mi_bitmap_try_find_claim_field(&r->in_use, 0, blocks, bit_idx)) { - tld->region_idx = idx; // remember the last found position - *region = r; - return true; - } - } - } - return false; -} - - -static void* mi_region_try_alloc(size_t blocks, bool* commit, bool* large, bool* is_pinned, bool* is_zero, size_t* memid, mi_os_tld_t* tld) -{ - mi_assert_internal(blocks <= MI_BITMAP_FIELD_BITS); - mem_region_t* region; - mi_bitmap_index_t bit_idx; - const int numa_node = (_mi_os_numa_node_count() <= 1 ? -1 : _mi_os_numa_node(tld)); - // try to claim in existing regions - if (!mi_region_try_claim(numa_node, blocks, *large, ®ion, &bit_idx, tld)) { - // otherwise try to allocate a fresh region and claim in there - if (!mi_region_try_alloc_os(blocks, *commit, *large, ®ion, &bit_idx, tld)) { - // out of regions or memory - return NULL; - } - } - - // ------------------------------------------------ - // found a region and claimed `blocks` at `bit_idx`, initialize them now - mi_assert_internal(region != NULL); - mi_assert_internal(_mi_bitmap_is_claimed(®ion->in_use, 1, blocks, bit_idx)); - - mi_region_info_t info; - info.value = mi_atomic_load_acquire(®ion->info); - uint8_t* start = (uint8_t*)mi_atomic_load_ptr_acquire(uint8_t,®ion->start); - mi_assert_internal(!(info.x.is_large && !*large)); - mi_assert_internal(start != NULL); - - *is_zero = _mi_bitmap_claim(®ion->dirty, 1, blocks, bit_idx, NULL); - *large = info.x.is_large; - *is_pinned = info.x.is_pinned; - *memid = mi_memid_create(region, bit_idx); - void* p = start + (mi_bitmap_index_bit_in_field(bit_idx) * MI_SEGMENT_SIZE); - - // 
commit - if (*commit) { - // ensure commit - bool any_uncommitted; - _mi_bitmap_claim(®ion->commit, 1, blocks, bit_idx, &any_uncommitted); - if (any_uncommitted) { - mi_assert_internal(!info.x.is_large && !info.x.is_pinned); - bool commit_zero = false; - if (!_mi_mem_commit(p, blocks * MI_SEGMENT_SIZE, &commit_zero, tld)) { - // failed to commit! unclaim and return - mi_bitmap_unclaim(®ion->in_use, 1, blocks, bit_idx); - return NULL; - } - if (commit_zero) *is_zero = true; - } - } - else { - // no need to commit, but check if already fully committed - *commit = _mi_bitmap_is_claimed(®ion->commit, 1, blocks, bit_idx); - } - mi_assert_internal(!*commit || _mi_bitmap_is_claimed(®ion->commit, 1, blocks, bit_idx)); - - // unreset reset blocks - if (_mi_bitmap_is_any_claimed(®ion->reset, 1, blocks, bit_idx)) { - // some blocks are still reset - mi_assert_internal(!info.x.is_large && !info.x.is_pinned); - mi_assert_internal(!mi_option_is_enabled(mi_option_eager_commit) || *commit || mi_option_get(mi_option_eager_commit_delay) > 0); - mi_bitmap_unclaim(®ion->reset, 1, blocks, bit_idx); - if (*commit || !mi_option_is_enabled(mi_option_reset_decommits)) { // only if needed - bool reset_zero = false; - _mi_mem_unreset(p, blocks * MI_SEGMENT_SIZE, &reset_zero, tld); - if (reset_zero) *is_zero = true; - } - } - mi_assert_internal(!_mi_bitmap_is_any_claimed(®ion->reset, 1, blocks, bit_idx)); - - #if (MI_DEBUG>=2) && !MI_TRACK_ENABLED - if (*commit) { ((uint8_t*)p)[0] = 0; } - #endif - - // and return the allocation - mi_assert_internal(p != NULL); - return p; -} - - -/* ---------------------------------------------------------------------------- - Allocation ------------------------------------------------------------------------------*/ - -// Allocate `size` memory aligned at `alignment`. Return non NULL on success, with a given memory `id`. 
-// (`id` is abstract, but `id = idx*MI_REGION_MAP_BITS + bitidx`) -void* _mi_mem_alloc_aligned(size_t size, size_t alignment, bool* commit, bool* large, bool* is_pinned, bool* is_zero, size_t* memid, mi_os_tld_t* tld) -{ - mi_assert_internal(memid != NULL && tld != NULL); - mi_assert_internal(size > 0); - *memid = 0; - *is_zero = false; - *is_pinned = false; - bool default_large = false; - if (large==NULL) large = &default_large; // ensure `large != NULL` - if (size == 0) return NULL; - size = _mi_align_up(size, _mi_os_page_size()); - - // allocate from regions if possible - void* p = NULL; - size_t arena_memid; - const size_t blocks = mi_region_block_count(size); - if (blocks <= MI_REGION_MAX_OBJ_BLOCKS && alignment <= MI_SEGMENT_ALIGN) { - p = mi_region_try_alloc(blocks, commit, large, is_pinned, is_zero, memid, tld); - if (p == NULL) { - _mi_warning_message("unable to allocate from region: size %zu\n", size); - } - } - if (p == NULL) { - // and otherwise fall back to the OS - p = _mi_arena_alloc_aligned(size, alignment, commit, large, is_pinned, is_zero, _mi_arena_id_none(), & arena_memid, tld); - *memid = mi_memid_create_from_arena(arena_memid); - } - - if (p != NULL) { - mi_assert_internal((uintptr_t)p % alignment == 0); - #if (MI_DEBUG>=2) && !MI_TRACK_ENABLED - if (*commit) { ((uint8_t*)p)[0] = 0; } // ensure the memory is committed - #endif - } - return p; -} - - - -/* ---------------------------------------------------------------------------- -Free ------------------------------------------------------------------------------*/ - -// Free previously allocated memory with a given id. 
-void _mi_mem_free(void* p, size_t size, size_t id, bool full_commit, bool any_reset, mi_os_tld_t* tld) { - mi_assert_internal(size > 0 && tld != NULL); - if (p==NULL) return; - if (size==0) return; - size = _mi_align_up(size, _mi_os_page_size()); - - size_t arena_memid = 0; - mi_bitmap_index_t bit_idx; - mem_region_t* region; - if (mi_memid_is_arena(id,®ion,&bit_idx,&arena_memid)) { - // was a direct arena allocation, pass through - _mi_arena_free(p, size, arena_memid, full_commit, tld->stats); - } - else { - // allocated in a region - mi_assert_internal(size <= MI_REGION_MAX_OBJ_SIZE); if (size > MI_REGION_MAX_OBJ_SIZE) return; - const size_t blocks = mi_region_block_count(size); - mi_assert_internal(blocks + bit_idx <= MI_BITMAP_FIELD_BITS); - mi_region_info_t info; - info.value = mi_atomic_load_acquire(®ion->info); - mi_assert_internal(info.value != 0); - void* blocks_start = mi_region_blocks_start(region, bit_idx); - mi_assert_internal(blocks_start == p); // not a pointer in our area? - mi_assert_internal(bit_idx + blocks <= MI_BITMAP_FIELD_BITS); - if (blocks_start != p || bit_idx + blocks > MI_BITMAP_FIELD_BITS) return; // or `abort`? - - // committed? - if (full_commit && (size % MI_SEGMENT_SIZE) == 0) { - _mi_bitmap_claim(®ion->commit, 1, blocks, bit_idx, NULL); - } - - if (any_reset) { - // set the is_reset bits if any pages were reset - _mi_bitmap_claim(®ion->reset, 1, blocks, bit_idx, NULL); - } - - // reset the blocks to reduce the working set. 
- if (!info.x.is_large && !info.x.is_pinned && mi_option_is_enabled(mi_option_segment_reset) - && (mi_option_is_enabled(mi_option_eager_commit) || - mi_option_is_enabled(mi_option_reset_decommits))) // cannot reset halfway committed segments, use only `option_page_reset` instead - { - bool any_unreset; - _mi_bitmap_claim(®ion->reset, 1, blocks, bit_idx, &any_unreset); - if (any_unreset) { - _mi_abandoned_await_readers(); // ensure no more pending write (in case reset = decommit) - _mi_mem_reset(p, blocks * MI_SEGMENT_SIZE, tld); - } - } - - // and unclaim - bool all_unclaimed = mi_bitmap_unclaim(®ion->in_use, 1, blocks, bit_idx); - mi_assert_internal(all_unclaimed); MI_UNUSED(all_unclaimed); - } -} - - -/* ---------------------------------------------------------------------------- - collection ------------------------------------------------------------------------------*/ -void _mi_mem_collect(mi_os_tld_t* tld) { - // free every region that has no segments in use. - size_t rcount = mi_atomic_load_relaxed(®ions_count); - for (size_t i = 0; i < rcount; i++) { - mem_region_t* region = ®ions[i]; - if (mi_atomic_load_relaxed(®ion->info) != 0) { - // if no segments used, try to claim the whole region - size_t m = mi_atomic_load_relaxed(®ion->in_use); - while (m == 0 && !mi_atomic_cas_weak_release(®ion->in_use, &m, MI_BITMAP_FIELD_FULL)) { /* nothing */ }; - if (m == 0) { - // on success, free the whole region - uint8_t* start = (uint8_t*)mi_atomic_load_ptr_acquire(uint8_t,®ions[i].start); - size_t arena_memid = mi_atomic_load_relaxed(®ions[i].arena_memid); - size_t commit = mi_atomic_load_relaxed(®ions[i].commit); - memset((void*)®ions[i], 0, sizeof(mem_region_t)); // cast to void* to avoid atomic warning - // and release the whole region - mi_atomic_store_release(®ion->info, (size_t)0); - if (start != NULL) { // && !_mi_os_is_huge_reserved(start)) { - _mi_abandoned_await_readers(); // ensure no pending reads - _mi_arena_free(start, MI_REGION_SIZE, arena_memid, 
(~commit == 0), tld->stats); - } - } - } - } -} - - -/* ---------------------------------------------------------------------------- - Other ------------------------------------------------------------------------------*/ - -bool _mi_mem_reset(void* p, size_t size, mi_os_tld_t* tld) { - return _mi_os_reset(p, size, tld->stats); -} - -bool _mi_mem_unreset(void* p, size_t size, bool* is_zero, mi_os_tld_t* tld) { - return _mi_os_unreset(p, size, is_zero, tld->stats); -} - -bool _mi_mem_commit(void* p, size_t size, bool* is_zero, mi_os_tld_t* tld) { - return _mi_os_commit(p, size, is_zero, tld->stats); -} - -bool _mi_mem_decommit(void* p, size_t size, mi_os_tld_t* tld) { - return _mi_os_decommit(p, size, tld->stats); -} - -bool _mi_mem_protect(void* p, size_t size) { - return _mi_os_protect(p, size); -} - -bool _mi_mem_unprotect(void* p, size_t size) { - return _mi_os_unprotect(p, size); -} diff --git a/depends/mimalloc/src/segment-cache.c b/depends/mimalloc/src/segment-cache.c deleted file mode 100644 index da726716a574..000000000000 --- a/depends/mimalloc/src/segment-cache.c +++ /dev/null @@ -1,368 +0,0 @@ -/* ---------------------------------------------------------------------------- -Copyright (c) 2020, Microsoft Research, Daan Leijen -This is free software; you can redistribute it and/or modify it under the -terms of the MIT license. A copy of the license can be found in the file -"LICENSE" at the root of this distribution. ------------------------------------------------------------------------------*/ - -/* ---------------------------------------------------------------------------- - Implements a cache of segments to avoid expensive OS calls and to reuse - the commit_mask to optimize the commit/decommit calls. - The full memory map of all segments is also implemented here. 
------------------------------------------------------------------------------*/ -#include "mimalloc.h" -#include "mimalloc-internal.h" -#include "mimalloc-atomic.h" - -#include "bitmap.h" // atomic bitmap - -//#define MI_CACHE_DISABLE 1 // define to completely disable the segment cache - -#define MI_CACHE_FIELDS (16) -#define MI_CACHE_MAX (MI_BITMAP_FIELD_BITS*MI_CACHE_FIELDS) // 1024 on 64-bit - -#define BITS_SET() MI_ATOMIC_VAR_INIT(UINTPTR_MAX) -#define MI_CACHE_BITS_SET MI_INIT16(BITS_SET) // note: update if MI_CACHE_FIELDS changes - -typedef struct mi_cache_slot_s { - void* p; - size_t memid; - bool is_pinned; - mi_commit_mask_t commit_mask; - mi_commit_mask_t decommit_mask; - _Atomic(mi_msecs_t) expire; -} mi_cache_slot_t; - -static mi_decl_cache_align mi_cache_slot_t cache[MI_CACHE_MAX]; // = 0 - -static mi_decl_cache_align mi_bitmap_field_t cache_available[MI_CACHE_FIELDS] = { MI_CACHE_BITS_SET }; // zero bit = available! -static mi_decl_cache_align mi_bitmap_field_t cache_available_large[MI_CACHE_FIELDS] = { MI_CACHE_BITS_SET }; -static mi_decl_cache_align mi_bitmap_field_t cache_inuse[MI_CACHE_FIELDS]; // zero bit = free - -static bool mi_cdecl mi_segment_cache_is_suitable(mi_bitmap_index_t bitidx, void* arg) { - mi_arena_id_t req_arena_id = *((mi_arena_id_t*)arg); - mi_cache_slot_t* slot = &cache[mi_bitmap_index_bit(bitidx)]; - return _mi_arena_memid_is_suitable(slot->memid, req_arena_id); -} - -mi_decl_noinline void* _mi_segment_cache_pop(size_t size, mi_commit_mask_t* commit_mask, mi_commit_mask_t* decommit_mask, bool* large, bool* is_pinned, bool* is_zero, mi_arena_id_t _req_arena_id, size_t* memid, mi_os_tld_t* tld) -{ -#ifdef MI_CACHE_DISABLE - return NULL; -#else - - // only segment blocks - if (size != MI_SEGMENT_SIZE) return NULL; - - // numa node determines start field - const int numa_node = _mi_os_numa_node(tld); - size_t start_field = 0; - if (numa_node > 0) { - start_field = (MI_CACHE_FIELDS / _mi_os_numa_node_count())*numa_node; - if 
(start_field >= MI_CACHE_FIELDS) start_field = 0; - } - - // find an available slot - mi_bitmap_index_t bitidx = 0; - bool claimed = false; - mi_arena_id_t req_arena_id = _req_arena_id; - mi_bitmap_pred_fun_t pred_fun = &mi_segment_cache_is_suitable; // cannot pass NULL as the arena may be exclusive itself; todo: do not put exclusive arenas in the cache? - - if (*large) { // large allowed? - claimed = _mi_bitmap_try_find_from_claim_pred(cache_available_large, MI_CACHE_FIELDS, start_field, 1, pred_fun, &req_arena_id, &bitidx); - if (claimed) *large = true; - } - if (!claimed) { - claimed = _mi_bitmap_try_find_from_claim_pred (cache_available, MI_CACHE_FIELDS, start_field, 1, pred_fun, &req_arena_id, &bitidx); - if (claimed) *large = false; - } - - if (!claimed) return NULL; - - // found a slot - mi_cache_slot_t* slot = &cache[mi_bitmap_index_bit(bitidx)]; - void* p = slot->p; - *memid = slot->memid; - *is_pinned = slot->is_pinned; - *is_zero = false; - *commit_mask = slot->commit_mask; - *decommit_mask = slot->decommit_mask; - slot->p = NULL; - mi_atomic_storei64_release(&slot->expire,(mi_msecs_t)0); - - // mark the slot as free again - mi_assert_internal(_mi_bitmap_is_claimed(cache_inuse, MI_CACHE_FIELDS, 1, bitidx)); - _mi_bitmap_unclaim(cache_inuse, MI_CACHE_FIELDS, 1, bitidx); - return p; -#endif -} - -static mi_decl_noinline void mi_commit_mask_decommit(mi_commit_mask_t* cmask, void* p, size_t total, mi_stats_t* stats) -{ - if (mi_commit_mask_is_empty(cmask)) { - // nothing - } - else if (mi_commit_mask_is_full(cmask)) { - _mi_os_decommit(p, total, stats); - } - else { - // todo: one call to decommit the whole at once? 
- mi_assert_internal((total%MI_COMMIT_MASK_BITS)==0); - size_t part = total/MI_COMMIT_MASK_BITS; - size_t idx; - size_t count; - mi_commit_mask_foreach(cmask, idx, count) { - void* start = (uint8_t*)p + (idx*part); - size_t size = count*part; - _mi_os_decommit(start, size, stats); - } - mi_commit_mask_foreach_end() - } - mi_commit_mask_create_empty(cmask); -} - -#define MI_MAX_PURGE_PER_PUSH (4) - -static mi_decl_noinline void mi_segment_cache_purge(bool force, mi_os_tld_t* tld) -{ - MI_UNUSED(tld); - if (!mi_option_is_enabled(mi_option_allow_decommit)) return; - mi_msecs_t now = _mi_clock_now(); - size_t purged = 0; - const size_t max_visits = (force ? MI_CACHE_MAX /* visit all */ : MI_CACHE_FIELDS /* probe at most N (=16) slots */); - size_t idx = (force ? 0 : _mi_random_shuffle((uintptr_t)now) % MI_CACHE_MAX /* random start */ ); - for (size_t visited = 0; visited < max_visits; visited++,idx++) { // visit N slots - if (idx >= MI_CACHE_MAX) idx = 0; // wrap - mi_cache_slot_t* slot = &cache[idx]; - mi_msecs_t expire = mi_atomic_loadi64_relaxed(&slot->expire); - if (expire != 0 && (force || now >= expire)) { // racy read - // seems expired, first claim it from available - purged++; - mi_bitmap_index_t bitidx = mi_bitmap_index_create_from_bit(idx); - if (_mi_bitmap_claim(cache_available, MI_CACHE_FIELDS, 1, bitidx, NULL)) { - // was available, we claimed it - expire = mi_atomic_loadi64_acquire(&slot->expire); - if (expire != 0 && (force || now >= expire)) { // safe read - // still expired, decommit it - mi_atomic_storei64_relaxed(&slot->expire,(mi_msecs_t)0); - mi_assert_internal(!mi_commit_mask_is_empty(&slot->commit_mask) && _mi_bitmap_is_claimed(cache_available_large, MI_CACHE_FIELDS, 1, bitidx)); - _mi_abandoned_await_readers(); // wait until safe to decommit - // decommit committed parts - // TODO: instead of decommit, we could also free to the OS? 
- mi_commit_mask_decommit(&slot->commit_mask, slot->p, MI_SEGMENT_SIZE, tld->stats); - mi_commit_mask_create_empty(&slot->decommit_mask); - } - _mi_bitmap_unclaim(cache_available, MI_CACHE_FIELDS, 1, bitidx); // make it available again for a pop - } - if (!force && purged > MI_MAX_PURGE_PER_PUSH) break; // bound to no more than N purge tries per push - } - } -} - -void _mi_segment_cache_collect(bool force, mi_os_tld_t* tld) { - mi_segment_cache_purge(force, tld ); -} - -mi_decl_noinline bool _mi_segment_cache_push(void* start, size_t size, size_t memid, const mi_commit_mask_t* commit_mask, const mi_commit_mask_t* decommit_mask, bool is_large, bool is_pinned, mi_os_tld_t* tld) -{ -#ifdef MI_CACHE_DISABLE - return false; -#else - - // only for normal segment blocks - if (size != MI_SEGMENT_SIZE || ((uintptr_t)start % MI_SEGMENT_ALIGN) != 0) return false; - - // numa node determines start field - int numa_node = _mi_os_numa_node(NULL); - size_t start_field = 0; - if (numa_node > 0) { - start_field = (MI_CACHE_FIELDS / _mi_os_numa_node_count())*numa_node; - if (start_field >= MI_CACHE_FIELDS) start_field = 0; - } - - // purge expired entries - mi_segment_cache_purge(false /* force? 
*/, tld); - - // find an available slot - mi_bitmap_index_t bitidx; - bool claimed = _mi_bitmap_try_find_from_claim(cache_inuse, MI_CACHE_FIELDS, start_field, 1, &bitidx); - if (!claimed) return false; - - mi_assert_internal(_mi_bitmap_is_claimed(cache_available, MI_CACHE_FIELDS, 1, bitidx)); - mi_assert_internal(_mi_bitmap_is_claimed(cache_available_large, MI_CACHE_FIELDS, 1, bitidx)); -#if MI_DEBUG>1 - if (is_pinned || is_large) { - mi_assert_internal(mi_commit_mask_is_full(commit_mask)); - } -#endif - - // set the slot - mi_cache_slot_t* slot = &cache[mi_bitmap_index_bit(bitidx)]; - slot->p = start; - slot->memid = memid; - slot->is_pinned = is_pinned; - mi_atomic_storei64_relaxed(&slot->expire,(mi_msecs_t)0); - slot->commit_mask = *commit_mask; - slot->decommit_mask = *decommit_mask; - if (!mi_commit_mask_is_empty(commit_mask) && !is_large && !is_pinned && mi_option_is_enabled(mi_option_allow_decommit)) { - long delay = mi_option_get(mi_option_segment_decommit_delay); - if (delay == 0) { - _mi_abandoned_await_readers(); // wait until safe to decommit - mi_commit_mask_decommit(&slot->commit_mask, start, MI_SEGMENT_SIZE, tld->stats); - mi_commit_mask_create_empty(&slot->decommit_mask); - } - else { - mi_atomic_storei64_release(&slot->expire, _mi_clock_now() + delay); - } - } - - // make it available - _mi_bitmap_unclaim((is_large ? cache_available_large : cache_available), MI_CACHE_FIELDS, 1, bitidx); - return true; -#endif -} - - -/* ----------------------------------------------------------- - The following functions are to reliably find the segment or - block that encompasses any pointer p (or NULL if it is not - in any of our segments). - We maintain a bitmap of all memory with 1 bit per MI_SEGMENT_SIZE (64MiB) - set to 1 if it contains the segment meta data. 
------------------------------------------------------------ */ - - -#if (MI_INTPTR_SIZE==8) -#define MI_MAX_ADDRESS ((size_t)20 << 40) // 20TB -#else -#define MI_MAX_ADDRESS ((size_t)2 << 30) // 2Gb -#endif - -#define MI_SEGMENT_MAP_BITS (MI_MAX_ADDRESS / MI_SEGMENT_SIZE) -#define MI_SEGMENT_MAP_SIZE (MI_SEGMENT_MAP_BITS / 8) -#define MI_SEGMENT_MAP_WSIZE (MI_SEGMENT_MAP_SIZE / MI_INTPTR_SIZE) - -static _Atomic(uintptr_t) mi_segment_map[MI_SEGMENT_MAP_WSIZE + 1]; // 2KiB per TB with 64MiB segments - -static size_t mi_segment_map_index_of(const mi_segment_t* segment, size_t* bitidx) { - mi_assert_internal(_mi_ptr_segment(segment) == segment); // is it aligned on MI_SEGMENT_SIZE? - if ((uintptr_t)segment >= MI_MAX_ADDRESS) { - *bitidx = 0; - return MI_SEGMENT_MAP_WSIZE; - } - else { - const uintptr_t segindex = ((uintptr_t)segment) / MI_SEGMENT_SIZE; - *bitidx = segindex % MI_INTPTR_BITS; - const size_t mapindex = segindex / MI_INTPTR_BITS; - mi_assert_internal(mapindex < MI_SEGMENT_MAP_WSIZE); - return mapindex; - } -} - -void _mi_segment_map_allocated_at(const mi_segment_t* segment) { - size_t bitidx; - size_t index = mi_segment_map_index_of(segment, &bitidx); - mi_assert_internal(index <= MI_SEGMENT_MAP_WSIZE); - if (index==MI_SEGMENT_MAP_WSIZE) return; - uintptr_t mask = mi_atomic_load_relaxed(&mi_segment_map[index]); - uintptr_t newmask; - do { - newmask = (mask | ((uintptr_t)1 << bitidx)); - } while (!mi_atomic_cas_weak_release(&mi_segment_map[index], &mask, newmask)); -} - -void _mi_segment_map_freed_at(const mi_segment_t* segment) { - size_t bitidx; - size_t index = mi_segment_map_index_of(segment, &bitidx); - mi_assert_internal(index <= MI_SEGMENT_MAP_WSIZE); - if (index == MI_SEGMENT_MAP_WSIZE) return; - uintptr_t mask = mi_atomic_load_relaxed(&mi_segment_map[index]); - uintptr_t newmask; - do { - newmask = (mask & ~((uintptr_t)1 << bitidx)); - } while (!mi_atomic_cas_weak_release(&mi_segment_map[index], &mask, newmask)); -} - -// Determine the segment 
belonging to a pointer or NULL if it is not in a valid segment. -static mi_segment_t* _mi_segment_of(const void* p) { - mi_segment_t* segment = _mi_ptr_segment(p); - if (segment == NULL) return NULL; - size_t bitidx; - size_t index = mi_segment_map_index_of(segment, &bitidx); - // fast path: for any pointer to valid small/medium/large object or first MI_SEGMENT_SIZE in huge - const uintptr_t mask = mi_atomic_load_relaxed(&mi_segment_map[index]); - if mi_likely((mask & ((uintptr_t)1 << bitidx)) != 0) { - return segment; // yes, allocated by us - } - if (index==MI_SEGMENT_MAP_WSIZE) return NULL; - - // TODO: maintain max/min allocated range for efficiency for more efficient rejection of invalid pointers? - - // search downwards for the first segment in case it is an interior pointer - // could be slow but searches in MI_INTPTR_SIZE * MI_SEGMENT_SIZE (512MiB) steps trough - // valid huge objects - // note: we could maintain a lowest index to speed up the path for invalid pointers? - size_t lobitidx; - size_t loindex; - uintptr_t lobits = mask & (((uintptr_t)1 << bitidx) - 1); - if (lobits != 0) { - loindex = index; - lobitidx = mi_bsr(lobits); // lobits != 0 - } - else if (index == 0) { - return NULL; - } - else { - mi_assert_internal(index > 0); - uintptr_t lomask = mask; - loindex = index; - do { - loindex--; - lomask = mi_atomic_load_relaxed(&mi_segment_map[loindex]); - } while (lomask != 0 && loindex > 0); - if (lomask == 0) return NULL; - lobitidx = mi_bsr(lomask); // lomask != 0 - } - mi_assert_internal(loindex < MI_SEGMENT_MAP_WSIZE); - // take difference as the addresses could be larger than the MAX_ADDRESS space. 
- size_t diff = (((index - loindex) * (8*MI_INTPTR_SIZE)) + bitidx - lobitidx) * MI_SEGMENT_SIZE; - segment = (mi_segment_t*)((uint8_t*)segment - diff); - - if (segment == NULL) return NULL; - mi_assert_internal((void*)segment < p); - bool cookie_ok = (_mi_ptr_cookie(segment) == segment->cookie); - mi_assert_internal(cookie_ok); - if mi_unlikely(!cookie_ok) return NULL; - if (((uint8_t*)segment + mi_segment_size(segment)) <= (uint8_t*)p) return NULL; // outside the range - mi_assert_internal(p >= (void*)segment && (uint8_t*)p < (uint8_t*)segment + mi_segment_size(segment)); - return segment; -} - -// Is this a valid pointer in our heap? -static bool mi_is_valid_pointer(const void* p) { - return (_mi_segment_of(p) != NULL); -} - -mi_decl_nodiscard mi_decl_export bool mi_is_in_heap_region(const void* p) mi_attr_noexcept { - return mi_is_valid_pointer(p); -} - -/* -// Return the full segment range belonging to a pointer -static void* mi_segment_range_of(const void* p, size_t* size) { - mi_segment_t* segment = _mi_segment_of(p); - if (segment == NULL) { - if (size != NULL) *size = 0; - return NULL; - } - else { - if (size != NULL) *size = segment->segment_size; - return segment; - } - mi_assert_expensive(page == NULL || mi_segment_is_valid(_mi_page_segment(page),tld)); - mi_assert_internal(page == NULL || (mi_segment_page_size(_mi_page_segment(page)) - (MI_SECURE == 0 ? 0 : _mi_os_page_size())) >= block_size); - mi_reset_delayed(tld); - mi_assert_internal(page == NULL || mi_page_not_in_queue(page, tld)); - return page; -} -*/ diff --git a/depends/mimalloc/src/segment.c b/depends/mimalloc/src/segment.c deleted file mode 100644 index 2ae591fde880..000000000000 --- a/depends/mimalloc/src/segment.c +++ /dev/null @@ -1,1553 +0,0 @@ -/* ---------------------------------------------------------------------------- -Copyright (c) 2018-2020, Microsoft Research, Daan Leijen -This is free software; you can redistribute it and/or modify it under the -terms of the MIT license. 
A copy of the license can be found in the file -"LICENSE" at the root of this distribution. ------------------------------------------------------------------------------*/ -#include "mimalloc.h" -#include "mimalloc-internal.h" -#include "mimalloc-atomic.h" - -#include // memset -#include - -#define MI_PAGE_HUGE_ALIGN (256*1024) - -static void mi_segment_delayed_decommit(mi_segment_t* segment, bool force, mi_stats_t* stats); - - -// ------------------------------------------------------------------- -// commit mask -// ------------------------------------------------------------------- - -static bool mi_commit_mask_all_set(const mi_commit_mask_t* commit, const mi_commit_mask_t* cm) { - for (size_t i = 0; i < MI_COMMIT_MASK_FIELD_COUNT; i++) { - if ((commit->mask[i] & cm->mask[i]) != cm->mask[i]) return false; - } - return true; -} - -static bool mi_commit_mask_any_set(const mi_commit_mask_t* commit, const mi_commit_mask_t* cm) { - for (size_t i = 0; i < MI_COMMIT_MASK_FIELD_COUNT; i++) { - if ((commit->mask[i] & cm->mask[i]) != 0) return true; - } - return false; -} - -static void mi_commit_mask_create_intersect(const mi_commit_mask_t* commit, const mi_commit_mask_t* cm, mi_commit_mask_t* res) { - for (size_t i = 0; i < MI_COMMIT_MASK_FIELD_COUNT; i++) { - res->mask[i] = (commit->mask[i] & cm->mask[i]); - } -} - -static void mi_commit_mask_clear(mi_commit_mask_t* res, const mi_commit_mask_t* cm) { - for (size_t i = 0; i < MI_COMMIT_MASK_FIELD_COUNT; i++) { - res->mask[i] &= ~(cm->mask[i]); - } -} - -static void mi_commit_mask_set(mi_commit_mask_t* res, const mi_commit_mask_t* cm) { - for (size_t i = 0; i < MI_COMMIT_MASK_FIELD_COUNT; i++) { - res->mask[i] |= cm->mask[i]; - } -} - -static void mi_commit_mask_create(size_t bitidx, size_t bitcount, mi_commit_mask_t* cm) { - mi_assert_internal(bitidx < MI_COMMIT_MASK_BITS); - mi_assert_internal((bitidx + bitcount) <= MI_COMMIT_MASK_BITS); - if (bitcount == MI_COMMIT_MASK_BITS) { - mi_assert_internal(bitidx==0); - 
mi_commit_mask_create_full(cm); - } - else if (bitcount == 0) { - mi_commit_mask_create_empty(cm); - } - else { - mi_commit_mask_create_empty(cm); - size_t i = bitidx / MI_COMMIT_MASK_FIELD_BITS; - size_t ofs = bitidx % MI_COMMIT_MASK_FIELD_BITS; - while (bitcount > 0) { - mi_assert_internal(i < MI_COMMIT_MASK_FIELD_COUNT); - size_t avail = MI_COMMIT_MASK_FIELD_BITS - ofs; - size_t count = (bitcount > avail ? avail : bitcount); - size_t mask = (count >= MI_COMMIT_MASK_FIELD_BITS ? ~((size_t)0) : (((size_t)1 << count) - 1) << ofs); - cm->mask[i] = mask; - bitcount -= count; - ofs = 0; - i++; - } - } -} - -size_t _mi_commit_mask_committed_size(const mi_commit_mask_t* cm, size_t total) { - mi_assert_internal((total%MI_COMMIT_MASK_BITS)==0); - size_t count = 0; - for (size_t i = 0; i < MI_COMMIT_MASK_FIELD_COUNT; i++) { - size_t mask = cm->mask[i]; - if (~mask == 0) { - count += MI_COMMIT_MASK_FIELD_BITS; - } - else { - for (; mask != 0; mask >>= 1) { // todo: use popcount - if ((mask&1)!=0) count++; - } - } - } - // we use total since for huge segments each commit bit may represent a larger size - return ((total / MI_COMMIT_MASK_BITS) * count); -} - - -size_t _mi_commit_mask_next_run(const mi_commit_mask_t* cm, size_t* idx) { - size_t i = (*idx) / MI_COMMIT_MASK_FIELD_BITS; - size_t ofs = (*idx) % MI_COMMIT_MASK_FIELD_BITS; - size_t mask = 0; - // find first ones - while (i < MI_COMMIT_MASK_FIELD_COUNT) { - mask = cm->mask[i]; - mask >>= ofs; - if (mask != 0) { - while ((mask&1) == 0) { - mask >>= 1; - ofs++; - } - break; - } - i++; - ofs = 0; - } - if (i >= MI_COMMIT_MASK_FIELD_COUNT) { - // not found - *idx = MI_COMMIT_MASK_BITS; - return 0; - } - else { - // found, count ones - size_t count = 0; - *idx = (i*MI_COMMIT_MASK_FIELD_BITS) + ofs; - do { - mi_assert_internal(ofs < MI_COMMIT_MASK_FIELD_BITS && (mask&1) == 1); - do { - count++; - mask >>= 1; - } while ((mask&1) == 1); - if ((((*idx + count) % MI_COMMIT_MASK_FIELD_BITS) == 0)) { - i++; - if (i >= 
MI_COMMIT_MASK_FIELD_COUNT) break; - mask = cm->mask[i]; - ofs = 0; - } - } while ((mask&1) == 1); - mi_assert_internal(count > 0); - return count; - } -} - - -/* -------------------------------------------------------------------------------- - Segment allocation - - If a thread ends, it "abandons" pages with used blocks - and there is an abandoned segment list whose segments can - be reclaimed by still running threads, much like work-stealing. --------------------------------------------------------------------------------- */ - - -/* ----------------------------------------------------------- - Slices ------------------------------------------------------------ */ - - -static const mi_slice_t* mi_segment_slices_end(const mi_segment_t* segment) { - return &segment->slices[segment->slice_entries]; -} - -static uint8_t* mi_slice_start(const mi_slice_t* slice) { - mi_segment_t* segment = _mi_ptr_segment(slice); - mi_assert_internal(slice >= segment->slices && slice < mi_segment_slices_end(segment)); - return ((uint8_t*)segment + ((slice - segment->slices)*MI_SEGMENT_SLICE_SIZE)); -} - - -/* ----------------------------------------------------------- - Bins ------------------------------------------------------------ */ -// Use bit scan forward to quickly find the first zero bit if it is available - -static inline size_t mi_slice_bin8(size_t slice_count) { - if (slice_count<=1) return slice_count; - mi_assert_internal(slice_count <= MI_SLICES_PER_SEGMENT); - slice_count--; - size_t s = mi_bsr(slice_count); // slice_count > 1 - if (s <= 2) return slice_count + 1; - size_t bin = ((s << 2) | ((slice_count >> (s - 2))&0x03)) - 4; - return bin; -} - -static inline size_t mi_slice_bin(size_t slice_count) { - mi_assert_internal(slice_count*MI_SEGMENT_SLICE_SIZE <= MI_SEGMENT_SIZE); - mi_assert_internal(mi_slice_bin8(MI_SLICES_PER_SEGMENT) <= MI_SEGMENT_BIN_MAX); - size_t bin = mi_slice_bin8(slice_count); - mi_assert_internal(bin <= MI_SEGMENT_BIN_MAX); - return bin; -} - 
-static inline size_t mi_slice_index(const mi_slice_t* slice) { - mi_segment_t* segment = _mi_ptr_segment(slice); - ptrdiff_t index = slice - segment->slices; - mi_assert_internal(index >= 0 && index < (ptrdiff_t)segment->slice_entries); - return index; -} - - -/* ----------------------------------------------------------- - Slice span queues ------------------------------------------------------------ */ - -static void mi_span_queue_push(mi_span_queue_t* sq, mi_slice_t* slice) { - // todo: or push to the end? - mi_assert_internal(slice->prev == NULL && slice->next==NULL); - slice->prev = NULL; // paranoia - slice->next = sq->first; - sq->first = slice; - if (slice->next != NULL) slice->next->prev = slice; - else sq->last = slice; - slice->xblock_size = 0; // free -} - -static mi_span_queue_t* mi_span_queue_for(size_t slice_count, mi_segments_tld_t* tld) { - size_t bin = mi_slice_bin(slice_count); - mi_span_queue_t* sq = &tld->spans[bin]; - mi_assert_internal(sq->slice_count >= slice_count); - return sq; -} - -static void mi_span_queue_delete(mi_span_queue_t* sq, mi_slice_t* slice) { - mi_assert_internal(slice->xblock_size==0 && slice->slice_count>0 && slice->slice_offset==0); - // should work too if the queue does not contain slice (which can happen during reclaim) - if (slice->prev != NULL) slice->prev->next = slice->next; - if (slice == sq->first) sq->first = slice->next; - if (slice->next != NULL) slice->next->prev = slice->prev; - if (slice == sq->last) sq->last = slice->prev; - slice->prev = NULL; - slice->next = NULL; - slice->xblock_size = 1; // no more free -} - - -/* ----------------------------------------------------------- - Invariant checking ------------------------------------------------------------ */ - -static bool mi_slice_is_used(const mi_slice_t* slice) { - return (slice->xblock_size > 0); -} - - -#if (MI_DEBUG>=3) -static bool mi_span_queue_contains(mi_span_queue_t* sq, mi_slice_t* slice) { - for (mi_slice_t* s = sq->first; s != NULL; s = 
s->next) { - if (s==slice) return true; - } - return false; -} - -static bool mi_segment_is_valid(mi_segment_t* segment, mi_segments_tld_t* tld) { - mi_assert_internal(segment != NULL); - mi_assert_internal(_mi_ptr_cookie(segment) == segment->cookie); - mi_assert_internal(segment->abandoned <= segment->used); - mi_assert_internal(segment->thread_id == 0 || segment->thread_id == _mi_thread_id()); - mi_assert_internal(mi_commit_mask_all_set(&segment->commit_mask, &segment->decommit_mask)); // can only decommit committed blocks - //mi_assert_internal(segment->segment_info_size % MI_SEGMENT_SLICE_SIZE == 0); - mi_slice_t* slice = &segment->slices[0]; - const mi_slice_t* end = mi_segment_slices_end(segment); - size_t used_count = 0; - mi_span_queue_t* sq; - while(slice < end) { - mi_assert_internal(slice->slice_count > 0); - mi_assert_internal(slice->slice_offset == 0); - size_t index = mi_slice_index(slice); - size_t maxindex = (index + slice->slice_count >= segment->slice_entries ? segment->slice_entries : index + slice->slice_count) - 1; - if (mi_slice_is_used(slice)) { // a page in use, we need at least MAX_SLICE_OFFSET valid back offsets - used_count++; - for (size_t i = 0; i <= MI_MAX_SLICE_OFFSET && index + i <= maxindex; i++) { - mi_assert_internal(segment->slices[index + i].slice_offset == i*sizeof(mi_slice_t)); - mi_assert_internal(i==0 || segment->slices[index + i].slice_count == 0); - mi_assert_internal(i==0 || segment->slices[index + i].xblock_size == 1); - } - // and the last entry as well (for coalescing) - const mi_slice_t* last = slice + slice->slice_count - 1; - if (last > slice && last < mi_segment_slices_end(segment)) { - mi_assert_internal(last->slice_offset == (slice->slice_count-1)*sizeof(mi_slice_t)); - mi_assert_internal(last->slice_count == 0); - mi_assert_internal(last->xblock_size == 1); - } - } - else { // free range of slices; only last slice needs a valid back offset - mi_slice_t* last = &segment->slices[maxindex]; - if (segment->kind != 
MI_SEGMENT_HUGE || slice->slice_count <= (segment->slice_entries - segment->segment_info_slices)) { - mi_assert_internal((uint8_t*)slice == (uint8_t*)last - last->slice_offset); - } - mi_assert_internal(slice == last || last->slice_count == 0 ); - mi_assert_internal(last->xblock_size == 0 || (segment->kind==MI_SEGMENT_HUGE && last->xblock_size==1)); - if (segment->kind != MI_SEGMENT_HUGE && segment->thread_id != 0) { // segment is not huge or abandoned - sq = mi_span_queue_for(slice->slice_count,tld); - mi_assert_internal(mi_span_queue_contains(sq,slice)); - } - } - slice = &segment->slices[maxindex+1]; - } - mi_assert_internal(slice == end); - mi_assert_internal(used_count == segment->used + 1); - return true; -} -#endif - -/* ----------------------------------------------------------- - Segment size calculations ------------------------------------------------------------ */ - -static size_t mi_segment_info_size(mi_segment_t* segment) { - return segment->segment_info_slices * MI_SEGMENT_SLICE_SIZE; -} - -static uint8_t* _mi_segment_page_start_from_slice(const mi_segment_t* segment, const mi_slice_t* slice, size_t xblock_size, size_t* page_size) -{ - ptrdiff_t idx = slice - segment->slices; - size_t psize = (size_t)slice->slice_count * MI_SEGMENT_SLICE_SIZE; - // make the start not OS page aligned for smaller blocks to avoid page/cache effects - size_t start_offset = (xblock_size >= MI_INTPTR_SIZE && xblock_size <= 1024 ? 
MI_MAX_ALIGN_GUARANTEE : 0); - if (page_size != NULL) { *page_size = psize - start_offset; } - return (uint8_t*)segment + ((idx*MI_SEGMENT_SLICE_SIZE) + start_offset); -} - -// Start of the page available memory; can be used on uninitialized pages -uint8_t* _mi_segment_page_start(const mi_segment_t* segment, const mi_page_t* page, size_t* page_size) -{ - const mi_slice_t* slice = mi_page_to_slice((mi_page_t*)page); - uint8_t* p = _mi_segment_page_start_from_slice(segment, slice, page->xblock_size, page_size); - mi_assert_internal(page->xblock_size > 0 || _mi_ptr_page(p) == page); - mi_assert_internal(_mi_ptr_segment(p) == segment); - return p; -} - - -static size_t mi_segment_calculate_slices(size_t required, size_t* pre_size, size_t* info_slices) { - size_t page_size = _mi_os_page_size(); - size_t isize = _mi_align_up(sizeof(mi_segment_t), page_size); - size_t guardsize = 0; - - if (MI_SECURE>0) { - // in secure mode, we set up a protected page in between the segment info - // and the page data (and one at the end of the segment) - guardsize = page_size; - required = _mi_align_up(required, page_size); - } - - if (pre_size != NULL) *pre_size = isize; - isize = _mi_align_up(isize + guardsize, MI_SEGMENT_SLICE_SIZE); - if (info_slices != NULL) *info_slices = isize / MI_SEGMENT_SLICE_SIZE; - size_t segment_size = (required==0 ? MI_SEGMENT_SIZE : _mi_align_up( required + isize + guardsize, MI_SEGMENT_SLICE_SIZE) ); - mi_assert_internal(segment_size % MI_SEGMENT_SLICE_SIZE == 0); - return (segment_size / MI_SEGMENT_SLICE_SIZE); -} - - -/* ---------------------------------------------------------------------------- -Segment caches -We keep a small segment cache per thread to increase local -reuse and avoid setting/clearing guard pages in secure mode. 
-------------------------------------------------------------------------------- */ - -static void mi_segments_track_size(long segment_size, mi_segments_tld_t* tld) { - if (segment_size>=0) _mi_stat_increase(&tld->stats->segments,1); - else _mi_stat_decrease(&tld->stats->segments,1); - tld->count += (segment_size >= 0 ? 1 : -1); - if (tld->count > tld->peak_count) tld->peak_count = tld->count; - tld->current_size += segment_size; - if (tld->current_size > tld->peak_size) tld->peak_size = tld->current_size; -} - -static void mi_segment_os_free(mi_segment_t* segment, mi_segments_tld_t* tld) { - segment->thread_id = 0; - _mi_segment_map_freed_at(segment); - mi_segments_track_size(-((long)mi_segment_size(segment)),tld); - if (MI_SECURE>0) { - // _mi_os_unprotect(segment, mi_segment_size(segment)); // ensure no more guard pages are set - // unprotect the guard pages; we cannot just unprotect the whole segment size as part may be decommitted - size_t os_pagesize = _mi_os_page_size(); - _mi_os_unprotect((uint8_t*)segment + mi_segment_info_size(segment) - os_pagesize, os_pagesize); - uint8_t* end = (uint8_t*)segment + mi_segment_size(segment) - os_pagesize; - _mi_os_unprotect(end, os_pagesize); - } - - // purge delayed decommits now? 
(no, leave it to the cache) - // mi_segment_delayed_decommit(segment,true,tld->stats); - - // _mi_os_free(segment, mi_segment_size(segment), /*segment->memid,*/ tld->stats); - const size_t size = mi_segment_size(segment); - if (size != MI_SEGMENT_SIZE || !_mi_segment_cache_push(segment, size, segment->memid, &segment->commit_mask, &segment->decommit_mask, segment->mem_is_large, segment->mem_is_pinned, tld->os)) { - const size_t csize = _mi_commit_mask_committed_size(&segment->commit_mask, size); - if (csize > 0 && !segment->mem_is_pinned) _mi_stat_decrease(&_mi_stats_main.committed, csize); - _mi_abandoned_await_readers(); // wait until safe to free - _mi_arena_free(segment, mi_segment_size(segment), segment->memid, segment->mem_is_pinned /* pretend not committed to not double count decommits */, tld->os); - } -} - -// called by threads that are terminating -void _mi_segment_thread_collect(mi_segments_tld_t* tld) { - MI_UNUSED(tld); - // nothing to do -} - - -/* ----------------------------------------------------------- - Span management ------------------------------------------------------------ */ - -static void mi_segment_commit_mask(mi_segment_t* segment, bool conservative, uint8_t* p, size_t size, uint8_t** start_p, size_t* full_size, mi_commit_mask_t* cm) { - mi_assert_internal(_mi_ptr_segment(p) == segment); - mi_assert_internal(segment->kind != MI_SEGMENT_HUGE); - mi_commit_mask_create_empty(cm); - if (size == 0 || size > MI_SEGMENT_SIZE || segment->kind == MI_SEGMENT_HUGE) return; - const size_t segstart = mi_segment_info_size(segment); - const size_t segsize = mi_segment_size(segment); - if (p >= (uint8_t*)segment + segsize) return; - - size_t pstart = (p - (uint8_t*)segment); - mi_assert_internal(pstart + size <= segsize); - - size_t start; - size_t end; - if (conservative) { - // decommit conservative - start = _mi_align_up(pstart, MI_COMMIT_SIZE); - end = _mi_align_down(pstart + size, MI_COMMIT_SIZE); - mi_assert_internal(start >= segstart); - 
mi_assert_internal(end <= segsize); - } - else { - // commit liberal - start = _mi_align_down(pstart, MI_MINIMAL_COMMIT_SIZE); - end = _mi_align_up(pstart + size, MI_MINIMAL_COMMIT_SIZE); - } - if (pstart >= segstart && start < segstart) { // note: the mask is also calculated for an initial commit of the info area - start = segstart; - } - if (end > segsize) { - end = segsize; - } - - mi_assert_internal(start <= pstart && (pstart + size) <= end); - mi_assert_internal(start % MI_COMMIT_SIZE==0 && end % MI_COMMIT_SIZE == 0); - *start_p = (uint8_t*)segment + start; - *full_size = (end > start ? end - start : 0); - if (*full_size == 0) return; - - size_t bitidx = start / MI_COMMIT_SIZE; - mi_assert_internal(bitidx < MI_COMMIT_MASK_BITS); - - size_t bitcount = *full_size / MI_COMMIT_SIZE; // can be 0 - if (bitidx + bitcount > MI_COMMIT_MASK_BITS) { - _mi_warning_message("commit mask overflow: idx=%zu count=%zu start=%zx end=%zx p=0x%p size=%zu fullsize=%zu\n", bitidx, bitcount, start, end, p, size, *full_size); - } - mi_assert_internal((bitidx + bitcount) <= MI_COMMIT_MASK_BITS); - mi_commit_mask_create(bitidx, bitcount, cm); -} - - -static bool mi_segment_commitx(mi_segment_t* segment, bool commit, uint8_t* p, size_t size, mi_stats_t* stats) { - mi_assert_internal(mi_commit_mask_all_set(&segment->commit_mask, &segment->decommit_mask)); - - // try to commit in at least MI_MINIMAL_COMMIT_SIZE sizes. 
- /* - if (commit && size > 0) { - const size_t csize = _mi_align_up(size, MI_MINIMAL_COMMIT_SIZE); - if (p + csize <= mi_segment_end(segment)) { - size = csize; - } - } - */ - // commit liberal, but decommit conservative - uint8_t* start = NULL; - size_t full_size = 0; - mi_commit_mask_t mask; - mi_segment_commit_mask(segment, !commit/*conservative*/, p, size, &start, &full_size, &mask); - if (mi_commit_mask_is_empty(&mask) || full_size==0) return true; - - if (commit && !mi_commit_mask_all_set(&segment->commit_mask, &mask)) { - bool is_zero = false; - mi_commit_mask_t cmask; - mi_commit_mask_create_intersect(&segment->commit_mask, &mask, &cmask); - _mi_stat_decrease(&_mi_stats_main.committed, _mi_commit_mask_committed_size(&cmask, MI_SEGMENT_SIZE)); // adjust for overlap - if (!_mi_os_commit(start,full_size,&is_zero,stats)) return false; - mi_commit_mask_set(&segment->commit_mask, &mask); - } - else if (!commit && mi_commit_mask_any_set(&segment->commit_mask, &mask)) { - mi_assert_internal((void*)start != (void*)segment); - //mi_assert_internal(mi_commit_mask_all_set(&segment->commit_mask, &mask)); - - mi_commit_mask_t cmask; - mi_commit_mask_create_intersect(&segment->commit_mask, &mask, &cmask); - _mi_stat_increase(&_mi_stats_main.committed, full_size - _mi_commit_mask_committed_size(&cmask, MI_SEGMENT_SIZE)); // adjust for overlap - if (segment->allow_decommit) { - _mi_os_decommit(start, full_size, stats); // ok if this fails - } - mi_commit_mask_clear(&segment->commit_mask, &mask); - } - // increase expiration of reusing part of the delayed decommit - if (commit && mi_commit_mask_any_set(&segment->decommit_mask, &mask)) { - segment->decommit_expire = _mi_clock_now() + mi_option_get(mi_option_decommit_delay); - } - // always undo delayed decommits - mi_commit_mask_clear(&segment->decommit_mask, &mask); - return true; -} - -static bool mi_segment_ensure_committed(mi_segment_t* segment, uint8_t* p, size_t size, mi_stats_t* stats) { - 
mi_assert_internal(mi_commit_mask_all_set(&segment->commit_mask, &segment->decommit_mask)); - // note: assumes commit_mask is always full for huge segments as otherwise the commit mask bits can overflow - if (mi_commit_mask_is_full(&segment->commit_mask) && mi_commit_mask_is_empty(&segment->decommit_mask)) return true; // fully committed - return mi_segment_commitx(segment,true,p,size,stats); -} - -static void mi_segment_perhaps_decommit(mi_segment_t* segment, uint8_t* p, size_t size, mi_stats_t* stats) { - if (!segment->allow_decommit) return; - if (mi_option_get(mi_option_decommit_delay) == 0) { - mi_segment_commitx(segment, false, p, size, stats); - } - else { - // register for future decommit in the decommit mask - uint8_t* start = NULL; - size_t full_size = 0; - mi_commit_mask_t mask; - mi_segment_commit_mask(segment, true /*conservative*/, p, size, &start, &full_size, &mask); - if (mi_commit_mask_is_empty(&mask) || full_size==0) return; - - // update delayed commit - mi_assert_internal(segment->decommit_expire > 0 || mi_commit_mask_is_empty(&segment->decommit_mask)); - mi_commit_mask_t cmask; - mi_commit_mask_create_intersect(&segment->commit_mask, &mask, &cmask); // only decommit what is committed; span_free may try to decommit more - mi_commit_mask_set(&segment->decommit_mask, &cmask); - mi_msecs_t now = _mi_clock_now(); - if (segment->decommit_expire == 0) { - // no previous decommits, initialize now - segment->decommit_expire = now + mi_option_get(mi_option_decommit_delay); - } - else if (segment->decommit_expire <= now) { - // previous decommit mask already expired - // mi_segment_delayed_decommit(segment, true, stats); - segment->decommit_expire = now + mi_option_get(mi_option_decommit_extend_delay); // (mi_option_get(mi_option_decommit_delay) / 8); // wait a tiny bit longer in case there is a series of free's - } - else { - // previous decommit mask is not yet expired, increase the expiration by a bit. 
- segment->decommit_expire += mi_option_get(mi_option_decommit_extend_delay); - } - } -} - -static void mi_segment_delayed_decommit(mi_segment_t* segment, bool force, mi_stats_t* stats) { - if (!segment->allow_decommit || mi_commit_mask_is_empty(&segment->decommit_mask)) return; - mi_msecs_t now = _mi_clock_now(); - if (!force && now < segment->decommit_expire) return; - - mi_commit_mask_t mask = segment->decommit_mask; - segment->decommit_expire = 0; - mi_commit_mask_create_empty(&segment->decommit_mask); - - size_t idx; - size_t count; - mi_commit_mask_foreach(&mask, idx, count) { - // if found, decommit that sequence - if (count > 0) { - uint8_t* p = (uint8_t*)segment + (idx*MI_COMMIT_SIZE); - size_t size = count * MI_COMMIT_SIZE; - mi_segment_commitx(segment, false, p, size, stats); - } - } - mi_commit_mask_foreach_end() - mi_assert_internal(mi_commit_mask_is_empty(&segment->decommit_mask)); -} - - -static bool mi_segment_is_abandoned(mi_segment_t* segment) { - return (segment->thread_id == 0); -} - -// note: can be called on abandoned segments -static void mi_segment_span_free(mi_segment_t* segment, size_t slice_index, size_t slice_count, mi_segments_tld_t* tld) { - mi_assert_internal(slice_index < segment->slice_entries); - mi_span_queue_t* sq = (segment->kind == MI_SEGMENT_HUGE || mi_segment_is_abandoned(segment) - ? NULL : mi_span_queue_for(slice_count,tld)); - if (slice_count==0) slice_count = 1; - mi_assert_internal(slice_index + slice_count - 1 < segment->slice_entries); - - // set first and last slice (the intermediates can be undetermined) - mi_slice_t* slice = &segment->slices[slice_index]; - slice->slice_count = (uint32_t)slice_count; - mi_assert_internal(slice->slice_count == slice_count); // no overflow? 
- slice->slice_offset = 0; - if (slice_count > 1) { - mi_slice_t* last = &segment->slices[slice_index + slice_count - 1]; - last->slice_count = 0; - last->slice_offset = (uint32_t)(sizeof(mi_page_t)*(slice_count - 1)); - last->xblock_size = 0; - } - - // perhaps decommit - mi_segment_perhaps_decommit(segment,mi_slice_start(slice),slice_count*MI_SEGMENT_SLICE_SIZE,tld->stats); - - // and push it on the free page queue (if it was not a huge page) - if (sq != NULL) mi_span_queue_push( sq, slice ); - else slice->xblock_size = 0; // mark huge page as free anyways -} - -/* -// called from reclaim to add existing free spans -static void mi_segment_span_add_free(mi_slice_t* slice, mi_segments_tld_t* tld) { - mi_segment_t* segment = _mi_ptr_segment(slice); - mi_assert_internal(slice->xblock_size==0 && slice->slice_count>0 && slice->slice_offset==0); - size_t slice_index = mi_slice_index(slice); - mi_segment_span_free(segment,slice_index,slice->slice_count,tld); -} -*/ - -static void mi_segment_span_remove_from_queue(mi_slice_t* slice, mi_segments_tld_t* tld) { - mi_assert_internal(slice->slice_count > 0 && slice->slice_offset==0 && slice->xblock_size==0); - mi_assert_internal(_mi_ptr_segment(slice)->kind != MI_SEGMENT_HUGE); - mi_span_queue_t* sq = mi_span_queue_for(slice->slice_count, tld); - mi_span_queue_delete(sq, slice); -} - -// note: can be called on abandoned segments -static mi_slice_t* mi_segment_span_free_coalesce(mi_slice_t* slice, mi_segments_tld_t* tld) { - mi_assert_internal(slice != NULL && slice->slice_count > 0 && slice->slice_offset == 0); - mi_segment_t* segment = _mi_ptr_segment(slice); - bool is_abandoned = mi_segment_is_abandoned(segment); - - // for huge pages, just mark as free but don't add to the queues - if (segment->kind == MI_SEGMENT_HUGE) { - mi_assert_internal(segment->used == 1); // decreased right after this call in `mi_segment_page_clear` - slice->xblock_size = 0; // mark as free anyways - // we should mark the last slice `xblock_size=0` 
now to maintain invariants but we skip it to - // avoid a possible cache miss (and the segment is about to be freed) - return slice; - } - - // otherwise coalesce the span and add to the free span queues - size_t slice_count = slice->slice_count; - mi_slice_t* next = slice + slice->slice_count; - mi_assert_internal(next <= mi_segment_slices_end(segment)); - if (next < mi_segment_slices_end(segment) && next->xblock_size==0) { - // free next block -- remove it from free and merge - mi_assert_internal(next->slice_count > 0 && next->slice_offset==0); - slice_count += next->slice_count; // extend - if (!is_abandoned) { mi_segment_span_remove_from_queue(next, tld); } - } - if (slice > segment->slices) { - mi_slice_t* prev = mi_slice_first(slice - 1); - mi_assert_internal(prev >= segment->slices); - if (prev->xblock_size==0) { - // free previous slice -- remove it from free and merge - mi_assert_internal(prev->slice_count > 0 && prev->slice_offset==0); - slice_count += prev->slice_count; - if (!is_abandoned) { mi_segment_span_remove_from_queue(prev, tld); } - slice = prev; - } - } - - // and add the new free page - mi_segment_span_free(segment, mi_slice_index(slice), slice_count, tld); - return slice; -} - - -static void mi_segment_slice_split(mi_segment_t* segment, mi_slice_t* slice, size_t slice_count, mi_segments_tld_t* tld) { - mi_assert_internal(_mi_ptr_segment(slice)==segment); - mi_assert_internal(slice->slice_count >= slice_count); - mi_assert_internal(slice->xblock_size > 0); // no more in free queue - if (slice->slice_count <= slice_count) return; - mi_assert_internal(segment->kind != MI_SEGMENT_HUGE); - size_t next_index = mi_slice_index(slice) + slice_count; - size_t next_count = slice->slice_count - slice_count; - mi_segment_span_free(segment, next_index, next_count, tld); - slice->slice_count = (uint32_t)slice_count; -} - -// Note: may still return NULL if committing the memory failed -static mi_page_t* mi_segment_span_allocate(mi_segment_t* segment, size_t 
slice_index, size_t slice_count, mi_segments_tld_t* tld) { - mi_assert_internal(slice_index < segment->slice_entries); - mi_slice_t* slice = &segment->slices[slice_index]; - mi_assert_internal(slice->xblock_size==0 || slice->xblock_size==1); - - // commit before changing the slice data - if (!mi_segment_ensure_committed(segment, _mi_segment_page_start_from_slice(segment, slice, 0, NULL), slice_count * MI_SEGMENT_SLICE_SIZE, tld->stats)) { - return NULL; // commit failed! - } - - // convert the slices to a page - slice->slice_offset = 0; - slice->slice_count = (uint32_t)slice_count; - mi_assert_internal(slice->slice_count == slice_count); - const size_t bsize = slice_count * MI_SEGMENT_SLICE_SIZE; - slice->xblock_size = (uint32_t)(bsize >= MI_HUGE_BLOCK_SIZE ? MI_HUGE_BLOCK_SIZE : bsize); - mi_page_t* page = mi_slice_to_page(slice); - mi_assert_internal(mi_page_block_size(page) == bsize); - - // set slice back pointers for the first MI_MAX_SLICE_OFFSET entries - size_t extra = slice_count-1; - if (extra > MI_MAX_SLICE_OFFSET) extra = MI_MAX_SLICE_OFFSET; - if (slice_index + extra >= segment->slice_entries) extra = segment->slice_entries - slice_index - 1; // huge objects may have more slices than avaiable entries in the segment->slices - slice++; - for (size_t i = 1; i <= extra; i++, slice++) { - slice->slice_offset = (uint32_t)(sizeof(mi_slice_t)*i); - slice->slice_count = 0; - slice->xblock_size = 1; - } - - // and also for the last one (if not set already) (the last one is needed for coalescing) - // note: the cast is needed for ubsan since the index can be larger than MI_SLICES_PER_SEGMENT for huge allocations (see #543) - mi_slice_t* last = &((mi_slice_t*)segment->slices)[slice_index + slice_count - 1]; - if (last < mi_segment_slices_end(segment) && last >= slice) { - last->slice_offset = (uint32_t)(sizeof(mi_slice_t)*(slice_count-1)); - last->slice_count = 0; - last->xblock_size = 1; - } - - // and initialize the page - page->is_reset = false; - 
page->is_committed = true; - segment->used++; - return page; -} - -static mi_page_t* mi_segments_page_find_and_allocate(size_t slice_count, mi_arena_id_t req_arena_id, mi_segments_tld_t* tld) { - mi_assert_internal(slice_count*MI_SEGMENT_SLICE_SIZE <= MI_LARGE_OBJ_SIZE_MAX); - // search from best fit up - mi_span_queue_t* sq = mi_span_queue_for(slice_count, tld); - if (slice_count == 0) slice_count = 1; - while (sq <= &tld->spans[MI_SEGMENT_BIN_MAX]) { - for (mi_slice_t* slice = sq->first; slice != NULL; slice = slice->next) { - if (slice->slice_count >= slice_count) { - // found one - mi_segment_t* segment = _mi_ptr_segment(slice); - if (_mi_arena_memid_is_suitable(segment->memid, req_arena_id)) { - // found a suitable page span - mi_span_queue_delete(sq, slice); - - if (slice->slice_count > slice_count) { - mi_segment_slice_split(segment, slice, slice_count, tld); - } - mi_assert_internal(slice != NULL && slice->slice_count == slice_count && slice->xblock_size > 0); - mi_page_t* page = mi_segment_span_allocate(segment, mi_slice_index(slice), slice->slice_count, tld); - if (page == NULL) { - // commit failed; return NULL but first restore the slice - mi_segment_span_free_coalesce(slice, tld); - return NULL; - } - return page; - } - } - } - sq++; - } - // could not find a page.. - return NULL; -} - - -/* ----------------------------------------------------------- - Segment allocation ------------------------------------------------------------ */ - -// Allocate a segment from the OS aligned to `MI_SEGMENT_SIZE` . 
-static mi_segment_t* mi_segment_init(mi_segment_t* segment, size_t required, mi_arena_id_t req_arena_id, mi_segments_tld_t* tld, mi_os_tld_t* os_tld, mi_page_t** huge_page) -{ - mi_assert_internal((required==0 && huge_page==NULL) || (required>0 && huge_page != NULL)); - mi_assert_internal((segment==NULL) || (segment!=NULL && required==0)); - // calculate needed sizes first - size_t info_slices; - size_t pre_size; - const size_t segment_slices = mi_segment_calculate_slices(required, &pre_size, &info_slices); - const size_t slice_entries = (segment_slices > MI_SLICES_PER_SEGMENT ? MI_SLICES_PER_SEGMENT : segment_slices); - const size_t segment_size = segment_slices * MI_SEGMENT_SLICE_SIZE; - - // Commit eagerly only if not the first N lazy segments (to reduce impact of many threads that allocate just a little) - const bool eager_delay = (// !_mi_os_has_overcommit() && // never delay on overcommit systems - _mi_current_thread_count() > 1 && // do not delay for the first N threads - tld->count < (size_t)mi_option_get(mi_option_eager_commit_delay)); - const bool eager = !eager_delay && mi_option_is_enabled(mi_option_eager_commit); - bool commit = eager || (required > 0); - - // Try to get from our cache first - bool is_zero = false; - const bool commit_info_still_good = (segment != NULL); - mi_commit_mask_t commit_mask; - mi_commit_mask_t decommit_mask; - if (segment != NULL) { - commit_mask = segment->commit_mask; - decommit_mask = segment->decommit_mask; - } - else { - mi_commit_mask_create_empty(&commit_mask); - mi_commit_mask_create_empty(&decommit_mask); - } - if (segment==NULL) { - // Allocate the segment from the OS - bool mem_large = (!eager_delay && (MI_SECURE==0)); // only allow large OS pages once we are no longer lazy - bool is_pinned = false; - size_t memid = 0; - segment = (mi_segment_t*)_mi_segment_cache_pop(segment_size, &commit_mask, &decommit_mask, &mem_large, &is_pinned, &is_zero, req_arena_id, &memid, os_tld); - if (segment==NULL) { - segment = 
(mi_segment_t*)_mi_arena_alloc_aligned(segment_size, MI_SEGMENT_SIZE, &commit, &mem_large, &is_pinned, &is_zero, req_arena_id, &memid, os_tld); - if (segment == NULL) return NULL; // failed to allocate - if (commit) { - mi_commit_mask_create_full(&commit_mask); - } - else { - mi_commit_mask_create_empty(&commit_mask); - } - } - mi_assert_internal(segment != NULL && (uintptr_t)segment % MI_SEGMENT_SIZE == 0); - - const size_t commit_needed = _mi_divide_up(info_slices*MI_SEGMENT_SLICE_SIZE, MI_COMMIT_SIZE); - mi_assert_internal(commit_needed>0); - mi_commit_mask_t commit_needed_mask; - mi_commit_mask_create(0, commit_needed, &commit_needed_mask); - if (!mi_commit_mask_all_set(&commit_mask, &commit_needed_mask)) { - // at least commit the info slices - mi_assert_internal(commit_needed*MI_COMMIT_SIZE >= info_slices*MI_SEGMENT_SLICE_SIZE); - bool ok = _mi_os_commit(segment, commit_needed*MI_COMMIT_SIZE, &is_zero, tld->stats); - if (!ok) return NULL; // failed to commit - mi_commit_mask_set(&commit_mask, &commit_needed_mask); - } - mi_track_mem_undefined(segment,commit_needed); - segment->memid = memid; - segment->mem_is_pinned = is_pinned; - segment->mem_is_large = mem_large; - segment->mem_is_committed = mi_commit_mask_is_full(&commit_mask); - mi_segments_track_size((long)(segment_size), tld); - _mi_segment_map_allocated_at(segment); - } - - // zero the segment info? 
-- not always needed as it is zero initialized from the OS - mi_atomic_store_ptr_release(mi_segment_t, &segment->abandoned_next, NULL); // tsan - if (!is_zero) { - ptrdiff_t ofs = offsetof(mi_segment_t, next); - size_t prefix = offsetof(mi_segment_t, slices) - ofs; - memset((uint8_t*)segment+ofs, 0, prefix + sizeof(mi_slice_t)*segment_slices); - } - - if (!commit_info_still_good) { - segment->commit_mask = commit_mask; // on lazy commit, the initial part is always committed - segment->allow_decommit = (mi_option_is_enabled(mi_option_allow_decommit) && !segment->mem_is_pinned && !segment->mem_is_large); - if (segment->allow_decommit) { - segment->decommit_expire = _mi_clock_now() + mi_option_get(mi_option_decommit_delay); - segment->decommit_mask = decommit_mask; - mi_assert_internal(mi_commit_mask_all_set(&segment->commit_mask, &segment->decommit_mask)); - #if MI_DEBUG>2 - const size_t commit_needed = _mi_divide_up(info_slices*MI_SEGMENT_SLICE_SIZE, MI_COMMIT_SIZE); - mi_commit_mask_t commit_needed_mask; - mi_commit_mask_create(0, commit_needed, &commit_needed_mask); - mi_assert_internal(!mi_commit_mask_any_set(&segment->decommit_mask, &commit_needed_mask)); - #endif - } - else { - mi_assert_internal(mi_commit_mask_is_empty(&decommit_mask)); - segment->decommit_expire = 0; - mi_commit_mask_create_empty( &segment->decommit_mask ); - mi_assert_internal(mi_commit_mask_is_empty(&segment->decommit_mask)); - } - } - - - // initialize segment info - segment->segment_slices = segment_slices; - segment->segment_info_slices = info_slices; - segment->thread_id = _mi_thread_id(); - segment->cookie = _mi_ptr_cookie(segment); - segment->slice_entries = slice_entries; - segment->kind = (required == 0 ? 
MI_SEGMENT_NORMAL : MI_SEGMENT_HUGE); - - // memset(segment->slices, 0, sizeof(mi_slice_t)*(info_slices+1)); - _mi_stat_increase(&tld->stats->page_committed, mi_segment_info_size(segment)); - - // set up guard pages - size_t guard_slices = 0; - if (MI_SECURE>0) { - // in secure mode, we set up a protected page in between the segment info - // and the page data, and at the end of the segment. - size_t os_pagesize = _mi_os_page_size(); - mi_assert_internal(mi_segment_info_size(segment) - os_pagesize >= pre_size); - _mi_os_protect((uint8_t*)segment + mi_segment_info_size(segment) - os_pagesize, os_pagesize); - uint8_t* end = (uint8_t*)segment + mi_segment_size(segment) - os_pagesize; - mi_segment_ensure_committed(segment, end, os_pagesize, tld->stats); - _mi_os_protect(end, os_pagesize); - if (slice_entries == segment_slices) segment->slice_entries--; // don't use the last slice :-( - guard_slices = 1; - } - - // reserve first slices for segment info - mi_page_t* page0 = mi_segment_span_allocate(segment, 0, info_slices, tld); - mi_assert_internal(page0!=NULL); if (page0==NULL) return NULL; // cannot fail as we always commit in advance - mi_assert_internal(segment->used == 1); - segment->used = 0; // don't count our internal slices towards usage - - // initialize initial free pages - if (segment->kind == MI_SEGMENT_NORMAL) { // not a huge page - mi_assert_internal(huge_page==NULL); - mi_segment_span_free(segment, info_slices, segment->slice_entries - info_slices, tld); - } - else { - mi_assert_internal(huge_page!=NULL); - mi_assert_internal(mi_commit_mask_is_empty(&segment->decommit_mask)); - mi_assert_internal(mi_commit_mask_is_full(&segment->commit_mask)); - *huge_page = mi_segment_span_allocate(segment, info_slices, segment_slices - info_slices - guard_slices, tld); - mi_assert_internal(*huge_page != NULL); // cannot fail as we commit in advance - } - - mi_assert_expensive(mi_segment_is_valid(segment,tld)); - return segment; -} - - -// Allocate a segment from the OS 
aligned to `MI_SEGMENT_SIZE` . -static mi_segment_t* mi_segment_alloc(size_t required, mi_arena_id_t req_arena_id, mi_segments_tld_t* tld, mi_os_tld_t* os_tld, mi_page_t** huge_page) { - return mi_segment_init(NULL, required, req_arena_id, tld, os_tld, huge_page); -} - - -static void mi_segment_free(mi_segment_t* segment, bool force, mi_segments_tld_t* tld) { - MI_UNUSED(force); - mi_assert_internal(segment != NULL); - mi_assert_internal(segment->next == NULL); - mi_assert_internal(segment->used == 0); - - // Remove the free pages - mi_slice_t* slice = &segment->slices[0]; - const mi_slice_t* end = mi_segment_slices_end(segment); - size_t page_count = 0; - while (slice < end) { - mi_assert_internal(slice->slice_count > 0); - mi_assert_internal(slice->slice_offset == 0); - mi_assert_internal(mi_slice_index(slice)==0 || slice->xblock_size == 0); // no more used pages .. - if (slice->xblock_size == 0 && segment->kind != MI_SEGMENT_HUGE) { - mi_segment_span_remove_from_queue(slice, tld); - } - page_count++; - slice = slice + slice->slice_count; - } - mi_assert_internal(page_count == 2); // first page is allocated by the segment itself - - // stats - _mi_stat_decrease(&tld->stats->page_committed, mi_segment_info_size(segment)); - - // return it to the OS - mi_segment_os_free(segment, tld); -} - - -/* ----------------------------------------------------------- - Page Free ------------------------------------------------------------ */ - -static void mi_segment_abandon(mi_segment_t* segment, mi_segments_tld_t* tld); - -// note: can be called on abandoned pages -static mi_slice_t* mi_segment_page_clear(mi_page_t* page, mi_segments_tld_t* tld) { - mi_assert_internal(page->xblock_size > 0); - mi_assert_internal(mi_page_all_free(page)); - mi_segment_t* segment = _mi_ptr_segment(page); - mi_assert_internal(segment->used > 0); - - size_t inuse = page->capacity * mi_page_block_size(page); - _mi_stat_decrease(&tld->stats->page_committed, inuse); - 
_mi_stat_decrease(&tld->stats->pages, 1); - - // reset the page memory to reduce memory pressure? - if (!segment->mem_is_pinned && !page->is_reset && mi_option_is_enabled(mi_option_page_reset)) { - size_t psize; - uint8_t* start = _mi_page_start(segment, page, &psize); - page->is_reset = true; - _mi_os_reset(start, psize, tld->stats); - } - - // zero the page data, but not the segment fields - page->is_zero_init = false; - ptrdiff_t ofs = offsetof(mi_page_t, capacity); - memset((uint8_t*)page + ofs, 0, sizeof(*page) - ofs); - page->xblock_size = 1; - - // and free it - mi_slice_t* slice = mi_segment_span_free_coalesce(mi_page_to_slice(page), tld); - segment->used--; - // cannot assert segment valid as it is called during reclaim - // mi_assert_expensive(mi_segment_is_valid(segment, tld)); - return slice; -} - -void _mi_segment_page_free(mi_page_t* page, bool force, mi_segments_tld_t* tld) -{ - mi_assert(page != NULL); - - mi_segment_t* segment = _mi_page_segment(page); - mi_assert_expensive(mi_segment_is_valid(segment,tld)); - - // mark it as free now - mi_segment_page_clear(page, tld); - mi_assert_expensive(mi_segment_is_valid(segment, tld)); - - if (segment->used == 0) { - // no more used pages; remove from the free list and free the segment - mi_segment_free(segment, force, tld); - } - else if (segment->used == segment->abandoned) { - // only abandoned pages; remove from free list and abandon - mi_segment_abandon(segment,tld); - } -} - - -/* ----------------------------------------------------------- -Abandonment - -When threads terminate, they can leave segments with -live blocks (reachable through other threads). Such segments -are "abandoned" and will be reclaimed by other threads to -reuse their pages and/or free them eventually - -We maintain a global list of abandoned segments that are -reclaimed on demand. 
Since this is shared among threads -the implementation needs to avoid the A-B-A problem on -popping abandoned segments: -We use tagged pointers to avoid accidentially identifying -reused segments, much like stamped references in Java. -Secondly, we maintain a reader counter to avoid resetting -or decommitting segments that have a pending read operation. - -Note: the current implementation is one possible design; -another way might be to keep track of abandoned segments -in the arenas/segment_cache's. This would have the advantage of keeping -all concurrent code in one place and not needing to deal -with ABA issues. The drawback is that it is unclear how to -scan abandoned segments efficiently in that case as they -would be spread among all other segments in the arenas. ------------------------------------------------------------ */ - -// Use the bottom 20-bits (on 64-bit) of the aligned segment pointers -// to put in a tag that increments on update to avoid the A-B-A problem. -#define MI_TAGGED_MASK MI_SEGMENT_MASK -typedef uintptr_t mi_tagged_segment_t; - -static mi_segment_t* mi_tagged_segment_ptr(mi_tagged_segment_t ts) { - return (mi_segment_t*)(ts & ~MI_TAGGED_MASK); -} - -static mi_tagged_segment_t mi_tagged_segment(mi_segment_t* segment, mi_tagged_segment_t ts) { - mi_assert_internal(((uintptr_t)segment & MI_TAGGED_MASK) == 0); - uintptr_t tag = ((ts & MI_TAGGED_MASK) + 1) & MI_TAGGED_MASK; - return ((uintptr_t)segment | tag); -} - -// This is a list of visited abandoned pages that were full at the time. -// this list migrates to `abandoned` when that becomes NULL. The use of -// this list reduces contention and the rate at which segments are visited. 
-static mi_decl_cache_align _Atomic(mi_segment_t*) abandoned_visited; // = NULL - -// The abandoned page list (tagged as it supports pop) -static mi_decl_cache_align _Atomic(mi_tagged_segment_t) abandoned; // = NULL - -// Maintain these for debug purposes (these counts may be a bit off) -static mi_decl_cache_align _Atomic(size_t) abandoned_count; -static mi_decl_cache_align _Atomic(size_t) abandoned_visited_count; - -// We also maintain a count of current readers of the abandoned list -// in order to prevent resetting/decommitting segment memory if it might -// still be read. -static mi_decl_cache_align _Atomic(size_t) abandoned_readers; // = 0 - -// Push on the visited list -static void mi_abandoned_visited_push(mi_segment_t* segment) { - mi_assert_internal(segment->thread_id == 0); - mi_assert_internal(mi_atomic_load_ptr_relaxed(mi_segment_t,&segment->abandoned_next) == NULL); - mi_assert_internal(segment->next == NULL); - mi_assert_internal(segment->used > 0); - mi_segment_t* anext = mi_atomic_load_ptr_relaxed(mi_segment_t, &abandoned_visited); - do { - mi_atomic_store_ptr_release(mi_segment_t, &segment->abandoned_next, anext); - } while (!mi_atomic_cas_ptr_weak_release(mi_segment_t, &abandoned_visited, &anext, segment)); - mi_atomic_increment_relaxed(&abandoned_visited_count); -} - -// Move the visited list to the abandoned list. 
-static bool mi_abandoned_visited_revisit(void) -{ - // quick check if the visited list is empty - if (mi_atomic_load_ptr_relaxed(mi_segment_t, &abandoned_visited) == NULL) return false; - - // grab the whole visited list - mi_segment_t* first = mi_atomic_exchange_ptr_acq_rel(mi_segment_t, &abandoned_visited, NULL); - if (first == NULL) return false; - - // first try to swap directly if the abandoned list happens to be NULL - mi_tagged_segment_t afirst; - mi_tagged_segment_t ts = mi_atomic_load_relaxed(&abandoned); - if (mi_tagged_segment_ptr(ts)==NULL) { - size_t count = mi_atomic_load_relaxed(&abandoned_visited_count); - afirst = mi_tagged_segment(first, ts); - if (mi_atomic_cas_strong_acq_rel(&abandoned, &ts, afirst)) { - mi_atomic_add_relaxed(&abandoned_count, count); - mi_atomic_sub_relaxed(&abandoned_visited_count, count); - return true; - } - } - - // find the last element of the visited list: O(n) - mi_segment_t* last = first; - mi_segment_t* next; - while ((next = mi_atomic_load_ptr_relaxed(mi_segment_t, &last->abandoned_next)) != NULL) { - last = next; - } - - // and atomically prepend to the abandoned list - // (no need to increase the readers as we don't access the abandoned segments) - mi_tagged_segment_t anext = mi_atomic_load_relaxed(&abandoned); - size_t count; - do { - count = mi_atomic_load_relaxed(&abandoned_visited_count); - mi_atomic_store_ptr_release(mi_segment_t, &last->abandoned_next, mi_tagged_segment_ptr(anext)); - afirst = mi_tagged_segment(first, anext); - } while (!mi_atomic_cas_weak_release(&abandoned, &anext, afirst)); - mi_atomic_add_relaxed(&abandoned_count, count); - mi_atomic_sub_relaxed(&abandoned_visited_count, count); - return true; -} - -// Push on the abandoned list. 
-static void mi_abandoned_push(mi_segment_t* segment) { - mi_assert_internal(segment->thread_id == 0); - mi_assert_internal(mi_atomic_load_ptr_relaxed(mi_segment_t, &segment->abandoned_next) == NULL); - mi_assert_internal(segment->next == NULL); - mi_assert_internal(segment->used > 0); - mi_tagged_segment_t next; - mi_tagged_segment_t ts = mi_atomic_load_relaxed(&abandoned); - do { - mi_atomic_store_ptr_release(mi_segment_t, &segment->abandoned_next, mi_tagged_segment_ptr(ts)); - next = mi_tagged_segment(segment, ts); - } while (!mi_atomic_cas_weak_release(&abandoned, &ts, next)); - mi_atomic_increment_relaxed(&abandoned_count); -} - -// Wait until there are no more pending reads on segments that used to be in the abandoned list -// called for example from `arena.c` before decommitting -void _mi_abandoned_await_readers(void) { - size_t n; - do { - n = mi_atomic_load_acquire(&abandoned_readers); - if (n != 0) mi_atomic_yield(); - } while (n != 0); -} - -// Pop from the abandoned list -static mi_segment_t* mi_abandoned_pop(void) { - mi_segment_t* segment; - // Check efficiently if it is empty (or if the visited list needs to be moved) - mi_tagged_segment_t ts = mi_atomic_load_relaxed(&abandoned); - segment = mi_tagged_segment_ptr(ts); - if mi_likely(segment == NULL) { - if mi_likely(!mi_abandoned_visited_revisit()) { // try to swap in the visited list on NULL - return NULL; - } - } - - // Do a pop. We use a reader count to prevent - // a segment to be decommitted while a read is still pending, - // and a tagged pointer to prevent A-B-A link corruption. 
- // (this is called from `region.c:_mi_mem_free` for example) - mi_atomic_increment_relaxed(&abandoned_readers); // ensure no segment gets decommitted - mi_tagged_segment_t next = 0; - ts = mi_atomic_load_acquire(&abandoned); - do { - segment = mi_tagged_segment_ptr(ts); - if (segment != NULL) { - mi_segment_t* anext = mi_atomic_load_ptr_relaxed(mi_segment_t, &segment->abandoned_next); - next = mi_tagged_segment(anext, ts); // note: reads the segment's `abandoned_next` field so should not be decommitted - } - } while (segment != NULL && !mi_atomic_cas_weak_acq_rel(&abandoned, &ts, next)); - mi_atomic_decrement_relaxed(&abandoned_readers); // release reader lock - if (segment != NULL) { - mi_atomic_store_ptr_release(mi_segment_t, &segment->abandoned_next, NULL); - mi_atomic_decrement_relaxed(&abandoned_count); - } - return segment; -} - -/* ----------------------------------------------------------- - Abandon segment/page ------------------------------------------------------------ */ - -static void mi_segment_abandon(mi_segment_t* segment, mi_segments_tld_t* tld) { - mi_assert_internal(segment->used == segment->abandoned); - mi_assert_internal(segment->used > 0); - mi_assert_internal(mi_atomic_load_ptr_relaxed(mi_segment_t, &segment->abandoned_next) == NULL); - mi_assert_internal(segment->abandoned_visits == 0); - mi_assert_expensive(mi_segment_is_valid(segment,tld)); - - // remove the free pages from the free page queues - mi_slice_t* slice = &segment->slices[0]; - const mi_slice_t* end = mi_segment_slices_end(segment); - while (slice < end) { - mi_assert_internal(slice->slice_count > 0); - mi_assert_internal(slice->slice_offset == 0); - if (slice->xblock_size == 0) { // a free page - mi_segment_span_remove_from_queue(slice,tld); - slice->xblock_size = 0; // but keep it free - } - slice = slice + slice->slice_count; - } - - // perform delayed decommits - mi_segment_delayed_decommit(segment, mi_option_is_enabled(mi_option_abandoned_page_decommit) /* force? 
*/, tld->stats); - - // all pages in the segment are abandoned; add it to the abandoned list - _mi_stat_increase(&tld->stats->segments_abandoned, 1); - mi_segments_track_size(-((long)mi_segment_size(segment)), tld); - segment->thread_id = 0; - mi_atomic_store_ptr_release(mi_segment_t, &segment->abandoned_next, NULL); - segment->abandoned_visits = 1; // from 0 to 1 to signify it is abandoned - mi_abandoned_push(segment); -} - -void _mi_segment_page_abandon(mi_page_t* page, mi_segments_tld_t* tld) { - mi_assert(page != NULL); - mi_assert_internal(mi_page_thread_free_flag(page)==MI_NEVER_DELAYED_FREE); - mi_assert_internal(mi_page_heap(page) == NULL); - mi_segment_t* segment = _mi_page_segment(page); - - mi_assert_expensive(mi_segment_is_valid(segment,tld)); - segment->abandoned++; - - _mi_stat_increase(&tld->stats->pages_abandoned, 1); - mi_assert_internal(segment->abandoned <= segment->used); - if (segment->used == segment->abandoned) { - // all pages are abandoned, abandon the entire segment - mi_segment_abandon(segment, tld); - } -} - -/* ----------------------------------------------------------- - Reclaim abandoned pages ------------------------------------------------------------ */ - -static mi_slice_t* mi_slices_start_iterate(mi_segment_t* segment, const mi_slice_t** end) { - mi_slice_t* slice = &segment->slices[0]; - *end = mi_segment_slices_end(segment); - mi_assert_internal(slice->slice_count>0 && slice->xblock_size>0); // segment allocated page - slice = slice + slice->slice_count; // skip the first segment allocated page - return slice; -} - -// Possibly free pages and check if free space is available -static bool mi_segment_check_free(mi_segment_t* segment, size_t slices_needed, size_t block_size, mi_segments_tld_t* tld) -{ - mi_assert_internal(block_size < MI_HUGE_BLOCK_SIZE); - mi_assert_internal(mi_segment_is_abandoned(segment)); - bool has_page = false; - - // for all slices - const mi_slice_t* end; - mi_slice_t* slice = 
mi_slices_start_iterate(segment, &end); - while (slice < end) { - mi_assert_internal(slice->slice_count > 0); - mi_assert_internal(slice->slice_offset == 0); - if (mi_slice_is_used(slice)) { // used page - // ensure used count is up to date and collect potential concurrent frees - mi_page_t* const page = mi_slice_to_page(slice); - _mi_page_free_collect(page, false); - if (mi_page_all_free(page)) { - // if this page is all free now, free it without adding to any queues (yet) - mi_assert_internal(page->next == NULL && page->prev==NULL); - _mi_stat_decrease(&tld->stats->pages_abandoned, 1); - segment->abandoned--; - slice = mi_segment_page_clear(page, tld); // re-assign slice due to coalesce! - mi_assert_internal(!mi_slice_is_used(slice)); - if (slice->slice_count >= slices_needed) { - has_page = true; - } - } - else { - if (page->xblock_size == block_size && mi_page_has_any_available(page)) { - // a page has available free blocks of the right size - has_page = true; - } - } - } - else { - // empty span - if (slice->slice_count >= slices_needed) { - has_page = true; - } - } - slice = slice + slice->slice_count; - } - return has_page; -} - -// Reclaim an abandoned segment; returns NULL if the segment was freed -// set `right_page_reclaimed` to `true` if it reclaimed a page of the right `block_size` that was not full. 
-static mi_segment_t* mi_segment_reclaim(mi_segment_t* segment, mi_heap_t* heap, size_t requested_block_size, bool* right_page_reclaimed, mi_segments_tld_t* tld) { - mi_assert_internal(mi_atomic_load_ptr_relaxed(mi_segment_t, &segment->abandoned_next) == NULL); - mi_assert_expensive(mi_segment_is_valid(segment, tld)); - if (right_page_reclaimed != NULL) { *right_page_reclaimed = false; } - - segment->thread_id = _mi_thread_id(); - segment->abandoned_visits = 0; - mi_segments_track_size((long)mi_segment_size(segment), tld); - mi_assert_internal(segment->next == NULL); - _mi_stat_decrease(&tld->stats->segments_abandoned, 1); - - // for all slices - const mi_slice_t* end; - mi_slice_t* slice = mi_slices_start_iterate(segment, &end); - while (slice < end) { - mi_assert_internal(slice->slice_count > 0); - mi_assert_internal(slice->slice_offset == 0); - if (mi_slice_is_used(slice)) { - // in use: reclaim the page in our heap - mi_page_t* page = mi_slice_to_page(slice); - mi_assert_internal(!page->is_reset); - mi_assert_internal(page->is_committed); - mi_assert_internal(mi_page_thread_free_flag(page)==MI_NEVER_DELAYED_FREE); - mi_assert_internal(mi_page_heap(page) == NULL); - mi_assert_internal(page->next == NULL && page->prev==NULL); - _mi_stat_decrease(&tld->stats->pages_abandoned, 1); - segment->abandoned--; - // set the heap again and allow delayed free again - mi_page_set_heap(page, heap); - _mi_page_use_delayed_free(page, MI_USE_DELAYED_FREE, true); // override never (after heap is set) - _mi_page_free_collect(page, false); // ensure used count is up to date - if (mi_page_all_free(page)) { - // if everything free by now, free the page - slice = mi_segment_page_clear(page, tld); // set slice again due to coalesceing - } - else { - // otherwise reclaim it into the heap - _mi_page_reclaim(heap, page); - if (requested_block_size == page->xblock_size && mi_page_has_any_available(page)) { - if (right_page_reclaimed != NULL) { *right_page_reclaimed = true; } - } - } - } - 
else { - // the span is free, add it to our page queues - slice = mi_segment_span_free_coalesce(slice, tld); // set slice again due to coalesceing - } - mi_assert_internal(slice->slice_count>0 && slice->slice_offset==0); - slice = slice + slice->slice_count; - } - - mi_assert(segment->abandoned == 0); - if (segment->used == 0) { // due to page_clear - mi_assert_internal(right_page_reclaimed == NULL || !(*right_page_reclaimed)); - mi_segment_free(segment, false, tld); - return NULL; - } - else { - return segment; - } -} - - -void _mi_abandoned_reclaim_all(mi_heap_t* heap, mi_segments_tld_t* tld) { - mi_segment_t* segment; - while ((segment = mi_abandoned_pop()) != NULL) { - mi_segment_reclaim(segment, heap, 0, NULL, tld); - } -} - -static mi_segment_t* mi_segment_try_reclaim(mi_heap_t* heap, size_t needed_slices, size_t block_size, bool* reclaimed, mi_segments_tld_t* tld) -{ - *reclaimed = false; - mi_segment_t* segment; - long max_tries = mi_option_get_clamp(mi_option_max_segment_reclaim, 8, 1024); // limit the work to bound allocation times - while ((max_tries-- > 0) && ((segment = mi_abandoned_pop()) != NULL)) { - segment->abandoned_visits++; - // todo: an arena exclusive heap will potentially visit many abandoned unsuitable segments - // and push them into the visited list and use many tries. Perhaps we can skip non-suitable ones in a better way? - bool is_suitable = _mi_heap_memid_is_suitable(heap, segment->memid); - bool has_page = mi_segment_check_free(segment,needed_slices,block_size,tld); // try to free up pages (due to concurrent frees) - if (segment->used == 0) { - // free the segment (by forced reclaim) to make it available to other threads. - // note1: we prefer to free a segment as that might lead to reclaiming another - // segment that is still partially used. 
- // note2: we could in principle optimize this by skipping reclaim and directly - // freeing but that would violate some invariants temporarily) - mi_segment_reclaim(segment, heap, 0, NULL, tld); - } - else if (has_page && is_suitable) { - // found a large enough free span, or a page of the right block_size with free space - // we return the result of reclaim (which is usually `segment`) as it might free - // the segment due to concurrent frees (in which case `NULL` is returned). - return mi_segment_reclaim(segment, heap, block_size, reclaimed, tld); - } - else if (segment->abandoned_visits > 3 && is_suitable) { - // always reclaim on 3rd visit to limit the abandoned queue length. - mi_segment_reclaim(segment, heap, 0, NULL, tld); - } - else { - // otherwise, push on the visited list so it gets not looked at too quickly again - mi_segment_delayed_decommit(segment, true /* force? */, tld->stats); // forced decommit if needed as we may not visit soon again - mi_abandoned_visited_push(segment); - } - } - return NULL; -} - - -void _mi_abandoned_collect(mi_heap_t* heap, bool force, mi_segments_tld_t* tld) -{ - mi_segment_t* segment; - int max_tries = (force ? 16*1024 : 1024); // limit latency - if (force) { - mi_abandoned_visited_revisit(); - } - while ((max_tries-- > 0) && ((segment = mi_abandoned_pop()) != NULL)) { - mi_segment_check_free(segment,0,0,tld); // try to free up pages (due to concurrent frees) - if (segment->used == 0) { - // free the segment (by forced reclaim) to make it available to other threads. - // note: we could in principle optimize this by skipping reclaim and directly - // freeing but that would violate some invariants temporarily) - mi_segment_reclaim(segment, heap, 0, NULL, tld); - } - else { - // otherwise, decommit if needed and push on the visited list - // note: forced decommit can be expensive if many threads are destroyed/created as in mstress. 
- mi_segment_delayed_decommit(segment, force, tld->stats); - mi_abandoned_visited_push(segment); - } - } -} - -/* ----------------------------------------------------------- - Reclaim or allocate ------------------------------------------------------------ */ - -static mi_segment_t* mi_segment_reclaim_or_alloc(mi_heap_t* heap, size_t needed_slices, size_t block_size, mi_segments_tld_t* tld, mi_os_tld_t* os_tld) -{ - mi_assert_internal(block_size < MI_HUGE_BLOCK_SIZE); - mi_assert_internal(block_size <= MI_LARGE_OBJ_SIZE_MAX); - - // 1. try to reclaim an abandoned segment - bool reclaimed; - mi_segment_t* segment = mi_segment_try_reclaim(heap, needed_slices, block_size, &reclaimed, tld); - if (reclaimed) { - // reclaimed the right page right into the heap - mi_assert_internal(segment != NULL); - return NULL; // pretend out-of-memory as the page will be in the page queue of the heap with available blocks - } - else if (segment != NULL) { - // reclaimed a segment with a large enough empty span in it - return segment; - } - // 2. otherwise allocate a fresh segment - return mi_segment_alloc(0, heap->arena_id, tld, os_tld, NULL); -} - - -/* ----------------------------------------------------------- - Page allocation ------------------------------------------------------------ */ - -static mi_page_t* mi_segments_page_alloc(mi_heap_t* heap, mi_page_kind_t page_kind, size_t required, size_t block_size, mi_segments_tld_t* tld, mi_os_tld_t* os_tld) -{ - mi_assert_internal(required <= MI_LARGE_OBJ_SIZE_MAX && page_kind <= MI_PAGE_LARGE); - - // find a free page - size_t page_size = _mi_align_up(required, (required > MI_MEDIUM_PAGE_SIZE ? MI_MEDIUM_PAGE_SIZE : MI_SEGMENT_SLICE_SIZE)); - size_t slices_needed = page_size / MI_SEGMENT_SLICE_SIZE; - mi_assert_internal(slices_needed * MI_SEGMENT_SLICE_SIZE == page_size); - mi_page_t* page = mi_segments_page_find_and_allocate(slices_needed, heap->arena_id, tld); //(required <= MI_SMALL_SIZE_MAX ? 
0 : slices_needed), tld); - if (page==NULL) { - // no free page, allocate a new segment and try again - if (mi_segment_reclaim_or_alloc(heap, slices_needed, block_size, tld, os_tld) == NULL) { - // OOM or reclaimed a good page in the heap - return NULL; - } - else { - // otherwise try again - return mi_segments_page_alloc(heap, page_kind, required, block_size, tld, os_tld); - } - } - mi_assert_internal(page != NULL && page->slice_count*MI_SEGMENT_SLICE_SIZE == page_size); - mi_assert_internal(_mi_ptr_segment(page)->thread_id == _mi_thread_id()); - mi_segment_delayed_decommit(_mi_ptr_segment(page), false, tld->stats); - return page; -} - - - -/* ----------------------------------------------------------- - Huge page allocation ------------------------------------------------------------ */ - -static mi_page_t* mi_segment_huge_page_alloc(size_t size, mi_arena_id_t req_arena_id, mi_segments_tld_t* tld, mi_os_tld_t* os_tld) -{ - mi_page_t* page = NULL; - mi_segment_t* segment = mi_segment_alloc(size,req_arena_id,tld,os_tld,&page); - if (segment == NULL || page==NULL) return NULL; - mi_assert_internal(segment->used==1); - mi_assert_internal(mi_page_block_size(page) >= size); - segment->thread_id = 0; // huge segments are immediately abandoned - return page; -} - -// free huge block from another thread -void _mi_segment_huge_page_free(mi_segment_t* segment, mi_page_t* page, mi_block_t* block) { - // huge page segments are always abandoned and can be freed immediately by any thread - mi_assert_internal(segment->kind==MI_SEGMENT_HUGE); - mi_assert_internal(segment == _mi_page_segment(page)); - mi_assert_internal(mi_atomic_load_relaxed(&segment->thread_id)==0); - - // claim it and free - mi_heap_t* heap = mi_heap_get_default(); // issue #221; don't use the internal get_default_heap as we need to ensure the thread is initialized. 
- // paranoia: if this it the last reference, the cas should always succeed - size_t expected_tid = 0; - if (mi_atomic_cas_strong_acq_rel(&segment->thread_id, &expected_tid, heap->thread_id)) { - mi_block_set_next(page, block, page->free); - page->free = block; - page->used--; - page->is_zero = false; - mi_assert(page->used == 0); - mi_tld_t* tld = heap->tld; - _mi_segment_page_free(page, true, &tld->segments); - } -#if (MI_DEBUG!=0) - else { - mi_assert_internal(false); - } -#endif -} - -/* ----------------------------------------------------------- - Page allocation and free ------------------------------------------------------------ */ -mi_page_t* _mi_segment_page_alloc(mi_heap_t* heap, size_t block_size, mi_segments_tld_t* tld, mi_os_tld_t* os_tld) { - mi_page_t* page; - if (block_size <= MI_SMALL_OBJ_SIZE_MAX) { - page = mi_segments_page_alloc(heap,MI_PAGE_SMALL,block_size,block_size,tld,os_tld); - } - else if (block_size <= MI_MEDIUM_OBJ_SIZE_MAX) { - page = mi_segments_page_alloc(heap,MI_PAGE_MEDIUM,MI_MEDIUM_PAGE_SIZE,block_size,tld, os_tld); - } - else if (block_size <= MI_LARGE_OBJ_SIZE_MAX) { - page = mi_segments_page_alloc(heap,MI_PAGE_LARGE,block_size,block_size,tld, os_tld); - } - else { - page = mi_segment_huge_page_alloc(block_size,heap->arena_id,tld,os_tld); - } - mi_assert_internal(page == NULL || _mi_heap_memid_is_suitable(heap, _mi_page_segment(page)->memid)); - mi_assert_expensive(page == NULL || mi_segment_is_valid(_mi_page_segment(page),tld)); - return page; -} - - diff --git a/depends/mimalloc/src/static.c b/depends/mimalloc/src/static.c index 5b34ddbb6ce0..dd874f1697c6 100644 --- a/depends/mimalloc/src/static.c +++ b/depends/mimalloc/src/static.c @@ -14,26 +14,28 @@ terms of the MIT license. A copy of the license can be found in the file #endif #include "mimalloc.h" -#include "mimalloc-internal.h" +#include "mimalloc/internal.h" // For a static override we create a single object file // containing the whole library. 
If it is linked first // it will override all the standard library allocation // functions (on Unix's). -#include "stats.c" -#include "random.c" -#include "os.c" -#include "bitmap.c" -#include "arena.c" -#include "segment-cache.c" -#include "segment.c" -#include "page.c" -#include "heap.c" -#include "alloc.c" +#include "alloc.c" // includes alloc-override.c and free.c #include "alloc-aligned.c" #include "alloc-posix.c" -#if MI_OSX_ZONE -#include "alloc-override-osx.c" -#endif +#include "arena.c" +#include "arena-meta.c" +#include "bitmap.c" +#include "heap.c" #include "init.c" +#include "libc.c" #include "options.c" +#include "os.c" +#include "page.c" // includes page-queue.c +#include "page-map.c" +#include "random.c" +#include "stats.c" +#include "prim/prim.c" +#if MI_OSX_ZONE +#include "prim/osx/alloc-override-zone.c" +#endif diff --git a/depends/mimalloc/src/stats.c b/depends/mimalloc/src/stats.c index f82c7c67f227..4eba519a44e3 100644 --- a/depends/mimalloc/src/stats.c +++ b/depends/mimalloc/src/stats.c @@ -5,10 +5,10 @@ terms of the MIT license. A copy of the license can be found in the file "LICENSE" at the root of this distribution. -----------------------------------------------------------------------------*/ #include "mimalloc.h" -#include "mimalloc-internal.h" -#include "mimalloc-atomic.h" +#include "mimalloc/internal.h" +#include "mimalloc/atomic.h" +#include "mimalloc/prim.h" -#include // fputs, stderr #include // memset #if defined(_MSC_VER) && (_MSC_VER < 1920) @@ -19,121 +19,126 @@ terms of the MIT license. 
A copy of the license can be found in the file Statistics operations ----------------------------------------------------------- */ -static bool mi_is_in_main(void* stat) { - return ((uint8_t*)stat >= (uint8_t*)&_mi_stats_main - && (uint8_t*)stat < ((uint8_t*)&_mi_stats_main + sizeof(mi_stats_t))); +static void mi_stat_update_mt(mi_stat_count_t* stat, int64_t amount) { + if (amount == 0) return; + // add atomically + int64_t current = mi_atomic_addi64_relaxed(&stat->current, amount); + mi_atomic_maxi64_relaxed(&stat->peak, current + amount); + if (amount > 0) { + mi_atomic_addi64_relaxed(&stat->total, amount); + } } static void mi_stat_update(mi_stat_count_t* stat, int64_t amount) { if (amount == 0) return; - if (mi_is_in_main(stat)) - { - // add atomically (for abandoned pages) - int64_t current = mi_atomic_addi64_relaxed(&stat->current, amount); - mi_atomic_maxi64_relaxed(&stat->peak, current + amount); - if (amount > 0) { - mi_atomic_addi64_relaxed(&stat->allocated,amount); - } - else { - mi_atomic_addi64_relaxed(&stat->freed, -amount); - } - } - else { - // add thread local - stat->current += amount; - if (stat->current > stat->peak) stat->peak = stat->current; - if (amount > 0) { - stat->allocated += amount; - } - else { - stat->freed += -amount; - } - } + // add thread local + stat->current += amount; + if (stat->current > stat->peak) { stat->peak = stat->current; } + if (amount > 0) { stat->total += amount; } } -void _mi_stat_counter_increase(mi_stat_counter_t* stat, size_t amount) { - if (mi_is_in_main(stat)) { - mi_atomic_addi64_relaxed( &stat->count, 1 ); - mi_atomic_addi64_relaxed( &stat->total, (int64_t)amount ); - } - else { - stat->count++; - stat->total += amount; - } + +void __mi_stat_counter_increase_mt(mi_stat_counter_t* stat, size_t amount) { + mi_atomic_addi64_relaxed(&stat->total, (int64_t)amount); +} + +void __mi_stat_counter_increase(mi_stat_counter_t* stat, size_t amount) { + stat->total += amount; } -void _mi_stat_increase(mi_stat_count_t* 
stat, size_t amount) { +void __mi_stat_increase_mt(mi_stat_count_t* stat, size_t amount) { + mi_stat_update_mt(stat, (int64_t)amount); +} +void __mi_stat_increase(mi_stat_count_t* stat, size_t amount) { mi_stat_update(stat, (int64_t)amount); } -void _mi_stat_decrease(mi_stat_count_t* stat, size_t amount) { +void __mi_stat_decrease_mt(mi_stat_count_t* stat, size_t amount) { + mi_stat_update_mt(stat, -((int64_t)amount)); +} +void __mi_stat_decrease(mi_stat_count_t* stat, size_t amount) { mi_stat_update(stat, -((int64_t)amount)); } + +// Adjust stats to compensate; for example before committing a range, +// first adjust downwards with parts that were already committed so +// we avoid double counting. +static void mi_stat_adjust_mt(mi_stat_count_t* stat, int64_t amount) { + if (amount == 0) return; + // adjust atomically + mi_atomic_addi64_relaxed(&stat->current, amount); + mi_atomic_addi64_relaxed(&stat->total, amount); +} + +static void mi_stat_adjust(mi_stat_count_t* stat, int64_t amount) { + if (amount == 0) return; + stat->current += amount; + stat->total += amount; +} + +void __mi_stat_adjust_increase_mt(mi_stat_count_t* stat, size_t amount) { + mi_stat_adjust_mt(stat, (int64_t)amount); +} +void __mi_stat_adjust_increase(mi_stat_count_t* stat, size_t amount) { + mi_stat_adjust(stat, (int64_t)amount); +} +void __mi_stat_adjust_decrease_mt(mi_stat_count_t* stat, size_t amount) { + mi_stat_adjust_mt(stat, -((int64_t)amount)); +} +void __mi_stat_adjust_decrease(mi_stat_count_t* stat, size_t amount) { + mi_stat_adjust(stat, -((int64_t)amount)); +} + + // must be thread safe as it is called from stats_merge -static void mi_stat_add(mi_stat_count_t* stat, const mi_stat_count_t* src, int64_t unit) { +static void mi_stat_count_add_mt(mi_stat_count_t* stat, const mi_stat_count_t* src) { if (stat==src) return; - if (src->allocated==0 && src->freed==0) return; - mi_atomic_addi64_relaxed( &stat->allocated, src->allocated * unit); - mi_atomic_addi64_relaxed( &stat->current, 
src->current * unit); - mi_atomic_addi64_relaxed( &stat->freed, src->freed * unit); - // peak scores do not work across threads.. - mi_atomic_addi64_relaxed( &stat->peak, src->peak * unit); + mi_atomic_void_addi64_relaxed(&stat->total, &src->total); + mi_atomic_void_addi64_relaxed(&stat->current, &src->current); + // peak scores do really not work across threads .. we just add them + mi_atomic_void_addi64_relaxed( &stat->peak, &src->peak); + // or, take the max? + // mi_atomic_maxi64_relaxed(&stat->peak, src->peak); } -static void mi_stat_counter_add(mi_stat_counter_t* stat, const mi_stat_counter_t* src, int64_t unit) { +static void mi_stat_counter_add_mt(mi_stat_counter_t* stat, const mi_stat_counter_t* src) { if (stat==src) return; - mi_atomic_addi64_relaxed( &stat->total, src->total * unit); - mi_atomic_addi64_relaxed( &stat->count, src->count * unit); + mi_atomic_void_addi64_relaxed(&stat->total, &src->total); } +#define MI_STAT_COUNT(stat) mi_stat_count_add_mt(&stats->stat, &src->stat); +#define MI_STAT_COUNTER(stat) mi_stat_counter_add_mt(&stats->stat, &src->stat); + // must be thread safe as it is called from stats_merge static void mi_stats_add(mi_stats_t* stats, const mi_stats_t* src) { if (stats==src) return; - mi_stat_add(&stats->segments, &src->segments,1); - mi_stat_add(&stats->pages, &src->pages,1); - mi_stat_add(&stats->reserved, &src->reserved, 1); - mi_stat_add(&stats->committed, &src->committed, 1); - mi_stat_add(&stats->reset, &src->reset, 1); - mi_stat_add(&stats->page_committed, &src->page_committed, 1); - - mi_stat_add(&stats->pages_abandoned, &src->pages_abandoned, 1); - mi_stat_add(&stats->segments_abandoned, &src->segments_abandoned, 1); - mi_stat_add(&stats->threads, &src->threads, 1); - - mi_stat_add(&stats->malloc, &src->malloc, 1); - mi_stat_add(&stats->segments_cache, &src->segments_cache, 1); - mi_stat_add(&stats->normal, &src->normal, 1); - mi_stat_add(&stats->huge, &src->huge, 1); - mi_stat_add(&stats->large, &src->large, 1); - - 
mi_stat_counter_add(&stats->pages_extended, &src->pages_extended, 1); - mi_stat_counter_add(&stats->mmap_calls, &src->mmap_calls, 1); - mi_stat_counter_add(&stats->commit_calls, &src->commit_calls, 1); - - mi_stat_counter_add(&stats->page_no_retire, &src->page_no_retire, 1); - mi_stat_counter_add(&stats->searches, &src->searches, 1); - mi_stat_counter_add(&stats->normal_count, &src->normal_count, 1); - mi_stat_counter_add(&stats->huge_count, &src->huge_count, 1); - mi_stat_counter_add(&stats->large_count, &src->large_count, 1); -#if MI_STAT>1 + + // copy all fields + MI_STAT_FIELDS() + + #if MI_STAT>1 for (size_t i = 0; i <= MI_BIN_HUGE; i++) { - if (src->normal_bins[i].allocated > 0 || src->normal_bins[i].freed > 0) { - mi_stat_add(&stats->normal_bins[i], &src->normal_bins[i], 1); - } + mi_stat_count_add_mt(&stats->malloc_bins[i], &src->malloc_bins[i]); + } + #endif + for (size_t i = 0; i <= MI_BIN_HUGE; i++) { + mi_stat_count_add_mt(&stats->page_bins[i], &src->page_bins[i]); } -#endif } +#undef MI_STAT_COUNT +#undef MI_STAT_COUNTER + /* ----------------------------------------------------------- Display statistics ----------------------------------------------------------- */ -// unit > 0 : size in binary bytes +// unit > 0 : size in binary bytes // unit == 0: count as decimal // unit < 0 : count in binary static void mi_printf_amount(int64_t n, int64_t unit, mi_output_fun* out, void* arg, const char* fmt) { - char buf[32]; buf[0] = 0; + char buf[32]; _mi_memzero_var(buf); int len = 32; const char* suffix = (unit <= 0 ? " " : "B"); const int64_t base = (unit == 0 ? 1000 : 1024); @@ -142,11 +147,11 @@ static void mi_printf_amount(int64_t n, int64_t unit, mi_output_fun* out, void* const int64_t pos = (n < 0 ? -n : n); if (pos < base) { if (n!=1 || suffix[0] != 'B') { // skip printing 1 B for the unit column - snprintf(buf, len, "%d %-3s", (int)n, (n==0 ? "" : suffix)); + _mi_snprintf(buf, len, "%lld %-3s", (long long)n, (n==0 ? 
"" : suffix)); } } else { - int64_t divider = base; + int64_t divider = base; const char* magnitude = "K"; if (pos >= divider*base) { divider *= base; magnitude = "M"; } if (pos >= divider*base) { divider *= base; magnitude = "G"; } @@ -154,10 +159,10 @@ static void mi_printf_amount(int64_t n, int64_t unit, mi_output_fun* out, void* const long whole = (long)(tens/10); const long frac1 = (long)(tens%10); char unitdesc[8]; - snprintf(unitdesc, 8, "%s%s%s", magnitude, (base==1024 ? "i" : ""), suffix); - snprintf(buf, len, "%ld.%ld %-3s", whole, (frac1 < 0 ? -frac1 : frac1), unitdesc); + _mi_snprintf(unitdesc, 8, "%s%s%s", magnitude, (base==1024 ? "i" : ""), suffix); + _mi_snprintf(buf, len, "%ld.%ld %-3s", whole, (frac1 < 0 ? -frac1 : frac1), unitdesc); } - _mi_fprintf(out, arg, (fmt==NULL ? "%11s" : fmt), buf); + _mi_fprintf(out, arg, (fmt==NULL ? "%12s" : fmt), buf); } @@ -166,58 +171,80 @@ static void mi_print_amount(int64_t n, int64_t unit, mi_output_fun* out, void* a } static void mi_print_count(int64_t n, int64_t unit, mi_output_fun* out, void* arg) { - if (unit==1) _mi_fprintf(out, arg, "%11s"," "); + if (unit==1) _mi_fprintf(out, arg, "%12s"," "); else mi_print_amount(n,0,out,arg); } -static void mi_stat_print(const mi_stat_count_t* stat, const char* msg, int64_t unit, mi_output_fun* out, void* arg ) { +static void mi_stat_print_ex(const mi_stat_count_t* stat, const char* msg, int64_t unit, mi_output_fun* out, void* arg, const char* notok ) { _mi_fprintf(out, arg,"%10s:", msg); - if (unit>0) { - mi_print_amount(stat->peak, unit, out, arg); - mi_print_amount(stat->allocated, unit, out, arg); - mi_print_amount(stat->freed, unit, out, arg); - mi_print_amount(stat->current, unit, out, arg); - mi_print_amount(unit, 1, out, arg); - mi_print_count(stat->allocated, unit, out, arg); - if (stat->allocated > stat->freed) - _mi_fprintf(out, arg, " not all freed!\n"); - else - _mi_fprintf(out, arg, " ok\n"); - } - else if (unit<0) { - mi_print_amount(stat->peak, -1, out, 
arg); - mi_print_amount(stat->allocated, -1, out, arg); - mi_print_amount(stat->freed, -1, out, arg); - mi_print_amount(stat->current, -1, out, arg); - if (unit==-1) { - _mi_fprintf(out, arg, "%22s", ""); + if (unit != 0) { + if (unit > 0) { + mi_print_amount(stat->peak, unit, out, arg); + mi_print_amount(stat->total, unit, out, arg); + // mi_print_amount(stat->freed, unit, out, arg); + mi_print_amount(stat->current, unit, out, arg); + mi_print_amount(unit, 1, out, arg); + mi_print_count(stat->total, unit, out, arg); } else { - mi_print_amount(-unit, 1, out, arg); - mi_print_count((stat->allocated / -unit), 0, out, arg); + mi_print_amount(stat->peak, -1, out, arg); + mi_print_amount(stat->total, -1, out, arg); + // mi_print_amount(stat->freed, -1, out, arg); + mi_print_amount(stat->current, -1, out, arg); + if (unit == -1) { + _mi_fprintf(out, arg, "%24s", ""); + } + else { + mi_print_amount(-unit, 1, out, arg); + mi_print_count((stat->total / -unit), 0, out, arg); + } } - if (stat->allocated > stat->freed) - _mi_fprintf(out, arg, " not all freed!\n"); - else + if (stat->current != 0) { + _mi_fprintf(out, arg, " "); + _mi_fprintf(out, arg, (notok == NULL ? 
"not all freed" : notok)); + _mi_fprintf(out, arg, "\n"); + } + else { _mi_fprintf(out, arg, " ok\n"); + } } else { mi_print_amount(stat->peak, 1, out, arg); - mi_print_amount(stat->allocated, 1, out, arg); - _mi_fprintf(out, arg, "%11s", " "); // no freed + mi_print_amount(stat->total, 1, out, arg); + _mi_fprintf(out, arg, "%11s", " "); // no freed mi_print_amount(stat->current, 1, out, arg); _mi_fprintf(out, arg, "\n"); } } +static void mi_stat_print(const mi_stat_count_t* stat, const char* msg, int64_t unit, mi_output_fun* out, void* arg) { + mi_stat_print_ex(stat, msg, unit, out, arg, NULL); +} + +static void mi_stat_peak_print(const mi_stat_count_t* stat, const char* msg, int64_t unit, mi_output_fun* out, void* arg) { + _mi_fprintf(out, arg, "%10s:", msg); + mi_print_amount(stat->peak, unit, out, arg); + _mi_fprintf(out, arg, "\n"); +} + +#if MI_STAT>1 +static void mi_stat_total_print(const mi_stat_count_t* stat, const char* msg, int64_t unit, mi_output_fun* out, void* arg) { + _mi_fprintf(out, arg, "%10s:", msg); + _mi_fprintf(out, arg, "%12s", " "); // no peak + mi_print_amount(stat->total, unit, out, arg); + _mi_fprintf(out, arg, "\n"); +} +#endif + static void mi_stat_counter_print(const mi_stat_counter_t* stat, const char* msg, mi_output_fun* out, void* arg ) { _mi_fprintf(out, arg, "%10s:", msg); mi_print_amount(stat->total, -1, out, arg); _mi_fprintf(out, arg, "\n"); } + static void mi_stat_counter_print_avg(const mi_stat_counter_t* stat, const char* msg, mi_output_fun* out, void* arg) { - const int64_t avg_tens = (stat->count == 0 ? 0 : (stat->total*10 / stat->count)); + const int64_t avg_tens = (stat->total == 0 ? 
0 : (stat->total*10 / stat->total)); const long avg_whole = (long)(avg_tens/10); const long avg_frac1 = (long)(avg_tens%10); _mi_fprintf(out, arg, "%10s: %5ld.%ld avg\n", msg, avg_whole, avg_frac1); @@ -225,7 +252,7 @@ static void mi_stat_counter_print_avg(const mi_stat_counter_t* stat, const char* static void mi_print_header(mi_output_fun* out, void* arg ) { - _mi_fprintf(out, arg, "%10s: %10s %10s %10s %10s %10s %10s\n", "heap stats", "peak ", "total ", "freed ", "current ", "unit ", "count "); + _mi_fprintf(out, arg, "%10s: %11s %11s %11s %11s %11s\n", "heap stats", "peak ", "total ", "current ", "block ", "total# "); } #if MI_STAT>1 @@ -233,10 +260,10 @@ static void mi_stats_print_bins(const mi_stat_count_t* bins, size_t max, const c bool found = false; char buf[64]; for (size_t i = 0; i <= max; i++) { - if (bins[i].allocated > 0) { + if (bins[i].total > 0) { found = true; int64_t unit = _mi_bin_size((uint8_t)i); - snprintf(buf, 64, "%s %3lu", fmt, (long)i); + _mi_snprintf(buf, 64, "%s %3lu", fmt, (long)i); mi_stat_print(&bins[i], buf, unit, out, arg); } } @@ -257,7 +284,7 @@ typedef struct buffered_s { mi_output_fun* out; // original output function void* arg; // and state char* buf; // local buffer of at least size `count+1` - size_t used; // currently used chars `used <= count` + size_t used; // currently used chars `used <= count` size_t count; // total chars available for output } buffered_t; @@ -283,11 +310,9 @@ static void mi_cdecl mi_buffered_out(const char* msg, void* arg) { // Print statistics //------------------------------------------------------------ -static void mi_stat_process_info(mi_msecs_t* elapsed, mi_msecs_t* utime, mi_msecs_t* stime, size_t* current_rss, size_t* peak_rss, size_t* current_commit, size_t* peak_commit, size_t* page_faults); - -static void _mi_stats_print(mi_stats_t* stats, mi_output_fun* out0, void* arg0) mi_attr_noexcept { +void _mi_stats_print(mi_stats_t* stats, mi_output_fun* out0, void* arg0) mi_attr_noexcept { // wrap 
the output function to be line buffered - char buf[256]; + char buf[256]; _mi_memzero_var(buf); buffered_t buffer = { out0, arg0, NULL, 0, 255 }; buffer.buf = buf; mi_output_fun* out = &mi_buffered_out; @@ -296,91 +321,115 @@ static void _mi_stats_print(mi_stats_t* stats, mi_output_fun* out0, void* arg0) // and print using that mi_print_header(out,arg); #if MI_STAT>1 - mi_stats_print_bins(stats->normal_bins, MI_BIN_HUGE, "normal",out,arg); + mi_stats_print_bins(stats->malloc_bins, MI_BIN_HUGE, "bin",out,arg); #endif #if MI_STAT - mi_stat_print(&stats->normal, "normal", (stats->normal_count.count == 0 ? 1 : -(stats->normal.allocated / stats->normal_count.count)), out, arg); - mi_stat_print(&stats->large, "large", (stats->large_count.count == 0 ? 1 : -(stats->large.allocated / stats->large_count.count)), out, arg); - mi_stat_print(&stats->huge, "huge", (stats->huge_count.count == 0 ? 1 : -(stats->huge.allocated / stats->huge_count.count)), out, arg); - mi_stat_count_t total = { 0,0,0,0 }; - mi_stat_add(&total, &stats->normal, 1); - mi_stat_add(&total, &stats->large, 1); - mi_stat_add(&total, &stats->huge, 1); - mi_stat_print(&total, "total", 1, out, arg); + mi_stat_print(&stats->malloc_normal, "binned", (stats->malloc_normal_count.total == 0 ? 1 : -1), out, arg); + mi_stat_print(&stats->malloc_huge, "huge", (stats->malloc_huge_count.total == 0 ? 
1 : -1), out, arg); + mi_stat_count_t total = { 0,0,0 }; + mi_stat_count_add_mt(&total, &stats->malloc_normal); + mi_stat_count_add_mt(&total, &stats->malloc_huge); + mi_stat_print_ex(&total, "total", 1, out, arg, ""); #endif #if MI_STAT>1 - mi_stat_print(&stats->malloc, "malloc req", 1, out, arg); + mi_stat_total_print(&stats->malloc_requested, "malloc req", 1, out, arg); _mi_fprintf(out, arg, "\n"); #endif - mi_stat_print(&stats->reserved, "reserved", 1, out, arg); - mi_stat_print(&stats->committed, "committed", 1, out, arg); - mi_stat_print(&stats->reset, "reset", 1, out, arg); - mi_stat_print(&stats->page_committed, "touched", 1, out, arg); - mi_stat_print(&stats->segments, "segments", -1, out, arg); - mi_stat_print(&stats->segments_abandoned, "-abandoned", -1, out, arg); - mi_stat_print(&stats->segments_cache, "-cached", -1, out, arg); + mi_stat_print_ex(&stats->reserved, "reserved", 1, out, arg, ""); + mi_stat_print_ex(&stats->committed, "committed", 1, out, arg, ""); + mi_stat_peak_print(&stats->reset, "reset", 1, out, arg ); + mi_stat_peak_print(&stats->purged, "purged", 1, out, arg ); + mi_stat_print_ex(&stats->page_committed, "touched", 1, out, arg, ""); + // mi_stat_print(&stats->segments, "segments", -1, out, arg); + // mi_stat_print(&stats->segments_abandoned, "-abandoned", -1, out, arg); + // mi_stat_print(&stats->segments_cache, "-cached", -1, out, arg); mi_stat_print(&stats->pages, "pages", -1, out, arg); mi_stat_print(&stats->pages_abandoned, "-abandoned", -1, out, arg); + mi_stat_counter_print(&stats->pages_reclaim_on_alloc, "-reclaima", out, arg); + mi_stat_counter_print(&stats->pages_reclaim_on_free, "-reclaimf", out, arg); + mi_stat_counter_print(&stats->pages_reabandon_full, "-reabandon", out, arg); + mi_stat_counter_print(&stats->pages_unabandon_busy_wait, "-waits", out, arg); mi_stat_counter_print(&stats->pages_extended, "-extended", out, arg); - mi_stat_counter_print(&stats->page_no_retire, "-noretire", out, arg); + 
mi_stat_counter_print(&stats->pages_retire, "-retire", out, arg); + mi_stat_counter_print(&stats->arena_count, "arenas", out, arg); + // mi_stat_counter_print(&stats->arena_crossover_count, "-crossover", out, arg); + // mi_stat_counter_print(&stats->arena_purges, "-purges", out, arg); + mi_stat_counter_print(&stats->arena_rollback_count, "-rollback", out, arg); mi_stat_counter_print(&stats->mmap_calls, "mmaps", out, arg); mi_stat_counter_print(&stats->commit_calls, "commits", out, arg); + mi_stat_counter_print(&stats->reset_calls, "resets", out, arg); + mi_stat_counter_print(&stats->purge_calls, "purges", out, arg); + mi_stat_counter_print(&stats->malloc_guarded_count, "guarded", out, arg); mi_stat_print(&stats->threads, "threads", -1, out, arg); - mi_stat_counter_print_avg(&stats->searches, "searches", out, arg); - _mi_fprintf(out, arg, "%10s: %7zu\n", "numa nodes", _mi_os_numa_node_count()); - - mi_msecs_t elapsed; - mi_msecs_t user_time; - mi_msecs_t sys_time; + mi_stat_counter_print_avg(&stats->page_searches, "searches", out, arg); + _mi_fprintf(out, arg, "%10s: %5i\n", "numa nodes", _mi_os_numa_node_count()); + + size_t elapsed; + size_t user_time; + size_t sys_time; size_t current_rss; size_t peak_rss; size_t current_commit; size_t peak_commit; size_t page_faults; - mi_stat_process_info(&elapsed, &user_time, &sys_time, ¤t_rss, &peak_rss, ¤t_commit, &peak_commit, &page_faults); - _mi_fprintf(out, arg, "%10s: %7ld.%03ld s\n", "elapsed", elapsed/1000, elapsed%1000); - _mi_fprintf(out, arg, "%10s: user: %ld.%03ld s, system: %ld.%03ld s, faults: %lu, rss: ", "process", - user_time/1000, user_time%1000, sys_time/1000, sys_time%1000, (unsigned long)page_faults ); + mi_process_info(&elapsed, &user_time, &sys_time, ¤t_rss, &peak_rss, ¤t_commit, &peak_commit, &page_faults); + _mi_fprintf(out, arg, "%10s: %5zu.%03zu s\n", "elapsed", elapsed/1000, elapsed%1000); + _mi_fprintf(out, arg, "%10s: user: %zu.%03zu s, system: %zu.%03zu s, faults: %zu, rss: ", "process", + 
user_time/1000, user_time%1000, sys_time/1000, sys_time%1000, page_faults ); mi_printf_amount((int64_t)peak_rss, 1, out, arg, "%s"); if (peak_commit > 0) { _mi_fprintf(out, arg, ", commit: "); mi_printf_amount((int64_t)peak_commit, 1, out, arg, "%s"); } - _mi_fprintf(out, arg, "\n"); + _mi_fprintf(out, arg, "\n"); } static mi_msecs_t mi_process_start; // = 0 -static mi_stats_t* mi_stats_get_default(void) { - mi_heap_t* heap = mi_heap_get_default(); - return &heap->tld->stats; +// called on process init +void _mi_stats_init(void) { + if (mi_process_start == 0) { mi_process_start = _mi_clock_start(); }; } -static void mi_stats_merge_from(mi_stats_t* stats) { - if (stats != &_mi_stats_main) { - mi_stats_add(&_mi_stats_main, stats); - memset(stats, 0, sizeof(mi_stats_t)); - } + +// return thread local stats +static mi_stats_t* mi_get_tld_stats(void) { + return &_mi_thread_tld()->stats; } void mi_stats_reset(void) mi_attr_noexcept { - mi_stats_t* stats = mi_stats_get_default(); - if (stats != &_mi_stats_main) { memset(stats, 0, sizeof(mi_stats_t)); } - memset(&_mi_stats_main, 0, sizeof(mi_stats_t)); - if (mi_process_start == 0) { mi_process_start = _mi_clock_start(); }; + mi_stats_t* stats = mi_get_tld_stats(); + mi_subproc_t* subproc = _mi_subproc(); + if (stats != &subproc->stats) { _mi_memzero(stats, sizeof(mi_stats_t)); } + _mi_memzero(&subproc->stats, sizeof(mi_stats_t)); + _mi_stats_init(); } -void mi_stats_merge(void) mi_attr_noexcept { - mi_stats_merge_from( mi_stats_get_default() ); + +void _mi_stats_merge_from(mi_stats_t* to, mi_stats_t* from) { + mi_assert_internal(to != NULL && from != NULL); + if (to != from) { + mi_stats_add(to, from); + _mi_memzero(from, sizeof(mi_stats_t)); + } } void _mi_stats_done(mi_stats_t* stats) { // called from `mi_thread_done` - mi_stats_merge_from(stats); + _mi_stats_merge_from(&_mi_subproc()->stats, stats); +} + +void _mi_stats_merge_thread(mi_tld_t* tld) { + mi_assert_internal(tld != NULL && tld->subproc != NULL); + 
_mi_stats_merge_from( &tld->subproc->stats, &tld->stats ); +} + +void mi_stats_merge(void) mi_attr_noexcept { + _mi_stats_merge_thread( _mi_thread_tld() ); } void mi_stats_print_out(mi_output_fun* out, void* arg) mi_attr_noexcept { - mi_stats_merge_from(mi_stats_get_default()); - _mi_stats_print(&_mi_stats_main, out, arg); + mi_stats_merge(); + _mi_stats_print(&_mi_subproc()->stats, out, arg); } void mi_stats_print(void* out) mi_attr_noexcept { @@ -389,53 +438,19 @@ void mi_stats_print(void* out) mi_attr_noexcept { } void mi_thread_stats_print_out(mi_output_fun* out, void* arg) mi_attr_noexcept { - _mi_stats_print(mi_stats_get_default(), out, arg); + _mi_stats_print(mi_get_tld_stats(), out, arg); } // ---------------------------------------------------------------- // Basic timer for convenience; use milli-seconds to avoid doubles // ---------------------------------------------------------------- -#ifdef _WIN32 -#include -static mi_msecs_t mi_to_msecs(LARGE_INTEGER t) { - static LARGE_INTEGER mfreq; // = 0 - if (mfreq.QuadPart == 0LL) { - LARGE_INTEGER f; - QueryPerformanceFrequency(&f); - mfreq.QuadPart = f.QuadPart/1000LL; - if (mfreq.QuadPart == 0) mfreq.QuadPart = 1; - } - return (mi_msecs_t)(t.QuadPart / mfreq.QuadPart); -} + +static mi_msecs_t mi_clock_diff; mi_msecs_t _mi_clock_now(void) { - LARGE_INTEGER t; - QueryPerformanceCounter(&t); - return mi_to_msecs(t); -} -#else -#include -#if defined(CLOCK_REALTIME) || defined(CLOCK_MONOTONIC) -mi_msecs_t _mi_clock_now(void) { - struct timespec t; - #ifdef CLOCK_MONOTONIC - clock_gettime(CLOCK_MONOTONIC, &t); - #else - clock_gettime(CLOCK_REALTIME, &t); - #endif - return ((mi_msecs_t)t.tv_sec * 1000) + ((mi_msecs_t)t.tv_nsec / 1000000); -} -#else -// low resolution timer -mi_msecs_t _mi_clock_now(void) { - return ((mi_msecs_t)clock() / ((mi_msecs_t)CLOCKS_PER_SEC / 1000)); + return _mi_prim_clock_now(); } -#endif -#endif - - -static mi_msecs_t mi_clock_diff; mi_msecs_t _mi_clock_start(void) { if (mi_clock_diff 
== 0.0) { @@ -455,130 +470,210 @@ mi_msecs_t _mi_clock_end(mi_msecs_t start) { // Basic process statistics // -------------------------------------------------------- -#if defined(_WIN32) -#include -#include -#pragma comment(lib,"psapi.lib") - -static mi_msecs_t filetime_msecs(const FILETIME* ftime) { - ULARGE_INTEGER i; - i.LowPart = ftime->dwLowDateTime; - i.HighPart = ftime->dwHighDateTime; - mi_msecs_t msecs = (i.QuadPart / 10000); // FILETIME is in 100 nano seconds - return msecs; +mi_decl_export void mi_process_info(size_t* elapsed_msecs, size_t* user_msecs, size_t* system_msecs, size_t* current_rss, size_t* peak_rss, size_t* current_commit, size_t* peak_commit, size_t* page_faults) mi_attr_noexcept +{ + mi_subproc_t* subproc = _mi_subproc(); + mi_process_info_t pinfo; + _mi_memzero_var(pinfo); + pinfo.elapsed = _mi_clock_end(mi_process_start); + pinfo.current_commit = (size_t)(mi_atomic_loadi64_relaxed((_Atomic(int64_t)*)(&subproc->stats.committed.current))); + pinfo.peak_commit = (size_t)(mi_atomic_loadi64_relaxed((_Atomic(int64_t)*)(&subproc->stats.committed.peak))); + pinfo.current_rss = pinfo.current_commit; + pinfo.peak_rss = pinfo.peak_commit; + pinfo.utime = 0; + pinfo.stime = 0; + pinfo.page_faults = 0; + + _mi_prim_process_info(&pinfo); + + if (elapsed_msecs!=NULL) *elapsed_msecs = (pinfo.elapsed < 0 ? 0 : (pinfo.elapsed < (mi_msecs_t)PTRDIFF_MAX ? (size_t)pinfo.elapsed : PTRDIFF_MAX)); + if (user_msecs!=NULL) *user_msecs = (pinfo.utime < 0 ? 0 : (pinfo.utime < (mi_msecs_t)PTRDIFF_MAX ? (size_t)pinfo.utime : PTRDIFF_MAX)); + if (system_msecs!=NULL) *system_msecs = (pinfo.stime < 0 ? 0 : (pinfo.stime < (mi_msecs_t)PTRDIFF_MAX ? 
(size_t)pinfo.stime : PTRDIFF_MAX)); + if (current_rss!=NULL) *current_rss = pinfo.current_rss; + if (peak_rss!=NULL) *peak_rss = pinfo.peak_rss; + if (current_commit!=NULL) *current_commit = pinfo.current_commit; + if (peak_commit!=NULL) *peak_commit = pinfo.peak_commit; + if (page_faults!=NULL) *page_faults = pinfo.page_faults; } -static void mi_stat_process_info(mi_msecs_t* elapsed, mi_msecs_t* utime, mi_msecs_t* stime, size_t* current_rss, size_t* peak_rss, size_t* current_commit, size_t* peak_commit, size_t* page_faults) -{ - *elapsed = _mi_clock_end(mi_process_start); - FILETIME ct; - FILETIME ut; - FILETIME st; - FILETIME et; - GetProcessTimes(GetCurrentProcess(), &ct, &et, &st, &ut); - *utime = filetime_msecs(&ut); - *stime = filetime_msecs(&st); - PROCESS_MEMORY_COUNTERS info; - GetProcessMemoryInfo(GetCurrentProcess(), &info, sizeof(info)); - *current_rss = (size_t)info.WorkingSetSize; - *peak_rss = (size_t)info.PeakWorkingSetSize; - *current_commit = (size_t)info.PagefileUsage; - *peak_commit = (size_t)info.PeakPagefileUsage; - *page_faults = (size_t)info.PageFaultCount; -} - -#elif !defined(__wasi__) && (defined(__unix__) || defined(__unix) || defined(unix) || defined(__APPLE__) || defined(__HAIKU__)) -#include -#include -#include - -#if defined(__APPLE__) -#include -#endif -#if defined(__HAIKU__) -#include -#endif +// -------------------------------------------------------- +// Return statistics +// -------------------------------------------------------- -static mi_msecs_t timeval_secs(const struct timeval* tv) { - return ((mi_msecs_t)tv->tv_sec * 1000L) + ((mi_msecs_t)tv->tv_usec / 1000L); +size_t mi_stats_get_bin_size(size_t bin) mi_attr_noexcept { + if (bin > MI_BIN_HUGE) return 0; + return _mi_bin_size(bin); } -static void mi_stat_process_info(mi_msecs_t* elapsed, mi_msecs_t* utime, mi_msecs_t* stime, size_t* current_rss, size_t* peak_rss, size_t* current_commit, size_t* peak_commit, size_t* page_faults) -{ - *elapsed = 
_mi_clock_end(mi_process_start); - struct rusage rusage; - getrusage(RUSAGE_SELF, &rusage); - *utime = timeval_secs(&rusage.ru_utime); - *stime = timeval_secs(&rusage.ru_stime); -#if !defined(__HAIKU__) - *page_faults = rusage.ru_majflt; -#endif - // estimate commit using our stats - *peak_commit = (size_t)(mi_atomic_loadi64_relaxed((_Atomic(int64_t)*)&_mi_stats_main.committed.peak)); - *current_commit = (size_t)(mi_atomic_loadi64_relaxed((_Atomic(int64_t)*)&_mi_stats_main.committed.current)); - *current_rss = *current_commit; // estimate -#if defined(__HAIKU__) - // Haiku does not have (yet?) a way to - // get these stats per process - thread_info tid; - area_info mem; - ssize_t c; - get_thread_info(find_thread(0), &tid); - while (get_next_area_info(tid.team, &c, &mem) == B_OK) { - *peak_rss += mem.ram_size; +void mi_stats_get(size_t stats_size, mi_stats_t* stats) mi_attr_noexcept { + if (stats == NULL || stats_size == 0) return; + _mi_memzero(stats, stats_size); + const size_t size = (stats_size > sizeof(mi_stats_t) ? sizeof(mi_stats_t) : stats_size); + _mi_memcpy(stats, &_mi_subproc()->stats, size); + stats->version = MI_STAT_VERSION; +} + + +// -------------------------------------------------------- +// Statics in json format +// -------------------------------------------------------- + +typedef struct mi_heap_buf_s { + char* buf; + size_t size; + size_t used; + bool can_realloc; +} mi_heap_buf_t; + +static bool mi_heap_buf_expand(mi_heap_buf_t* hbuf) { + if (hbuf==NULL) return false; + if (hbuf->buf != NULL && hbuf->size>0) { + hbuf->buf[hbuf->size-1] = 0; + } + if (hbuf->size > SIZE_MAX/2 || !hbuf->can_realloc) return false; + const size_t newsize = (hbuf->size == 0 ? 
mi_good_size(12*MI_KiB) : 2*hbuf->size); + char* const newbuf = (char*)mi_rezalloc(hbuf->buf, newsize); + if (newbuf == NULL) return false; + hbuf->buf = newbuf; + hbuf->size = newsize; + return true; +} + +static void mi_heap_buf_print(mi_heap_buf_t* hbuf, const char* msg) { + if (msg==NULL || hbuf==NULL) return; + if (hbuf->used + 1 >= hbuf->size && !hbuf->can_realloc) return; + for (const char* src = msg; *src != 0; src++) { + char c = *src; + if (hbuf->used + 1 >= hbuf->size) { + if (!mi_heap_buf_expand(hbuf)) return; + } + mi_assert_internal(hbuf->used < hbuf->size); + hbuf->buf[hbuf->used++] = c; } - *page_faults = 0; -#elif defined(__APPLE__) - *peak_rss = rusage.ru_maxrss; // BSD reports in bytes - struct mach_task_basic_info info; - mach_msg_type_number_t infoCount = MACH_TASK_BASIC_INFO_COUNT; - if (task_info(mach_task_self(), MACH_TASK_BASIC_INFO, (task_info_t)&info, &infoCount) == KERN_SUCCESS) { - *current_rss = (size_t)info.resident_size; + mi_assert_internal(hbuf->used < hbuf->size); + hbuf->buf[hbuf->used] = 0; +} + +static void mi_heap_buf_print_count_bin(mi_heap_buf_t* hbuf, const char* prefix, mi_stat_count_t* stat, size_t bin, bool add_comma) { + const size_t binsize = mi_stats_get_bin_size(bin); + const size_t pagesize = (binsize <= MI_SMALL_MAX_OBJ_SIZE ? MI_SMALL_PAGE_SIZE : + (binsize <= MI_MEDIUM_MAX_OBJ_SIZE ? MI_MEDIUM_PAGE_SIZE : + (binsize <= MI_LARGE_MAX_OBJ_SIZE ? MI_LARGE_PAGE_SIZE : 0))); + char buf[128]; + _mi_snprintf(buf, 128, "%s{ \"total\": %lld, \"peak\": %lld, \"current\": %lld, \"block_size\": %zu, \"page_size\": %zu }%s\n", prefix, stat->total, stat->peak, stat->current, binsize, pagesize, (add_comma ? 
"," : "")); + buf[127] = 0; + mi_heap_buf_print(hbuf, buf); +} + +static void mi_heap_buf_print_count_cbin(mi_heap_buf_t* hbuf, const char* prefix, mi_stat_count_t* stat, mi_chunkbin_t bin, bool add_comma) { + const char* cbin = " "; + switch(bin) { + case MI_CBIN_SMALL: cbin = "S"; break; + case MI_CBIN_MEDIUM: cbin = "M"; break; + case MI_CBIN_LARGE: cbin = "L"; break; + case MI_CBIN_OTHER: cbin = "X"; break; + default: cbin = " "; break; } -#else - *peak_rss = rusage.ru_maxrss * 1024; // Linux reports in KiB -#endif + char buf[128]; + _mi_snprintf(buf, 128, "%s{ \"total\": %lld, \"peak\": %lld, \"current\": %lld, \"bin\": \"%s\" }%s\n", prefix, stat->total, stat->peak, stat->current, cbin, (add_comma ? "," : "")); + buf[127] = 0; + mi_heap_buf_print(hbuf, buf); } -#else -#ifndef __wasi__ -// WebAssembly instances are not processes -#pragma message("define a way to get process info") -#endif +static void mi_heap_buf_print_count(mi_heap_buf_t* hbuf, const char* prefix, mi_stat_count_t* stat, bool add_comma) { + char buf[128]; + _mi_snprintf(buf, 128, "%s{ \"total\": %lld, \"peak\": %lld, \"current\": %lld }%s\n", prefix, stat->total, stat->peak, stat->current, (add_comma ? 
"," : "")); + buf[127] = 0; + mi_heap_buf_print(hbuf, buf); +} -static void mi_stat_process_info(mi_msecs_t* elapsed, mi_msecs_t* utime, mi_msecs_t* stime, size_t* current_rss, size_t* peak_rss, size_t* current_commit, size_t* peak_commit, size_t* page_faults) -{ - *elapsed = _mi_clock_end(mi_process_start); - *peak_commit = (size_t)(mi_atomic_loadi64_relaxed((_Atomic(int64_t)*)&_mi_stats_main.committed.peak)); - *current_commit = (size_t)(mi_atomic_loadi64_relaxed((_Atomic(int64_t)*)&_mi_stats_main.committed.current)); - *peak_rss = *peak_commit; - *current_rss = *current_commit; - *page_faults = 0; - *utime = 0; - *stime = 0; +static void mi_heap_buf_print_count_value(mi_heap_buf_t* hbuf, const char* name, mi_stat_count_t* stat) { + char buf[128]; + _mi_snprintf(buf, 128, " \"%s\": ", name); + buf[127] = 0; + mi_heap_buf_print(hbuf, buf); + mi_heap_buf_print_count(hbuf, "", stat, true); } -#endif +static void mi_heap_buf_print_value(mi_heap_buf_t* hbuf, const char* name, int64_t val) { + char buf[128]; + _mi_snprintf(buf, 128, " \"%s\": %lld,\n", name, val); + buf[127] = 0; + mi_heap_buf_print(hbuf, buf); +} -mi_decl_export void mi_process_info(size_t* elapsed_msecs, size_t* user_msecs, size_t* system_msecs, size_t* current_rss, size_t* peak_rss, size_t* current_commit, size_t* peak_commit, size_t* page_faults) mi_attr_noexcept -{ - mi_msecs_t elapsed = 0; - mi_msecs_t utime = 0; - mi_msecs_t stime = 0; - size_t current_rss0 = 0; - size_t peak_rss0 = 0; - size_t current_commit0 = 0; - size_t peak_commit0 = 0; - size_t page_faults0 = 0; - mi_stat_process_info(&elapsed,&utime, &stime, ¤t_rss0, &peak_rss0, ¤t_commit0, &peak_commit0, &page_faults0); - if (elapsed_msecs!=NULL) *elapsed_msecs = (elapsed < 0 ? 0 : (elapsed < (mi_msecs_t)PTRDIFF_MAX ? (size_t)elapsed : PTRDIFF_MAX)); - if (user_msecs!=NULL) *user_msecs = (utime < 0 ? 0 : (utime < (mi_msecs_t)PTRDIFF_MAX ? (size_t)utime : PTRDIFF_MAX)); - if (system_msecs!=NULL) *system_msecs = (stime < 0 ? 
0 : (stime < (mi_msecs_t)PTRDIFF_MAX ? (size_t)stime : PTRDIFF_MAX)); - if (current_rss!=NULL) *current_rss = current_rss0; - if (peak_rss!=NULL) *peak_rss = peak_rss0; - if (current_commit!=NULL) *current_commit = current_commit0; - if (peak_commit!=NULL) *peak_commit = peak_commit0; - if (page_faults!=NULL) *page_faults = page_faults0; +static void mi_heap_buf_print_size(mi_heap_buf_t* hbuf, const char* name, size_t val, bool add_comma) { + char buf[128]; + _mi_snprintf(buf, 128, " \"%s\": %zu%s\n", name, val, (add_comma ? "," : "")); + buf[127] = 0; + mi_heap_buf_print(hbuf, buf); } +static void mi_heap_buf_print_counter_value(mi_heap_buf_t* hbuf, const char* name, mi_stat_counter_t* stat) { + mi_heap_buf_print_value(hbuf, name, stat->total); +} + +#define MI_STAT_COUNT(stat) mi_heap_buf_print_count_value(&hbuf, #stat, &stats->stat); +#define MI_STAT_COUNTER(stat) mi_heap_buf_print_counter_value(&hbuf, #stat, &stats->stat); + +char* mi_stats_get_json(size_t output_size, char* output_buf) mi_attr_noexcept { + mi_stats_merge(); + mi_heap_buf_t hbuf = { NULL, 0, 0, true }; + if (output_size > 0 && output_buf != NULL) { + _mi_memzero(output_buf, output_size); + hbuf.buf = output_buf; + hbuf.size = output_size; + hbuf.can_realloc = false; + } + else { + if (!mi_heap_buf_expand(&hbuf)) return NULL; + } + mi_heap_buf_print(&hbuf, "{\n"); + mi_heap_buf_print_value(&hbuf, "version", MI_STAT_VERSION); + mi_heap_buf_print_value(&hbuf, "mimalloc_version", MI_MALLOC_VERSION); + + // process info + mi_heap_buf_print(&hbuf, " \"process\": {\n"); + size_t elapsed; + size_t user_time; + size_t sys_time; + size_t current_rss; + size_t peak_rss; + size_t current_commit; + size_t peak_commit; + size_t page_faults; + mi_process_info(&elapsed, &user_time, &sys_time, ¤t_rss, &peak_rss, ¤t_commit, &peak_commit, &page_faults); + mi_heap_buf_print_size(&hbuf, "elapsed_msecs", elapsed, true); + mi_heap_buf_print_size(&hbuf, "user_msecs", user_time, true); + mi_heap_buf_print_size(&hbuf, 
"system_msecs", sys_time, true); + mi_heap_buf_print_size(&hbuf, "page_faults", page_faults, true); + mi_heap_buf_print_size(&hbuf, "rss_current", current_rss, true); + mi_heap_buf_print_size(&hbuf, "rss_peak", peak_rss, true); + mi_heap_buf_print_size(&hbuf, "commit_current", current_commit, true); + mi_heap_buf_print_size(&hbuf, "commit_peak", peak_commit, false); + mi_heap_buf_print(&hbuf, " },\n"); + + // statistics + mi_stats_t* stats = &_mi_subproc()->stats; + MI_STAT_FIELDS() + + // size bins + mi_heap_buf_print(&hbuf, " \"malloc_bins\": [\n"); + for (size_t i = 0; i <= MI_BIN_HUGE; i++) { + mi_heap_buf_print_count_bin(&hbuf, " ", &stats->malloc_bins[i], i, i!=MI_BIN_HUGE); + } + mi_heap_buf_print(&hbuf, " ],\n"); + mi_heap_buf_print(&hbuf, " \"page_bins\": [\n"); + for (size_t i = 0; i <= MI_BIN_HUGE; i++) { + mi_heap_buf_print_count_bin(&hbuf, " ", &stats->page_bins[i], i, i!=MI_BIN_HUGE); + } + mi_heap_buf_print(&hbuf, " ],\n"); + mi_heap_buf_print(&hbuf, " \"chunk_bins\": [\n"); + for (size_t i = 0; i < MI_CBIN_COUNT; i++) { + mi_heap_buf_print_count_cbin(&hbuf, " ", &stats->chunk_bins[i], (mi_chunkbin_t)i, i!=MI_CBIN_COUNT-1); + } + mi_heap_buf_print(&hbuf, " ]\n"); + mi_heap_buf_print(&hbuf, "}\n"); + return hbuf.buf; +} diff --git a/depends/mimalloc/test/CMakeLists.txt b/depends/mimalloc/test/CMakeLists.txt index fb4b48111f03..5905613c9717 100644 --- a/depends/mimalloc/test/CMakeLists.txt +++ b/depends/mimalloc/test/CMakeLists.txt @@ -1,4 +1,4 @@ -cmake_minimum_required(VERSION 3.14) +cmake_minimum_required(VERSION 3.18) project(mimalloc-test C CXX) set(CMAKE_C_STANDARD 11) @@ -16,10 +16,12 @@ if (NOT CMAKE_BUILD_TYPE) endif() # Import mimalloc (if installed) -find_package(mimalloc 2.0 REQUIRED NO_SYSTEM_ENVIRONMENT_PATH) +find_package(mimalloc 1.9 CONFIG REQUIRED) message(STATUS "Found mimalloc installed at: ${MIMALLOC_LIBRARY_DIR} (${MIMALLOC_VERSION_DIR})") -# overriding with a dynamic library + +# link with a dynamic shared library +# use 
`LD_PRELOAD` to actually override malloc/free at runtime with mimalloc add_executable(dynamic-override main-override.c) target_link_libraries(dynamic-override PUBLIC mimalloc) @@ -29,9 +31,9 @@ target_link_libraries(dynamic-override-cxx PUBLIC mimalloc) # overriding with a static object file works reliable as the symbols in the # object file have priority over those in library files -add_executable(static-override-obj main-override.c ${MIMALLOC_OBJECT_DIR}/mimalloc.o) +add_executable(static-override-obj main-override.c ${MIMALLOC_OBJECT_DIR}/mimalloc${CMAKE_C_OUTPUT_EXTENSION}) target_include_directories(static-override-obj PUBLIC ${MIMALLOC_INCLUDE_DIR}) -target_link_libraries(static-override-obj PUBLIC pthread) +target_link_libraries(static-override-obj PUBLIC mimalloc-static) # overriding with a static library works too if using the `mimalloc-override.h` diff --git a/depends/mimalloc/test/main-override-dep.cpp b/depends/mimalloc/test/main-override-dep.cpp new file mode 100644 index 000000000000..4f293e7b73be --- /dev/null +++ b/depends/mimalloc/test/main-override-dep.cpp @@ -0,0 +1,60 @@ +// Issue #981: test overriding allocation in a DLL that is compiled independent of mimalloc. +// This is imported by the `mimalloc-test-override` project. 
+#include +#include +#include "main-override-dep.h" + +std::string TestAllocInDll::GetString() +{ + char* test = new char[128]; + memset(test, 0, 128); + const char* t = "test"; + memcpy(test, t, 4); + std::string r = test; + std::cout << "override-dep: GetString: " << r << "\n"; + delete[] test; + return r; +} + +#include + +void TestAllocInDll::TestHeapAlloc() +{ + HANDLE heap = GetProcessHeap(); + int* p = (int*)HeapAlloc(heap, 0, sizeof(int)); + *p = 42; + HeapFree(heap, 0, p); +} + +class Static { +private: + void* p; +public: + Static() { + printf("override-dep: static constructor\n"); + p = malloc(64); + return; + } + ~Static() { + free(p); + printf("override-dep: static destructor\n"); + return; + } +}; + +static Static s = Static(); + + +#include + +BOOL WINAPI DllMain(HINSTANCE module, DWORD reason, LPVOID reserved) { + (void)(reserved); + (void)(module); + if (reason==DLL_PROCESS_ATTACH) { + printf("override-dep: dll attach\n"); + } + else if (reason==DLL_PROCESS_DETACH) { + printf("override-dep: dll detach\n"); + } + return TRUE; +} diff --git a/depends/mimalloc/test/main-override-dep.h b/depends/mimalloc/test/main-override-dep.h new file mode 100644 index 000000000000..9d4aabfdb649 --- /dev/null +++ b/depends/mimalloc/test/main-override-dep.h @@ -0,0 +1,12 @@ +#pragma once +// Issue #981: test overriding allocation in a DLL that is compiled independent of mimalloc. +// This is imported by the `mimalloc-test-override` project. + +#include + +class TestAllocInDll +{ +public: + __declspec(dllexport) std::string GetString(); + __declspec(dllexport) void TestHeapAlloc(); +}; diff --git a/depends/mimalloc/test/main-override-static.c b/depends/mimalloc/test/main-override-static.c index 70b6293ccc53..3e47874ecc36 100644 --- a/depends/mimalloc/test/main-override-static.c +++ b/depends/mimalloc/test/main-override-static.c @@ -1,3 +1,6 @@ +#if _WIN32 +#include +#endif #include #include #include @@ -7,11 +10,13 @@ #include #include // redefines malloc etc. 
+static void mi_bins(void); static void double_free1(); static void double_free2(); static void corrupt_free(); static void block_overflow1(); +static void block_overflow2(); static void invalid_free(); static void test_aslr(void); static void test_process_info(void); @@ -19,53 +24,67 @@ static void test_reserved(void); static void negative_stat(void); static void alloc_huge(void); static void test_heap_walk(void); -static void test_heap_arena(void); +static void test_canary_leak(void); +static void test_manage_os_memory(void); +// static void test_large_pages(void); + int main() { mi_version(); mi_stats_reset(); + + // mi_bins(); + + // test_manage_os_memory(); + // test_large_pages(); // detect double frees and heap corruption // double_free1(); // double_free2(); // corrupt_free(); // block_overflow1(); + // block_overflow2(); + test_canary_leak(); // test_aslr(); // invalid_free(); // test_reserved(); // negative_stat(); - // alloc_huge(); // test_heap_walk(); - test_heap_arena(); - + // alloc_huge(); + + void* p1 = malloc(78); void* p2 = malloc(24); free(p1); p1 = mi_malloc(8); char* s = strdup("hello\n"); free(p2); - + + mi_heap_t* h = mi_heap_new(); + mi_heap_set_default(h); + p2 = malloc(16); p1 = realloc(p1, 32); free(p1); free(p2); free(s); - + /* now test if override worked by allocating/freeing across the api's*/ //p1 = mi_malloc(32); //free(p1); //p2 = malloc(32); //mi_free(p2); - + //mi_collect(true); //mi_stats_print(NULL); - + // test_process_info(); + return 0; } static void invalid_free() { free((void*)0xBADBEEF); - realloc((void*)0xBADBEEF,10); + realloc((void*)0xBADBEEF, 10); } static void block_overflow1() { @@ -74,6 +93,12 @@ static void block_overflow1() { free(p); } +static void block_overflow2() { + uint8_t* p = (uint8_t*)mi_malloc(16); + p[17] = 0; + free(p); +} + // The double free samples come ArcHeap [1] by Insu Yun (issue #161) // [1]: https://arxiv.org/pdf/1903.00503.pdf @@ -155,9 +180,9 @@ static void test_process_info(void) { size_t 
peak_rss = 0; size_t current_commit = 0; size_t peak_commit = 0; - size_t page_faults = 0; + size_t page_faults = 0; for (int i = 0; i < 100000; i++) { - void* p = calloc(100,10); + void* p = calloc(100, 10); free(p); } mi_process_info(&elapsed, &user_msecs, &system_msecs, ¤t_rss, &peak_rss, ¤t_commit, &peak_commit, &page_faults); @@ -168,7 +193,7 @@ static void test_reserved(void) { #define KiB 1024ULL #define MiB (KiB*KiB) #define GiB (MiB*KiB) - mi_reserve_os_memory(4*GiB, false, true); + mi_reserve_os_memory(3*GiB, false, true); void* p1 = malloc(100); void* p2 = malloc(100000); void* p3 = malloc(2*GiB); @@ -187,7 +212,7 @@ static void negative_stat(void) { mi_stats_print_out(NULL, NULL); *p = 100; mi_free(p); - mi_stats_print_out(NULL, NULL); + mi_stats_print_out(NULL, NULL); } static void alloc_huge(void) { @@ -207,27 +232,85 @@ static bool test_visit(const mi_heap_t* heap, const mi_heap_area_t* area, void* static void test_heap_walk(void) { mi_heap_t* heap = mi_heap_new(); - //mi_heap_malloc(heap, 2097152); + mi_heap_malloc(heap, 16*2097152); mi_heap_malloc(heap, 2067152); mi_heap_malloc(heap, 2097160); mi_heap_malloc(heap, 24576); mi_heap_visit_blocks(heap, true, &test_visit, NULL); } -static void test_heap_arena(void) { +static void test_canary_leak(void) { + char* p = mi_mallocn_tp(char, 22); + for (int i = 0; i < 22; i++) { + p[i] = '0'+i; + } + puts(p); + free(p); +} + +#if _WIN32 +static void test_manage_os_memory(void) { + size_t size = 256 * 1024 * 1024; + void* ptr = VirtualAlloc(NULL, size, MEM_RESERVE | MEM_COMMIT, PAGE_READWRITE); mi_arena_id_t arena_id; - int err = mi_reserve_os_memory_ex(100 * 1024 * 1024, false /* commit */, false /* allow large */, true /* exclusive */, &arena_id); - if (err) abort(); - mi_heap_t* heap = mi_heap_new_in_arena(arena_id); - for (int i = 0; i < 500000; i++) { - void* p = mi_heap_malloc(heap, 1024); - if (p == NULL) { - printf("out of memory after %d kb (expecting about 100_000kb)\n", i); - break; - } + 
mi_manage_os_memory_ex(ptr, size, true /* committed */, true /* pinned */, false /* is zero */, -1 /* numa node */, true /* exclusive */, &arena_id); + mi_heap_t* cuda_heap = mi_heap_new_in_arena(arena_id); // you can do this in any thread + + // now allocate only in the cuda arena + void* p1 = mi_heap_malloc(cuda_heap, 8); + int* p2 = mi_heap_malloc_tp(cuda_heap, int); + *p2 = 42; + + // and maybe set the cuda heap as the default heap? (but careful as now `malloc` will allocate in the cuda heap as well) + { + mi_heap_t* prev_default_heap = mi_heap_set_default(cuda_heap); + void* p3 = mi_malloc(8); // allocate in the cuda heap + mi_free(p3); } + mi_free(p1); + mi_free(p2); +} +#else +static void test_manage_os_memory(void) { + // empty +} +#endif + +// Experiment with huge OS pages +#if 0 + +#include +#include +#include +#include + +static void test_large_pages(void) { + mi_memid_t memid; + +#if 0 + size_t pages_reserved; + size_t page_size; + uint8_t* p = (uint8_t*)_mi_os_alloc_huge_os_pages(1, -1, 30000, &pages_reserved, &page_size, &memid); + const size_t req_size = pages_reserved * page_size; +#else + const size_t req_size = 64*MI_MiB; + uint8_t* p = (uint8_t*)_mi_os_alloc(req_size, &memid, NULL); +#endif + + p[0] = 1; + + //_mi_os_protect(p, _mi_os_page_size()); + //_mi_os_unprotect(p, _mi_os_page_size()); + //_mi_os_decommit(p, _mi_os_page_size(), NULL); + if (madvise(p, req_size, MADV_HUGEPAGE) == 0) { + printf("advised huge pages\n"); + _mi_os_decommit(p, _mi_os_page_size(), NULL); + }; + _mi_os_free(p, req_size, memid, NULL); } +#endif + // ---------------------------- // bin size experiments // ------------------------------ @@ -235,8 +318,8 @@ static void test_heap_arena(void) { #if 0 #include #include +#include -#define MI_INTPTR_SIZE 8 #define MI_LARGE_WSIZE_MAX (4*1024*1024 / MI_INTPTR_SIZE) #define MI_BIN_HUGE 100 @@ -246,11 +329,11 @@ static void test_heap_arena(void) { static inline uint8_t mi_bsr32(uint32_t x); #if defined(_MSC_VER) -#include 
+//#include #include static inline uint8_t mi_bsr32(uint32_t x) { uint32_t idx; - _BitScanReverse((DWORD*)&idx, x); + _BitScanReverse(&idx, x); return idx; } #elif defined(__GNUC__) || defined(__clang__) @@ -274,7 +357,7 @@ static inline uint8_t mi_bsr32(uint32_t x) { } #endif -/* + // Bit scan reverse: return the index of the highest bit. uint8_t _mi_bsr(uintptr_t x) { if (x == 0) return 0; @@ -287,38 +370,39 @@ uint8_t _mi_bsr(uintptr_t x) { # error "define bsr for non-32 or 64-bit platforms" #endif } -*/ - static inline size_t _mi_wsize_from_size(size_t size) { return (size + sizeof(uintptr_t) - 1) / sizeof(uintptr_t); } +// #define MI_ALIGN2W + // Return the bin for a given field size. // Returns MI_BIN_HUGE if the size is too large. // We use `wsize` for the size in "machine word sizes", // i.e. byte size == `wsize*sizeof(void*)`. -extern inline uint8_t _mi_bin8(size_t size) { - size_t wsize = _mi_wsize_from_size(size); - uint8_t bin; - if (wsize <= 1) { +static inline size_t mi_bin(size_t wsize) { + // size_t wsize = _mi_wsize_from_size(size); + // size_t bin; + /*if (wsize <= 1) { bin = 1; } + */ #if defined(MI_ALIGN4W) - else if (wsize <= 4) { - bin = (uint8_t)((wsize+1)&~1); // round to double word sizes + if (wsize <= 4) { + return (wsize <= 1 ? 1 : (wsize+1)&~1); // round to double word sizes } #elif defined(MI_ALIGN2W) - else if (wsize <= 8) { - bin = (uint8_t)((wsize+1)&~1); // round to double word sizes + if (wsize <= 8) { + return (wsize <= 1 ? 1 : (wsize+1)&~1); // round to double word sizes } #else - else if (wsize <= 8) { - bin = (uint8_t)wsize; + if (wsize <= 8) { + return (wsize == 0 ? 
1 : wsize); } #endif else if (wsize > MI_LARGE_WSIZE_MAX) { - bin = MI_BIN_HUGE; + return MI_BIN_HUGE; } else { #if defined(MI_ALIGN4W) @@ -326,15 +410,19 @@ extern inline uint8_t _mi_bin8(size_t size) { #endif wsize--; // find the highest bit - uint8_t b = mi_bsr32((uint32_t)wsize); + size_t idx; + mi_bsr(wsize, &idx); + uint8_t b = (uint8_t)idx; // and use the top 3 bits to determine the bin (~12.5% worst internal fragmentation). // - adjust with 3 because we use do not round the first 8 sizes // which each get an exact bin - bin = ((b << 2) + (uint8_t)((wsize >> (b - 2)) & 0x03)) - 3; + const size_t bin = ((b << 2) + ((wsize >> (b - 2)) & 0x03)) - 3; + assert(bin > 0 && bin < MI_BIN_HUGE); + return bin; } - return bin; } + static inline uint8_t _mi_bin4(size_t size) { size_t wsize = _mi_wsize_from_size(size); uint8_t bin; @@ -358,44 +446,79 @@ static inline uint8_t _mi_bin4(size_t size) { bin = MI_BIN_HUGE; } else { - uint8_t b = mi_bsr32((uint32_t)wsize); + size_t idx; + mi_bsr(wsize, &idx); + uint8_t b = (uint8_t)idx; bin = ((b << 1) + (uint8_t)((wsize >> (b - 1)) & 0x01)) + 3; } return bin; } -static size_t _mi_binx4(size_t bsize) { - if (bsize==0) return 0; - uint8_t b = mi_bsr32((uint32_t)bsize); - if (b <= 1) return bsize; - size_t bin = ((b << 1) | (bsize >> (b - 1))&0x01); +static size_t _mi_binx4(size_t wsize) { + size_t bin; + if (wsize <= 1) { + bin = 1; + } + else if (wsize <= 8) { + // bin = (wsize+1)&~1; // round to double word sizes + bin = (uint8_t)wsize; + } + else { + size_t idx; + mi_bsr(wsize, &idx); + uint8_t b = (uint8_t)idx; + if (b <= 1) return wsize; + bin = ((b << 1) | (wsize >> (b - 1))&0x01) + 3; + } return bin; } static size_t _mi_binx8(size_t bsize) { if (bsize<=1) return bsize; - uint8_t b = mi_bsr32((uint32_t)bsize); + size_t idx; + mi_bsr(bsize, &idx); + uint8_t b = (uint8_t)idx; if (b <= 2) return bsize; size_t bin = ((b << 2) | (bsize >> (b - 2))&0x03) - 5; return bin; } + +static inline size_t mi_binx(size_t wsize) { + uint8_t 
bin; + if (wsize <= 1) { + bin = 1; + } + else if (wsize <= 8) { + // bin = (wsize+1)&~1; // round to double word sizes + bin = (uint8_t)wsize; + } + else { + wsize--; + assert(wsize>0); + // find the highest bit + uint8_t b = (uint8_t)(MI_SIZE_BITS - 1 - mi_clz(wsize)); + + // and use the top 3 bits to determine the bin (~12.5% worst internal fragmentation). + // - adjust with 3 because we use do not round the first 8 sizes + // which each get an exact bin + bin = ((b << 2) + (uint8_t)((wsize >> (b - 2)) & 0x03)) - 3; + } + return bin; +} + + static void mi_bins(void) { //printf(" QNULL(1), /* 0 */ \\\n "); size_t last_bin = 0; - size_t min_bsize = 0; - size_t last_bsize = 0; - for (size_t bsize = 1; bsize < 2*1024; bsize++) { - size_t size = bsize * 64 * 1024; - size_t bin = _mi_binx8(bsize); + for (size_t wsize = 1; wsize <= (4*1024*1024) / 8 + 1024; wsize++) { + size_t bin = mi_bin(wsize); if (bin != last_bin) { - printf("min bsize: %6zd, max bsize: %6zd, bin: %6zd\n", min_bsize, last_bsize, last_bin); - //printf("QNULL(%6zd), ", wsize); - //if (last_bin%8 == 0) printf("/* %i */ \\\n ", last_bin); + //printf("min bsize: %6zd, max bsize: %6zd, bin: %6zd\n", min_wsize, last_wsize, last_bin); + printf("QNULL(%6zd), ", wsize-1); + if (last_bin%8 == 0) printf("/* %zu */ \\\n ", last_bin); last_bin = bin; - min_bsize = bsize; } - last_bsize = bsize; } } #endif diff --git a/depends/mimalloc/test/main-override.cpp b/depends/mimalloc/test/main-override.cpp index e0dba5a3d77f..408b5ee8f9e7 100644 --- a/depends/mimalloc/test/main-override.cpp +++ b/depends/mimalloc/test/main-override.cpp @@ -9,17 +9,12 @@ #include #include #include - #include -#include #include #ifdef _WIN32 #include -#endif - -#ifdef _WIN32 -#include +#include static void msleep(unsigned long msecs) { Sleep(msecs); } #else #include @@ -32,27 +27,47 @@ static void heap_late_free(); // issue #204 static void padding_shrink(); // issue #209 static void various_tests(); static void test_mt_shutdown(); 
-static void large_alloc(void); // issue #363 static void fail_aslr(); // issue #372 static void tsan_numa_test(); // issue #414 -static void strdup_test(); // issue #445 -static void bench_alloc_large(void); // issue #xxx +static void strdup_test(); // issue #445 +static void heap_thread_free_huge(); +static void test_std_string(); // issue #697 +static void test_thread_local(); // issue #944 +// static void test_mixed0(); // issue #942 +static void test_mixed1(); // issue #942 +static void test_stl_allocators(); + +#if _WIN32 +#include "main-override-dep.h" +static void test_dep(); // issue #981: test overriding in another DLL +#else +static void test_dep() { }; +#endif int main() { mi_stats_reset(); // ignore earlier allocations - - heap_thread_free_large(); - heap_no_delete(); - heap_late_free(); - padding_shrink(); - various_tests(); - large_alloc(); - tsan_numa_test(); - strdup_test(); - + //various_tests(); + //test_mixed1(); + + test_dep(); + + //test_std_string(); + //test_thread_local(); + // heap_thread_free_huge(); + /* + heap_thread_free_large(); + heap_no_delete(); + heap_late_free(); + padding_shrink(); + + tsan_numa_test(); + */ + /* + strdup_test(); + test_stl_allocators(); test_mt_shutdown(); + */ //fail_aslr(); - bench_alloc_large(); mi_stats_print(NULL); return 0; } @@ -92,6 +107,15 @@ static void various_tests() { delete t; t = new (std::nothrow) Test(42); delete t; + auto tbuf = new unsigned char[sizeof(Test)]; + t = new (tbuf) Test(42); + t->~Test(); + delete[] tbuf; + + #if _WIN32 + const char* ptr = ::_Getdays(); // test _base overrid + free((void*)ptr); + #endif } class Static { @@ -120,6 +144,18 @@ static bool test_stl_allocator1() { struct some_struct { int i; int j; double z; }; + +#if _WIN32 +static void test_dep() +{ + TestAllocInDll t; + std::string s = t.GetString(); + std::cout << "test_dep GetString: " << s << "\n"; + t.TestHeapAlloc(); +} +#endif + + static bool test_stl_allocator2() { std::vector > vec; 
vec.push_back(some_struct()); @@ -127,6 +163,130 @@ static bool test_stl_allocator2() { return vec.size() == 0; } +#if MI_HAS_HEAP_STL_ALLOCATOR +static bool test_stl_allocator3() { + std::vector > vec; + vec.push_back(1); + vec.pop_back(); + return vec.size() == 0; +} + +static bool test_stl_allocator4() { + std::vector > vec; + vec.push_back(some_struct()); + vec.pop_back(); + return vec.size() == 0; +} + +static bool test_stl_allocator5() { + std::vector > vec; + vec.push_back(1); + vec.pop_back(); + return vec.size() == 0; +} + +static bool test_stl_allocator6() { + std::vector > vec; + vec.push_back(some_struct()); + vec.pop_back(); + return vec.size() == 0; +} +#endif + +static void test_stl_allocators() { + test_stl_allocator1(); + test_stl_allocator2(); +#if MI_HAS_HEAP_STL_ALLOCATOR + test_stl_allocator3(); + test_stl_allocator4(); + test_stl_allocator5(); + test_stl_allocator6(); +#endif +} + +#if 0 +#include +#include +#include +#include +#include +#include + +static void test_mixed0() { + std::vector> numbers(1024 * 1024 * 100); + std::vector threads(1); + + std::atomic index{}; + + auto start = std::chrono::system_clock::now(); + + for (auto& thread : threads) { + thread = std::thread{[&index, &numbers]() { + while (true) { + auto i = index.fetch_add(1, std::memory_order_relaxed); + if (i >= numbers.size()) return; + + numbers[i] = std::make_unique(i); + } + }}; + } + + for (auto& thread : threads) thread.join(); + + auto end = std::chrono::system_clock::now(); + + auto duration = + std::chrono::duration_cast(end - start); + std::cout << "Running on " << threads.size() << " threads took " << duration + << std::endl; +} +#endif + +void asd() { + void* p = malloc(128); + free(p); +} +static void test_mixed1() { + std::thread thread(asd); + thread.join(); +} + +#if 0 +// issue #691 +static char* cptr; + +static void* thread1_allocate() +{ + cptr = mi_calloc_tp(char,22085632); + return NULL; +} + +static void* thread2_free() +{ + assert(cptr); + 
mi_free(cptr); + cptr = NULL; + return NULL; +} + +static void test_large_migrate(void) { + auto t1 = std::thread(thread1_allocate); + t1.join(); + auto t2 = std::thread(thread2_free); + t2.join(); + /* + pthread_t thread1, thread2; + + pthread_create(&thread1, NULL, &thread1_allocate, NULL); + pthread_join(thread1, NULL); + + pthread_create(&thread2, NULL, &thread2_free, NULL); + pthread_join(thread2, NULL); + */ + return; +} +#endif + // issue 445 static void strdup_test() { #ifdef _MSC_VER @@ -142,7 +302,7 @@ static void strdup_test() { // Issue #202 static void heap_no_delete_worker() { mi_heap_t* heap = mi_heap_new(); - void* q = mi_heap_malloc(heap, 1024); + void* q = mi_heap_malloc(heap, 1024); (void)(q); // mi_heap_delete(heap); // uncomment to prevent assertion } @@ -152,6 +312,13 @@ static void heap_no_delete() { } +// Issue #697 +static void test_std_string() { + std::string path = "/Users/xxxx/Library/Developer/Xcode/DerivedData/xxxxxxxxxx/Build/Intermediates.noindex/xxxxxxxxxxx/arm64/XX_lto.o/0.arm64.lto.o"; + std::string path1 = "/Users/xxxx/Library/Developer/Xcode/DerivedData/xxxxxxxxxx/Build/Intermediates.noindex/xxxxxxxxxxx/arm64/XX_lto.o/1.arm64.lto.o"; + std::cout << path + "\n>>> " + path1 + "\n>>> " << std::endl; +} + // Issue #204 static volatile void* global_p; @@ -193,12 +360,23 @@ static void heap_thread_free_large_worker() { static void heap_thread_free_large() { for (int i = 0; i < 100; i++) { - shared_p = mi_malloc_aligned(2 * 1024 * 1024 + 1, 8); + shared_p = mi_malloc_aligned(2*1024*1024 + 1, 8); auto t1 = std::thread(heap_thread_free_large_worker); t1.join(); } } +static void heap_thread_free_huge_worker() { + mi_free(shared_p); +} + +static void heap_thread_free_huge() { + for (int i = 0; i < 10; i++) { + shared_p = mi_malloc(1024 * 1024 * 1024); + auto t1 = std::thread(heap_thread_free_huge_worker); + t1.join(); + } +} static void test_mt_shutdown() @@ -225,21 +403,9 @@ static void test_mt_shutdown() std::cout << "done" << 
std::endl; } -// issue #363 -using namespace std; - -void large_alloc(void) -{ - char* a = new char[1ull << 25]; - thread th([&] { - delete[] a; - }); - th.join(); -} - // issue #372 static void fail_aslr() { - size_t sz = (4ULL << 40); // 4TiB + size_t sz = (size_t)(4ULL << 40); // 4TiB void* p = malloc(sz); printf("pointer p: %p: area up to %p\n", p, (uint8_t*)p + sz); *(int*)0x5FFFFFFF000 = 0; // should segfault @@ -257,33 +423,30 @@ static void tsan_numa_test() { t1.join(); } -// issue #? -#include -#include -#include -static void bench_alloc_large(void) { - static constexpr int kNumBuffers = 20; - static constexpr size_t kMinBufferSize = 5 * 1024 * 1024; - static constexpr size_t kMaxBufferSize = 25 * 1024 * 1024; - std::unique_ptr buffers[kNumBuffers]; - - std::random_device rd; - std::mt19937 gen(42); //rd()); - std::uniform_int_distribution<> size_distribution(kMinBufferSize, kMaxBufferSize); - std::uniform_int_distribution<> buf_number_distribution(0, kNumBuffers - 1); - - static constexpr int kNumIterations = 2000; - const auto start = std::chrono::steady_clock::now(); - for (int i = 0; i < kNumIterations; ++i) { - int buffer_idx = buf_number_distribution(gen); - size_t new_size = size_distribution(gen); - buffers[buffer_idx] = std::make_unique(new_size); - } - const auto end = std::chrono::steady_clock::now(); - const auto num_ms = std::chrono::duration_cast(end - start).count(); - const auto us_per_allocation = std::chrono::duration_cast(end - start).count() / kNumIterations; - std::cout << kNumIterations << " allocations Done in " << num_ms << "ms." 
<< std::endl; - std::cout << "Avg " << us_per_allocation << " us per allocation" << std::endl; +class MTest +{ + char *data; +public: + MTest() { data = (char*)malloc(1024); } + ~MTest() { free(data); }; +}; + +thread_local MTest tlVariable; + +void threadFun( int i ) +{ + printf( "Thread %d\n", i ); + std::this_thread::sleep_for( std::chrono::milliseconds(100) ); } +void test_thread_local() +{ + for( int i=1; i < 100; ++i ) + { + std::thread t( threadFun, i ); + t.join(); + mi_stats_print(NULL); + } + return; +} \ No newline at end of file diff --git a/depends/mimalloc/test/test-api-fill.c b/depends/mimalloc/test/test-api-fill.c index c205637c359c..eebbd394ef83 100644 --- a/depends/mimalloc/test/test-api-fill.c +++ b/depends/mimalloc/test/test-api-fill.c @@ -5,7 +5,7 @@ terms of the MIT license. A copy of the license can be found in the file "LICENSE" at the root of this distribution. -----------------------------------------------------------------------------*/ #include "mimalloc.h" -#include "mimalloc-types.h" +#include "mimalloc/types.h" #include "testhelper.h" @@ -164,7 +164,7 @@ int main(void) { mi_free(p); }; -#if MI_DEBUG >= 2 +#if (MI_DEBUG >= 2) && !MI_TSAN // --------------------------------------------------- // Debug filling // --------------------------------------------------- @@ -271,7 +271,7 @@ int main(void) { mi_free(p); }; - + #if !(MI_TRACK_VALGRIND || MI_TRACK_ASAN || MI_GUARDED) CHECK_BODY("fill-freed-small") { size_t malloc_size = MI_SMALL_SIZE_MAX / 2; uint8_t* p = (uint8_t*)mi_malloc(malloc_size); @@ -286,6 +286,7 @@ int main(void) { // First sizeof(void*) bytes will contain housekeeping data, skip these result = check_debug_fill_freed(p + sizeof(void*), malloc_size - sizeof(void*)); }; + #endif #endif // --------------------------------------------------- @@ -309,7 +310,7 @@ bool check_zero_init(uint8_t* p, size_t size) { #if MI_DEBUG >= 2 bool check_debug_fill_uninit(uint8_t* p, size_t size) { -#if MI_VALGRIND +#if MI_TRACK_VALGRIND || 
MI_TRACK_ASAN (void)p; (void)size; return true; // when compiled with valgrind we don't init on purpose #else @@ -325,10 +326,10 @@ bool check_debug_fill_uninit(uint8_t* p, size_t size) { } bool check_debug_fill_freed(uint8_t* p, size_t size) { -#if MI_VALGRIND +#if MI_TRACK_VALGRIND (void)p; (void)size; return true; // when compiled with valgrind we don't fill on purpose -#else +#else if(!p) return false; @@ -337,6 +338,6 @@ bool check_debug_fill_freed(uint8_t* p, size_t size) { result &= p[i] == MI_DEBUG_FREED; } return result; -#endif +#endif } #endif diff --git a/depends/mimalloc/test/test-api.c b/depends/mimalloc/test/test-api.c index 3c2ef7e43fdf..fa8fc3cd5271 100644 --- a/depends/mimalloc/test/test-api.c +++ b/depends/mimalloc/test/test-api.c @@ -33,8 +33,8 @@ we therefore test the API over various inputs. Please add more tests :-) #endif #include "mimalloc.h" -// #include "mimalloc-internal.h" -#include "mimalloc-types.h" // for MI_DEBUG +// #include "mimalloc/internal.h" +#include "mimalloc/types.h" // for MI_DEBUG and MI_PAGE_MAX_OVERALLOC_ALIGN #include "testhelper.h" @@ -46,27 +46,54 @@ bool test_heap2(void); bool test_stl_allocator1(void); bool test_stl_allocator2(void); +bool test_stl_heap_allocator1(void); +bool test_stl_heap_allocator2(void); +bool test_stl_heap_allocator3(void); +bool test_stl_heap_allocator4(void); + +bool mem_is_zero(uint8_t* p, size_t size) { + if (p==NULL) return false; + for (size_t i = 0; i < size; ++i) { + if (p[i] != 0) return false; + } + return true; +} + // --------------------------------------------------------------------------- // Main testing // --------------------------------------------------------------------------- int main(void) { mi_option_disable(mi_option_verbose); + CHECK_BODY("malloc-aligned9a") { // test large alignments + void* p = mi_zalloc_aligned(1024 * 1024, 2); + mi_free(p); + p = mi_zalloc_aligned(1024 * 1024, 2); + mi_free(p); + result = true; + }; + + // 
--------------------------------------------------- // Malloc // --------------------------------------------------- CHECK_BODY("malloc-zero") { - void* p = mi_malloc(0); + void* p = mi_malloc(0); result = (p != NULL); mi_free(p); }; CHECK_BODY("malloc-nomem1") { result = (mi_malloc((size_t)PTRDIFF_MAX + (size_t)1) == NULL); }; - CHECK_BODY("malloc-null") { + CHECK_BODY("malloc-free-null") { mi_free(NULL); }; + #if MI_INTPTR_BITS > 32 + CHECK_BODY("malloc-free-invalid-low") { + mi_free((void*)(MI_ZU(0x0000000003990080))); // issue #1087 + }; + #endif CHECK_BODY("calloc-overflow") { // use (size_t)&mi_calloc to get some number without triggering compiler warnings result = (mi_calloc((size_t)&mi_calloc,SIZE_MAX/1000) == NULL); @@ -83,7 +110,7 @@ int main(void) { // --------------------------------------------------- // Extended - // --------------------------------------------------- + // --------------------------------------------------- CHECK_BODY("posix_memalign1") { void* p = &p; int err = mi_posix_memalign(&p, sizeof(void*), 32); @@ -122,7 +149,7 @@ int main(void) { void* p = mi_malloc_aligned(48,32); result = (p != NULL && (uintptr_t)(p) % 32 == 0); mi_free(p); }; CHECK_BODY("malloc-aligned3") { - void* p1 = mi_malloc_aligned(48,32); bool result1 = (p1 != NULL && (uintptr_t)(p1) % 32 == 0); + void* p1 = mi_malloc_aligned(48,32); bool result1 = (p1 != NULL && (uintptr_t)(p1) % 32 == 0); void* p2 = mi_malloc_aligned(48,32); bool result2 = (p2 != NULL && (uintptr_t)(p2) % 32 == 0); mi_free(p2); mi_free(p1); @@ -138,18 +165,20 @@ int main(void) { result = ok; }; CHECK_BODY("malloc-aligned5") { - void* p = mi_malloc_aligned(4097,4096); - size_t usable = mi_usable_size(p); - result = (usable >= 4097 && usable < 16000); - printf("malloc_aligned5: usable size: %zi\n", usable); + void* p = mi_malloc_aligned(4097,4096); + size_t usable = mi_usable_size(p); + result = (usable >= 4097 && usable < 16000); + fprintf(stderr, "malloc_aligned5: usable size: %zi. 
", usable); mi_free(p); }; + /* CHECK_BODY("malloc-aligned6") { bool ok = true; - for (size_t align = 1; align <= MI_ALIGNMENT_MAX && ok; align *= 2) { + for (size_t align = 1; align <= MI_PAGE_MAX_OVERALLOC_ALIGN && ok; align *= 2) { void* ps[8]; for (int i = 0; i < 8 && ok; i++) { - ps[i] = mi_malloc_aligned(align*13 /*size*/, align); + ps[i] = mi_malloc_aligned(align*13 // size + , align); if (ps[i] == NULL || (uintptr_t)(ps[i]) % align != 0) { ok = false; } @@ -160,18 +189,98 @@ int main(void) { } result = ok; }; + */ CHECK_BODY("malloc-aligned7") { - void* p = mi_malloc_aligned(1024,MI_ALIGNMENT_MAX); mi_free(p); - }; + void* p = mi_malloc_aligned(1024,MI_PAGE_MAX_OVERALLOC_ALIGN); + mi_free(p); + result = ((uintptr_t)p % MI_PAGE_MAX_OVERALLOC_ALIGN) == 0; + }; CHECK_BODY("malloc-aligned8") { - void* p = mi_malloc_aligned(1024,2*MI_ALIGNMENT_MAX); mi_free(p); + bool ok = true; + for (int i = 0; i < 5 && ok; i++) { + int n = (1 << i); + void* p = mi_malloc_aligned(1024, n * MI_PAGE_MAX_OVERALLOC_ALIGN); + ok = ((uintptr_t)p % (n*MI_PAGE_MAX_OVERALLOC_ALIGN)) == 0; + mi_free(p); + } + result = ok; + }; + CHECK_BODY("malloc-aligned9") { // test large alignments + bool ok = true; + void* p[8]; + const int max_align_shift = + #if SIZE_MAX > UINT32_MAX + 28 + #else + 20 + #endif + ; + size_t sizes[8] = { 8, 512, 1024 * 1024, MI_PAGE_MAX_OVERALLOC_ALIGN, MI_PAGE_MAX_OVERALLOC_ALIGN + 1, 2 * MI_PAGE_MAX_OVERALLOC_ALIGN, 8 * MI_PAGE_MAX_OVERALLOC_ALIGN, 0 }; + for (int i = 0; i < max_align_shift && ok; i++) { + int align = (1 << i); + for (int j = 0; j < 8 && ok; j++) { + p[j] = mi_zalloc_aligned(sizes[j], align); + ok = ((uintptr_t)p[j] % align) == 0; + } + for (int j = 0; j < 8; j++) { + mi_free(p[j]); + } + } + result = ok; }; + CHECK_BODY("malloc-aligned10") { + bool ok = true; + void* p[10+1]; + int align; + int j; + for(j = 0, align = 1; j <= 10 && ok; align *= 2, j++ ) { + p[j] = mi_malloc_aligned(43 + align, align); + ok = ((uintptr_t)p[j] % align) == 0; + } + 
for ( ; j > 0; j--) { + mi_free(p[j-1]); + } + result = ok; + } + CHECK_BODY("malloc_aligned11") { + mi_heap_t* heap = mi_heap_new(); + void* p = mi_heap_malloc_aligned(heap, 33554426, 8); + result = mi_heap_contains_block(heap, p); + mi_heap_destroy(heap); + } + CHECK_BODY("mimalloc-aligned12") { + void* p = mi_malloc_aligned(0x100, 0x100); + result = (((uintptr_t)p % 0x100) == 0); // #602 + mi_free(p); + } + CHECK_BODY("mimalloc-aligned13") { + bool ok = true; + for( size_t size = 1; size <= (MI_SMALL_SIZE_MAX * 2) && ok; size++ ) { + for(size_t align = 1; align <= size && ok; align *= 2 ) { + void* p[10]; + for(int i = 0; i < 10 && ok; i++) { + p[i] = mi_malloc_aligned(size,align);; + ok = (p[i] != NULL && ((uintptr_t)(p[i]) % align) == 0); + } + for(int i = 0; i < 10 && ok; i++) { + mi_free(p[i]); + } + /* + if (ok && align <= size && ((size + MI_PADDING_SIZE) & (align-1)) == 0) { + size_t bsize = mi_good_size(size); + ok = (align <= bsize && (bsize & (align-1)) == 0); + } + */ + } + } + result = ok; + } CHECK_BODY("malloc-aligned-at1") { void* p = mi_malloc_aligned_at(48,32,0); result = (p != NULL && ((uintptr_t)(p) + 0) % 32 == 0); mi_free(p); }; CHECK_BODY("malloc-aligned-at2") { void* p = mi_malloc_aligned_at(50,32,8); result = (p != NULL && ((uintptr_t)(p) + 8) % 32 == 0); mi_free(p); - }; + }; CHECK_BODY("memalign1") { void* p; bool ok = true; @@ -181,7 +290,22 @@ int main(void) { } result = ok; }; - + CHECK_BODY("zalloc-aligned-small1") { + size_t zalloc_size = MI_SMALL_SIZE_MAX / 2; + uint8_t* p = (uint8_t*)mi_zalloc_aligned(zalloc_size, MI_MAX_ALIGN_SIZE * 2); + result = mem_is_zero(p, zalloc_size); + mi_free(p); + }; + CHECK_BODY("rezalloc_aligned-small1") { + size_t zalloc_size = MI_SMALL_SIZE_MAX / 2; + uint8_t* p = (uint8_t*)mi_zalloc_aligned(zalloc_size, MI_MAX_ALIGN_SIZE * 2); + result = mem_is_zero(p, zalloc_size); + zalloc_size *= 3; + p = (uint8_t*)mi_rezalloc_aligned(p, zalloc_size, MI_MAX_ALIGN_SIZE * 2); + result = result && mem_is_zero(p, 
zalloc_size); + mi_free(p); + }; + // --------------------------------------------------- // Reallocation // --------------------------------------------------- @@ -221,15 +345,22 @@ int main(void) { // --------------------------------------------------- // various // --------------------------------------------------- + #if !defined(MI_TRACK_ASAN) // realpath may leak with ASAN enabled (as the ASAN allocator intercepts it) CHECK_BODY("realpath") { char* s = mi_realpath( ".", NULL ); // printf("realpath: %s\n",s); mi_free(s); }; + #endif CHECK("stl_allocator1", test_stl_allocator1()); CHECK("stl_allocator2", test_stl_allocator2()); + CHECK("stl_heap_allocator1", test_stl_heap_allocator1()); + CHECK("stl_heap_allocator2", test_stl_heap_allocator2()); + CHECK("stl_heap_allocator3", test_stl_heap_allocator3()); + CHECK("stl_heap_allocator4", test_stl_heap_allocator4()); + // --------------------------------------------------- // Done // ---------------------------------------------------[] @@ -240,7 +371,7 @@ int main(void) { // Larger test functions // --------------------------------------------------- -bool test_heap1() { +bool test_heap1(void) { mi_heap_t* heap = mi_heap_new(); int* p1 = mi_heap_malloc_tp(heap,int); int* p2 = mi_heap_malloc_tp(heap,int); @@ -249,7 +380,7 @@ bool test_heap1() { return true; } -bool test_heap2() { +bool test_heap2(void) { mi_heap_t* heap = mi_heap_new(); int* p1 = mi_heap_malloc_tp(heap,int); int* p2 = mi_heap_malloc_tp(heap,int); @@ -260,7 +391,7 @@ bool test_heap2() { return true; } -bool test_stl_allocator1() { +bool test_stl_allocator1(void) { #ifdef __cplusplus std::vector > vec; vec.push_back(1); @@ -273,7 +404,7 @@ bool test_stl_allocator1() { struct some_struct { int i; int j; double z; }; -bool test_stl_allocator2() { +bool test_stl_allocator2(void) { #ifdef __cplusplus std::vector > vec; vec.push_back(some_struct()); @@ -283,3 +414,61 @@ bool test_stl_allocator2() { return true; #endif } + +bool 
test_stl_heap_allocator1(void) { +#ifdef __cplusplus + std::vector > vec; + vec.push_back(some_struct()); + vec.pop_back(); + return vec.size() == 0; +#else + return true; +#endif +} + +bool test_stl_heap_allocator2(void) { +#ifdef __cplusplus + std::vector > vec; + vec.push_back(some_struct()); + vec.pop_back(); + return vec.size() == 0; +#else + return true; +#endif +} + +bool test_stl_heap_allocator3(void) { +#ifdef __cplusplus + mi_heap_t* heap = mi_heap_new(); + bool good = false; + { + mi_heap_stl_allocator myAlloc(heap); + std::vector > vec(myAlloc); + vec.push_back(some_struct()); + vec.pop_back(); + good = vec.size() == 0; + } + mi_heap_delete(heap); + return good; +#else + return true; +#endif +} + +bool test_stl_heap_allocator4(void) { +#ifdef __cplusplus + mi_heap_t* heap = mi_heap_new(); + bool good = false; + { + mi_heap_destroy_stl_allocator myAlloc(heap); + std::vector > vec(myAlloc); + vec.push_back(some_struct()); + vec.pop_back(); + good = vec.size() == 0; + } + mi_heap_destroy(heap); + return good; +#else + return true; +#endif +} diff --git a/depends/mimalloc/test/test-stress.c b/depends/mimalloc/test/test-stress.c index 61171d0389b7..a2e2b3776f35 100644 --- a/depends/mimalloc/test/test-stress.c +++ b/depends/mimalloc/test/test-stress.c @@ -7,7 +7,7 @@ terms of the MIT license. /* This is a stress test for the allocator, using multiple threads and transferring objects between threads. It tries to reflect real-world workloads: - allocation size is distributed linearly in powers of two - - with some fraction extra large (and some extra extra large) + - with some fraction extra large (and some very large) - the allocations are initialized and read again at free - pointers transfer between threads - threads are terminated and recreated with some objects surviving in between @@ -20,33 +20,73 @@ terms of the MIT license. 
#include #include #include +#include + +// #define MI_GUARDED +// #define USE_STD_MALLOC // > mimalloc-test-stress [THREADS] [SCALE] [ITER] // // argument defaults +#if defined(MI_TSAN) // with thread-sanitizer reduce the threads to test within the azure pipeline limits +static int THREADS = 8; +static int SCALE = 25; +static int ITER = 400; +#elif defined(MI_UBSAN) // with undefined behavious sanitizer reduce parameters to stay within the azure pipeline limits +static int THREADS = 8; +static int SCALE = 25; +static int ITER = 20; +#elif defined(MI_GUARDED) // with debug guard pages reduce parameters to stay within the azure pipeline limits +static int THREADS = 8; +static int SCALE = 10; +static int ITER = 10; +#elif 0 +static int THREADS = 4; +static int SCALE = 10; +static int ITER = 20; +#elif 0 +static int THREADS = 32; +static int SCALE = 50; +static int ITER = 50; +#elif 0 +static int THREADS = 32; +static int SCALE = 25; +static int ITER = 50; +#define ALLOW_LARGE true +#else static int THREADS = 32; // more repeatable if THREADS <= #processors -static int SCALE = 25; // scaling factor +static int SCALE = 50; // scaling factor static int ITER = 50; // N full iterations destructing and re-creating all threads +#endif -// static int THREADS = 8; // more repeatable if THREADS <= #processors -// static int SCALE = 100; // scaling factor -#define STRESS // undefine for leak test -static bool allow_large_objects = true; // allow very large objects? -static size_t use_one_size = 0; // use single object size of `N * sizeof(uintptr_t)`? +#define STRESS // undefine for leak test +#ifndef ALLOW_LARGE +#define ALLOW_LARGE false +#endif + +static bool allow_large_objects = ALLOW_LARGE; // allow very large objects? (set to `true` if SCALE>100) + +static size_t use_one_size = 0; // use single object size of `N * sizeof(uintptr_t)`? 
+ +static bool main_participates = false; // main thread participates as a worker too -// #define USE_STD_MALLOC #ifdef USE_STD_MALLOC -#define custom_calloc(n,s) malloc(n*s) +#define custom_calloc(n,s) calloc(n,s) #define custom_realloc(p,s) realloc(p,s) #define custom_free(p) free(p) #else #include -#define custom_calloc(n,s) mi_malloc(n*s) +#include +#define custom_calloc(n,s) mi_calloc(n,s) #define custom_realloc(p,s) mi_realloc(p,s) #define custom_free(p) mi_free(p) + +#ifndef NDEBUG +#define xHEAP_WALK // walk the heap objects? +#endif #endif // transfer pointer between threads @@ -95,12 +135,13 @@ static void* alloc_items(size_t items, random_t r) { else if (chance(10, r) && allow_large_objects) items *= 1000; // 0.1% huge else items *= 100; // 1% large objects; } - if (items == 40) items++; // pthreads uses that size for stack increases + if (items>=32 && items<=40) items*=2; // pthreads uses 320b allocations (this shows that more clearly in the stats) if (use_one_size > 0) items = (use_one_size / sizeof(uintptr_t)); if (items==0) items = 1; uintptr_t* p = (uintptr_t*)custom_calloc(items,sizeof(uintptr_t)); if (p != NULL) { for (uintptr_t i = 0; i < items; i++) { + assert(p[i] == 0); p[i] = (items - i) ^ cookie; } } @@ -121,6 +162,16 @@ static void free_items(void* p) { custom_free(p); } +#ifdef HEAP_WALK +static bool visit_blocks(const mi_heap_t* heap, const mi_heap_area_t* area, void* block, size_t block_size, void* arg) { + (void)(heap); (void)(area); + size_t* total = (size_t*)arg; + if (block != NULL) { + *total += block_size; + } + return true; +} +#endif static void stress(intptr_t tid) { //bench_start_thread(); @@ -165,6 +216,13 @@ static void stress(intptr_t tid) { data[data_idx] = q; } } + + #ifdef HEAP_WALK + // walk the heap + size_t total = 0; + mi_heap_visit_blocks(mi_heap_get_default(), true, visit_blocks, &total); + #endif + // free everything that is left for (size_t i = 0; i < retain_top; i++) { free_items(retained[i]); @@ -182,7 +240,15 
@@ static void run_os_threads(size_t nthreads, void (*entry)(intptr_t tid)); static void test_stress(void) { uintptr_t r = rand(); for (int n = 0; n < ITER; n++) { - run_os_threads(THREADS, &stress); + run_os_threads(THREADS, &stress); + #if !defined(NDEBUG) && !defined(USE_STD_MALLOC) + // switch between arena and OS allocation for testing + // mi_option_set_enabled(mi_option_disallow_arena_alloc, (n%2)==1); + #endif + #ifdef HEAP_WALK + size_t total = 0; + mi_abandoned_visit_blocks(mi_subproc_main(), -1, true, visit_blocks, &total); + #endif for (int i = 0; i < TRANSFERS; i++) { if (chance(50, &r) || n + 1 == ITER) { // free all on last run, otherwise free half of the transfers void* p = atomic_exchange_ptr(&transfer[i], NULL); @@ -191,12 +257,26 @@ static void test_stress(void) { } #ifndef NDEBUG //mi_collect(false); - //mi_debug_show_arenas(); - #endif + //mi_debug_show_arenas(true); + #endif #if !defined(NDEBUG) || defined(MI_TSAN) - if ((n + 1) % 10 == 0) { printf("- iterations left: %3d\n", ITER - (n + 1)); } + if ((n + 1) % 10 == 0) { + printf("- iterations left: %3d\n", ITER - (n + 1)); + #ifndef USE_STD_MALLOC + mi_debug_show_arenas(); + #endif + //mi_collect(true); + //mi_debug_show_arenas(); + } #endif } + // clean up + for (int i = 0; i < TRANSFERS; i++) { + void* p = atomic_exchange_ptr(&transfer[i], NULL); + if (p != NULL) { + free_items(p); + } + } } #ifndef STRESS @@ -221,7 +301,29 @@ static void test_leak(void) { } #endif -int main(int argc, char** argv) { +#if defined(USE_STD_MALLOC) && defined(MI_LINK_VERSION) +#ifdef __cplusplus +extern "C" +#endif +int mi_version(void); +#endif + +int main(int argc, char** argv) { + #ifdef MI_LINK_VERSION + mi_version(); + #endif + #ifdef HEAP_WALK + mi_option_enable(mi_option_visit_abandoned); + #endif + #if !defined(NDEBUG) && !defined(USE_STD_MALLOC) + // mi_option_set(mi_option_arena_reserve, 32 * 1024 /* in kib = 32MiB */); + // mi_option_set(mi_option_purge_delay,1); + #endif + #if defined(NDEBUG) && 
!defined(USE_STD_MALLOC) + // mi_option_set(mi_option_purge_delay,-1); + mi_option_set(mi_option_page_reclaim_on_free, 0); + #endif + // > mimalloc-test-stress [THREADS] [SCALE] [ITER] if (argc >= 2) { char* end; @@ -238,7 +340,15 @@ int main(int argc, char** argv) { long n = (strtol(argv[3], &end, 10)); if (n > 0) ITER = n; } - printf("Using %d threads with a %d%% load-per-thread and %d iterations\n", THREADS, SCALE, ITER); + if (SCALE > 100) { + allow_large_objects = true; + } + printf("Using %d threads with a %d%% load-per-thread and %d iterations %s\n", THREADS, SCALE, ITER, (allow_large_objects ? "(allow large objects)" : "")); + + #if !defined(NDEBUG) && !defined(USE_STD_MALLOC) + mi_stats_reset(); + #endif + //mi_reserve_os_memory(1024*1024*1024ULL, false, true); //int res = mi_reserve_huge_os_pages(4,1); //printf("(reserve huge: %i\n)", res); @@ -247,26 +357,26 @@ int main(int argc, char** argv) { // Run ITER full iterations where half the objects in the transfer buffer survive to the next round. 
srand(0x7feb352d); - - //mi_reserve_os_memory(512ULL << 20, true, true); - -#if !defined(NDEBUG) && !defined(USE_STD_MALLOC) - mi_stats_reset(); -#endif - + // mi_stats_reset(); #ifdef STRESS - test_stress(); + test_stress(); #else - test_leak(); + test_leak(); #endif #ifndef USE_STD_MALLOC #ifndef NDEBUG + mi_debug_show_arenas(); mi_collect(true); - //mi_debug_show_arenas(); + char* json = mi_stats_get_json(0, NULL); + if (json != NULL) { + fputs(json,stderr); + mi_free(json); + } #endif + mi_collect(true); mi_stats_print(NULL); -#endif +#endif //bench_end_program(); return 0; } @@ -276,7 +386,7 @@ static void (*thread_entry_fun)(intptr_t) = &stress; #ifdef _WIN32 -#include +#include static DWORD WINAPI thread_entry(LPVOID param) { thread_entry_fun((intptr_t)param); @@ -287,13 +397,16 @@ static void run_os_threads(size_t nthreads, void (*fun)(intptr_t)) { thread_entry_fun = fun; DWORD* tids = (DWORD*)custom_calloc(nthreads,sizeof(DWORD)); HANDLE* thandles = (HANDLE*)custom_calloc(nthreads,sizeof(HANDLE)); - for (uintptr_t i = 0; i < nthreads; i++) { - thandles[i] = CreateThread(0, 8*1024, &thread_entry, (void*)(i), 0, &tids[i]); + thandles[0] = GetCurrentThread(); // avoid lint warning + const size_t start = (main_participates ? 
1 : 0); + for (size_t i = start; i < nthreads; i++) { + thandles[i] = CreateThread(0, 8*1024L, &thread_entry, (void*)(i), 0, &tids[i]); } - for (size_t i = 0; i < nthreads; i++) { + if (main_participates) fun(0); // run the main thread as well + for (size_t i = start; i < nthreads; i++) { WaitForSingleObject(thandles[i], INFINITE); } - for (size_t i = 0; i < nthreads; i++) { + for (size_t i = start; i < nthreads; i++) { CloseHandle(thandles[i]); } custom_free(tids); @@ -320,11 +433,13 @@ static void run_os_threads(size_t nthreads, void (*fun)(intptr_t)) { thread_entry_fun = fun; pthread_t* threads = (pthread_t*)custom_calloc(nthreads,sizeof(pthread_t)); memset(threads, 0, sizeof(pthread_t) * nthreads); + const size_t start = (main_participates ? 1 : 0); //pthread_setconcurrency(nthreads); - for (size_t i = 0; i < nthreads; i++) { + for (size_t i = start; i < nthreads; i++) { pthread_create(&threads[i], NULL, &thread_entry, (void*)i); } - for (size_t i = 0; i < nthreads; i++) { + if (main_participates) fun(0); // run the main thread as well + for (size_t i = start; i < nthreads; i++) { pthread_join(threads[i], NULL); } custom_free(threads); diff --git a/depends/mimalloc/test/test-wrong.c b/depends/mimalloc/test/test-wrong.c index 8bf7767edb4d..56a2339a7554 100644 --- a/depends/mimalloc/test/test-wrong.c +++ b/depends/mimalloc/test/test-wrong.c @@ -5,20 +5,42 @@ terms of the MIT license. A copy of the license can be found in the file "LICENSE" at the root of this distribution. -----------------------------------------------------------------------------*/ -/* test file for valgrind support. +/* test file for valgrind/asan support. + + VALGRIND: + ---------- Compile in an "out/debug" folder: > cd out/debug - > cmake ../.. -DMI_VALGRIND=1 + > cmake ../.. 
-DMI_TRACK_VALGRIND=1 > make -j8 - and then compile this file as: + and then compile this file as: > gcc -g -o test-wrong -I../../include ../../test/test-wrong.c libmimalloc-valgrind-debug.a -lpthread and test as: > valgrind ./test-wrong + + + ASAN + ---------- + Compile in an "out/debug" folder: + + > cd out/debug + > cmake ../.. -DMI_TRACK_ASAN=1 + > make -j8 + + and then compile this file as: + + > clang -g -o test-wrong -I../../include ../../test/test-wrong.c libmimalloc-asan-debug.a -lpthread -fsanitize=address -fsanitize-recover=address + + and test as: + + > ASAN_OPTIONS=verbosity=1:halt_on_error=0 ./test-wrong + + */ #include #include @@ -31,9 +53,9 @@ terms of the MIT license. A copy of the license can be found in the file #endif int main(int argc, char** argv) { - int* p = mi(malloc)(3*sizeof(int)); - - int* r = mi_malloc_aligned(8,16); + int* p = (int*)mi(malloc)(3*sizeof(int)); + + int* r = (int*)mi_malloc_aligned(8,16); mi_free(r); // illegal byte wise read @@ -42,12 +64,12 @@ int main(int argc, char** argv) { mi(free)(c); // undefined access - int* q = mi(malloc)(sizeof(int)); + int* q = (int*)mi(malloc)(sizeof(int)); printf("undefined: %d\n", *q); // illegal int read printf("invalid: over: %d, under: %d\n", q[1], q[-1]); - + *q = 42; // buffer overflow @@ -55,7 +77,7 @@ int main(int argc, char** argv) { // buffer underflow q[-1] = 44; - + mi(free)(q); // double free @@ -66,5 +88,5 @@ int main(int argc, char** argv) { // leak p // mi_free(p) - return 0; + return 0; } \ No newline at end of file diff --git a/depends/mimalloc/test/testhelper.h b/depends/mimalloc/test/testhelper.h index 44776b74b337..a97275841177 100644 --- a/depends/mimalloc/test/testhelper.h +++ b/depends/mimalloc/test/testhelper.h @@ -19,12 +19,12 @@ static int failed = 0; static bool check_result(bool result, const char* testname, const char* fname, long lineno) { if (!(result)) { - failed++; + failed++; fprintf(stderr,"\n FAILED: %s: %s:%ld\n", testname, fname, lineno); - /* 
exit(1); */ - } - else { - ok++; + /* exit(1); */ + } + else { + ok++; fprintf(stderr, "ok.\n"); } return true; diff --git a/depends/relic/CMakeLists.txt b/depends/relic/CMakeLists.txt index 58bf27a5a40f..db36c9bb7d46 100644 --- a/depends/relic/CMakeLists.txt +++ b/depends/relic/CMakeLists.txt @@ -1,7 +1,5 @@ -cmake_minimum_required(VERSION 3.14) -if(NOT ${CMAKE_VERSION} VERSION_LESS "3.1") - cmake_policy(SET CMP0054 NEW) -endif() +cmake_minimum_required(VERSION 3.18) +cmake_policy(SET CMP0054 NEW) project(RELIC C CXX) set(PROJECT_VERSION_MAJOR "0") diff --git a/go-bindings/Makefile b/go-bindings/Makefile index c7a57e7307d7..dcea3f24f2d3 100644 --- a/go-bindings/Makefile +++ b/go-bindings/Makefile @@ -1,29 +1,59 @@ -SRC_DIR=$(PWD)/../src -BUILD_DIR=$(PWD)/../build +export CGO_ENABLED := 1 +GO=go -GO="go" -COVERAGE_OUTPUT ?= coverage.out - -.PHONY: default vet test clean +OS := $(shell uname -s) +ARCH := $(shell uname -m) -default: prepare vet test clean +COVERAGE_OUTPUT ?= coverage.out MAKEFILE_PATH := $(abspath $(lastword $(MAKEFILE_LIST))) + +BUILD_DIR=$(CURDIR)/../build CURR_DIR := $(dir $(MAKEFILE_PATH)) +SRC_DIR=$(CURDIR)/../src -CGO_ENABLED := 1 +GMP_PREFIX := /usr/local -CGO_LDFLAGS ?= "\ --L$(CURR_DIR)../build/depends/mimalloc \ --L$(CURR_DIR)../build/depends/relic/lib \ --L$(CURR_DIR)../build/src \ --ldashbls -lrelic_s -lmimalloc-secure -lgmp" +ifeq ("$(OS)", "Darwin") + ifneq ($(wildcard /opt/local/bin/port),) + GMP_PREFIX := /opt/local + endif + ifeq ("$(ARCH)", "arm64") + ifneq ($(wildcard /opt/homebrew/bin/brew),) + GMP_PREFIX := /opt/homebrew + endif + endif +else ifeq ("$(OS)", "Linux") + ifneq ($(wildcard /home/linuxbrew/.linuxbrew/bin/brew),) + GMP_PREFIX := /home/linuxbrew/.linuxbrew + endif +endif + +MIMALLOC_LIB := mimalloc-secure +MIMALLOC_LIB_PATH := $(CURR_DIR)../build/depends/mimalloc + +ifneq ($(wildcard $(MIMALLOC_LIB_PATH)/libmimalloc-secure-debug.a),) + MIMALLOC_LIB := mimalloc-secure-debug +endif CGO_CXXFLAGS ?= "\ 
-I$(CURR_DIR)../build/depends/relic/include \ -I$(CURR_DIR)../depends/mimalloc/include \ -I$(CURR_DIR)../depends/relic/include \ --I$(CURR_DIR)../include" +-I$(CURR_DIR)../include \ +-I$(GMP_PREFIX)/include" + +CGO_LDFLAGS ?= "\ +-L$(CURR_DIR)../build/src -ldashbls \ +-L$(MIMALLOC_LIB_PATH) -l$(MIMALLOC_LIB) \ +-L$(CURR_DIR)../build/depends/relic/lib -lrelic_s \ +-L$(GMP_PREFIX)/lib -lgmp" + +.PHONY: default prepare fmt test cover vet help clean config + +all: default + +default: prepare vet test clean prepare: @mkdir -p ../build/src/dashbls @@ -42,7 +72,7 @@ cover: ## Run tests and generate test coverage file, output coverage results an rm -f $(COVERAGE_OUTPUT) vet: ## Go vet all project code - CGO_CXXFLAGS=$(CGO_CXXFLAGS) CGO_LDFLAGS=$(CGO_LDFLAGS) go vet ./... + CGO_CXXFLAGS=$(CGO_CXXFLAGS) CGO_LDFLAGS=$(CGO_LDFLAGS) $(GO) vet ./... help: ## Show This Help @for line in $$(cat Makefile | grep "##" | grep -v "grep" | sed "s/:.*##/:/g" | sed "s/\ /!/g"); do verb=$$(echo $$line | cut -d ":" -f 1); desc=$$(echo $$line | cut -d ":" -f 2 | sed "s/!/\ /g"); printf "%-30s--%s\n" "$$verb" "$$desc"; done @@ -50,3 +80,18 @@ help: ## Show This Help clean: ## Clean up transient (generated) files $(GO) clean rm -f $(COVERAGE_OUTPUT) + +config: ## Display build configuration + @echo "" + @echo "OS: $(OS)" + @echo "ARCH: $(ARCH)" + @echo "" + @echo "CC: $${CC:-}" + @echo "CXX: $${CXX:-}" + @echo "GO: $(GO)" + @echo "" + @echo "GOROOT: $${GOROOT:-}" + @echo "" + @echo "CGO_CXXFLAGS: $(CGO_CXXFLAGS)" + @echo "CGO_LDFLAGS: $(CGO_LDFLAGS)" + @echo "" diff --git a/go-bindings/elements.cpp b/go-bindings/elements.cpp index fda47b2e6237..eb30fcec88ec 100644 --- a/go-bindings/elements.cpp +++ b/go-bindings/elements.cpp @@ -23,11 +23,11 @@ int CG1ElementSize() { return bls::G1Element::SIZE; } -CG1Element CG1ElementFromBytes(const void* data, bool* didErr) { +CG1Element CG1ElementFromBytes(const void* data, size_t len, bool* didErr) { bls::G1Element* el = nullptr; try { el = new 
bls::G1Element( - bls::G1Element::FromBytes(bls::Bytes((uint8_t*)(data), bls::G1Element::SIZE)) + bls::G1Element::FromBytes(bls::Bytes((uint8_t*)data, len)) ); } catch(const std::exception& ex) { gErrMsg = ex.what(); @@ -93,11 +93,11 @@ int CG2ElementSize() { return bls::G2Element::SIZE; } -CG2Element CG2ElementFromBytes(const void* data, bool* didErr) { +CG2Element CG2ElementFromBytes(const void* data, size_t len, bool* didErr) { bls::G2Element* el = nullptr; try { el = new bls::G2Element( - bls::G2Element::FromBytes(bls::Bytes((uint8_t*)data, bls::G2Element::SIZE)) + bls::G2Element::FromBytes(bls::Bytes((uint8_t*)data, len)) ); *didErr = false; } catch(const std::exception& ex) { diff --git a/go-bindings/elements.go b/go-bindings/elements.go index 013127677c7f..84548c17e1dd 100644 --- a/go-bindings/elements.go +++ b/go-bindings/elements.go @@ -14,8 +14,6 @@ package blschia -// #cgo LDFLAGS: -ldashbls -lrelic_s -lmimalloc-secure -lgmp -// #cgo CXXFLAGS: -std=c++14 // #include // #include // #include "elements.h" @@ -40,7 +38,7 @@ func G1ElementFromBytes(data []byte) (*G1Element, error) { defer C.free(cBytesPtr) var cDidErr C.bool el := G1Element{ - val: C.CG1ElementFromBytes(cBytesPtr, &cDidErr), + val: C.CG1ElementFromBytes(cBytesPtr, C.size_t(len(data)), &cDidErr), } if bool(cDidErr) { return nil, errFromC() @@ -131,7 +129,7 @@ func G2ElementFromBytes(data []byte) (*G2Element, error) { defer C.free(cBytesPtr) var cDidErr C.bool el := G2Element{ - val: C.CG2ElementFromBytes(cBytesPtr, &cDidErr), + val: C.CG2ElementFromBytes(cBytesPtr, C.size_t(len(data)), &cDidErr), } if bool(cDidErr) { return nil, errFromC() diff --git a/go-bindings/elements.h b/go-bindings/elements.h index 2a4c6e544b8d..a391534dc55d 100644 --- a/go-bindings/elements.h +++ b/go-bindings/elements.h @@ -27,7 +27,7 @@ typedef void* CPrivateKey; // G1Element int CG1ElementSize(); -CG1Element CG1ElementFromBytes(const void* data, bool* didErr); +CG1Element CG1ElementFromBytes(const void* data, size_t 
len, bool* didErr); CG1Element CG1ElementGenerator(); bool CG1ElementIsValid(const CG1Element el); uint32_t CG1ElementGetFingerprint(const CG1Element el); @@ -40,7 +40,7 @@ void CG1ElementFree(const CG1Element el); // G2Element int CG2ElementSize(); -CG2Element CG2ElementFromBytes(const void* data, bool* didErr); +CG2Element CG2ElementFromBytes(const void* data, size_t len, bool* didErr); CG2Element CG2ElementGenerator(); bool CG2ElementIsValid(const CG2Element el); bool CG2ElementIsEqual(const CG2Element el1, const CG2Element el2); diff --git a/go-bindings/privatekey.cpp b/go-bindings/privatekey.cpp index cbe5fddf3a74..90b7f0aab696 100644 --- a/go-bindings/privatekey.cpp +++ b/go-bindings/privatekey.cpp @@ -20,12 +20,12 @@ #include "utils.hpp" // private key bindings implementation -CPrivateKey CPrivateKeyFromBytes(const void* data, const bool modOrder, bool* didErr) { +CPrivateKey CPrivateKeyFromBytes(const void* data, size_t len, const bool modOrder, bool* didErr) { bls::PrivateKey* skPtr = nullptr; try { skPtr = new bls::PrivateKey( bls::PrivateKey::FromBytes( - bls::Bytes((uint8_t*)data, bls::PrivateKey::PRIVATE_KEY_SIZE), + bls::Bytes((uint8_t*)data, len), modOrder ) ); diff --git a/go-bindings/privatekey.go b/go-bindings/privatekey.go index 1a47454b26e0..155338e834cb 100644 --- a/go-bindings/privatekey.go +++ b/go-bindings/privatekey.go @@ -14,8 +14,6 @@ package blschia -// #cgo LDFLAGS: -ldashbls -lrelic_s -lmimalloc-secure -lgmp -// #cgo CXXFLAGS: -std=c++14 // #include // #include // #include "privatekey.h" @@ -38,7 +36,7 @@ func PrivateKeyFromBytes(data []byte, modOrder bool) (*PrivateKey, error) { defer C.SecFree(cBytesPtr) var cDidErr C.bool sk := PrivateKey{ - val: C.CPrivateKeyFromBytes(cBytesPtr, C.bool(modOrder), &cDidErr), + val: C.CPrivateKeyFromBytes(cBytesPtr, C.size_t(len(data)), C.bool(modOrder), &cDidErr), } if bool(cDidErr) { return nil, errFromC() diff --git a/go-bindings/privatekey.h b/go-bindings/privatekey.h index 
55d1c86ea852..13f3c5d3dd14 100644 --- a/go-bindings/privatekey.h +++ b/go-bindings/privatekey.h @@ -23,7 +23,7 @@ extern "C" { typedef void* CPrivateKey; -CPrivateKey CPrivateKeyFromBytes(const void* data, const bool modOrder, bool* didErr); +CPrivateKey CPrivateKeyFromBytes(const void* data, size_t len, const bool modOrder, bool* didErr); CPrivateKey CPrivateKeyAggregate(void** sks, const size_t len); CG1Element CPrivateKeyGetG1Element(const CPrivateKey sk, bool* didErr); CG2Element CPrivateKeyGetG2Element(const CPrivateKey sk, bool* didErr); diff --git a/go-bindings/schemes.go b/go-bindings/schemes.go index 21126f91258a..2289b212bd7d 100644 --- a/go-bindings/schemes.go +++ b/go-bindings/schemes.go @@ -14,8 +14,6 @@ package blschia -// #cgo LDFLAGS: -ldashbls -lrelic_s -lmimalloc-secure -lgmp -// #cgo CXXFLAGS: -std=c++14 // #include // #include // #include "schemes.h" diff --git a/go-bindings/threshold.go b/go-bindings/threshold.go index 5d59b1743e4c..5f484cee8dc3 100644 --- a/go-bindings/threshold.go +++ b/go-bindings/threshold.go @@ -14,8 +14,6 @@ package blschia -// #cgo LDFLAGS: -ldashbls -lrelic_s -lmimalloc-secure -lgmp -// #cgo CXXFLAGS: -std=c++14 // #include // #include "threshold.h" // #include "blschia.h" diff --git a/go-bindings/util.go b/go-bindings/util.go index 39b91513c1b5..fbea18895a76 100644 --- a/go-bindings/util.go +++ b/go-bindings/util.go @@ -14,8 +14,6 @@ package blschia -// #cgo LDFLAGS: -ldashbls -lrelic_s -lmimalloc-secure -lgmp -// #cgo CXXFLAGS: -std=c++14 // #include "blschia.h" // #include import "C" diff --git a/include/dashbls/schemes.hpp b/include/dashbls/schemes.hpp index 31e6b1a1b704..1aee61dd346d 100644 --- a/include/dashbls/schemes.hpp +++ b/include/dashbls/schemes.hpp @@ -15,6 +15,7 @@ #ifndef SRC_BLSSCHEMES_HPP_ #define SRC_BLSSCHEMES_HPP_ +#include #include #include @@ -65,8 +66,8 @@ class CoreMPL { virtual bool Verify(const G1Element& pubkey, const Bytes& message, const G2Element& signature); - virtual vector 
Aggregate(const vector> &signatures); - virtual vector Aggregate(const vector& signatures); + virtual std::array Aggregate(const vector> &signatures); + virtual std::array Aggregate(const vector& signatures); virtual G2Element Aggregate(const vector &signatures); @@ -243,7 +244,7 @@ class LegacySchemeMPL final : public CoreMPL { bool Verify(const Bytes& pubkey, const Bytes& message, const Bytes& signature) final { throw std::runtime_error("Not supported in LegacySchemeMPL"); } bool Verify(const G1Element &pubkey, const Bytes& message, const G2Element &signature) final; - vector Aggregate(const vector> &signatures) final { throw std::runtime_error("Not supported in LegacySchemeMPL"); } + std::array Aggregate(const vector> &signatures) final { throw std::runtime_error("Not supported in LegacySchemeMPL"); } G2Element AggregateSecure(const std::vector& vecPublicKeys, const std::vector& vecSignatures, diff --git a/js-bindings/CMakeLists.txt b/js-bindings/CMakeLists.txt index 03da706af724..f053eb2ea05e 100644 --- a/js-bindings/CMakeLists.txt +++ b/js-bindings/CMakeLists.txt @@ -20,6 +20,20 @@ add_custom_target(install_npm_dependencies npm ci) add_dependencies(blsjstmp install_npm_dependencies) target_link_libraries(blsjstmp PRIVATE dashbls) +target_link_options(blsjstmp PRIVATE + "SHELL:--bind" + "SHELL:-Oz" + "SHELL:--closure 1" + "SHELL:-s MODULARIZE=1" + "SHELL:-s MALLOC=emmalloc" + "SHELL:-s ALLOW_MEMORY_GROWTH=1" + "SHELL:-s INITIAL_MEMORY=134217728" +) + +if(CMAKE_BUILD_TYPE MATCHES "Debug") + target_link_options(blsjstmp PRIVATE "SHELL:-s ASSERTIONS=2") +endif() + # Copy necessary files for the npm package configure_file(${CMAKE_CURRENT_SOURCE_DIR}/package.json package.json COPYONLY) configure_file(${CMAKE_CURRENT_SOURCE_DIR}/package-lock.json package-lock.json COPYONLY) @@ -34,5 +48,4 @@ foreach(file ${JS_BINDINGS_TESTS}) configure_file(${CMAKE_CURRENT_SOURCE_DIR}/tests/${file} tests/${file} COPYONLY) endforeach() -set_target_properties(blsjstmp PROPERTIES 
LINK_FLAGS "--bind -Oz --closure 1 -s MODULARIZE=1") add_custom_command(TARGET blsjstmp POST_BUILD COMMAND npm run build:web) diff --git a/python-bindings/pythonbindings.cpp b/python-bindings/pythonbindings.cpp index 419588c24eb8..4ba2acb83a67 100644 --- a/python-bindings/pythonbindings.cpp +++ b/python-bindings/pythonbindings.cpp @@ -25,6 +25,16 @@ namespace py = pybind11; using namespace bls; +namespace { +inline int PyLong_AsByteArray(PyLongObject* obj, uint8_t* buf, Py_ssize_t size, bool is_le, bool is_signed) +{ + return _PyLong_AsByteArray(obj, buf, size, is_le, is_signed +#if PY_VERSION_HEX >= 0x030d0000 + , /*with_exceptions=*/true +#endif // PY_VERSION_HEX >= 0x030d0000 + ); +} +} // anonymous namespace PYBIND11_MODULE(blspy, m) { @@ -46,7 +56,7 @@ PYBIND11_MODULE(blspy, m) } auto data_ptr = reinterpret_cast(info.ptr); std::array data; - std::copy(data_ptr, data_ptr + PrivateKey::PRIVATE_KEY_SIZE, data.data()); + std::copy(data_ptr, data_ptr + data.size(), data.data()); py::gil_scoped_release release; return PrivateKey::FromBytes(data); }) @@ -357,10 +367,10 @@ PYBIND11_MODULE(blspy, m) .def(py::init(&G1Element::FromByteVector), py::call_guard()) .def(py::init([](py::int_ pyint) { std::array buffer{}; - if (_PyLong_AsByteArray( + if (PyLong_AsByteArray( (PyLongObject *)pyint.ptr(), buffer.data(), - G1Element::SIZE, + buffer.size(), 0, 0) < 0) { throw std::invalid_argument("Failed to cast int to G1Element"); @@ -380,7 +390,7 @@ PYBIND11_MODULE(blspy, m) } auto data_ptr = static_cast(info.ptr); std::array data; - std::copy(data_ptr, data_ptr + G1Element::SIZE, data.data()); + std::copy(data_ptr, data_ptr + data.size(), data.data()); py::gil_scoped_release release; return G1Element::FromBytes(data); })) @@ -398,7 +408,7 @@ PYBIND11_MODULE(blspy, m) } auto data_ptr = reinterpret_cast(info.ptr); std::array data; - std::copy(data_ptr, data_ptr + G1Element::SIZE, data.data()); + std::copy(data_ptr, data_ptr + data.size(), data.data()); py::gil_scoped_release 
release; return G1Element::FromBytes(data); }) @@ -509,16 +519,16 @@ PYBIND11_MODULE(blspy, m) } auto data_ptr = static_cast(info.ptr); std::array data; - std::copy(data_ptr, data_ptr + G2Element::SIZE, data.data()); + std::copy(data_ptr, data_ptr + data.size(), data.data()); py::gil_scoped_release release; return G2Element::FromBytes(data); })) .def(py::init([](py::int_ pyint) { std::array buffer{}; - if (_PyLong_AsByteArray( + if (PyLong_AsByteArray( (PyLongObject *)pyint.ptr(), buffer.data(), - G2Element::SIZE, + buffer.size(), 0, 0) < 0) { throw std::invalid_argument("Failed to cast int to G2Element"); @@ -540,7 +550,7 @@ PYBIND11_MODULE(blspy, m) } auto data_ptr = reinterpret_cast(info.ptr); std::array data; - std::copy(data_ptr, data_ptr + G2Element::SIZE, data.data()); + std::copy(data_ptr, data_ptr + data.size(), data.data()); py::gil_scoped_release release; return G2Element::FromBytes(data); }) @@ -637,20 +647,20 @@ PYBIND11_MODULE(blspy, m) if ((int)info.size != GTElement::SIZE) { throw std::invalid_argument( - "Length of bytes object not equal to G2Element::SIZE"); + "Length of bytes object not equal to GTElement::SIZE"); } auto data_ptr = static_cast(info.ptr); std::array data; - std::copy(data_ptr, data_ptr + GTElement::SIZE, data.data()); + std::copy(data_ptr, data_ptr + data.size(), data.data()); py::gil_scoped_release release; return GTElement::FromBytes(data); })) .def(py::init([](py::int_ pyint) { - std::array buffer{}; - if (_PyLong_AsByteArray( + std::array buffer{}; + if (PyLong_AsByteArray( (PyLongObject *)pyint.ptr(), buffer.data(), - GTElement::SIZE, + buffer.size(), 0, 0) < 0) { throw std::invalid_argument("Failed to cast int to GTElement"); @@ -672,7 +682,7 @@ PYBIND11_MODULE(blspy, m) } auto data_ptr = reinterpret_cast(info.ptr); std::array data; - std::copy(data_ptr, data_ptr + GTElement::SIZE, data.data()); + std::copy(data_ptr, data_ptr + data.size(), data.data()); py::gil_scoped_release release; return GTElement::FromBytes(data); }) 
@@ -690,7 +700,7 @@ PYBIND11_MODULE(blspy, m) } auto data_ptr = reinterpret_cast(info.ptr); std::array data; - std::copy(data_ptr, data_ptr + GTElement::SIZE, data.data()); + std::copy(data_ptr, data_ptr + data.size(), data.data()); py::gil_scoped_release release; return GTElement::FromBytesUnchecked(data); }) diff --git a/rust-bindings/bls-dash-sys/bindings.rs b/rust-bindings/bls-dash-sys/bindings.rs index b90527e85fa2..6d570a7f9321 100644 --- a/rust-bindings/bls-dash-sys/bindings.rs +++ b/rust-bindings/bls-dash-sys/bindings.rs @@ -15,6 +15,7 @@ extern "C" { pub fn G1ElementFromBytes( data: *const ::std::os::raw::c_void, + len: usize, legacy: bool, didErr: *mut bool, ) -> G1Element; @@ -43,6 +44,7 @@ extern "C" { pub fn G2ElementFromBytes( data: *const ::std::os::raw::c_void, + len: usize, legacy: bool, didErr: *mut bool, ) -> G2Element; @@ -67,6 +69,7 @@ extern "C" { pub fn PrivateKeyFromBytes( data: *const ::std::os::raw::c_void, + len: usize, modOrder: bool, didErr: *mut bool, ) -> PrivateKey; @@ -355,6 +358,7 @@ extern "C" { pub fn BIP32ExtendedPublicKeyFromBytes( data: *const ::std::os::raw::c_void, + len: usize, legacy: bool, didErr: *mut bool, ) -> BIP32ExtendedPublicKey; @@ -385,6 +389,7 @@ extern "C" { pub fn BIP32ExtendedPrivateKeyFromBytes( data: *const ::std::os::raw::c_void, + len: usize, didErr: *mut bool, ) -> BIP32ExtendedPrivateKey; diff --git a/rust-bindings/bls-dash-sys/build.rs b/rust-bindings/bls-dash-sys/build.rs index c5245e6c25a9..0570baaa89b0 100644 --- a/rust-bindings/bls-dash-sys/build.rs +++ b/rust-bindings/bls-dash-sys/build.rs @@ -201,12 +201,14 @@ fn main() { println!("cargo:rustc-link-lib=static=relic_s"); - println!( - "cargo:rustc-link-search={}", - root_path.join("build/depends/mimalloc").display() - ); - - println!("cargo:rustc-link-lib=static=mimalloc-secure"); + let mimalloc_dir = root_path.join("build/depends/mimalloc"); + println!("cargo:rustc-link-search={}", mimalloc_dir.display()); + let mimalloc_lib = if 
mimalloc_dir.join("libmimalloc-secure-debug.a").exists() { + "mimalloc-secure-debug" + } else { + "mimalloc-secure" + }; + println!("cargo:rustc-link-lib=static={}", mimalloc_lib); println!( "cargo:rustc-link-search={}", diff --git a/rust-bindings/bls-dash-sys/c-bindings/bip32/extendedprivatekey.cpp b/rust-bindings/bls-dash-sys/c-bindings/bip32/extendedprivatekey.cpp index 541f5f992cf1..5e9efb8d81e2 100644 --- a/rust-bindings/bls-dash-sys/c-bindings/bip32/extendedprivatekey.cpp +++ b/rust-bindings/bls-dash-sys/c-bindings/bip32/extendedprivatekey.cpp @@ -6,12 +6,12 @@ #include "../error.h" #include "bls.hpp" -BIP32ExtendedPrivateKey BIP32ExtendedPrivateKeyFromBytes(const void* data, bool* didErr) +BIP32ExtendedPrivateKey BIP32ExtendedPrivateKeyFromBytes(const void* data, size_t len, bool* didErr) { bls::ExtendedPrivateKey* el = nullptr; try { el = new bls::ExtendedPrivateKey(bls::ExtendedPrivateKey::FromBytes( - bls::Bytes((uint8_t*)(data), bls::ExtendedPrivateKey::SIZE))); + bls::Bytes((uint8_t*)data, len))); } catch (const std::exception& ex) { gErrMsg = ex.what(); *didErr = true; @@ -26,7 +26,7 @@ BIP32ExtendedPrivateKey BIP32ExtendedPrivateKeyFromSeed(const void* data, const bls::ExtendedPrivateKey* el = nullptr; try { el = new bls::ExtendedPrivateKey(bls::ExtendedPrivateKey::FromSeed( - bls::Bytes((uint8_t*)(data), len))); + bls::Bytes((uint8_t*)data, len))); } catch (const std::exception& ex) { gErrMsg = ex.what(); *didErr = true; diff --git a/rust-bindings/bls-dash-sys/c-bindings/bip32/extendedprivatekey.h b/rust-bindings/bls-dash-sys/c-bindings/bip32/extendedprivatekey.h index 18ca3c28f301..8741737215d0 100644 --- a/rust-bindings/bls-dash-sys/c-bindings/bip32/extendedprivatekey.h +++ b/rust-bindings/bls-dash-sys/c-bindings/bip32/extendedprivatekey.h @@ -18,6 +18,7 @@ typedef void* BIP32ExtendedPrivateKey; // ExtendedPrivateKey BIP32ExtendedPrivateKey BIP32ExtendedPrivateKeyFromBytes( const void* data, + size_t len, bool* didErr); BIP32ExtendedPrivateKey 
BIP32ExtendedPrivateKeyFromSeed(const void* data, const size_t len, bool* didErr); BIP32ExtendedPrivateKey BIP32ExtendedPrivateKeyPrivateChild( diff --git a/rust-bindings/bls-dash-sys/c-bindings/bip32/extendedpublickey.cpp b/rust-bindings/bls-dash-sys/c-bindings/bip32/extendedpublickey.cpp index 703c32e0216f..a52bcabe5605 100644 --- a/rust-bindings/bls-dash-sys/c-bindings/bip32/extendedpublickey.cpp +++ b/rust-bindings/bls-dash-sys/c-bindings/bip32/extendedpublickey.cpp @@ -8,13 +8,14 @@ BIP32ExtendedPublicKey BIP32ExtendedPublicKeyFromBytes( const void* data, + size_t len, const bool legacy, bool* didErr) { bls::ExtendedPublicKey* el = nullptr; try { el = new bls::ExtendedPublicKey(bls::ExtendedPublicKey::FromBytes( - bls::Bytes((uint8_t*)(data), bls::ExtendedPublicKey::SIZE), + bls::Bytes((uint8_t*)data, len), legacy)); } catch (const std::exception& ex) { gErrMsg = ex.what(); diff --git a/rust-bindings/bls-dash-sys/c-bindings/bip32/extendedpublickey.h b/rust-bindings/bls-dash-sys/c-bindings/bip32/extendedpublickey.h index 51465cae039c..420da3156001 100644 --- a/rust-bindings/bls-dash-sys/c-bindings/bip32/extendedpublickey.h +++ b/rust-bindings/bls-dash-sys/c-bindings/bip32/extendedpublickey.h @@ -16,6 +16,7 @@ typedef void* BIP32ExtendedPublicKey; // ExtendedPublicKey BIP32ExtendedPublicKey BIP32ExtendedPublicKeyFromBytes( const void* data, + size_t len, const bool legacy, bool* didErr); BIP32ExtendedPublicKey BIP32ExtendedPublicKeyPublicChild( diff --git a/rust-bindings/bls-dash-sys/c-bindings/elements.cpp b/rust-bindings/bls-dash-sys/c-bindings/elements.cpp index 5214bc4fc931..486ee32f4781 100644 --- a/rust-bindings/bls-dash-sys/c-bindings/elements.cpp +++ b/rust-bindings/bls-dash-sys/c-bindings/elements.cpp @@ -23,11 +23,11 @@ int G1ElementSize() { return bls::G1Element::SIZE; } -G1Element G1ElementFromBytes(const void* data, bool legacy, bool* didErr) { +G1Element G1ElementFromBytes(const void* data, size_t len, bool legacy, bool* didErr) { bls::G1Element* 
el = nullptr; try { el = new bls::G1Element( - bls::G1Element::FromBytes(bls::Bytes((uint8_t*)(data), bls::G1Element::SIZE), legacy) + bls::G1Element::FromBytes(bls::Bytes((uint8_t*)data, len), legacy) ); } catch(const std::exception& ex) { gErrMsg = ex.what(); @@ -97,11 +97,11 @@ int G2ElementSize() { return bls::G2Element::SIZE; } -G2Element G2ElementFromBytes(const void* data, const bool legacy, bool* didErr) { +G2Element G2ElementFromBytes(const void* data, size_t len, const bool legacy, bool* didErr) { bls::G2Element* el = nullptr; try { el = new bls::G2Element( - bls::G2Element::FromBytes(bls::Bytes((uint8_t*)data, bls::G2Element::SIZE), legacy) + bls::G2Element::FromBytes(bls::Bytes((uint8_t*)data, len), legacy) ); *didErr = false; } catch(const std::exception& ex) { diff --git a/rust-bindings/bls-dash-sys/c-bindings/elements.h b/rust-bindings/bls-dash-sys/c-bindings/elements.h index fa1b5ae58117..e6912125375a 100644 --- a/rust-bindings/bls-dash-sys/c-bindings/elements.h +++ b/rust-bindings/bls-dash-sys/c-bindings/elements.h @@ -27,7 +27,7 @@ typedef void* PrivateKey; // G1Element int G1ElementSize(); -G1Element G1ElementFromBytes(const void* data, const bool legacy, bool* didErr); +G1Element G1ElementFromBytes(const void* data, size_t len, const bool legacy, bool* didErr); G1Element G1ElementGenerator(); bool G1ElementIsValid(const G1Element el); uint32_t G1ElementGetFingerprint(const G1Element el, const bool legacy); @@ -41,7 +41,7 @@ void G1ElementFree(const G1Element el); // G2Element int G2ElementSize(); -G2Element G2ElementFromBytes(const void* data, const bool legacy, bool* didErr); +G2Element G2ElementFromBytes(const void* data, size_t len, const bool legacy, bool* didErr); G2Element G2ElementGenerator(); bool G2ElementIsValid(const G2Element el); bool G2ElementIsEqual(const G2Element el1, const G2Element el2); diff --git a/rust-bindings/bls-dash-sys/c-bindings/privatekey.cpp b/rust-bindings/bls-dash-sys/c-bindings/privatekey.cpp index 
4e1d0df34661..ca1b2bcde719 100644 --- a/rust-bindings/bls-dash-sys/c-bindings/privatekey.cpp +++ b/rust-bindings/bls-dash-sys/c-bindings/privatekey.cpp @@ -20,12 +20,12 @@ #include "utils.hpp" // private key bindings implementation -PrivateKey PrivateKeyFromBytes(const void* data, const bool modOrder, bool* didErr) { +PrivateKey PrivateKeyFromBytes(const void* data, size_t len, const bool modOrder, bool* didErr) { bls::PrivateKey* skPtr = nullptr; try { skPtr = new bls::PrivateKey( bls::PrivateKey::FromBytes( - bls::Bytes((uint8_t*)data, bls::PrivateKey::PRIVATE_KEY_SIZE), + bls::Bytes((uint8_t*)data, len), modOrder ) ); diff --git a/rust-bindings/bls-dash-sys/c-bindings/privatekey.h b/rust-bindings/bls-dash-sys/c-bindings/privatekey.h index 147e1a618f21..bba2837a1753 100644 --- a/rust-bindings/bls-dash-sys/c-bindings/privatekey.h +++ b/rust-bindings/bls-dash-sys/c-bindings/privatekey.h @@ -23,7 +23,7 @@ extern "C" { typedef void* PrivateKey; -PrivateKey PrivateKeyFromBytes(const void* data, const bool modOrder, bool* didErr); +PrivateKey PrivateKeyFromBytes(const void* data, size_t len, const bool modOrder, bool* didErr); PrivateKey PrivateKeyFromSeedBIP32(const void* data, const size_t len); PrivateKey PrivateKeyAggregate(void** sks, const size_t len); G1Element PrivateKeyGetG1Element(const PrivateKey sk, bool* didErr); diff --git a/rust-bindings/bls-signatures/src/bip32/private_key.rs b/rust-bindings/bls-signatures/src/bip32/private_key.rs index af4394f17979..08c217c2a881 100644 --- a/rust-bindings/bls-signatures/src/bip32/private_key.rs +++ b/rust-bindings/bls-signatures/src/bip32/private_key.rs @@ -47,7 +47,7 @@ impl ExtendedPrivateKey { } Ok(ExtendedPrivateKey { c_extended_private_key: c_err_to_result(|did_err| unsafe { - BIP32ExtendedPrivateKeyFromBytes(bytes.as_ptr() as *const _, did_err) + BIP32ExtendedPrivateKeyFromBytes(bytes.as_ptr() as *const _, bytes.len(), did_err) })?, }) } diff --git a/rust-bindings/bls-signatures/src/bip32/public_key.rs 
b/rust-bindings/bls-signatures/src/bip32/public_key.rs index 43fa484f0cc2..38265939dca1 100644 --- a/rust-bindings/bls-signatures/src/bip32/public_key.rs +++ b/rust-bindings/bls-signatures/src/bip32/public_key.rs @@ -42,7 +42,7 @@ impl ExtendedPublicKey { } Ok(ExtendedPublicKey { c_extended_public_key: c_err_to_result(|did_err| unsafe { - BIP32ExtendedPublicKeyFromBytes(bytes.as_ptr() as *const _, legacy, did_err) + BIP32ExtendedPublicKeyFromBytes(bytes.as_ptr() as *const _, bytes.len(), legacy, did_err) })?, }) } diff --git a/rust-bindings/bls-signatures/src/elements.rs b/rust-bindings/bls-signatures/src/elements.rs index b671bdf6b5c5..76de9a6e7bdf 100644 --- a/rust-bindings/bls-signatures/src/elements.rs +++ b/rust-bindings/bls-signatures/src/elements.rs @@ -75,7 +75,7 @@ impl G1Element { } Ok(G1Element { c_element: c_err_to_result(|did_err| unsafe { - G1ElementFromBytes(bytes.as_ptr() as *const _, legacy, did_err) + G1ElementFromBytes(bytes.as_ptr() as *const _, bytes.len(), legacy, did_err) })?, }) } @@ -234,7 +234,7 @@ impl G2Element { } Ok(G2Element { c_element: c_err_to_result(|did_err| unsafe { - G2ElementFromBytes(bytes.as_ptr() as *const _, legacy, did_err) + G2ElementFromBytes(bytes.as_ptr() as *const _, bytes.len(), legacy, did_err) })?, }) } diff --git a/rust-bindings/bls-signatures/src/private_key.rs b/rust-bindings/bls-signatures/src/private_key.rs index ab129856cdd5..556c968ed2a7 100755 --- a/rust-bindings/bls-signatures/src/private_key.rs +++ b/rust-bindings/bls-signatures/src/private_key.rs @@ -137,7 +137,7 @@ impl PrivateKey { } let c_private_key = c_err_to_result(|did_err| unsafe { - PrivateKeyFromBytes(bytes.as_ptr() as *const c_void, mod_order, did_err) + PrivateKeyFromBytes(bytes.as_ptr() as *const c_void, bytes.len(), mod_order, did_err) })?; Ok(PrivateKey { c_private_key }) diff --git a/src/CMakeLists.txt b/src/CMakeLists.txt index c3c1abd7593d..5802843a61ed 100644 --- a/src/CMakeLists.txt +++ b/src/CMakeLists.txt @@ -46,9 +46,23 @@ 
if(BUILD_BLS_TESTS) target_link_libraries(runtest PRIVATE dashbls PRIVATE catch2) + + if(EMSCRIPTEN) + target_link_options(runtest PRIVATE "SHELL:-s MALLOC=emmalloc") + if(CMAKE_BUILD_TYPE MATCHES "Debug") + target_link_options(runtest PRIVATE "SHELL:-s ASSERTIONS=2") + endif() + endif() endif() if(BUILD_BLS_BENCHMARKS) add_executable(runbench test-bench.cpp) target_link_libraries(runbench PRIVATE dashbls) + + if(EMSCRIPTEN) + target_link_options(runbench PRIVATE "SHELL:-s MALLOC=emmalloc") + if(CMAKE_BUILD_TYPE MATCHES "Debug") + target_link_options(runbench PRIVATE "SHELL:-s ASSERTIONS=2") + endif() + endif() endif() diff --git a/src/privatekey.cpp b/src/privatekey.cpp index d4dd32d11677..d07246d8575e 100644 --- a/src/privatekey.cpp +++ b/src/privatekey.cpp @@ -300,7 +300,9 @@ G2Element PrivateKey::SignG2( { CheckKeyData(); - g2_st* pt = Util::SecAlloc(1); + g2_t pt; + g2_null(pt); + g2_new(pt); if (fLegacy) { ep2_map_legacy(pt, msg, BLS::MESSAGE_HASH_LEN); @@ -310,7 +312,7 @@ G2Element PrivateKey::SignG2( g2_mul(pt, pt, keydata); G2Element ret = G2Element::FromNative(pt); - Util::SecFree(pt); + g2_free(pt); return ret; } diff --git a/src/schemes.cpp b/src/schemes.cpp index 003a44a46707..c0d7ad294c64 100644 --- a/src/schemes.cpp +++ b/src/schemes.cpp @@ -13,6 +13,7 @@ // limitations under the License. 
#include +#include #include #include @@ -26,21 +27,23 @@ using std::vector; namespace bls { -static void HashPubKeys(bn_t* computedTs, std::vector vecPubKeyBytes) +template +static void HashPubKeys(bn_t* computedTs, size_t nPubKeys, GetBytesFn getBytes) { bn_t order; bn_new(order); g2_get_ord(order); - std::vector vecBuffer(vecPubKeyBytes.size() * G1Element::SIZE); + std::vector vecBuffer(nPubKeys * G1Element::SIZE); - for (size_t i = 0; i < vecPubKeyBytes.size(); i++) { - memcpy(vecBuffer.data() + i * G1Element::SIZE, vecPubKeyBytes[i].begin(), G1Element::SIZE); + for (size_t i = 0; i < nPubKeys; i++) { + const uint8_t* pkBytes = getBytes(i); + memcpy(vecBuffer.data() + i * G1Element::SIZE, pkBytes, G1Element::SIZE); } uint8_t pkHash[32]; - Util::Hash256(pkHash, vecBuffer.data(), vecPubKeyBytes.size() * G1Element::SIZE); - for (size_t i = 0; i < vecPubKeyBytes.size(); ++i) { + Util::Hash256(pkHash, vecBuffer.data(), nPubKeys * G1Element::SIZE); + for (size_t i = 0; i < nPubKeys; ++i) { uint8_t hash[32]; uint8_t buffer[4 + 32]; memset(buffer, 0, 4); @@ -53,6 +56,7 @@ static void HashPubKeys(bn_t* computedTs, std::vector vecPubKeyBytes) bn_read_bin(computedTs[i], hash, 32); bn_mod_basic(computedTs[i], computedTs[i], order); } + bn_free(order); } enum InvariantResult { BAD=false, GOOD=true, CONTINUE }; @@ -133,39 +137,41 @@ bool CoreMPL::Verify(const G1Element& pubkey, const Bytes& message, const G2Elem { const G2Element hashedPoint = G2Element::FromMessage(message, (const uint8_t*)strCiphersuiteId.c_str(), strCiphersuiteId.length()); - std::vector vecG1(2); - std::vector vecG2(2); + std::array g1s; + std::array g2s; - G1Element::Generator().Negate().ToNative(&vecG1[0]); + G1Element::Generator().Negate().ToNative(g1s[0]); if (!pubkey.IsValid()) { return false; } if (!signature.IsValid()) { return false; } - pubkey.ToNative(&vecG1[1]); - signature.ToNative(&vecG2[0]); - hashedPoint.ToNative(&vecG2[1]); + pubkey.ToNative(g1s[1]); + signature.ToNative(g2s[0]); + 
hashedPoint.ToNative(g2s[1]); - return CoreMPL::NativeVerify((g1_t*)vecG1.data(), (g2_t*)vecG2.data(), 2); + return CoreMPL::NativeVerify(g1s.data(), g2s.data(), 2); } -vector CoreMPL::Aggregate(const vector> &signatures) +std::array CoreMPL::Aggregate(const vector> &signatures) { vector elements; + elements.reserve(signatures.size()); for (const vector& signature : signatures) { elements.push_back(G2Element::FromByteVector(signature)); } - return CoreMPL::Aggregate(elements).Serialize(); + return CoreMPL::Aggregate(elements).SerializeToArray(); } -vector CoreMPL::Aggregate(const vector& signatures) +std::array CoreMPL::Aggregate(const vector& signatures) { vector elements; + elements.reserve(signatures.size()); for (const Bytes& signature : signatures) { elements.push_back(G2Element::FromBytes(signature)); } - return CoreMPL::Aggregate(elements).Serialize(); + return CoreMPL::Aggregate(elements).SerializeToArray(); } G2Element CoreMPL::Aggregate(const vector &signatures) @@ -195,22 +201,17 @@ G2Element CoreMPL::AggregateSecure(std::vector const &vecPublicKeys, } bn_t* computedTs = new bn_t[vecPublicKeys.size()]; - std::vector, const G2Element*>> vecSorted(vecPublicKeys.size()); + std::vector, const G2Element*>> vecSorted(vecPublicKeys.size()); for (size_t i = 0; i < vecPublicKeys.size(); i++) { bn_new(computedTs[i]); - vecSorted[i] = std::make_pair(vecPublicKeys[i].Serialize(fLegacy), &vecSignatures[i]); + vecSorted[i] = std::make_pair(vecPublicKeys[i].SerializeToArray(fLegacy), &vecSignatures[i]); } std::sort(vecSorted.begin(), vecSorted.end(), [](const auto& a, const auto& b) { return std::memcmp(a.first.data(), b.first.data(), G1Element::SIZE) < 0; }); - std::vector vecPublicKeyBytes; - vecPublicKeyBytes.reserve(vecPublicKeys.size()); - for (const auto& it : vecSorted) { - vecPublicKeyBytes.push_back(Bytes{it.first}); - } - - HashPubKeys(computedTs, vecPublicKeyBytes); + HashPubKeys(computedTs, vecSorted.size(), + [&](size_t i) { return 
vecSorted[i].first.data(); }); // Raise all signatures to power of the corresponding t's and aggregate the results into aggSig // Also accumulates aggregation info for each signature @@ -222,6 +223,9 @@ G2Element CoreMPL::AggregateSecure(std::vector const &vecPublicKeys, G2Element aggSig = CoreMPL::Aggregate(expSigs); + for (size_t i = 0; i < vecPublicKeys.size(); i++) { + bn_free(computedTs[i]); + } delete[] computedTs; return aggSig; @@ -237,30 +241,28 @@ bool CoreMPL::VerifySecure(const std::vector& vecPublicKeys, const G2Element& signature, const Bytes& message, const bool fLegacy) { - bn_t one; - bn_new(one); - bn_zero(one); - bn_set_dig(one, 1); - bn_t* computedTs = new bn_t[vecPublicKeys.size()]; - std::vector> vecSorted(vecPublicKeys.size()); + std::vector> vecSorted(vecPublicKeys.size()); for (size_t i = 0; i < vecPublicKeys.size(); i++) { bn_new(computedTs[i]); - vecSorted[i] = vecPublicKeys[i].Serialize(fLegacy); + vecSorted[i] = vecPublicKeys[i].SerializeToArray(fLegacy); } std::sort(vecSorted.begin(), vecSorted.end(), [](const auto& a, const auto& b) -> bool { return std::memcmp(a.data(), b.data(), G1Element::SIZE) < 0; }); - HashPubKeys(computedTs, {vecSorted.begin(), vecSorted.end()}); + HashPubKeys(computedTs, vecSorted.size(), + [&](size_t i) { return vecSorted[i].data(); }); G1Element publicKey; for (size_t i = 0; i < vecSorted.size(); ++i) { G1Element g1 = G1Element::FromBytes(Bytes(vecSorted[i]), fLegacy); - publicKey = CoreMPL::Aggregate({publicKey, g1 * computedTs[i]}); + publicKey += g1 * computedTs[i]; } - bn_free(one); + for (size_t i = 0; i < vecPublicKeys.size(); i++) { + bn_free(computedTs[i]); + } delete[] computedTs; return AggregateVerify({publicKey}, {message}, {signature}); @@ -293,6 +295,7 @@ bool CoreMPL::AggregateVerify(const vector& pubkeys, } vector pubkeyElements; + pubkeyElements.reserve(nPubKeys); for (size_t i = 0; i < nPubKeys; ++i) { pubkeyElements.push_back(G1Element::FromBytes(pubkeys[i])); } @@ -573,14 +576,14 @@ bool 
AugSchemeMPL::AggregateVerify(const vector& pubkeys, G2Element PopSchemeMPL::PopProve(const PrivateKey &seckey) { const G1Element& pk = seckey.GetG1Element(); - const G2Element hashedKey = G2Element::FromMessage(pk.Serialize(), (const uint8_t *)POP_CIPHERSUITE_ID.c_str(), POP_CIPHERSUITE_ID.length()); + const G2Element hashedKey = G2Element::FromMessage(pk.SerializeToArray(), (const uint8_t *)POP_CIPHERSUITE_ID.c_str(), POP_CIPHERSUITE_ID.length()); return seckey.GetG2Power(hashedKey); } bool PopSchemeMPL::PopVerify(const G1Element &pubkey, const G2Element &signature_proof) { - const G2Element hashedPoint = G2Element::FromMessage(pubkey.Serialize(), (const uint8_t*)POP_CIPHERSUITE_ID.c_str(), POP_CIPHERSUITE_ID.length()); + const G2Element hashedPoint = G2Element::FromMessage(pubkey.SerializeToArray(), (const uint8_t*)POP_CIPHERSUITE_ID.c_str(), POP_CIPHERSUITE_ID.length()); g1_t g1s[2]; g2_t g2s[2]; diff --git a/src/test.cpp b/src/test.cpp index 51d84706a83c..7e9d5280b3b5 100644 --- a/src/test.cpp +++ b/src/test.cpp @@ -1010,8 +1010,8 @@ TEST_CASE("Advanced") { vector> vecG2Vector = {g2BasicSignVector1.Serialize(), g2BasicSign3.Serialize()}; vector> vecHashes = {vecHash, vecG2Element}; - vector aggVector = BasicSchemeMPL().Aggregate(vecG2Vector); - vector aggBytes = BasicSchemeMPL().Aggregate(vector{vecG2Vector.begin(), vecG2Vector.end()}); + auto aggVector = BasicSchemeMPL().Aggregate(vecG2Vector); + auto aggBytes = BasicSchemeMPL().Aggregate(vector{vecG2Vector.begin(), vecG2Vector.end()}); REQUIRE(aggVector == aggBytes); REQUIRE(BasicSchemeMPL().AggregateVerify(vector{vecG1Vector.begin(), vecG1Vector.end()}, @@ -1019,7 +1019,7 @@ TEST_CASE("Advanced") { Bytes(aggVector))); REQUIRE(BasicSchemeMPL().AggregateVerify({g1_1, g1_3}, vector{vecHashes.begin(), vecHashes.end()}, - G2Element::FromByteVector(aggVector))); + G2Element::FromBytes(aggVector))); G2Element g2AugSignVector1 = AugSchemeMPL().Sign(pk1, vecHash); G2Element g2AugSignBytes1 = AugSchemeMPL().Sign(pk1, 
Bytes(vecHash)); @@ -1033,8 +1033,8 @@ TEST_CASE("Advanced") { vector> vecG1AugVector = {g1_1.Serialize(), g1_2.Serialize()}; vector> vecG2AugVector = {g2AugSignVector1.Serialize(), g2AugSign2.Serialize()}; - vector aggAugVector = AugSchemeMPL().Aggregate(vecG2AugVector); - vector aggAugBytes = AugSchemeMPL().Aggregate(vector{vecG2AugVector.begin(), vecG2AugVector.end()}); + auto aggAugVector = AugSchemeMPL().Aggregate(vecG2AugVector); + auto aggAugBytes = AugSchemeMPL().Aggregate(vector{vecG2AugVector.begin(), vecG2AugVector.end()}); REQUIRE(aggAugVector == aggAugBytes); REQUIRE(AugSchemeMPL().AggregateVerify(vector{vecG1AugVector.begin(), vecG1AugVector.end()}, @@ -1042,7 +1042,7 @@ TEST_CASE("Advanced") { Bytes(aggAugVector))); REQUIRE(AugSchemeMPL().AggregateVerify({g1_1, g1_2}, vector{vecHashes.begin(), vecHashes.end()}, - G2Element::FromByteVector(aggAugVector))); + G2Element::FromBytes(aggAugVector))); G2Element proof = PopSchemeMPL().PopProve(pk1); REQUIRE(PopSchemeMPL().PopVerify(g1_1, proof)); @@ -1063,9 +1063,9 @@ TEST_CASE("Schemes") { SECTION("Basic Scheme") { vector seed1(32, 0x04); vector seed2(32, 0x05); - vector msg1 = {7, 8, 9}; - vector msg2 = {10, 11, 12}; - vector> msgs = {msg1, msg2}; + std::array msg1 = {7, 8, 9}; + std::array msg2 = {10, 11, 12}; + vector msgs = {Bytes{msg1}, Bytes{msg2}}; PrivateKey sk1 = BasicSchemeMPL().KeyGen(seed1); G1Element pk1 = BasicSchemeMPL().SkToG1(sk1); @@ -1093,7 +1093,7 @@ TEST_CASE("Schemes") { REQUIRE(BasicSchemeMPL().Verify(pk2v, msg1, sig1v) == false); G2Element aggsig = BasicSchemeMPL().Aggregate({sig1, sig2}); - vector aggsigv = BasicSchemeMPL().Aggregate(vector>{sig1v, sig2v}); + auto aggsigv = BasicSchemeMPL().Aggregate(vector>{sig1v, sig2v}); REQUIRE(BasicSchemeMPL().AggregateVerify({pk1, pk2}, msgs, aggsig)); REQUIRE(BasicSchemeMPL().AggregateVerify({pk1v, pk2v}, msgs, aggsigv)); } @@ -1102,9 +1102,9 @@ TEST_CASE("Schemes") { { vector seed1(32, 0x04); vector seed2(32, 0x05); - vector msg1 = {7, 8, 
9}; - vector msg2 = {10, 11, 12}; - vector> msgs = {msg1, msg2}; + std::array msg1 = {7, 8, 9}; + std::array msg2 = {10, 11, 12}; + vector msgs = {Bytes{msg1}, Bytes{msg2}}; PrivateKey sk1 = AugSchemeMPL().KeyGen(seed1); G1Element pk1 = AugSchemeMPL().SkToG1(sk1); @@ -1132,18 +1132,18 @@ TEST_CASE("Schemes") { REQUIRE(AugSchemeMPL().Verify(pk2v, msg1, sig1v) == false); G2Element aggsig = AugSchemeMPL().Aggregate({sig1, sig2}); - vector aggsigv = AugSchemeMPL().Aggregate(vector>{sig1v, sig2v}); + auto aggsigv = AugSchemeMPL().Aggregate(vector>{sig1v, sig2v}); REQUIRE(AugSchemeMPL().AggregateVerify({pk1, pk2}, msgs, aggsig)); - REQUIRE(AugSchemeMPL().AggregateVerify({pk1v, pk2v}, msgs, aggsigv)); + REQUIRE(AugSchemeMPL().AggregateVerify({pk1v, pk2v}, msgs, Bytes(aggsigv.data(), aggsigv.size()))); } SECTION("Pop Scheme") { vector seed1(32, 0x06); vector seed2(32, 0x07); - vector msg1 = {7, 8, 9}; - vector msg2 = {10, 11, 12}; - vector> msgs = {msg1, msg2}; + std::array msg1 = {7, 8, 9}; + std::array msg2 = {10, 11, 12}; + vector msgs = {Bytes{msg1}, Bytes{msg2}}; PrivateKey sk1 = PopSchemeMPL().KeyGen(seed1); G1Element pk1 = PopSchemeMPL().SkToG1(sk1); @@ -1171,9 +1171,9 @@ TEST_CASE("Schemes") { REQUIRE(PopSchemeMPL().Verify(pk2v, msg1, sig1v) == false); G2Element aggsig = PopSchemeMPL().Aggregate({sig1, sig2}); - vector aggsigv = PopSchemeMPL().Aggregate(vector>{sig1v, sig2v}); + auto aggsigv = PopSchemeMPL().Aggregate(vector>{sig1v, sig2v}); REQUIRE(PopSchemeMPL().AggregateVerify({pk1, pk2}, msgs, aggsig)); - REQUIRE(PopSchemeMPL().AggregateVerify({pk1v, pk2v}, msgs, aggsigv)); + REQUIRE(PopSchemeMPL().AggregateVerify({pk1v, pk2v}, msgs, Bytes(aggsigv.data(), aggsigv.size()))); // PopVerify G2Element proof1 = PopSchemeMPL().PopProve(sk1); @@ -1186,7 +1186,7 @@ TEST_CASE("Schemes") { G2Element sig2_same = PopSchemeMPL().Sign(sk2, msg1); vector sig2v_same = PopSchemeMPL().Sign(sk2, msg1).Serialize(); G2Element aggsig_same = PopSchemeMPL().Aggregate({sig1, 
sig2_same}); - vector aggsigv_same = + auto aggsigv_same = PopSchemeMPL().Aggregate(vector>{sig1v, sig2v_same}); REQUIRE( PopSchemeMPL().FastAggregateVerify({pk1, pk2}, msg1, aggsig_same)); diff --git a/src/threshold.cpp b/src/threshold.cpp index 723b24410168..73a3023b28da 100644 --- a/src/threshold.cpp +++ b/src/threshold.cpp @@ -92,10 +92,17 @@ namespace bls { struct PolyOpsBase { bn_t order; + bn_t iv; PolyOpsBase() { bn_new(order); gt_get_ord(order); + bn_new(iv); + } + + ~PolyOpsBase() { + bn_free(iv); + bn_free(order); } void MulFP(bn_t& r, const bn_t& a, const bn_t& b) { @@ -114,8 +121,6 @@ namespace bls { } void DivFP(bn_t& r, const bn_t& a, const bn_t& b) { - bn_t iv; - bn_new(iv); fp_inv_exgcd_bn(iv, b, order); bn_mul(r, a, iv); ModOrder(r); @@ -143,7 +148,7 @@ namespace bls { template<> struct PolyOps : PolyOpsBase { G1Element Add(const G1Element& a, const G1Element& b) { - return pThresholdScheme->Aggregate({a, b}); + return a + b; } G1Element Mul(const G1Element& a, const bn_t& b) { @@ -154,7 +159,7 @@ namespace bls { template<> struct PolyOps : PolyOpsBase { G2Element Add(const G2Element& a, const G2Element& b) { - return pThresholdScheme->Aggregate({a, b}); + return a + b; } G2Element Mul(const G2Element& a, bn_t& b) { From 0ff505cd1713570ca0d70613c3823de0663da8d8 Mon Sep 17 00:00:00 2001 From: Kittywhiskers Van Gogh <63189531+kwvg@users.noreply.github.com> Date: Tue, 17 Dec 2024 04:34:08 +0000 Subject: [PATCH 539/656] build: stop tracking cmake dependency relic_conf.h.in --- .../depends/relic/include/relic_conf.h.in | 717 ------------------ 1 file changed, 717 deletions(-) delete mode 100644 src/dashbls/depends/relic/include/relic_conf.h.in diff --git a/src/dashbls/depends/relic/include/relic_conf.h.in b/src/dashbls/depends/relic/include/relic_conf.h.in deleted file mode 100644 index 7db6f5b509c4..000000000000 --- a/src/dashbls/depends/relic/include/relic_conf.h.in +++ /dev/null @@ -1,717 +0,0 @@ -/* - * RELIC is an Efficient LIbrary for 
Cryptography - * Copyright (c) 2009 RELIC Authors - * - * This file is part of RELIC. RELIC is legal property of its developers, - * whose names are not listed here. Please refer to the COPYRIGHT file - * for contact information. - * - * RELIC is free software; you can redistribute it and/or modify it under the - * terms of the version 2.1 (or later) of the GNU Lesser General Public License - * as published by the Free Software Foundation; or version 2.0 of the Apache - * License as published by the Apache Software Foundation. See the LICENSE files - * for more details. - * - * RELIC is distributed in the hope that it will be useful, but WITHOUT ANY - * WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR - * A PARTICULAR PURPOSE. See the LICENSE files for more details. - * - * You should have received a copy of the GNU Lesser General Public or the - * Apache License along with RELIC. If not, see - * or . - */ - -/** - * @file - * - * Project configuration. - * - * @version $Id: relic_conf.h.in 45 2009-07-04 23:45:48Z dfaranha $ - * @ingroup relic - */ - -#ifndef RLC_CONF_H -#define RLC_CONF_H - -/** Project version. */ -#define RLC_VERSION "@VERSION@" - -/** Debugging support. */ -#cmakedefine DEBUG -/** Profiling support. */ -#cmakedefine PROFL -/** Error handling support. */ -#cmakedefine CHECK -/** Verbose error messages. */ -#cmakedefine VERBS -/** Build with overhead estimation. */ -#cmakedefine OVERH -/** Build documentation. */ -#cmakedefine DOCUM -/** Build only the selected algorithms. */ -#cmakedefine STRIP -/** Build with printing disabled. */ -#cmakedefine QUIET -/** Build with colored output. */ -#cmakedefine COLOR -/** Build with big-endian support. */ -#cmakedefine BIGED -/** Build shared library. */ -#cmakedefine SHLIB -/** Build static library. */ -#cmakedefine STLIB - -/** Number of times each test is ran. */ -#define TESTS @TESTS@ -/** Number of times each benchmark is ran. 
*/ -#define BENCH @BENCH@ -/** Number of available cores. */ -#define CORES @CORES@ - -/** Atmel AVR ATMega128 8-bit architecture. */ -#define AVR 1 -/** MSP430 16-bit architecture. */ -#define MSP 2 -/** ARM 32-bit architecture. */ -#define ARM 3 -/** Intel x86-compatible 32-bit architecture. */ -#define X86 4 -/** AMD64-compatible 64-bit architecture. */ -#define X64 5 -/** Architecture. */ -#cmakedefine ARCH @ARCH@ - -/** Size of word in this architecture. */ -#define WSIZE @WSIZE@ - -/** Byte boundary to align digit vectors. */ -#define ALIGN @ALIGN@ - -/** Build multiple precision integer module. */ -#cmakedefine WITH_BN -/** Build prime field module. */ -#cmakedefine WITH_FP -/** Build prime field extension module. */ -#cmakedefine WITH_FPX -/** Build binary field module. */ -#cmakedefine WITH_FB -/** Build prime elliptic curve module. */ -#cmakedefine WITH_EP -/** Build prime field extension elliptic curve module. */ -#cmakedefine WITH_EPX -/** Build binary elliptic curve module. */ -#cmakedefine WITH_EB -/** Build elliptic Edwards curve module. */ -#cmakedefine WITH_ED -/** Build elliptic curve cryptography module. */ -#cmakedefine WITH_EC -/** Build pairings over prime curves module. */ -#cmakedefine WITH_PP -/** Build pairing-based cryptography module. */ -#cmakedefine WITH_PC -/** Build block ciphers. */ -#cmakedefine WITH_BC -/** Build hash functions. */ -#cmakedefine WITH_MD -/** Build cryptographic protocols. */ -#cmakedefine WITH_CP -/** Build Multi-party computation primitives. */ -#cmakedefine WITH_MPC - -/** Easy C-only backend. */ -#define EASY 1 -/** GMP backend. */ -#define GMP 2 -/** GMP constant-time backend. */ -#define GMP_SEC 3 -/** Arithmetic backend. */ -#define ARITH @ARITH@ - -/** Required precision in bits. */ -#define BN_PRECI @BN_PRECI@ -/** A multiple precision integer can store w words. */ -#define SINGLE 0 -/** A multiple precision integer can store the result of an addition. 
*/ -#define CARRY 1 -/** A multiple precision integer can store the result of a multiplication. */ -#define DOUBLE 2 -/** Effective size of a multiple precision integer. */ -#define BN_MAGNI @BN_MAGNI@ -/** Number of Karatsuba steps. */ -#define BN_KARAT @BN_KARAT@ - -/** Schoolbook multiplication. */ -#define BASIC 1 -/** Comba multiplication. */ -#define COMBA 2 -/** Chosen multiple precision multiplication method. */ -#define BN_MUL @BN_MUL@ - -/** Schoolbook squaring. */ -#define BASIC 1 -/** Comba squaring. */ -#define COMBA 2 -/** Reuse multiplication for squaring. */ -#define MULTP 4 -/** Chosen multiple precision multiplication method. */ -#define BN_SQR @BN_SQR@ - -/** Division modular reduction. */ -#define BASIC 1 -/** Barrett modular reduction. */ -#define BARRT 2 -/** Montgomery modular reduction. */ -#define MONTY 3 -/** Pseudo-Mersenne modular reduction. */ -#define PMERS 4 -/** Chosen multiple precision modular reduction method. */ -#define BN_MOD @BN_MOD@ - -/** Binary modular exponentiation. */ -#define BASIC 1 -/** Sliding window modular exponentiation. */ -#define SLIDE 2 -/** Montgomery powering ladder. */ -#define MONTY 3 -/** Chosen multiple precision modular exponentiation method. */ -#define BN_MXP @BN_MXP@ - -/** Basic Euclidean GCD Algorithm. */ -#define BASIC 1 -/** Lehmer's fast GCD Algorithm. */ -#define LEHME 2 -/** Stein's binary GCD Algorithm. */ -#define STEIN 3 -/** Chosen multiple precision greatest common divisor method. */ -#define BN_GCD @BN_GCD@ - -/** Basic prime generation. */ -#define BASIC 1 -/** Safe prime generation. */ -#define SAFEP 2 -/** Strong prime generation. */ -#define STRON 3 -/** Chosen prime generation algorithm. */ -#define BN_GEN @BN_GEN@ - -/** Multiple precision arithmetic method */ -#define BN_METHD "@BN_METHD@" - -/** Prime field size in bits. */ -#define FP_PRIME @FP_PRIME@ -/** Number of Karatsuba steps. */ -#define FP_KARAT @FP_KARAT@ -/** Prefer Pseudo-Mersenne primes over random primes. 
*/ -#cmakedefine FP_PMERS -/** Use -1 as quadratic non-residue. */ -#cmakedefine FP_QNRES -/** Width of window processing for exponentiation methods. */ -#define FP_WIDTH @FP_WIDTH@ - -/** Schoolbook addition. */ -#define BASIC 1 -/** Integrated modular addtion. */ -#define INTEG 3 -/** Chosen prime field multiplication method. */ -#define FP_ADD @FP_ADD@ - -/** Schoolbook multiplication. */ -#define BASIC 1 -/** Comba multiplication. */ -#define COMBA 2 -/** Integrated modular multiplication. */ -#define INTEG 3 -/** Chosen prime field multiplication method. */ -#define FP_MUL @FP_MUL@ - -/** Schoolbook squaring. */ -#define BASIC 1 -/** Comba squaring. */ -#define COMBA 2 -/** Integrated modular squaring. */ -#define INTEG 3 -/** Reuse multiplication for squaring. */ -#define MULTP 4 -/** Chosen prime field multiplication method. */ -#define FP_SQR @FP_SQR@ - -/** Division-based reduction. */ -#define BASIC 1 -/** Fast reduction modulo special form prime. */ -#define QUICK 2 -/** Montgomery modular reduction. */ -#define MONTY 3 -/** Chosen prime field reduction method. */ -#define FP_RDC @FP_RDC@ - -/** Inversion by Fermat's Little Theorem. */ -#define BASIC 1 -/** Binary inversion. */ -#define BINAR 2 -/** Integrated modular multiplication. */ -#define MONTY 3 -/** Extended Euclidean algorithm. */ -#define EXGCD 4 -/** Constant-time inversion by Bernstein-Yang division steps. */ -#define DIVST 5 -/** Use implementation provided by the lower layer. */ -#define LOWER 8 -/** Chosen prime field inversion method. */ -#define FP_INV @FP_INV@ - -/** Binary modular exponentiation. */ -#define BASIC 1 -/** Sliding window modular exponentiation. */ -#define SLIDE 2 -/** Constant-time Montgomery powering ladder. */ -#define MONTY 3 -/** Chosen multiple precision modular exponentiation method. */ -#define FP_EXP @FP_EXP@ - -/** Prime field arithmetic method */ -#define FP_METHD "@FP_METHD@" - -/** Basic quadratic extension field arithmetic. 
*/ -#define BASIC 1 -/** Integrated extension field arithmetic. */ -#define INTEG 3 -/* Chosen extension field arithmetic method. */ -#define FPX_QDR @FPX_QDR@ - -/** Basic cubic extension field arithmetic. */ -#define BASIC 1 -/** Integrated extension field arithmetic. */ -#define INTEG 3 -/* Chosen extension field arithmetic method. */ -#define FPX_CBC @FPX_CBC@ - -/** Basic quadratic extension field arithmetic. */ -#define BASIC 1 -/** Lazy-reduced extension field arithmetic. */ -#define LAZYR 2 -/* Chosen extension field arithmetic method. */ -#define FPX_RDC @FPX_RDC@ - -/** Prime extension field arithmetic method */ -#define FPX_METHD "@FPX_METHD@" - -/** Irreducible polynomial size in bits. */ -#define FB_POLYN @FB_POLYN@ -/** Number of Karatsuba steps. */ -#define FB_KARAT @FB_KARAT@ -/** Prefer trinomials over pentanomials. */ -#cmakedefine FB_TRINO -/** Prefer square-root friendly polynomials. */ -#cmakedefine FB_SQRTF -/** Precompute multiplication table for sqrt(z). */ -#cmakedefine FB_PRECO -/** Width of window processing for exponentiation methods. */ -#define FB_WIDTH @FB_WIDTH@ - -/** Shift-and-add multiplication. */ -#define BASIC 1 -/** Lopez-Dahab multiplication. */ -#define LODAH 2 -/** Integrated modular multiplication. */ -#define INTEG 3 -/** Chosen binary field multiplication method. */ -#define FB_MUL @FB_MUL@ - -/** Basic squaring. */ -#define BASIC 1 -/** Table-based squaring. */ -#define QUICK 2 -/** Integrated modular squaring. */ -#define INTEG 3 -/** Chosen binary field squaring method. */ -#define FB_SQR @FB_SQR@ - -/** Shift-and-add modular reduction. */ -#define BASIC 1 -/** Fast reduction modulo a trinomial or pentanomial. */ -#define QUICK 2 -/** Chosen binary field modular reduction method. */ -#define FB_RDC @FB_RDC@ - -/** Square root by repeated squaring. */ -#define BASIC 1 -/** Fast square root extraction. */ -#define QUICK 2 -/** Chosen binary field modular reduction method. 
*/ -#define FB_SRT @FB_SRT@ - -/** Trace by repeated squaring. */ -#define BASIC 1 -/** Fast trace computation. */ -#define QUICK 2 -/** Chosen trace computation method. */ -#define FB_TRC @FB_TRC@ - -/** Solve by half-trace computation. */ -#define BASIC 1 -/** Solve with precomputed half-traces. */ -#define QUICK 2 -/** Chosen method to solve a quadratic equation. */ -#define FB_SLV @FB_SLV@ - -/** Inversion by Fermat's Little Theorem. */ -#define BASIC 1 -/** Binary inversion. */ -#define BINAR 2 -/** Almost inverse algorithm. */ -#define ALMOS 3 -/** Extended Euclidean algorithm. */ -#define EXGCD 4 -/** Itoh-Tsuji inversion. */ -#define ITOHT 5 -/** Hardware-friendly inversion by Brunner-Curiger-Hofstetter.*/ -#define BRUCH 6 -/** Constant-time version of almost inverse. */ -#define CTAIA 7 -/** Use implementation provided by the lower layer. */ -#define LOWER 8 -/** Chosen binary field inversion method. */ -#define FB_INV @FB_INV@ - -/** Binary modular exponentiation. */ -#define BASIC 1 -/** Sliding window modular exponentiation. */ -#define SLIDE 2 -/** Constant-time Montgomery powering ladder. */ -#define MONTY 3 -/** Chosen multiple precision modular exponentiation method. */ -#define FB_EXP @FB_EXP@ - -/** Iterated squaring/square-root by consecutive squaring/square-root. */ -#define BASIC 1 -/** Iterated squaring/square-root by table-based method. */ -#define QUICK 2 -/** Chosen method to solve a quadratic equation. */ -#define FB_ITR @FB_ITR@ - -/** Binary field arithmetic method */ -#define FB_METHD "@FB_METHD@" - -/** Support for ordinary curves. */ -#cmakedefine EP_PLAIN -/** Support for supersingular curves. */ -#cmakedefine EP_SUPER -/** Support for prime curves with efficient endormorphisms. */ -#cmakedefine EP_ENDOM -/** Use mixed coordinates. */ -#cmakedefine EP_MIXED -/** Build precomputation table for generator. */ -#cmakedefine EP_PRECO -/** Enable isogeny map for SSWU map-to-curve. 
*/ -#cmakedefine EP_CTMAP -/** Width of precomputation table for fixed point methods. */ -#define EP_DEPTH @EP_DEPTH@ -/** Width of window processing for unknown point methods. */ -#define EP_WIDTH @EP_WIDTH@ - -/** Affine coordinates. */ -#define BASIC 1 -/** Projective coordinates. */ -#define PROJC 2 -/** Jacobian coordinates. */ -#define JACOB 3 -/** Chosen prime elliptic curve coordinate method. */ -#define EP_ADD @EP_ADD@ - -/** Binary point multiplication. */ -#define BASIC 1 -/** Sliding window. */ -#define SLIDE 2 -/** Montgomery powering ladder. */ -#define MONTY 3 -/** Left-to-right Width-w NAF. */ -#define LWNAF 4 -/** Left-to-right Width-w NAF. */ -#define LWREG 5 -/** Chosen prime elliptic curve point multiplication method. */ -#define EP_MUL @EP_MUL@ - -/** Binary point multiplication. */ -#define BASIC 1 -/** Single-table comb method. */ -#define COMBS 2 -/** Double-table comb method. */ -#define COMBD 3 -/** Left-to-right Width-w NAF. */ -#define LWNAF 4 -/** Chosen prime elliptic curve point multiplication method. */ -#define EP_FIX @EP_FIX@ - -/** Basic simultaneouns point multiplication. */ -#define BASIC 1 -/** Shamir's trick. */ -#define TRICK 2 -/** Interleaving of w-(T)NAFs. */ -#define INTER 3 -/** Joint sparse form. */ -#define JOINT 4 -/** Chosen prime elliptic curve simulteanous point multiplication method. */ -#define EP_SIM @EP_SIM@ - -/** Prime elliptic curve arithmetic method. */ -#define EP_METHD "@EP_METHD@" - -/** Support for ordinary curves without endormorphisms. */ -#cmakedefine EB_PLAIN -/** Support for Koblitz anomalous binary curves. */ -#cmakedefine EB_KBLTZ -/** Use mixed coordinates. */ -#cmakedefine EB_MIXED -/** Build precomputation table for generator. */ -#cmakedefine EB_PRECO -/** Width of precomputation table for fixed point methods. */ -#define EB_DEPTH @EB_DEPTH@ -/** Width of window processing for unknown point methods. */ -#define EB_WIDTH @EB_WIDTH@ - -/** Binary elliptic curve arithmetic method. 
*/ -#define EB_METHD "@EB_METHD@" - -/** Affine coordinates. */ -#define BASIC 1 -/** López-Dahab Projective coordinates. */ -#define PROJC 2 -/** Chosen binary elliptic curve coordinate method. */ -#define EB_ADD @EB_ADD@ - -/** Binary point multiplication. */ -#define BASIC 1 -/** L�pez-Dahab point multiplication. */ -#define LODAH 2 -/** Halving. */ -#define HALVE 3 -/** Left-to-right width-w (T)NAF. */ -#define LWNAF 4 -/** Right-to-left width-w (T)NAF. */ -#define RWNAF 5 -/** Chosen binary elliptic curve point multiplication method. */ -#define EB_MUL @EB_MUL@ - -/** Binary point multiplication. */ -#define BASIC 1 -/** Single-table comb method. */ -#define COMBS 2 -/** Double-table comb method. */ -#define COMBD 3 -/** Left-to-right Width-w NAF. */ -#define LWNAF 4 -/** Chosen binary elliptic curve point multiplication method. */ -#define EB_FIX @EB_FIX@ - -/** Basic simultaneouns point multiplication. */ -#define BASIC 1 -/** Shamir's trick. */ -#define TRICK 2 -/** Interleaving of w-(T)NAFs. */ -#define INTER 3 -/** Joint sparse form. */ -#define JOINT 4 -/** Chosen binary elliptic curve simulteanous point multiplication method. */ -#define EB_SIM @EB_SIM@ - -/** Build precomputation table for generator. */ -#cmakedefine ED_PRECO -/** Width of precomputation table for fixed point methods. */ -#define ED_DEPTH @ED_DEPTH@ -/** Width of window processing for unknown point methods. */ -#define ED_WIDTH @ED_WIDTH@ - -/** Edwards elliptic curve arithmetic method. */ -#define ED_METHD "@ED_METHD@" - -/** Affine coordinates. */ -#define BASIC 1 -/** Simple projective twisted Edwards coordinates */ -#define PROJC 2 -/** Extended projective twisted Edwards coordinates */ -#define EXTND 3 -/** Chosen binary elliptic curve coordinate method. */ -#define ED_ADD @ED_ADD@ - -/** Binary point multiplication. */ -#define BASIC 1 -/** Sliding window. */ -#define SLIDE 2 -/** Montgomery powering ladder. */ -#define MONTY 3 -/** Left-to-right Width-w NAF. 
*/ -#define LWNAF 4 -/** Left-to-right Width-w NAF. */ -#define LWREG 5 -/** Chosen prime elliptic twisted Edwards curve point multiplication method. */ -#define ED_MUL @ED_MUL@ - -/** Binary point multiplication. */ -#define BASIC 1 -/** Single-table comb method. */ -#define COMBS 2 -/** Double-table comb method. */ -#define COMBD 3 -/** Left-to-right Width-w NAF. */ -#define LWNAF 4 -/** Chosen prime elliptic twisted Edwards curve point multiplication method. */ -#define ED_FIX @ED_FIX@ - -/** Basic simultaneouns point multiplication. */ -#define BASIC 1 -/** Shamir's trick. */ -#define TRICK 2 -/** Interleaving of w-(T)NAFs. */ -#define INTER 3 -/** Joint sparse form. */ -#define JOINT 4 -/** Chosen prime elliptic curve simulteanous point multiplication method. */ -#define ED_SIM @ED_SIM@ - -/** Prime curves. */ -#define PRIME 1 -/** Binary curves. */ -#define CHAR2 2 -/** Edwards curves */ -#define EDDIE 3 -/** Chosen elliptic curve type. */ -#define EC_CUR @EC_CUR@ - -/** Chosen elliptic curve cryptography method. */ -#define EC_METHD "@EC_METHD@" -/** Prefer curves with efficient endomorphisms. */ -#cmakedefine EC_ENDOM - -/** Basic quadratic extension field arithmetic. */ -#define BASIC 1 -/** Lazy-reduced extension field arithmetic. */ -#define LAZYR 2 -/* Chosen extension field arithmetic method. */ -#define PP_EXT @PP_EXT@ - -/** Bilinear pairing method. */ -#define PP_METHD "@PP_METHD@" - -/** Tate pairing. */ -#define TATEP 1 -/** Weil pairing. */ -#define WEILP 2 -/** Optimal ate pairing. */ -#define OATEP 3 -/** Chosen pairing method over prime elliptic curves. */ -#define PP_MAP @PP_MAP@ - -/** SHA-224 hash function. */ -#define SH224 2 -/** SHA-256 hash function. */ -#define SH256 3 -/** SHA-384 hash function. */ -#define SH384 4 -/** SHA-512 hash function. */ -#define SH512 5 -/** BLAKE2s-160 hash function. */ -#define B2S160 6 -/** BLAKE2s-256 hash function. */ -#define B2S256 7 -/** Chosen hash function. 
*/ -#define MD_MAP @MD_MAP@ - -/** Choice of hash function. */ -#define MD_METHD "@MD_METHD@" - -/** Chosen RSA method. */ -#cmakedefine CP_CRT -/** RSA without padding. */ -#define BASIC 1 -/** RSA PKCS#1 v1.5 padding. */ -#define PKCS1 2 -/** RSA PKCS#1 v2.1 padding. */ -#define PKCS2 3 -/** Chosen RSA padding method. */ -#define CP_RSAPD @CP_RSAPD@ - -/** Automatic memory allocation. */ -#define AUTO 1 -/** Dynamic memory allocation. */ -#define DYNAMIC 2 -/** Chosen memory allocation policy. */ -#define ALLOC @ALLOC@ - -/** NIST HASH-DRBG generator. */ -#define HASHD 1 -/** Intel RdRand instruction. */ -#define RDRND 2 -/** Operating system underlying generator. */ -#define UDEV 3 -/** Override library generator with the callback. */ -#define CALL 4 -/** Chosen random generator. */ -#define RAND @RAND@ - -/** Standard C library generator. */ -#define LIBC 1 -/** Intel RdRand instruction. */ -#define RDRND 2 -/** Device node generator. */ -#define UDEV 3 -/** Use Windows' CryptGenRandom. */ -#define WCGR 4 -/** Chosen random generator seeder. */ -#cmakedefine SEED @SEED@ - -/** GNU/Linux operating system. */ -#define LINUX 1 -/** FreeBSD operating system. */ -#define FREEBSD 2 -/** Windows operating system. */ -#define MACOSX 3 -/** Windows operating system. */ -#define WINDOWS 4 -/** Android operating system. */ -#define DROID 5 -/** Arduino platform. */ -#define DUINO 6 -/** OpenBSD operating system. */ -#define OPENBSD 7 -/** Detected operation system. */ -#cmakedefine OPSYS @OPSYS@ - -/** OpenMP multithreading support. */ -#define OPENMP 1 -/** POSIX multithreading support. */ -#define PTHREAD 2 -/** Chosen multithreading API. */ -#cmakedefine MULTI @MULTI@ - -/** Per-process high-resolution timer. */ -#define HREAL 1 -/** Per-process high-resolution timer. */ -#define HPROC 2 -/** Per-thread high-resolution timer. */ -#define HTHRD 3 -/** POSIX-compatible timer. */ -#define POSIX 4 -/** ANSI-compatible timer. */ -#define ANSI 5 -/** Cycle-counting timer. 
*/ -#define CYCLE 6 -/** Performance monitoring framework. */ -#define PERF 7 -/** Chosen timer. */ -#cmakedefine TIMER @TIMER@ - -/** Prefix to identity this build of the library. */ -#cmakedefine LABEL @LABEL@ - -#ifndef ASM - -#include "relic_label.h" - -/** - * Prints the project options selected at build time. - */ -void conf_print(void); - -#endif /* ASM */ - -#endif /* !RLC_CONF_H */ From be7f51a27d51026001376821b9900f47f43532cd Mon Sep 17 00:00:00 2001 From: fanquake Date: Tue, 23 May 2023 13:06:02 +0100 Subject: [PATCH 540/656] Merge bitcoin/bitcoin#27717: test: Make `util/test_runner.py` honor `BITCOINUTIL` and `BITCOINTX` 4f2f615d1362afe92cabe9eab50087f8bfe454fd test: Make `util/test_runner.py` honor `BITCOINUTIL` and `BITCOINTX` (Hennadii Stepanov) Pull request description: This PR is a continuation of changes to our testing frameworks (https://github.com/bitcoin/bitcoin/pull/27554, https://github.com/bitcoin/bitcoin/pull/27561) that allow them to work correctly in a multi-config build environment that is possible for [upcoming](https://github.com/bitcoin/bitcoin/pull/25797) CMake-based build system. That means that built for different configurations binaries (e.g., "Debug" and "Release") can coexist in separated directories. The commit has been pulled from https://github.com/hebasto/bitcoin/pull/15 and it seems [useful](https://github.com/hebasto/bitcoin/pull/15#discussion_r1200251404) by itself as: > I believe the rationale for allowing to drop in the executables via env var is to allow to test the guix-produced, or other third-party-produced executables... 
The current implementation of the `test/functional/test_framework/test_framework.py` script uses the same approach: https://github.com/bitcoin/bitcoin/blob/09351f51d279612973ecd76811dc075dff08209f/test/functional/test_framework/test_framework.py#L231-L246 ACKs for top commit: MarcoFalke: lgtm ACK 4f2f615d1362afe92cabe9eab50087f8bfe454fd TheCharlatan: ACK 4f2f615d1362afe92cabe9eab50087f8bfe454fd stickies-v: ACK 4f2f615d1362afe92cabe9eab50087f8bfe454fd Tree-SHA512: 99ee9a727b266700649d8f2ec528dfaeb04a1e48f0cb1d4eeaece65917165be647c10c4548429a9e8b30d094597f67e860c1db03ac689ebb409b223ce1b63aa9 --- test/util/test_runner.py | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/test/util/test_runner.py b/test/util/test_runner.py index d12f8cec31bd..f96a74629b1f 100755 --- a/test/util/test_runner.py +++ b/test/util/test_runner.py @@ -74,6 +74,11 @@ def bctest(testDir, testObj, buildenv): """ # Get the exec names and arguments execprog = os.path.join(buildenv["BUILDDIR"], "src", testObj["exec"] + buildenv["EXEEXT"]) + if testObj["exec"] == "./dash-util": + execprog = os.getenv("DASHUTIL", default=execprog) + elif testObj["exec"] == "./dash-tx": + execprog = os.getenv("DASHTX", default=execprog) + execargs = testObj['args'] execrun = [execprog] + execargs From 5ee733eb3d23af710551a1dd3416a0cb2f970561 Mon Sep 17 00:00:00 2001 From: fanquake Date: Wed, 26 Jul 2023 09:42:42 +0100 Subject: [PATCH 541/656] Merge bitcoin/bitcoin#28147: suppressions: note that `type:ClassName::MethodName` should be used d0c6cc4abe42163aaf081a969d2c449785563ba2 suppressions: note that 'type:ClassName::MethodName' should be used (fanquake) Pull request description: Now that the symbolizer is back in play, suppressions can once-again be targeted to functions, rather than file-wide. 
ACKs for top commit: MarcoFalke: lgtm ACK d0c6cc4abe42163aaf081a969d2c449785563ba2 hebasto: ACK d0c6cc4abe42163aaf081a969d2c449785563ba2 Tree-SHA512: fb65398eae18a6ebc5f8414275c568cf2664ab5357c2b3160f3bf285b67bc3af788225c5dba3c824c0e098627789450bec775375f52529d71c6ef700a9632d65 --- test/sanitizer_suppressions/ubsan | 9 +++------ 1 file changed, 3 insertions(+), 6 deletions(-) diff --git a/test/sanitizer_suppressions/ubsan b/test/sanitizer_suppressions/ubsan index 79507945434f..1d1f2ad352b5 100644 --- a/test/sanitizer_suppressions/ubsan +++ b/test/sanitizer_suppressions/ubsan @@ -1,9 +1,7 @@ +# Suppressions should use `sanitize-type:ClassName::MethodName`. + # -fsanitize=undefined suppressions # ================================= -# The suppressions would be `sanitize-type:ClassName::MethodName`, -# however due to a bug in clang the symbolizer is disabled and thus no symbol -# names can be used. -# See https://github.com/google/sanitizers/issues/1364 # -fsanitize=integer suppressions # =============================== @@ -11,8 +9,7 @@ # ------------ # Suppressions in dependencies that are developed outside this repository. 
unsigned-integer-overflow:*/include/c++/ -# unsigned-integer-overflow in FuzzedDataProvider's ConsumeIntegralInRange -unsigned-integer-overflow:FuzzedDataProvider.h +unsigned-integer-overflow:FuzzedDataProvider::ConsumeIntegralInRange unsigned-integer-overflow:leveldb/ unsigned-integer-overflow:minisketch/ unsigned-integer-overflow:secp256k1/ From 99e7997acaf2c74562028060174cebd47eda2015 Mon Sep 17 00:00:00 2001 From: pasta Date: Wed, 19 Nov 2025 17:23:58 -0600 Subject: [PATCH 542/656] fix: skip ISDLOCK inv announcements for peers requesting recsigs --- src/net_processing.cpp | 13 ++++++++++++- 1 file changed, 12 insertions(+), 1 deletion(-) diff --git a/src/net_processing.cpp b/src/net_processing.cpp index 392d1d41ef35..9778a0f638e2 100644 --- a/src/net_processing.cpp +++ b/src/net_processing.cpp @@ -1188,6 +1188,13 @@ static void PushInv(Peer& peer, const CInv& inv) return; } + // Skip ISDLOCK inv announcements for peers that want recsigs, as they can reconstruct + // the islock from the recsig + if (inv.type == MSG_ISDLOCK && peer.m_wants_recsigs) { + LogPrint(BCLog::NET, "%s -- skipping ISDLOCK inv (peer wants recsigs): %s peer=%d\n", __func__, inv.ToString(), peer.m_id); + return; + } + LOCK(inv_relay->m_tx_inventory_mutex); if (inv_relay->m_tx_inventory_known_filter.contains(inv.hash)) { LogPrint(BCLog::NET, "%s -- skipping known inv: %s peer=%d\n", __func__, inv.ToString(), peer.m_id); @@ -6258,7 +6265,11 @@ bool PeerManagerImpl::SendMessages(CNode* pto) if (islock == nullptr) continue; uint256 isLockHash{::SerializeHash(*islock)}; tx_relay->m_tx_inventory_known_filter.insert(isLockHash); - queueAndMaybePushInv(CInv(MSG_ISDLOCK, isLockHash)); + // Skip ISDLOCK inv announcements for peers that want recsigs, as they can reconstruct + // the islock from the recsig + if (!peer->m_wants_recsigs) { + queueAndMaybePushInv(CInv(MSG_ISDLOCK, isLockHash)); + } } // Send an inv for the best ChainLock we have From 572bafd82cbbb2453ea2b5cb0df3685f67f8a752 Mon Sep 17 
00:00:00 2001 From: Konstantin Akimov Date: Thu, 20 Nov 2025 16:40:24 +0700 Subject: [PATCH 543/656] refactor: rename IsDsqOver to IsMixingThresholdExceeded --- src/coinjoin/client.cpp | 4 ++-- src/coinjoin/server.cpp | 4 ++-- src/masternode/meta.cpp | 2 +- src/masternode/meta.h | 2 +- 4 files changed, 6 insertions(+), 6 deletions(-) diff --git a/src/coinjoin/client.cpp b/src/coinjoin/client.cpp index b79671e65439..79651d64e428 100644 --- a/src/coinjoin/client.cpp +++ b/src/coinjoin/client.cpp @@ -124,7 +124,7 @@ MessageProcessingResult CCoinJoinClientQueueManager::ProcessMessage(NodeId from, LogPrint(BCLog::COINJOIN, "DSQUEUE -- CoinJoin queue is ready, masternode=%s, queue=%s\n", dmn->proTxHash.ToString(), dsq.ToString()); return ret; } else { - if (m_mn_metaman.IsDsqOver(dmn->proTxHash, tip_mn_list.GetValidMNsCount())) { + if (m_mn_metaman.IsMixingThresholdExceeded(dmn->proTxHash, tip_mn_list.GetValidMNsCount())) { LogPrint(BCLog::COINJOIN, "DSQUEUE -- Masternode %s is sending too many dsq messages\n", dmn->proTxHash.ToString()); return ret; @@ -1174,7 +1174,7 @@ bool CCoinJoinClientSession::StartNewQueue(CAmount nBalanceNeedsAnonymized, CCon continue; } - if (m_mn_metaman.IsDsqOver(dmn->proTxHash, nMnCount)) { + if (m_mn_metaman.IsMixingThresholdExceeded(dmn->proTxHash, nMnCount)) { WalletCJLogPrint(m_wallet, /* Continued */ "CCoinJoinClientSession::StartNewQueue -- too early to mix with node masternode=%s\n", dmn->proTxHash.ToString()); diff --git a/src/coinjoin/server.cpp b/src/coinjoin/server.cpp index c666e1df5e22..98069a79cf11 100644 --- a/src/coinjoin/server.cpp +++ b/src/coinjoin/server.cpp @@ -99,7 +99,7 @@ void CCoinJoinServer::ProcessDSACCEPT(CNode& peer, CDataStream& vRecv) } } - if (m_mn_metaman.IsDsqOver(dmn->proTxHash, mnList.GetValidMNsCount())) { + if (m_mn_metaman.IsMixingThresholdExceeded(dmn->proTxHash, mnList.GetValidMNsCount())) { if (fLogIPs) { LogPrint(BCLog::COINJOIN, "DSACCEPT -- last dsq too recent, must wait: peer=%d, addr=%s\n", 
peer.GetId(), peer.addr.ToStringAddrPort()); @@ -193,7 +193,7 @@ MessageProcessingResult CCoinJoinServer::ProcessDSQUEUE(NodeId from, CDataStream if (!dsq.fReady) { //don't allow a few nodes to dominate the queuing process - if (m_mn_metaman.IsDsqOver(dmn->proTxHash, tip_mn_list.GetValidMNsCount())) { + if (m_mn_metaman.IsMixingThresholdExceeded(dmn->proTxHash, tip_mn_list.GetValidMNsCount())) { LogPrint(BCLog::COINJOIN, "DSQUEUE -- node sending too many dsq messages, masternode=%s\n", dmn->proTxHash.ToString()); return ret; } diff --git a/src/masternode/meta.cpp b/src/masternode/meta.cpp index a0b98b4d0591..226aefb59f63 100644 --- a/src/masternode/meta.cpp +++ b/src/masternode/meta.cpp @@ -90,7 +90,7 @@ CMasternodeMetaInfo& CMasternodeMetaMan::GetMetaInfo(const uint256& proTxHash) return it->second; } -bool CMasternodeMetaMan::IsDsqOver(const uint256& protx_hash, int mn_count) const +bool CMasternodeMetaMan::IsMixingThresholdExceeded(const uint256& protx_hash, int mn_count) const { LOCK(cs); auto it = metaInfos.find(protx_hash); diff --git a/src/masternode/meta.h b/src/masternode/meta.h index 273c433e1acc..5e7729a9390f 100644 --- a/src/masternode/meta.h +++ b/src/masternode/meta.h @@ -219,7 +219,7 @@ class CMasternodeMetaMan : public MasternodeMetaStore // MN's threshold is calculated as the last dsq count this specific masternode was used in a mixing // session plus a margin of 20% of masternode count. In other words we expect at least 20% of unique // masternodes before we ever see a masternode that we know already mixed someone's funds earlier. 
- bool IsDsqOver(const uint256& protx_hash, int mn_count) const EXCLUSIVE_LOCKS_REQUIRED(!cs); + bool IsMixingThresholdExceeded(const uint256& protx_hash, int mn_count) const EXCLUSIVE_LOCKS_REQUIRED(!cs); void AllowMixing(const uint256& proTxHash) EXCLUSIVE_LOCKS_REQUIRED(!cs); void DisallowMixing(const uint256& proTxHash) EXCLUSIVE_LOCKS_REQUIRED(!cs); From de1b467f30ba554e7669d7f60800446ab57e5127 Mon Sep 17 00:00:00 2001 From: Kittywhiskers Van Gogh <63189531+kwvg@users.noreply.github.com> Date: Tue, 20 Aug 2024 13:26:33 +0100 Subject: [PATCH 544/656] merge bitcoin#30730: Bump time machine to 53396a22afc04536ddf75d8f82ad2eafa5082725 --- contrib/guix/libexec/prelude.bash | 2 +- contrib/guix/manifest.scm | 12 +----------- 2 files changed, 2 insertions(+), 12 deletions(-) diff --git a/contrib/guix/libexec/prelude.bash b/contrib/guix/libexec/prelude.bash index 67c05cf90b6b..0c38c27e32de 100644 --- a/contrib/guix/libexec/prelude.bash +++ b/contrib/guix/libexec/prelude.bash @@ -71,7 +71,7 @@ fi time-machine() { # shellcheck disable=SC2086 guix time-machine --url=https://codeberg.org/guix/guix.git \ - --commit=7bf1d7aeaffba15c4f680f93ae88fbef25427252 \ + --commit=53396a22afc04536ddf75d8f82ad2eafa5082725 \ --cores="$JOBS" \ --keep-failed \ --fallback \ diff --git a/contrib/guix/manifest.scm b/contrib/guix/manifest.scm index 2f76f92167d5..0dcf6fbb2ca7 100644 --- a/contrib/guix/manifest.scm +++ b/contrib/guix/manifest.scm @@ -92,17 +92,7 @@ chain for " target " development.")) (home-page (package-home-page xgcc)) (license (package-license xgcc))))) -(define base-gcc - (package - (inherit gcc-12) ;; 12.3.0 - (version "12.4.0") - (source (origin - (method url-fetch) - (uri (string-append "mirror://gnu/gcc/gcc-" - version "/gcc-" version ".tar.xz")) - (sha256 - (base32 - "0xcida8l2wykvvzvpcrcn649gj0ijn64gwxbplacpg6c0hk6akvh")))))) +(define base-gcc gcc-12) ;; 12.4.0 (define base-linux-kernel-headers linux-libre-headers-6.1) From 7fef63d6bb90b6ecfe2f6b4318401858f562666e Mon 
Sep 17 00:00:00 2001 From: Kittywhiskers Van Gogh <63189531+kwvg@users.noreply.github.com> Date: Thu, 14 Nov 2024 11:02:15 +0000 Subject: [PATCH 545/656] merge bitcoin#31529: latest 2.31 glibc --- contrib/guix/manifest.scm | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/contrib/guix/manifest.scm b/contrib/guix/manifest.scm index 0dcf6fbb2ca7..738c395aebdc 100644 --- a/contrib/guix/manifest.scm +++ b/contrib/guix/manifest.scm @@ -451,7 +451,7 @@ inspecting signatures in Mach-O binaries.") #t)))))))) (define-public glibc-2.31 - (let ((commit "8e30f03744837a85e33d84ccd34ed3abe30d37c3")) + (let ((commit "7b27c450c34563a28e634cccb399cd415e71ebfe")) (package (inherit glibc) ;; 2.35 (version "2.31") @@ -463,7 +463,7 @@ inspecting signatures in Mach-O binaries.") (file-name (git-file-name "glibc" commit)) (sha256 (base32 - "1zi0s9yy5zkisw823vivn7zlj8w6g9p3mm7lmlqiixcxdkz4dbn6")) + "017qdpr5id7ddb4lpkzj2li1abvw916m3fc6n7nw28z4h5qbv2n0")) (patches (search-our-patches "glibc-guix-prefix.patch")))) (arguments (substitute-keyword-arguments (package-arguments glibc) @@ -473,6 +473,8 @@ inspecting signatures in Mach-O binaries.") (list "--enable-stack-protector=all", "--enable-bind-now", "--disable-werror", + "--disable-timezone-tools", + "--disable-profile", building-on))) ((#:phases phases) `(modify-phases ,phases From 9c9e876d70dea19d3312fb40dfa91b3660bf5f76 Mon Sep 17 00:00:00 2001 From: Kittywhiskers Van Gogh <63189531+kwvg@users.noreply.github.com> Date: Fri, 7 Feb 2025 10:02:54 +0000 Subject: [PATCH 546/656] partial bitcoin#33185: update time-machine to 5cb84f2013c5b1e48a7d0e617032266f1e6059e2 excludes: - 91b5cbaabbca49a8bd9df6da2506070b31482892 - 59c4898994bde3d86168075f0031c9d5a9ac5c8f --- contrib/devtools/symbol-check.py | 2 +- contrib/guix/libexec/prelude.bash | 2 +- contrib/guix/manifest.scm | 11 ++-- .../guix/patches/glibc-riscv-jumptarget.patch | 57 +++++++++++++++++++ 4 files changed, 65 insertions(+), 7 deletions(-) create mode 100644 
contrib/guix/patches/glibc-riscv-jumptarget.patch diff --git a/contrib/devtools/symbol-check.py b/contrib/devtools/symbol-check.py index 13d0a5d398b1..c296ad16fb9a 100755 --- a/contrib/devtools/symbol-check.py +++ b/contrib/devtools/symbol-check.py @@ -253,7 +253,7 @@ def check_MACHO_sdk(binary) -> bool: return False def check_MACHO_lld(binary) -> bool: - if binary.build_version.tools[0].version == [18, 1, 8]: + if binary.build_version.tools[0].version == [19, 1, 4]: return True return False diff --git a/contrib/guix/libexec/prelude.bash b/contrib/guix/libexec/prelude.bash index 0c38c27e32de..196a5e2c683a 100644 --- a/contrib/guix/libexec/prelude.bash +++ b/contrib/guix/libexec/prelude.bash @@ -71,7 +71,7 @@ fi time-machine() { # shellcheck disable=SC2086 guix time-machine --url=https://codeberg.org/guix/guix.git \ - --commit=53396a22afc04536ddf75d8f82ad2eafa5082725 \ + --commit=5cb84f2013c5b1e48a7d0e617032266f1e6059e2 \ --cores="$JOBS" \ --keep-failed \ --fallback \ diff --git a/contrib/guix/manifest.scm b/contrib/guix/manifest.scm index 738c395aebdc..35e678722a02 100644 --- a/contrib/guix/manifest.scm +++ b/contrib/guix/manifest.scm @@ -453,7 +453,7 @@ inspecting signatures in Mach-O binaries.") (define-public glibc-2.31 (let ((commit "7b27c450c34563a28e634cccb399cd415e71ebfe")) (package - (inherit glibc) ;; 2.35 + (inherit glibc) ;; 2.39 (version "2.31") (source (origin (method git-fetch) @@ -464,7 +464,8 @@ inspecting signatures in Mach-O binaries.") (sha256 (base32 "017qdpr5id7ddb4lpkzj2li1abvw916m3fc6n7nw28z4h5qbv2n0")) - (patches (search-our-patches "glibc-guix-prefix.patch")))) + (patches (search-our-patches "glibc-guix-prefix.patch" + "glibc-riscv-jumptarget.patch")))) (arguments (substitute-keyword-arguments (package-arguments glibc) ((#:configure-flags flags) @@ -537,9 +538,9 @@ inspecting signatures in Mach-O binaries.") (list gcc-toolchain-12 "static") (make-bitcoin-cross-toolchain target))) ((string-contains target "darwin") - (list clang-toolchain-18 
- lld-18 - (make-lld-wrapper lld-18 #:lld-as-ld? #t) + (list clang-toolchain-19 + lld-19 + (make-lld-wrapper lld-19 #:lld-as-ld? #t) python-signapple zip)) (else '()))))) diff --git a/contrib/guix/patches/glibc-riscv-jumptarget.patch b/contrib/guix/patches/glibc-riscv-jumptarget.patch new file mode 100644 index 000000000000..702959433d86 --- /dev/null +++ b/contrib/guix/patches/glibc-riscv-jumptarget.patch @@ -0,0 +1,57 @@ +commit 68389203832ab39dd0dbaabbc4059e7fff51c29b +Author: Fangrui Song +Date: Thu Oct 28 11:39:49 2021 -0700 + + riscv: Fix incorrect jal with HIDDEN_JUMPTARGET + + A non-local STV_DEFAULT defined symbol is by default preemptible in a + shared object. j/jal cannot target a preemptible symbol. On other + architectures, such a jump instruction either causes PLT [BZ #18822], or + if short-ranged, sometimes rejected by the linker (but not by GNU ld's + riscv port [ld PR/28509]). + + Use HIDDEN_JUMPTARGET to target a non-preemptible symbol instead. + + With this patch, ld.so and libc.so can be linked with LLD if source + files are compiled/assembled with -mno-relax/-Wa,-mno-relax. + + Acked-by: Palmer Dabbelt + Reviewed-by: Adhemerval Zanella + +Can be dropped when we are using glibc 2.35 or later. 
+ +diff --git a/sysdeps/riscv/setjmp.S b/sysdeps/riscv/setjmp.S +index 0b92016b31..bec7ff80f4 100644 +--- a/sysdeps/riscv/setjmp.S ++++ b/sysdeps/riscv/setjmp.S +@@ -21,7 +21,7 @@ + + ENTRY (_setjmp) + li a1, 0 +- j __sigsetjmp ++ j HIDDEN_JUMPTARGET (__sigsetjmp) + END (_setjmp) + ENTRY (setjmp) + li a1, 1 +diff --git a/sysdeps/unix/sysv/linux/riscv/setcontext.S b/sysdeps/unix/sysv/linux/riscv/setcontext.S +index 9510518750..e44a68aad4 100644 +--- a/sysdeps/unix/sysv/linux/riscv/setcontext.S ++++ b/sysdeps/unix/sysv/linux/riscv/setcontext.S +@@ -95,6 +95,7 @@ LEAF (__setcontext) + 99: j __syscall_error + + END (__setcontext) ++libc_hidden_def (__setcontext) + weak_alias (__setcontext, setcontext) + + LEAF (__start_context) +@@ -108,7 +109,7 @@ LEAF (__start_context) + /* Invoke subsequent context if present, else exit(0). */ + mv a0, s2 + beqz s2, 1f +- jal __setcontext +-1: j exit ++ jal HIDDEN_JUMPTARGET (__setcontext) ++1: j HIDDEN_JUMPTARGET (exit) + + END (__start_context) From ce506bc6038d376bb47efd3abad54e8625684e82 Mon Sep 17 00:00:00 2001 From: UdjinM6 Date: Thu, 20 Nov 2025 13:58:22 +0300 Subject: [PATCH 547/656] perf: reduce cs_main lock scope in evodb verify/repair operations Previously, evodb_verify_or_repair_impl held cs_main for the entire operation, which could take minutes when verifying/repairing large block ranges. This caused significant lock contention and blocked other operations requiring cs_main. This commit reduces the cs_main lock scope to only the initial setup phase where we resolve block indexes from the active chain. The actual verification and repair work (applying diffs, rebuilding lists from blocks, verifying snapshots) now runs without holding cs_main. 
Changes: - Wrap block index resolution in a scoped cs_main lock - Remove AssertLockHeld(cs_main) from helper functions: * RecalculateAndRepairDiffs * CollectSnapshotBlocks * VerifySnapshotPair * RepairSnapshotPair * RebuildListFromBlock (CSpecialTxProcessor) - Update function signatures to remove EXCLUSIVE_LOCKS_REQUIRED(cs_main) This is safe because: - CBlockIndex pointers remain valid after lock release (never deleted) - Block parent relationships (pprev, GetAncestor) are immutable - ReadBlockFromDisk takes cs_main internally when accessing nFile/nDataPos - Helper functions only process already-loaded block data and snapshots - ChainLocks prevent deep reorgs in Dash anyway --- src/evo/deterministicmns.cpp | 8 ----- src/evo/deterministicmns.h | 10 +++---- src/evo/specialtxman.cpp | 2 -- src/evo/specialtxman.h | 3 +- src/rpc/evo.cpp | 58 ++++++++++++++++++------------------ 5 files changed, 34 insertions(+), 47 deletions(-) diff --git a/src/evo/deterministicmns.cpp b/src/evo/deterministicmns.cpp index 0b79a38e9521..b29622903c59 100644 --- a/src/evo/deterministicmns.cpp +++ b/src/evo/deterministicmns.cpp @@ -1586,8 +1586,6 @@ CDeterministicMNManager::RecalcDiffsResult CDeterministicMNManager::RecalculateA const CBlockIndex* start_index, const CBlockIndex* stop_index, ChainstateManager& chainman, BuildListFromBlockFunc build_list_func, bool repair) { - AssertLockHeld(::cs_main); - RecalcDiffsResult result; result.start_height = start_index->nHeight; result.stop_height = stop_index->nHeight; @@ -1696,8 +1694,6 @@ CDeterministicMNManager::RecalcDiffsResult CDeterministicMNManager::RecalculateA std::vector CDeterministicMNManager::CollectSnapshotBlocks( const CBlockIndex* start_index, const CBlockIndex* stop_index, const Consensus::Params& consensus_params) { - AssertLockHeld(::cs_main); - std::vector snapshot_blocks; // Add the starting snapshot (find the snapshot at or before start) @@ -1749,8 +1745,6 @@ bool CDeterministicMNManager::VerifySnapshotPair( const 
CBlockIndex* from_index, const CBlockIndex* to_index, const CDeterministicMNList& from_snapshot, const CDeterministicMNList& to_snapshot, RecalcDiffsResult& result) { - AssertLockHeld(::cs_main); - // Verify this snapshot pair by applying all stored diffs sequentially CDeterministicMNList test_list = from_snapshot; @@ -1795,8 +1789,6 @@ std::vector> CDeterministicMNManage const CBlockIndex* from_index, const CBlockIndex* to_index, const CDeterministicMNList& from_snapshot, const CDeterministicMNList& to_snapshot, BuildListFromBlockFunc build_list_func, RecalcDiffsResult& result) { - AssertLockHeld(::cs_main); - CDeterministicMNList current_list = from_snapshot; // Temporary storage for recalculated diffs (one per block in this snapshot interval) std::vector> temp_diffs; diff --git a/src/evo/deterministicmns.h b/src/evo/deterministicmns.h index b01fbd5f8f7e..32888c40e8b9 100644 --- a/src/evo/deterministicmns.h +++ b/src/evo/deterministicmns.h @@ -743,7 +743,7 @@ class CDeterministicMNManager [[nodiscard]] RecalcDiffsResult RecalculateAndRepairDiffs( const CBlockIndex* start_index, const CBlockIndex* stop_index, ChainstateManager& chainman, BuildListFromBlockFunc build_list_func, - bool repair) EXCLUSIVE_LOCKS_REQUIRED(!cs, ::cs_main); + bool repair) EXCLUSIVE_LOCKS_REQUIRED(!cs); // Migration support for nVersion-first CDeterministicMNStateDiff format [[nodiscard]] bool IsMigrationRequired() const EXCLUSIVE_LOCKS_REQUIRED(!cs, ::cs_main); @@ -755,14 +755,12 @@ class CDeterministicMNManager // Helper methods for RecalculateAndRepairDiffs std::vector CollectSnapshotBlocks(const CBlockIndex* start_index, const CBlockIndex* stop_index, - const Consensus::Params& consensus_params) EXCLUSIVE_LOCKS_REQUIRED(::cs_main); + const Consensus::Params& consensus_params); bool VerifySnapshotPair(const CBlockIndex* from_index, const CBlockIndex* to_index, const CDeterministicMNList& from_snapshot, - const CDeterministicMNList& to_snapshot, RecalcDiffsResult& result) - 
EXCLUSIVE_LOCKS_REQUIRED(::cs_main); + const CDeterministicMNList& to_snapshot, RecalcDiffsResult& result); std::vector> RepairSnapshotPair( const CBlockIndex* from_index, const CBlockIndex* to_index, const CDeterministicMNList& from_snapshot, - const CDeterministicMNList& to_snapshot, BuildListFromBlockFunc build_list_func, RecalcDiffsResult& result) - EXCLUSIVE_LOCKS_REQUIRED(::cs_main); + const CDeterministicMNList& to_snapshot, BuildListFromBlockFunc build_list_func, RecalcDiffsResult& result); void WriteRepairedDiffs(const std::vector>& recalculated_diffs, RecalcDiffsResult& result) EXCLUSIVE_LOCKS_REQUIRED(!cs); }; diff --git a/src/evo/specialtxman.cpp b/src/evo/specialtxman.cpp index a788bc22437f..f5bd3f810117 100644 --- a/src/evo/specialtxman.cpp +++ b/src/evo/specialtxman.cpp @@ -185,8 +185,6 @@ bool CSpecialTxProcessor::RebuildListFromBlock(const CBlock& block, gsl::not_nul bool debugLogs, BlockValidationState& state, CDeterministicMNList& mnListRet) { - AssertLockHeld(cs_main); - // Verify that prevList either represents an empty/initial state (default-constructed), // or it matches the previous block's hash. 
assert(prevList == CDeterministicMNList() || prevList.GetBlockHash() == pindexPrev->GetBlockHash()); diff --git a/src/evo/specialtxman.h b/src/evo/specialtxman.h index 461fa8d222c1..9faa57dd02f4 100644 --- a/src/evo/specialtxman.h +++ b/src/evo/specialtxman.h @@ -83,8 +83,7 @@ class CSpecialTxProcessor // Used for rebuilding diffs from trusted snapshots bool RebuildListFromBlock(const CBlock& block, gsl::not_null pindexPrev, const CDeterministicMNList& prevList, const CCoinsViewCache& view, bool debugLogs, - BlockValidationState& state, CDeterministicMNList& mnListRet) - EXCLUSIVE_LOCKS_REQUIRED(cs_main); + BlockValidationState& state, CDeterministicMNList& mnListRet); private: bool CheckCreditPoolDiffForBlock(const CBlock& block, const CBlockIndex* pindex, const CCbTx& cbTx, diff --git a/src/rpc/evo.cpp b/src/rpc/evo.cpp index 12bd74cc79dd..7de013d32728 100644 --- a/src/rpc/evo.cpp +++ b/src/rpc/evo.cpp @@ -1760,37 +1760,38 @@ static UniValue evodb_verify_or_repair_impl(const JSONRPCRequest& request, bool CDeterministicMNManager& dmnman = *CHECK_NONFATAL(node.dmnman); CChainstateHelper& chain_helper = *CHECK_NONFATAL(node.chain_helper); - LOCK(::cs_main); - const CBlockIndex* start_index; const CBlockIndex* stop_index; - // Default to DIP0003 activation height if startBlock not specified - if (request.params[0].isNull()) { - const auto& consensus_params = Params().GetConsensus(); - start_index = chainman.ActiveChain()[consensus_params.DIP0003Height]; - if (!start_index) { - throw JSONRPCError(RPC_INTERNAL_ERROR, "Cannot find DIP0003 activation block"); - } - } else { - uint256 start_block_hash = ParseBlock(request.params[0], chainman, "startBlock"); - start_index = chainman.m_blockman.LookupBlockIndex(start_block_hash); - if (!start_index) { - throw JSONRPCError(RPC_INVALID_PARAMETER, "Start block not found"); + { + LOCK(::cs_main); + // Default to DIP0003 activation height if startBlock not specified + if (request.params[0].isNull()) { + const auto& 
consensus_params = Params().GetConsensus(); + start_index = chainman.ActiveChain()[consensus_params.DIP0003Height]; + if (!start_index) { + throw JSONRPCError(RPC_INTERNAL_ERROR, "Cannot find DIP0003 activation block"); + } + } else { + uint256 start_block_hash = ParseBlock(request.params[0], chainman, "startBlock"); + start_index = chainman.m_blockman.LookupBlockIndex(start_block_hash); + if (!start_index) { + throw JSONRPCError(RPC_INVALID_PARAMETER, "Start block not found"); + } } - } - // Default to chain tip if stopBlock not specified - if (request.params[1].isNull()) { - stop_index = chainman.ActiveChain().Tip(); - if (!stop_index) { - throw JSONRPCError(RPC_INTERNAL_ERROR, "Cannot find chain tip"); - } - } else { - uint256 stop_block_hash = ParseBlock(request.params[1], chainman, "stopBlock"); - stop_index = chainman.m_blockman.LookupBlockIndex(stop_block_hash); - if (!stop_index) { - throw JSONRPCError(RPC_INVALID_PARAMETER, "Stop block not found"); + // Default to chain tip if stopBlock not specified + if (request.params[1].isNull()) { + stop_index = chainman.ActiveChain().Tip(); + if (!stop_index) { + throw JSONRPCError(RPC_INTERNAL_ERROR, "Cannot find chain tip"); + } + } else { + uint256 stop_block_hash = ParseBlock(request.params[1], chainman, "stopBlock"); + stop_index = chainman.m_blockman.LookupBlockIndex(stop_block_hash); + if (!stop_index) { + throw JSONRPCError(RPC_INVALID_PARAMETER, "Stop block not found"); + } } } @@ -1802,12 +1803,11 @@ static UniValue evodb_verify_or_repair_impl(const JSONRPCRequest& request, bool throw JSONRPCError(RPC_INVALID_PARAMETER, "stopBlock must be >= startBlock"); } - // Create a callback that wraps CSpecialTxProcessor::BuildNewListFromBlock - // NO_THREAD_SAFETY_ANALYSIS: cs_main is held by the calling function (evodb_verify_or_repair_impl) + // Create a callback that wraps CSpecialTxProcessor::RebuildListFromBlock auto build_list_func = [&chain_helper](const CBlock& block, gsl::not_null pindexPrev, const 
CDeterministicMNList& prevList, const CCoinsViewCache& view, bool debugLogs, BlockValidationState& state, - CDeterministicMNList& mnListRet) NO_THREAD_SAFETY_ANALYSIS -> bool { + CDeterministicMNList& mnListRet) -> bool { return chain_helper.special_tx->RebuildListFromBlock(block, pindexPrev, prevList, view, debugLogs, state, mnListRet); }; From 084bb62307824e177ffdd3db401c5161a4dcebc2 Mon Sep 17 00:00:00 2001 From: UdjinM6 Date: Thu, 20 Nov 2025 15:13:21 +0300 Subject: [PATCH 548/656] chore: clang-format --- src/evo/deterministicmns.h | 15 ++++++++------- src/evo/specialtxman.h | 4 ++-- 2 files changed, 10 insertions(+), 9 deletions(-) diff --git a/src/evo/deterministicmns.h b/src/evo/deterministicmns.h index 32888c40e8b9..80aabbe113aa 100644 --- a/src/evo/deterministicmns.h +++ b/src/evo/deterministicmns.h @@ -740,10 +740,10 @@ class CDeterministicMNManager BlockValidationState& state, CDeterministicMNList& mnListRet)>; - [[nodiscard]] RecalcDiffsResult RecalculateAndRepairDiffs( - const CBlockIndex* start_index, const CBlockIndex* stop_index, - ChainstateManager& chainman, BuildListFromBlockFunc build_list_func, - bool repair) EXCLUSIVE_LOCKS_REQUIRED(!cs); + [[nodiscard]] RecalcDiffsResult RecalculateAndRepairDiffs(const CBlockIndex* start_index, + const CBlockIndex* stop_index, ChainstateManager& chainman, + BuildListFromBlockFunc build_list_func, bool repair) + EXCLUSIVE_LOCKS_REQUIRED(!cs); // Migration support for nVersion-first CDeterministicMNStateDiff format [[nodiscard]] bool IsMigrationRequired() const EXCLUSIVE_LOCKS_REQUIRED(!cs, ::cs_main); @@ -755,9 +755,10 @@ class CDeterministicMNManager // Helper methods for RecalculateAndRepairDiffs std::vector CollectSnapshotBlocks(const CBlockIndex* start_index, const CBlockIndex* stop_index, - const Consensus::Params& consensus_params); - bool VerifySnapshotPair(const CBlockIndex* from_index, const CBlockIndex* to_index, const CDeterministicMNList& from_snapshot, - const CDeterministicMNList& to_snapshot, 
RecalcDiffsResult& result); + const Consensus::Params& consensus_params); + bool VerifySnapshotPair(const CBlockIndex* from_index, const CBlockIndex* to_index, + const CDeterministicMNList& from_snapshot, const CDeterministicMNList& to_snapshot, + RecalcDiffsResult& result); std::vector> RepairSnapshotPair( const CBlockIndex* from_index, const CBlockIndex* to_index, const CDeterministicMNList& from_snapshot, const CDeterministicMNList& to_snapshot, BuildListFromBlockFunc build_list_func, RecalcDiffsResult& result); diff --git a/src/evo/specialtxman.h b/src/evo/specialtxman.h index 9faa57dd02f4..de293d0dfaba 100644 --- a/src/evo/specialtxman.h +++ b/src/evo/specialtxman.h @@ -82,8 +82,8 @@ class CSpecialTxProcessor // Variant that takes an explicit starting list instead of loading from GetListForBlock // Used for rebuilding diffs from trusted snapshots bool RebuildListFromBlock(const CBlock& block, gsl::not_null pindexPrev, - const CDeterministicMNList& prevList, const CCoinsViewCache& view, bool debugLogs, - BlockValidationState& state, CDeterministicMNList& mnListRet); + const CDeterministicMNList& prevList, const CCoinsViewCache& view, bool debugLogs, + BlockValidationState& state, CDeterministicMNList& mnListRet); private: bool CheckCreditPoolDiffForBlock(const CBlock& block, const CBlockIndex* pindex, const CCbTx& cbTx, From 6700ca9f15c5a8aa3106c9b40dc8b7fa4c4c1409 Mon Sep 17 00:00:00 2001 From: pasta Date: Thu, 20 Nov 2025 12:36:35 -0600 Subject: [PATCH 549/656] fix: use proper host objcopy/readelf --- contrib/guix/libexec/build.sh | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/contrib/guix/libexec/build.sh b/contrib/guix/libexec/build.sh index 6408234d89f9..88023e71c25f 100755 --- a/contrib/guix/libexec/build.sh +++ b/contrib/guix/libexec/build.sh @@ -345,7 +345,7 @@ mkdir -p "$DISTSRC" case "$HOST" in *linux*) # Compress DWARF sections in debug files and set proper permissions - find "${DISTNAME}" -name "*.dbg" -type f -print0 | 
xargs -0 -P"$JOBS" -I{} sh -c "objcopy --compress-debug-sections=zlib \"\$1\" \"\$1.tmp\" && mv \"\$1.tmp\" \"\$1\" && chmod 644 \"\$1\"" _ {} + find "${DISTNAME}" -name "*.dbg" -type f -print0 | xargs -0 -P"$JOBS" -I{} sh -c "${HOST}-objcopy --compress-debug-sections=zlib \"\$1\" \"\$1.tmp\" && mv \"\$1.tmp\" \"\$1\" && chmod 644 \"\$1\"" _ {} # Create .build-id tree for perf auto-discovery mkdir -p "${DISTNAME}/usr/lib/debug/.build-id" @@ -354,7 +354,7 @@ mkdir -p "$DISTSRC" find "${DISTNAME}/lib" -type f -print0 } | while IFS= read -r -d '' elf; do if file "$elf" | grep -q "ELF.*executable\|ELF.*shared object"; then - build_id=$(readelf -n "$elf" 2>/dev/null | awk '/Build ID/ {print $3; exit}') + build_id=$(${HOST}-readelf -n "$elf" 2>/dev/null | awk '/Build ID/ {print $3; exit}') if [ -n "$build_id" ] && [ -f "${elf}.dbg" ]; then dir="${DISTNAME}/usr/lib/debug/.build-id/${build_id:0:2}" mkdir -p "$dir" @@ -374,13 +374,13 @@ mkdir -p "$DISTSRC" while IFS= read -r -d '' elf; do if file "$elf" | grep -q "ELF.*executable\|ELF.*shared object"; then # Check for build-id - if ! readelf -n "$elf" 2>/dev/null | grep -q "Build ID"; then + if ! ${HOST}-readelf -n "$elf" 2>/dev/null | grep -q "Build ID"; then echo "ERROR: No build-id found in $elf" >&2 verification_failed=1 fi # Check for .gnu_debuglink - if ! readelf --string-dump=.gnu_debuglink "$elf" >/dev/null 2>&1; then + if ! 
${HOST}-readelf --string-dump=.gnu_debuglink "$elf" >/dev/null 2>&1; then echo "ERROR: No .gnu_debuglink found in $elf" >&2 verification_failed=1 fi From 04ac20aebaabfe6d6d71de80115696976d2bd1da Mon Sep 17 00:00:00 2001 From: Kittywhiskers Van Gogh <63189531+kwvg@users.noreply.github.com> Date: Thu, 25 Jul 2024 20:56:12 +0200 Subject: [PATCH 550/656] partial bitcoin#30527: Bump python minimum supported version to 3.10 --- .python-version | 2 +- contrib/containers/ci/ci-slim.Dockerfile | 2 +- doc/dependencies.md | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/.python-version b/.python-version index 43077b246094..1445aee866ce 100644 --- a/.python-version +++ b/.python-version @@ -1 +1 @@ -3.9.18 +3.10.14 diff --git a/contrib/containers/ci/ci-slim.Dockerfile b/contrib/containers/ci/ci-slim.Dockerfile index ae59495f3981..76c9bfdb4e85 100644 --- a/contrib/containers/ci/ci-slim.Dockerfile +++ b/contrib/containers/ci/ci-slim.Dockerfile @@ -55,7 +55,7 @@ COPY --from=ghcr.io/astral-sh/uv:latest /uv /uvx /bin/ # Install Python to a system-wide location and set it as default # PYTHON_VERSION should match the value in .python-version -ARG PYTHON_VERSION=3.9.18 +ARG PYTHON_VERSION=3.10.14 ENV UV_PYTHON_INSTALL_DIR=/usr/local/python RUN uv python install ${PYTHON_VERSION} diff --git a/doc/dependencies.md b/doc/dependencies.md index 6a55eaba416e..2a2b7ec28370 100644 --- a/doc/dependencies.md +++ b/doc/dependencies.md @@ -10,7 +10,7 @@ You can find installation instructions in the `build-*.md` file for your platfor | [Automake](https://www.gnu.org/software/automake/) | [1.13](https://github.com/bitcoin/bitcoin/pull/18290) | | [Clang](https://clang.llvm.org) | [16.0](https://github.com/bitcoin/bitcoin/pull/30263) | | [GCC](https://gcc.gnu.org) | [11.1](https://github.com/bitcoin/bitcoin/pull/29091) | -| [Python](https://www.python.org) (scripts, tests) | [3.9](https://github.com/bitcoin/bitcoin/pull/28211) | +| [Python](https://www.python.org) (scripts, tests) 
| [3.10](https://github.com/bitcoin/bitcoin/pull/30527) | | [systemtap](https://sourceware.org/systemtap/) ([tracing](tracing.md))| N/A | ## Required From 7b80dfbad6272d16ea9a685f0216d152f49723dc Mon Sep 17 00:00:00 2001 From: Kittywhiskers Van Gogh <63189531+kwvg@users.noreply.github.com> Date: Fri, 25 Aug 2023 21:23:29 -0600 Subject: [PATCH 551/656] merge bitcoin#28347: replace deprecated pkg_resources with importlib.metadata --- test/lint/lint-python.py | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) diff --git a/test/lint/lint-python.py b/test/lint/lint-python.py index 574cb65cc05e..fb6224b06e8a 100755 --- a/test/lint/lint-python.py +++ b/test/lint/lint-python.py @@ -9,10 +9,12 @@ """ import os -import pkg_resources import subprocess import sys +from importlib.metadata import metadata, PackageNotFoundError + + DEPS = ['flake8', 'lief', 'mypy', 'pyzmq'] MYPY_CACHE_DIR = f"{os.getenv('BASE_ROOT_DIR', '')}/test/.mypy_cache" FILES_ARGS = ['git', 'ls-files', '--','test/functional/*.py', 'contrib/devtools/*.py', ':(exclude)contrib/devtools/github-merge.py'] @@ -95,10 +97,10 @@ def check_dependencies(): - working_set = {pkg.key for pkg in pkg_resources.working_set} - for dep in DEPS: - if dep not in working_set: + try: + metadata(dep) + except PackageNotFoundError: print(f"Skipping Python linting since {dep} is not installed.") exit(0) From 80a44e9b34c636a301920a67c42168cee10ad516 Mon Sep 17 00:00:00 2001 From: Kittywhiskers Van Gogh <63189531+kwvg@users.noreply.github.com> Date: Mon, 17 Nov 2025 22:00:44 +0530 Subject: [PATCH 552/656] partial bitcoin#26257: python linter flake8 E275 fixup, update dependencies includes: - 1e5e87cec3 (portions) --- ci/lint/04_install.sh | 6 +++--- contrib/containers/ci/ci-slim.Dockerfile | 6 +++--- 2 files changed, 6 insertions(+), 6 deletions(-) diff --git a/ci/lint/04_install.sh b/ci/lint/04_install.sh index f70ff2d2235e..4bdb44f8245a 100755 --- a/ci/lint/04_install.sh +++ b/ci/lint/04_install.sh @@ -36,9 +36,9 @@ fi 
${CI_RETRY_EXE} pip3 install codespell==2.1.0 ${CI_RETRY_EXE} pip3 install flake8==4.0.1 ${CI_RETRY_EXE} pip3 install lief==0.13.1 -${CI_RETRY_EXE} pip3 install mypy==0.942 -${CI_RETRY_EXE} pip3 install pyzmq==22.3.0 -${CI_RETRY_EXE} pip3 install vulture==2.3 +${CI_RETRY_EXE} pip3 install mypy==0.971 +${CI_RETRY_EXE} pip3 install pyzmq==24.0.1 +${CI_RETRY_EXE} pip3 install vulture==2.6 SHELLCHECK_VERSION=v0.8.0 curl -sL "https://github.com/koalaman/shellcheck/releases/download/${SHELLCHECK_VERSION}/shellcheck-${SHELLCHECK_VERSION}.linux.x86_64.tar.xz" | \ diff --git a/contrib/containers/ci/ci-slim.Dockerfile b/contrib/containers/ci/ci-slim.Dockerfile index 76c9bfdb4e85..c5f822a8d286 100644 --- a/contrib/containers/ci/ci-slim.Dockerfile +++ b/contrib/containers/ci/ci-slim.Dockerfile @@ -78,9 +78,9 @@ RUN uv pip install --system --break-system-packages \ jinja2 \ lief==0.13.2 \ multiprocess \ - mypy==0.942 \ - pyzmq==22.3.0 \ - vulture==2.3 + mypy==0.971 \ + pyzmq==24.0.1 \ + vulture==2.6 # Install packages relied on by tests ARG DASH_HASH_VERSION=1.4.0 From 4110ff38fc70a9df24d019848be57548c8490f6e Mon Sep 17 00:00:00 2001 From: Kittywhiskers Van Gogh <63189531+kwvg@users.noreply.github.com> Date: Mon, 17 Nov 2025 22:01:52 +0530 Subject: [PATCH 553/656] lint: mypy 0.981 --- ci/lint/04_install.sh | 2 +- contrib/containers/ci/ci-slim.Dockerfile | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/ci/lint/04_install.sh b/ci/lint/04_install.sh index 4bdb44f8245a..0453e9167488 100755 --- a/ci/lint/04_install.sh +++ b/ci/lint/04_install.sh @@ -36,7 +36,7 @@ fi ${CI_RETRY_EXE} pip3 install codespell==2.1.0 ${CI_RETRY_EXE} pip3 install flake8==4.0.1 ${CI_RETRY_EXE} pip3 install lief==0.13.1 -${CI_RETRY_EXE} pip3 install mypy==0.971 +${CI_RETRY_EXE} pip3 install mypy==0.981 ${CI_RETRY_EXE} pip3 install pyzmq==24.0.1 ${CI_RETRY_EXE} pip3 install vulture==2.6 diff --git a/contrib/containers/ci/ci-slim.Dockerfile b/contrib/containers/ci/ci-slim.Dockerfile index 
c5f822a8d286..1738f0ab619a 100644 --- a/contrib/containers/ci/ci-slim.Dockerfile +++ b/contrib/containers/ci/ci-slim.Dockerfile @@ -78,7 +78,7 @@ RUN uv pip install --system --break-system-packages \ jinja2 \ lief==0.13.2 \ multiprocess \ - mypy==0.971 \ + mypy==0.981 \ pyzmq==24.0.1 \ vulture==2.6 From d6489f00c7bf141dd5a6c1ba4d8b967e7a3d7cd5 Mon Sep 17 00:00:00 2001 From: Kittywhiskers Van Gogh <63189531+kwvg@users.noreply.github.com> Date: Tue, 18 Nov 2025 10:16:29 +0530 Subject: [PATCH 554/656] fix: make copy of `skip` in `GetStackFrames` to avoid clobbering --- src/stacktraces.cpp | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/src/stacktraces.cpp b/src/stacktraces.cpp index 26283eb23109..f1d68f27dbc2 100644 --- a/src/stacktraces.cpp +++ b/src/stacktraces.cpp @@ -167,6 +167,8 @@ static uint64_t ConvertAddress(uint64_t addr) static __attribute__((noinline)) std::vector GetStackFrames(size_t skip, size_t max_frames, const CONTEXT* pContext = nullptr) { #ifdef ENABLE_STACKTRACES + volatile size_t skip_frames = skip; + // We can't use libbacktrace for stack unwinding on Windows as it returns invalid addresses (like 0x1 or 0xffffffff) // dbghelp is not thread safe static StdMutex m; @@ -205,7 +207,7 @@ static __attribute__((noinline)) std::vector GetStackFrames(size_t ski stackframe.AddrStack.Offset = context.Rsp; stackframe.AddrStack.Mode = AddrModeFlat; if (!pContext) { - skip++; // skip this method + skip_frames = skip_frames + 1; // skip this method } #else #error unsupported architecture @@ -223,7 +225,7 @@ static __attribute__((noinline)) std::vector GetStackFrames(size_t ski if (!result) { break; } - if (i >= skip) { + if (i >= skip_frames) { uint64_t pc = ConvertAddress(stackframe.AddrPC.Offset); if (pc == 0) { pc = stackframe.AddrPC.Offset; From 6a1786cb1f0e145d8172b4e84e807d24446c0dcf Mon Sep 17 00:00:00 2001 From: Kittywhiskers Van Gogh <63189531+kwvg@users.noreply.github.com> Date: Sun, 16 Nov 2025 08:16:42 +0530 Subject: [PATCH 
555/656] fix: resolve `test: =: unary operator expected` error --- configure.ac | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/configure.ac b/configure.ac index f4fa99953b1c..6eccd119c840 100644 --- a/configure.ac +++ b/configure.ac @@ -1272,7 +1272,7 @@ if test "$enable_crashhooks" = "yes"; then fi AX_CHECK_LINK_FLAG([-Wl,-wrap=__cxa_allocate_exception], [LINK_WRAP_SUPPORTED=yes],,,) -AM_CONDITIONAL([CRASH_HOOKS_WRAPPED_CXX_ABI],[test $LINK_WRAP_SUPPORTED = "yes"]) +AM_CONDITIONAL([CRASH_HOOKS_WRAPPED_CXX_ABI],[test "$LINK_WRAP_SUPPORTED" = "yes"]) if test "$LINK_WRAP_SUPPORTED" = "yes"; then AC_DEFINE(CRASH_HOOKS_WRAPPED_CXX_ABI, 1, [Define this symbol to use wrapped CXX ABIs for exception stacktraces]) From 6ea897ac0024a3a2230fe63c23ea5d82e824fae9 Mon Sep 17 00:00:00 2001 From: Kittywhiskers Van Gogh <63189531+kwvg@users.noreply.github.com> Date: Sun, 16 Nov 2025 08:24:01 +0530 Subject: [PATCH 556/656] chore: drop unmaintained Guix container --- contrib/guix/Dockerfile | 63 ----------------------------------------- contrib/guix/INSTALL.md | 5 ++-- 2 files changed, 2 insertions(+), 66 deletions(-) delete mode 100644 contrib/guix/Dockerfile diff --git a/contrib/guix/Dockerfile b/contrib/guix/Dockerfile deleted file mode 100644 index bffb10f75c48..000000000000 --- a/contrib/guix/Dockerfile +++ /dev/null @@ -1,63 +0,0 @@ -FROM alpine:3.17 - -RUN apk --no-cache --update add \ - bash \ - bzip2 \ - ca-certificates \ - curl \ - git \ - make \ - shadow - -ARG guix_download_path=ftp://ftp.gnu.org/gnu/guix -ARG guix_version=1.4.0 -ARG guix_checksum_aarch64=72d807392889919940b7ec9632c45a259555e6b0942ea7bfd131101e08ebfcf4 -ARG guix_checksum_x86_64=236ca7c9c5958b1f396c2924fcc5bc9d6fdebcb1b4cf3c7c6d46d4bf660ed9c9 -ARG builder_count=32 - -ENV PATH="/root/.config/guix/current/bin:$PATH" - -# Application Setup -# https://guix.gnu.org/manual/en/html_node/Application-Setup.html -ENV GUIX_LOCPATH="/root/.guix-profile/lib/locale" -ENV LC_ALL="en_US.UTF-8" - -RUN 
guix_file_name=guix-binary-${guix_version}.$(uname -m)-linux.tar.xz && \ - eval "guix_checksum=\${guix_checksum_$(uname -m)}" && \ - cd /tmp && \ - wget -q -O "$guix_file_name" "${guix_download_path}/${guix_file_name}" && \ - echo "${guix_checksum} ${guix_file_name}" | sha256sum -c && \ - tar xJf "$guix_file_name" && \ - mv var/guix /var/ && \ - mv gnu / && \ - mkdir -p ~root/.config/guix && \ - ln -sf /var/guix/profiles/per-user/root/current-guix ~root/.config/guix/current && \ - source ~root/.config/guix/current/etc/profile - -# Guix expects this file to exist -RUN touch /etc/nsswitch.conf - -RUN guix archive --authorize < ~root/.config/guix/current/share/guix/ci.guix.gnu.org.pub - -# Build Environment Setup -# https://guix.gnu.org/manual/en/html_node/Build-Environment-Setup.html - -RUN groupadd --system guixbuild -RUN for i in $(seq -w 1 ${builder_count}); do \ - useradd -g guixbuild -G guixbuild \ - -d /var/empty -s $(which nologin) \ - -c "Guix build user ${i}" --system \ - "guixbuilder${i}" ; \ - done - -ENTRYPOINT ["/root/.config/guix/current/bin/guix-daemon","--build-users-group=guixbuild"] - -RUN git clone https://github.com/dashpay/dash.git /dash - -RUN mkdir base_cache sources SDKs - -WORKDIR /dash - -RUN mkdir -p depends/SDKs && \ - curl -L https://bitcoincore.org/depends-sources/sdks/Xcode-15.0-15A240d-extracted-SDK-with-libcxx-headers.tar.gz | tar -xz -C depends/SDKs - diff --git a/contrib/guix/INSTALL.md b/contrib/guix/INSTALL.md index d96e71edf218..b347c8350858 100644 --- a/contrib/guix/INSTALL.md +++ b/contrib/guix/INSTALL.md @@ -19,7 +19,6 @@ Otherwise, you may choose from one of the following options to install Guix: - Installs any release - Binary installation only, requires high level of trust 3. 
Using **Docker image** [↗︎ external instructions][install-docker] - - Maintained by pastapastapasta - Easy (automatically performs *some* setup) - Works wherever Docker images work - Installs any release @@ -59,8 +58,8 @@ so you should log out and log back in. ## Option 3: Using Docker image -Please refer to Docker's image -[here](https://github.com/dashpay/dash/tree/master/contrib/guix/Dockerfile). +Please refer to Docker image +[here](https://github.com/dashpay/dash/tree/master/contrib/containers/guix/Dockerfile). ## Option 4: Using a distribution-maintained package From 29e98e38c48dd0174f298d1ebbd5c3cf814cabbd Mon Sep 17 00:00:00 2001 From: Kittywhiskers Van Gogh <63189531+kwvg@users.noreply.github.com> Date: Sun, 16 Nov 2025 08:26:32 +0530 Subject: [PATCH 557/656] chore: document `USER_ID` and `GROUP_ID` in `docker-compose.yml` --- contrib/containers/develop/docker-compose.yml | 3 +++ 1 file changed, 3 insertions(+) diff --git a/contrib/containers/develop/docker-compose.yml b/contrib/containers/develop/docker-compose.yml index 9238c97183e7..e513f4c4769c 100644 --- a/contrib/containers/develop/docker-compose.yml +++ b/contrib/containers/develop/docker-compose.yml @@ -4,6 +4,9 @@ services: build: context: '..' 
dockerfile: './develop/Dockerfile' + args: + USER_ID: 1000 # set this to $(id -u) of the host + GROUP_ID: 1000 # set this to $(id -g) of the host ports: - "9998:9998" # Mainnet Ports - "9999:9999" From 066d4094b1be2707ac8fe51c92b6ca8e613b5d52 Mon Sep 17 00:00:00 2001 From: Kittywhiskers Van Gogh <63189531+kwvg@users.noreply.github.com> Date: Sun, 16 Nov 2025 08:27:23 +0530 Subject: [PATCH 558/656] chore: remove outdated `boot2docker` comment --- contrib/containers/develop/docker-compose.yml | 11 ----------- 1 file changed, 11 deletions(-) diff --git a/contrib/containers/develop/docker-compose.yml b/contrib/containers/develop/docker-compose.yml index e513f4c4769c..c6f2cea70424 100644 --- a/contrib/containers/develop/docker-compose.yml +++ b/contrib/containers/develop/docker-compose.yml @@ -16,19 +16,8 @@ services: - seccomp:unconfined stdin_open: true # Equivalent to -i tty: true # Equivalent to -t - -# A note about volumes: -# -# If Docker is interacting with your operating system directly -# without an intermediate VM, then you do not need to change anything -# -# But if not, then you'll need to mount your system's root directory -# (i.e. /) into the boot2docker instance if you want to mirror the exact -# filesystem structure of your host. -# volumes: - type: bind -# source: /host/$PWD # Workaround needed on non-Linux hosts source: ../../.. 
target: /src/dash From 82723dc788e287cbdacee303efe1e01f78dffd6c Mon Sep 17 00:00:00 2001 From: Kittywhiskers Van Gogh <63189531+kwvg@users.noreply.github.com> Date: Fri, 21 Nov 2025 01:33:43 +0530 Subject: [PATCH 559/656] fix: don't forget to assign user to group if group exists --- contrib/containers/ci/ci-slim.Dockerfile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/contrib/containers/ci/ci-slim.Dockerfile b/contrib/containers/ci/ci-slim.Dockerfile index 1738f0ab619a..09843ccc34db 100644 --- a/contrib/containers/ci/ci-slim.Dockerfile +++ b/contrib/containers/ci/ci-slim.Dockerfile @@ -123,7 +123,7 @@ RUN set -ex; \ ARG USER_ID=1000 \ GROUP_ID=1000 RUN set -ex; \ - getent group ${GROUP_ID} || groupmod -g ${GROUP_ID} -n dash ubuntu; \ + (getent group ${GROUP_ID} && usermod -g ${GROUP_ID} ubuntu) || groupmod -g ${GROUP_ID} -n dash ubuntu; \ usermod -u ${USER_ID} -md /home/dash -l dash ubuntu; \ chown ${USER_ID}:${GROUP_ID} -R /home/dash; \ mkdir -p /src/dash && \ From 4d278c65744702d7fbf8bd836139bb14c74ed64e Mon Sep 17 00:00:00 2001 From: pasta Date: Thu, 20 Nov 2025 14:16:43 -0600 Subject: [PATCH 560/656] fix: shellcheck --- contrib/guix/libexec/build.sh | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/contrib/guix/libexec/build.sh b/contrib/guix/libexec/build.sh index 88023e71c25f..4e378b000979 100755 --- a/contrib/guix/libexec/build.sh +++ b/contrib/guix/libexec/build.sh @@ -354,7 +354,7 @@ mkdir -p "$DISTSRC" find "${DISTNAME}/lib" -type f -print0 } | while IFS= read -r -d '' elf; do if file "$elf" | grep -q "ELF.*executable\|ELF.*shared object"; then - build_id=$(${HOST}-readelf -n "$elf" 2>/dev/null | awk '/Build ID/ {print $3; exit}') + build_id=$("${HOST}"-readelf -n "$elf" 2>/dev/null | awk '/Build ID/ {print $3; exit}') if [ -n "$build_id" ] && [ -f "${elf}.dbg" ]; then dir="${DISTNAME}/usr/lib/debug/.build-id/${build_id:0:2}" mkdir -p "$dir" @@ -374,13 +374,13 @@ mkdir -p "$DISTSRC" while IFS= read -r -d '' elf; 
do if file "$elf" | grep -q "ELF.*executable\|ELF.*shared object"; then # Check for build-id - if ! ${HOST}-readelf -n "$elf" 2>/dev/null | grep -q "Build ID"; then + if ! "${HOST}"-readelf -n "$elf" 2>/dev/null | grep -q "Build ID"; then echo "ERROR: No build-id found in $elf" >&2 verification_failed=1 fi # Check for .gnu_debuglink - if ! ${HOST}-readelf --string-dump=.gnu_debuglink "$elf" >/dev/null 2>&1; then + if ! "${HOST}"-readelf --string-dump=.gnu_debuglink "$elf" >/dev/null 2>&1; then echo "ERROR: No .gnu_debuglink found in $elf" >&2 verification_failed=1 fi From c359fb28904b91805270a01953ac54b708b7e940 Mon Sep 17 00:00:00 2001 From: Kittywhiskers Van Gogh <63189531+kwvg@users.noreply.github.com> Date: Sun, 16 Nov 2025 08:48:34 +0530 Subject: [PATCH 561/656] ci: bump to Clang 19 --- ci/test/00_setup_env_native_fuzz.sh | 2 +- ci/test/00_setup_env_native_fuzz_with_valgrind.sh | 2 +- ci/test/00_setup_env_native_multiprocess.sh | 4 ++-- ci/test/00_setup_env_native_tsan.sh | 6 +++--- ci/test/00_setup_env_native_ubsan.sh | 4 ++-- ci/test/00_setup_env_native_valgrind.sh | 2 +- contrib/containers/ci/ci-slim.Dockerfile | 2 +- 7 files changed, 11 insertions(+), 11 deletions(-) diff --git a/ci/test/00_setup_env_native_fuzz.sh b/ci/test/00_setup_env_native_fuzz.sh index 18929cc2b2f0..586496215358 100755 --- a/ci/test/00_setup_env_native_fuzz.sh +++ b/ci/test/00_setup_env_native_fuzz.sh @@ -15,4 +15,4 @@ export RUN_UNIT_TESTS=false export RUN_FUNCTIONAL_TESTS=false export RUN_FUZZ_TESTS=true export GOAL="install" -export BITCOIN_CONFIG="--enable-zmq --disable-ccache --enable-fuzz --with-sanitizers=fuzzer,address,undefined,integer CC='clang-18 -ftrivial-auto-var-init=pattern' CXX='clang++-18 -ftrivial-auto-var-init=pattern' --with-boost-process" +export BITCOIN_CONFIG="--enable-zmq --disable-ccache --enable-fuzz --with-sanitizers=fuzzer,address,undefined,integer CC='clang-19 -ftrivial-auto-var-init=pattern' CXX='clang++-19 -ftrivial-auto-var-init=pattern' 
--with-boost-process" diff --git a/ci/test/00_setup_env_native_fuzz_with_valgrind.sh b/ci/test/00_setup_env_native_fuzz_with_valgrind.sh index f379d8ee7edc..59d921d54fc9 100755 --- a/ci/test/00_setup_env_native_fuzz_with_valgrind.sh +++ b/ci/test/00_setup_env_native_fuzz_with_valgrind.sh @@ -15,5 +15,5 @@ export RUN_FUZZ_TESTS=true export FUZZ_TESTS_CONFIG="--valgrind" export GOAL="install" # Temporarily pin dwarf 4, until valgrind can understand clang's dwarf 5 -export BITCOIN_CONFIG="--enable-fuzz --with-sanitizers=fuzzer CC=clang-18 CXX=clang++-18 CFLAGS='-gdwarf-4' CXXFLAGS='-gdwarf-4'" +export BITCOIN_CONFIG="--enable-fuzz --with-sanitizers=fuzzer CC=clang-19 CXX=clang++-19 CFLAGS='-gdwarf-4' CXXFLAGS='-gdwarf-4'" export CCACHE_MAXSIZE=200M diff --git a/ci/test/00_setup_env_native_multiprocess.sh b/ci/test/00_setup_env_native_multiprocess.sh index 7ab95903d131..f0e6e532b162 100755 --- a/ci/test/00_setup_env_native_multiprocess.sh +++ b/ci/test/00_setup_env_native_multiprocess.sh @@ -9,11 +9,11 @@ export LC_ALL=C.UTF-8 export CONTAINER_NAME=ci_native_multiprocess export HOST=x86_64-pc-linux-gnu export PACKAGES="cmake python3 llvm clang" -export DEP_OPTS="MULTIPROCESS=1 CC=clang-18 CXX=clang++-18" +export DEP_OPTS="MULTIPROCESS=1 CC=clang-19 CXX=clang++-19" export RUN_TIDY=true export GOAL="install" export TEST_RUNNER_EXTRA="--v2transport" -export BITCOIN_CONFIG="--with-boost-process --enable-debug CC=clang-18 CXX=clang++-18" # Use clang to avoid OOM +export BITCOIN_CONFIG="--with-boost-process --enable-debug CC=clang-19 CXX=clang++-19" # Use clang to avoid OOM # Additional flags for RUN_TIDY export BITCOIN_CONFIG="${BITCOIN_CONFIG} --disable-hardening CFLAGS='-O0 -g0' CXXFLAGS='-O0 -g0 -Wno-error=documentation'" export BITCOIND=dash-node # Used in functional tests diff --git a/ci/test/00_setup_env_native_tsan.sh b/ci/test/00_setup_env_native_tsan.sh index 3820e469aa5f..2feddf269806 100755 --- a/ci/test/00_setup_env_native_tsan.sh +++ 
b/ci/test/00_setup_env_native_tsan.sh @@ -7,11 +7,11 @@ export LC_ALL=C.UTF-8 export CONTAINER_NAME=ci_native_tsan -export PACKAGES="clang-18 llvm-18 libclang-rt-18-dev libc++abi-18-dev libc++-18-dev python3-zmq" -export DEP_OPTS="CC=clang-18 CXX='clang++-18 -stdlib=libc++'" +export PACKAGES="clang-19 llvm-19 libclang-rt-19-dev libc++abi-19-dev libc++-19-dev python3-zmq" +export DEP_OPTS="CC=clang-19 CXX='clang++-19 -stdlib=libc++'" export TEST_RUNNER_EXTRA="--extended --exclude feature_pruning,feature_dbcrash,wallet_multiwallet.py" # Temporarily suppress ASan heap-use-after-free (see issue #14163) export TEST_RUNNER_EXTRA="${TEST_RUNNER_EXTRA} --timeout-factor=4" # Increase timeout because sanitizers slow down export GOAL="install" -export BITCOIN_CONFIG="--enable-zmq --with-sanitizers=thread CC=clang-18 CXX=clang++-18 CXXFLAGS='-g' --with-boost-process" +export BITCOIN_CONFIG="--enable-zmq --with-sanitizers=thread CC=clang-19 CXX=clang++-19 CXXFLAGS='-g' --with-boost-process" export CPPFLAGS="-DARENA_DEBUG -DDEBUG_LOCKORDER -DDEBUG_LOCKCONTENTION" export PYZMQ=true diff --git a/ci/test/00_setup_env_native_ubsan.sh b/ci/test/00_setup_env_native_ubsan.sh index 9fb440dfd41e..f562bce0edbf 100755 --- a/ci/test/00_setup_env_native_ubsan.sh +++ b/ci/test/00_setup_env_native_ubsan.sh @@ -8,8 +8,8 @@ export LC_ALL=C.UTF-8 export CONTAINER_NAME=ci_native_ubsan -export PACKAGES="clang-18 llvm-18 python3-zmq qtbase5-dev qttools5-dev-tools libevent-dev bsdmainutils libboost-dev libdb5.3++-dev libminiupnpc-dev libzmq3-dev libqrencode-dev" +export PACKAGES="clang-19 llvm-19 python3-zmq qtbase5-dev qttools5-dev-tools libevent-dev bsdmainutils libboost-dev libdb5.3++-dev libminiupnpc-dev libzmq3-dev libqrencode-dev" export DEP_OPTS="NO_UPNP=1 DEBUG=1" export GOAL="install" -export BITCOIN_CONFIG="--enable-zmq --enable-reduce-exports --enable-crash-hooks --with-sanitizers=undefined CC=clang-18 CXX=clang++-18" +export BITCOIN_CONFIG="--enable-zmq --enable-reduce-exports 
--enable-crash-hooks --with-sanitizers=undefined CC=clang-19 CXX=clang++-19" export PYZMQ=true diff --git a/ci/test/00_setup_env_native_valgrind.sh b/ci/test/00_setup_env_native_valgrind.sh index d7bf8b2f3550..478a347d8c9d 100755 --- a/ci/test/00_setup_env_native_valgrind.sh +++ b/ci/test/00_setup_env_native_valgrind.sh @@ -12,4 +12,4 @@ export NO_DEPENDS=1 export TEST_RUNNER_EXTRA="--exclude rpc_bind,feature_bind_extra --timeout-factor=4" # Excluded for now, see https://github.com/bitcoin/bitcoin/issues/17765#issuecomment-602068547 export GOAL="install" # Temporarily pin dwarf 4, until valgrind can understand clang's dwarf 5 -export BITCOIN_CONFIG="--enable-zmq --with-incompatible-bdb --with-gui=no CC=clang-18 CXX=clang++-18 CFLAGS='-gdwarf-4' CXXFLAGS='-gdwarf-4'" # TODO enable GUI +export BITCOIN_CONFIG="--enable-zmq --with-incompatible-bdb --with-gui=no CC=clang-19 CXX=clang++-19 CFLAGS='-gdwarf-4' CXXFLAGS='-gdwarf-4'" # TODO enable GUI diff --git a/contrib/containers/ci/ci-slim.Dockerfile b/contrib/containers/ci/ci-slim.Dockerfile index ae59495f3981..fce0e5bc161b 100644 --- a/contrib/containers/ci/ci-slim.Dockerfile +++ b/contrib/containers/ci/ci-slim.Dockerfile @@ -110,7 +110,7 @@ RUN set -ex; \ ENV PATH="/opt/shellcheck:${PATH}" # Packages needed to be able to run sanitizer builds -ARG LLVM_VERSION=18 +ARG LLVM_VERSION=19 RUN set -ex; \ . /etc/os-release; \ curl -fsSL https://apt.llvm.org/llvm-snapshot.gpg.key > /etc/apt/trusted.gpg.d/apt.llvm.org.asc; \ From 0029e6f5402c29871824aa5ea97c225c8bb3554e Mon Sep 17 00:00:00 2001 From: Andrew Chow Date: Fri, 1 Sep 2023 13:17:42 -0400 Subject: [PATCH 562/656] Merge bitcoin/bitcoin#28350: Log explicit error message when coindb is found in inconsistent state df60de770d8e27c00eced29d8a4a4bce7c6e4b30 log: Print error message when coindb is in inconsistent state (Fabian Jahr) Pull request description: While doing manual testing on assumeutxo this week I managed to put the coindb into an inconsistent state twice. 
For a normal user, this can also happen if their computer crashes during a flush or if they try to stop their node during a flush and then get tired of waiting and just shut their computer down or kill the process. It's an edge case but I wouldn't be surprised if this does happen more often when assumeutxo gets used more widely because there might be multiple flushes happening during loading of the UTXO set in the beginning and users may think something is going wrong because of the unexpected wait or they forgot some configs and want to start over quickly. The problem is, when this happens at first the node starts up normally until it's time to flush again and then it hits an assert that the user can not understand. ``` 2023-08-25T16:31:09Z [httpworker.0] [snapshot] 52000000 coins loaded (43.30%, 6768 MB) 2023-08-25T16:31:16Z [httpworker.0] Cache size (7272532192) exceeds total space (7256510300) 2023-08-25T16:31:16Z [httpworker.0] FlushSnapshotToDisk: flushing coins cache (7272 MB) started Assertion failed: (old_heads[0] == hashBlock), function BatchWrite, file txdb.cpp, line 126. Abort trap: 6 ``` We should at least log an error message that gives users a hint of what the problem is and what they can do to resolve it. I am keeping this separate from the assumeutxo project since this issue can also happen during any regular flush. 
ACKs for top commit: jonatack: ACK df60de770d8e27c00eced29d8a4a4bce7c6e4b30 achow101: ACK df60de770d8e27c00eced29d8a4a4bce7c6e4b30 ryanofsky: Code review ACK df60de770d8e27c00eced29d8a4a4bce7c6e4b30 jamesob: Code review ACK df60de770d8e27c00eced29d8a4a4bce7c6e4b30 Tree-SHA512: b546aa0b0323ece2962867a29c38e014ac83ae8f1ded090da2894b4ff2450c05229629c7e8892f7b550cf7def4038a0b4119812e548e11b00c60b1dc3d4276d2 --- src/txdb.cpp | 3 +++ 1 file changed, 3 insertions(+) diff --git a/src/txdb.cpp b/src/txdb.cpp index 6237b26a707a..73827a9a3e5f 100644 --- a/src/txdb.cpp +++ b/src/txdb.cpp @@ -114,6 +114,9 @@ bool CCoinsViewDB::BatchWrite(CCoinsMap &mapCoins, const uint256 &hashBlock, boo // We may be in the middle of replaying. std::vector old_heads = GetHeadBlocks(); if (old_heads.size() == 2) { + if (old_heads[0] != hashBlock) { + LogPrintLevel(BCLog::COINDB, BCLog::Level::Error, "The coins database detected an inconsistent state, likely due to a previous crash or shutdown. You will need to restart bitcoind with the -reindex-chainstate or -reindex configuration option.\n"); + } assert(old_heads[0] == hashBlock); old_tip = old_heads[1]; } From 404230ca7634baa3e06cef050e257c9a043ba44c Mon Sep 17 00:00:00 2001 From: fanquake Date: Tue, 5 Sep 2023 11:45:27 +0300 Subject: [PATCH 563/656] Merge bitcoin/bitcoin#28396: test: p2p: check that `getaddr` msgs are only responded once per connection 668aa6af8d5fbf047d43cf6f85f3335565637fb9 test: p2p: check that `getaddr` msgs are only responded once per connection (Sebastian Falbesoner) Pull request description: This simple PR adds missing test coverage for ignoring repeated `getaddr` requests (introduced in #7856, commit 66b07247a7a9e48e082502338176cc06edf61474): https://github.com/bitcoin/bitcoin/blob/6f03c45f6bb5a6edaa3051968b6a1ca4f84d2ccb/src/net_processing.cpp#L4642-L4648 ACKs for top commit: MarcoFalke: lgtm ACK 668aa6af8d5fbf047d43cf6f85f3335565637fb9 brunoerg: crACK 668aa6af8d5fbf047d43cf6f85f3335565637fb9 Tree-SHA512: 
edcdc6501c684fb41911e393f55ded9b044cd2f92918877eca152edd5a4287d1a9d57ae999f1cb42185eae00c3a0af411fcb9bcd5b990ef48849c3834b141584 --- test/functional/p2p_addr_relay.py | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/test/functional/p2p_addr_relay.py b/test/functional/p2p_addr_relay.py index da8d8e1895c2..978bf5e655ec 100755 --- a/test/functional/p2p_addr_relay.py +++ b/test/functional/p2p_addr_relay.py @@ -296,6 +296,15 @@ def getaddr_tests(self): assert_equal(block_relay_peer.num_ipv4_received, 0) assert inbound_peer.num_ipv4_received > 100 + self.log.info('Check that we answer getaddr messages only once per connection') + received_addrs_before = inbound_peer.num_ipv4_received + with self.nodes[0].assert_debug_log(['Ignoring repeated "getaddr".']): + inbound_peer.send_and_ping(msg_getaddr()) + self.bump_mocktime(10 * 60) + inbound_peer.sync_with_ping() + received_addrs_after = inbound_peer.num_ipv4_received + assert_equal(received_addrs_before, received_addrs_after) + self.nodes[0].disconnect_p2ps() def blocksonly_mode_tests(self): From 40f088814f221685d57f8adca642e19bc3463eb4 Mon Sep 17 00:00:00 2001 From: Andrew Chow Date: Tue, 17 Oct 2023 15:21:49 -0400 Subject: [PATCH 564/656] Merge bitcoin/bitcoin#28544: wallet: Add TxStateString function for debugging and logging 8a553c94098c96cb3679468c2b460be145a0eabf wallet: Add TxStateString function for debugging and logging (Ryan Ofsky) Pull request description: I found this useful while debugging silent conflict between #10102 and #27469 recently ACKs for top commit: ishaanam: utACK 8a553c94098c96cb3679468c2b460be145a0eabf achow101: ACK 8a553c94098c96cb3679468c2b460be145a0eabf furszy: Code ACK 8a553c9 Tree-SHA512: 87965c66bcb59a21e7639878bb567e583a0e624735721ff7ad1104eed6bb9fba60607d0e3de7be3304232b3a55f48bab7039ea9c26b0e81963e59f9acd94f666 --- src/wallet/transaction.h | 11 +++++++++++ src/wallet/wallet.cpp | 2 +- 2 files changed, 12 insertions(+), 1 deletion(-) diff --git a/src/wallet/transaction.h 
b/src/wallet/transaction.h index 0e7830b59e31..bac89c2f633f 100644 --- a/src/wallet/transaction.h +++ b/src/wallet/transaction.h @@ -28,10 +28,12 @@ struct TxStateConfirmed { int position_in_block; explicit TxStateConfirmed(const uint256& block_hash, int height, int index) : confirmed_block_hash(block_hash), confirmed_block_height(height), position_in_block(index) {} + std::string toString() const { return strprintf("Confirmed (block=%s, height=%i, index=%i)", confirmed_block_hash.ToString(), confirmed_block_height, position_in_block); } }; //! State of transaction added to mempool. struct TxStateInMempool { + std::string toString() const { return strprintf("InMempool"); } }; //! State of rejected transaction that conflicts with a confirmed block. @@ -40,6 +42,7 @@ struct TxStateConflicted { int conflicting_block_height; explicit TxStateConflicted(const uint256& block_hash, int height) : conflicting_block_hash(block_hash), conflicting_block_height(height) {} + std::string toString() const { return strprintf("Conflicted (block=%s, height=%i)", conflicting_block_hash.ToString(), conflicting_block_height); } }; //! State of transaction not confirmed or conflicting with a known block and @@ -50,6 +53,7 @@ struct TxStateInactive { bool abandoned; explicit TxStateInactive(bool abandoned = false) : abandoned(abandoned) {} + std::string toString() const { return strprintf("Inactive (abandoned=%i)", abandoned); } }; //! State of transaction loaded in an unrecognized state with unexpected hash or @@ -61,6 +65,7 @@ struct TxStateUnrecognized { int index; TxStateUnrecognized(const uint256& block_hash, int index) : block_hash(block_hash), index(index) {} + std::string toString() const { return strprintf("Unrecognized (block=%s, index=%i)", block_hash.ToString(), index); } }; //! All possible CWalletTx states @@ -108,6 +113,12 @@ static inline int TxStateSerializedIndex(const TxState& state) }, state); } +//! Return TxState or SyncTxState as a string for logging or debugging. 
+template +std::string TxStateString(const T& state) +{ + return std::visit([](const auto& s) { return s.toString(); }, state); +} typedef std::map mapValue_t; diff --git a/src/wallet/wallet.cpp b/src/wallet/wallet.cpp index 1caf30747a3b..24c4a2e5ff1d 100644 --- a/src/wallet/wallet.cpp +++ b/src/wallet/wallet.cpp @@ -965,7 +965,7 @@ CWalletTx* CWallet::AddToWallet(CTransactionRef tx, const TxState& state, const LockProTxCoins(candidates, &batch); //// debug print - WalletLogPrintf("AddToWallet %s %s%s\n", hash.ToString(), (fInsertedNew ? "new" : ""), (fUpdated ? "update" : "")); + WalletLogPrintf("AddToWallet %s %s%s %s\n", hash.ToString(), (fInsertedNew ? "new" : ""), (fUpdated ? "update" : ""), TxStateString(state)); // Write to disk if (fInsertedNew || fUpdated) From 59bb09989f6398debddc10e57730cd0d0fc2a2d7 Mon Sep 17 00:00:00 2001 From: Andrew Chow Date: Thu, 2 Nov 2023 15:19:26 -0400 Subject: [PATCH 565/656] Merge bitcoin/bitcoin#27852: test: add coverage to rpc_blockchain.py 376dc2cfb32806a8aa450589effe4d384e648398 test: add coverage to rpc_blockchain.py (kevkevin) Pull request description: Included a test that checks the functionality of setting the first param of getnetworkhashps to negative value returns the average network hashes per second from the last difficulty change. 
ACKs for top commit: jlopp: tACK https://github.com/bitcoin/bitcoin/commit/376dc2cfb32806a8aa450589effe4d384e648398 achow101: ACK 376dc2cfb32806a8aa450589effe4d384e648398 ismaelsadeeq: Tested ACK 376dc2cfb32806a8aa450589effe4d384e648398 pablomartin4btc: tACK 376dc2cfb32806a8aa450589effe4d384e648398 Tree-SHA512: 02d52f622e9cb7a1240c5d124510dd75d03f696f119b2625b0befd60b004ec50ff1a2d5515e0e227601adeecd837e0778ed131ee2a8c5f75f1b824be711213a7 --- test/functional/rpc_blockchain.py | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/test/functional/rpc_blockchain.py b/test/functional/rpc_blockchain.py index ba74f84980f5..0f1381d47385 100755 --- a/test/functional/rpc_blockchain.py +++ b/test/functional/rpc_blockchain.py @@ -59,6 +59,7 @@ TIME_RANGE_MTP = TIME_GENESIS_BLOCK + (HEIGHT - 6) * TIME_RANGE_STEP TIME_RANGE_TIP = TIME_GENESIS_BLOCK + (HEIGHT - 1) * TIME_RANGE_STEP TIME_RANGE_END = TIME_GENESIS_BLOCK + HEIGHT * TIME_RANGE_STEP +DIFFICULTY_ADJUSTMENT_INTERVAL = 2016 class BlockchainTest(BitcoinTestFramework): @@ -431,6 +432,15 @@ def _test_getnetworkhashps(self): # This should be 2 hashes every 2.6 minutes (156 seconds) or 1/78 assert abs(hashes_per_second * 78 - 1) < 0.0001 + # Test setting the first param of getnetworkhashps to negative value returns the average network + # hashes per second from the last difficulty change. 
+ current_block_height = self.nodes[0].getmininginfo()['blocks'] + blocks_since_last_diff_change = current_block_height % DIFFICULTY_ADJUSTMENT_INTERVAL + 1 + expected_hashes_per_second_since_diff_change = self.nodes[0].getnetworkhashps(blocks_since_last_diff_change) + + assert_equal(self.nodes[0].getnetworkhashps(-1), expected_hashes_per_second_since_diff_change) + assert_equal(self.nodes[0].getnetworkhashps(-2), expected_hashes_per_second_since_diff_change) + def _test_stopatheight(self): assert_equal(self.nodes[0].getblockcount(), HEIGHT) self.generate(self.wallet, 6) From 7a3df4ed14f347c8ad855391c13f23ffbd013d3e Mon Sep 17 00:00:00 2001 From: fanquake Date: Mon, 18 Dec 2023 13:46:39 +0000 Subject: [PATCH 566/656] Merge bitcoin/bitcoin#29064: fuzz: Improve fuzzing stability for minisketch harness b2fc7a2eda103724ac8cbeaf99df3ce6f5b7d974 [fuzz] Improve fuzzing stability for minisketch harness (dergoegge) Pull request description: The `minisketch` harness has low stability due to: * Rng internal to minisketch * Benchmarkning for the best minisketch impl Fix this by seeding the rng and letting the fuzzer choose the impl. Also see #29018. 
ACKs for top commit: maflcko: review ACK b2fc7a2eda103724ac8cbeaf99df3ce6f5b7d974 Tree-SHA512: 3d81414299c6803c34e928a53bcf843722fa8c38e1d3676cde7fa80923f9058b1ad4b9a2941f718303a6641b17eeb28b4a22eda09678102e9fb7c4e31d06f8f2 --- src/test/fuzz/minisketch.cpp | 26 +++++++++++++++++++++----- 1 file changed, 21 insertions(+), 5 deletions(-) diff --git a/src/test/fuzz/minisketch.cpp b/src/test/fuzz/minisketch.cpp index a17be73f6c80..698cb15fc94b 100644 --- a/src/test/fuzz/minisketch.cpp +++ b/src/test/fuzz/minisketch.cpp @@ -12,14 +12,27 @@ #include #include -using node::MakeMinisketch32; +namespace { + +Minisketch MakeFuzzMinisketch32(size_t capacity, uint32_t impl) +{ + return Assert(Minisketch(32, impl, capacity)); +} + +} // namespace FUZZ_TARGET(minisketch) { FuzzedDataProvider fuzzed_data_provider{buffer.data(), buffer.size()}; + const auto capacity{fuzzed_data_provider.ConsumeIntegralInRange(1, 200)}; - Minisketch sketch_a{Assert(MakeMinisketch32(capacity))}; - Minisketch sketch_b{Assert(MakeMinisketch32(capacity))}; + const uint32_t impl{fuzzed_data_provider.ConsumeIntegralInRange(0, Minisketch::MaxImplementation())}; + if (!Minisketch::ImplementationSupported(32, impl)) return; + + Minisketch sketch_a{MakeFuzzMinisketch32(capacity, impl)}; + Minisketch sketch_b{MakeFuzzMinisketch32(capacity, impl)}; + sketch_a.SetSeed(fuzzed_data_provider.ConsumeIntegral()); + sketch_b.SetSeed(fuzzed_data_provider.ConsumeIntegral()); // Fill two sets and keep the difference in a map std::map diff; @@ -47,8 +60,11 @@ FUZZ_TARGET(minisketch) } const auto num_diff{std::accumulate(diff.begin(), diff.end(), size_t{0}, [](auto n, const auto& e) { return n + e.second; })}; - Minisketch sketch_ar{MakeMinisketch32(capacity)}; - Minisketch sketch_br{MakeMinisketch32(capacity)}; + Minisketch sketch_ar{MakeFuzzMinisketch32(capacity, impl)}; + Minisketch sketch_br{MakeFuzzMinisketch32(capacity, impl)}; + sketch_ar.SetSeed(fuzzed_data_provider.ConsumeIntegral()); + 
sketch_br.SetSeed(fuzzed_data_provider.ConsumeIntegral()); + sketch_ar.Deserialize(sketch_a.Serialize()); sketch_br.Deserialize(sketch_b.Serialize()); From 0fac60f23cd9da80e886df61347d548cd2131521 Mon Sep 17 00:00:00 2001 From: fanquake Date: Tue, 16 Jan 2024 10:51:45 +0000 Subject: [PATCH 567/656] Merge bitcoin/bitcoin#29230: doc: update -loglevel help to add `info` to the always logged levels ec779a2b8e4fcc00596ee8833be35ae9b326552c doc: add unconditional info loglevel following merge of PR 28318 (Jon Atack) Pull request description: Commit ab34dc6012351e7b8aab871dd9d2b38ade1cd9b of #28318 was an incomplete version of [`118c756` (#25203)](https://github.com/bitcoin/bitcoin/pull/25203/commits/118c7567f62df2b882877590f232242d7c627a05) from the `Severity-based logging` parent PR. Add the missing text to update the `-loglevel` help doc. While here, make the help text a little easier to understand. Can be tested by running: ``` ./src/bitcoind -regtest -help-debug | grep -A12 loglevel= ``` before ``` -loglevel=|: Set the global or per-category severity level for logging categories enabled with the -debug configuration option or the logging RPC: info, debug, trace (default=debug); warning and error levels are always logged. ``` after ``` -loglevel=|: Set the global or per-category severity level for logging categories enabled with the -debug configuration option or the logging RPC. Possible values are info, debug, trace (default=debug). The following levels are always logged: error, warning, info. 
``` ACKs for top commit: stickies-v: ACK ec779a2b8e4fcc00596ee8833be35ae9b326552c Tree-SHA512: 0c375e30a5a4c168ca7d97720e8c287f598216767afedae329824e09a480830faf8537b792c5c4bb647c68681c287fe3005c62093708ce85624e9a71c8245e42 --- src/init/common.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/init/common.cpp b/src/init/common.cpp index 0a1bff80d48d..d04764d0c73e 100644 --- a/src/init/common.cpp +++ b/src/init/common.cpp @@ -65,7 +65,7 @@ void AddLoggingArgs(ArgsManager& argsman) ArgsManager::ALLOW_ANY, OptionsCategory::DEBUG_TEST); argsman.AddArg("-debugexclude=", "Exclude debug and trace logging for a category. Can be used in conjunction with -debug=1 to output debug and trace logging for all categories except the specified category. This option can be specified multiple times to exclude multiple categories.", ArgsManager::ALLOW_ANY, OptionsCategory::DEBUG_TEST); argsman.AddArg("-logips", strprintf("Include IP addresses in debug output (default: %u)", DEFAULT_LOGIPS), ArgsManager::ALLOW_ANY, OptionsCategory::DEBUG_TEST); - argsman.AddArg("-loglevel=|:", strprintf("Set the global or per-category severity level for logging categories enabled with the -debug configuration option or the logging RPC: %s (default=%s); warning and error levels are always logged. If : is supplied, the setting will override the global one and may be specified multiple times to set multiple category-specific levels. can be: %s.", LogInstance().LogLevelsString(), LogInstance().LogLevelToStr(BCLog::DEFAULT_LOG_LEVEL), LogInstance().LogCategoriesString()), ArgsManager::DISALLOW_NEGATION | ArgsManager::DISALLOW_ELISION | ArgsManager::DEBUG_ONLY, OptionsCategory::DEBUG_TEST); + argsman.AddArg("-loglevel=|:", strprintf("Set the global or per-category severity level for logging categories enabled with the -debug configuration option or the logging RPC. Possible values are %s (default=%s). The following levels are always logged: error, warning, info. 
If : is supplied, the setting will override the global one and may be specified multiple times to set multiple category-specific levels. can be: %s.", LogInstance().LogLevelsString(), LogInstance().LogLevelToStr(BCLog::DEFAULT_LOG_LEVEL), LogInstance().LogCategoriesString()), ArgsManager::DISALLOW_NEGATION | ArgsManager::DISALLOW_ELISION | ArgsManager::DEBUG_ONLY, OptionsCategory::DEBUG_TEST); argsman.AddArg("-logtimestamps", strprintf("Prepend debug output with timestamp (default: %u)", DEFAULT_LOGTIMESTAMPS), ArgsManager::ALLOW_ANY, OptionsCategory::DEBUG_TEST); argsman.AddArg("-logthreadnames", strprintf("Prepend debug output with name of the originating thread (only available on platforms supporting thread_local) (default: %u)", DEFAULT_LOGTHREADNAMES), ArgsManager::ALLOW_ANY | ArgsManager::DEBUG_ONLY, OptionsCategory::DEBUG_TEST); argsman.AddArg("-logsourcelocations", strprintf("Prepend debug output with name of the originating source location (source file, line number and function name) (default: %u)", DEFAULT_LOGSOURCELOCATIONS), ArgsManager::ALLOW_ANY, OptionsCategory::DEBUG_TEST); From 39baa2388aec6aebef36207f39d241f129a6532b Mon Sep 17 00:00:00 2001 From: fanquake Date: Mon, 26 Feb 2024 12:26:03 +0000 Subject: [PATCH 568/656] Merge bitcoin/bitcoin#29467: test: Fix intermittent issue in interface_rest.py faeed91c0be6e5dda4790522d0dc999afd869d11 test: Fix intermittent issue in interface_rest.py (MarcoFalke) Pull request description: Fixes: ``` test 2024-02-22T16:15:37.465000Z TestFramework (ERROR): Assertion failed Traceback (most recent call last): File "/ci_container_base/ci/scratch/build/bitcoin-x86_64-pc-linux-gnu/test/functional/test_framework/test_framework.py", line 131, in main self.run_test() File "/ci_container_base/ci/scratch/build/bitcoin-x86_64-pc-linux-gnu/test/functional/interface_rest.py", line 340, in run_test assert_equal(json_obj, mempool_info) File 
"/ci_container_base/ci/scratch/build/bitcoin-x86_64-pc-linux-gnu/test/functional/test_framework/util.py", line 57, in assert_equal raise AssertionError("not(%s)" % " == ".join(str(arg) for arg in (thing1, thing2) + args)) AssertionError: not({'loaded': True, 'size': 3, 'bytes': 312, 'usage': 3600, 'total_fee': Decimal('0.00093600'), 'maxmempool': 300000000, 'mempoolminfee': Decimal('0.00001000'), 'minrelaytxfee': Decimal('0.00001000'), 'incrementalrelayfee': Decimal('0.00001000'), 'unbroadcastcount': 1, 'fullrbf': False} == {'loaded': True, 'size': 3, 'bytes': 312, 'usage': 3600, 'total_fee': Decimal('0.00093600'), 'maxmempool': 300000000, 'mempoolminfee': Decimal('0.00001000'), 'minrelaytxfee': Decimal('0.00001000'), 'incrementalrelayfee': Decimal('0.00001000'), 'unbroadcastcount': 0, 'fullrbf': False}) ``` https://cirrus-ci.com/task/4852944378527744?logs=ci#L4436 ACKs for top commit: m3dwards: ACK https://github.com/bitcoin/bitcoin/pull/29467/commits/faeed91c0be6e5dda4790522d0dc999afd869d11 mzumsande: ACK faeed91c0be6e5dda4790522d0dc999afd869d11 Tree-SHA512: 513422229db45d2586c554b9a466e86848bfcf5280b0f000718cbfc44d93dd1af69e19a56f6ac578f5d7aada74ab0c90d4a9e09a324062b6f9ed239e5e34f540 --- test/functional/interface_rest.py | 3 +++ 1 file changed, 3 insertions(+) diff --git a/test/functional/interface_rest.py b/test/functional/interface_rest.py index 5f63718c4bba..9ec1fe9bbece 100755 --- a/test/functional/interface_rest.py +++ b/test/functional/interface_rest.py @@ -336,6 +336,9 @@ def run_test(self): assert_greater_than(json_obj['bytes'], 240) mempool_info = self.nodes[0].getmempoolinfo() + # pop unstable unbroadcastcount before check + for obj in [json_obj, mempool_info]: + obj.pop("unbroadcastcount") assert_equal(json_obj, mempool_info) # Check that there are our submitted transactions in the TX memory pool From b84939e03545b2613f0d17d2960c83b1892b2896 Mon Sep 17 00:00:00 2001 From: fanquake Date: Fri, 1 Mar 2024 10:07:35 -0500 Subject: [PATCH 569/656] Merge 
bitcoin/bitcoin#29495: fuzz: add target for local address stuff 25eab523897e790f4f4d7b49cdbf19d13e3b0fcc fuzz: add target for local addresses (brunoerg) Pull request description: This PR adds fuzz target for local address functions - (`AddLocal`, `RemoveLocal`, `SeenLocal`, `IsLocal`) ACKs for top commit: dergoegge: ACK 25eab523897e790f4f4d7b49cdbf19d13e3b0fcc vasild: ACK 25eab523897e790f4f4d7b49cdbf19d13e3b0fcc Tree-SHA512: 24faaab86dcd8835ba0e2d81fb6322a39a9266c7edf66415dbc4421754054f47efb6e0de4efdc7ea026b0686792658e86a526f7cf27cbc6cf9ed0c4aed376f97 --- src/test/fuzz/net.cpp | 37 +++++++++++++++++++++++++++++++++++++ 1 file changed, 37 insertions(+) diff --git a/src/test/fuzz/net.cpp b/src/test/fuzz/net.cpp index 1de4877f94ae..0089d0f2e8a2 100644 --- a/src/test/fuzz/net.cpp +++ b/src/test/fuzz/net.cpp @@ -94,3 +94,40 @@ FUZZ_TARGET(net, .init = initialize_net) (void)node.HasPermission(net_permission_flags); (void)node.ConnectedThroughNetwork(); } + +FUZZ_TARGET(local_address, .init = initialize_net) +{ + FuzzedDataProvider fuzzed_data_provider(buffer.data(), buffer.size()); + CService service{ConsumeService(fuzzed_data_provider)}; + CNode node{ConsumeNode(fuzzed_data_provider)}; + { + LOCK(g_maplocalhost_mutex); + mapLocalHost.clear(); + } + LIMITED_WHILE(fuzzed_data_provider.ConsumeBool(), 10000) { + CallOneOf( + fuzzed_data_provider, + [&] { + service = ConsumeService(fuzzed_data_provider); + }, + [&] { + const bool added{AddLocal(service, fuzzed_data_provider.ConsumeIntegralInRange(0, LOCAL_MAX - 1))}; + if (!added) return; + assert(service.IsRoutable()); + assert(IsLocal(service)); + assert(SeenLocal(service)); + }, + [&] { + (void)RemoveLocal(service); + }, + [&] { + (void)SeenLocal(service); + }, + [&] { + (void)IsLocal(service); + }, + [&] { + (void)GetLocalAddress(node); + }); + } +} From eacb6b0327942345925ab966c4a90a13cf14c33c Mon Sep 17 00:00:00 2001 From: pasta Date: Fri, 21 Nov 2025 12:14:40 -0600 Subject: [PATCH 570/656] refactor: optimize node 
disconnection logic to avoid unnecessary locks --- src/net.cpp | 25 +++++++++++++++++-------- 1 file changed, 17 insertions(+), 8 deletions(-) diff --git a/src/net.cpp b/src/net.cpp index 19f86ab39052..1a2032bfb784 100644 --- a/src/net.cpp +++ b/src/net.cpp @@ -2039,18 +2039,27 @@ void CConnman::DisconnectNodes() // m_reconnections_mutex while holding m_nodes_mutex. decltype(m_reconnections) reconnections_to_add; + bool has_to_disconnect = false; { - LOCK(m_nodes_mutex); + READ_LOCK(m_nodes_mutex); - if (!fNetworkActive) { - // Disconnect any connected nodes - for (CNode* pnode : m_nodes) { - if (!pnode->fDisconnect) { - LogPrint(BCLog::NET_NETCONN, "Network not active, dropping peer=%d\n", pnode->GetId()); - pnode->fDisconnect = true; - } + for (CNode* pnode : m_nodes) { + // Disconnect any connected nodes, if network is not active + if (!fNetworkActive && !pnode->fDisconnect) { + LogPrint(BCLog::NET_NETCONN, "Network not active, dropping peer=%d\n", pnode->GetId()); + pnode->fDisconnect = true; + } + if (!has_to_disconnect && pnode->fDisconnect) { + has_to_disconnect = true; } } + } + + // Avoid taking locks if there's nothing to do + if (!has_to_disconnect) return; + + { + LOCK(m_nodes_mutex); // Disconnect unused nodes for (auto it = m_nodes.begin(); it != m_nodes.end(); ) From f83a1355a8c87c5e9d70fc956a09b5fef4b95994 Mon Sep 17 00:00:00 2001 From: pasta Date: Fri, 21 Nov 2025 12:47:48 -0600 Subject: [PATCH 571/656] refactor: segregate CreateSigShare for single member handling --- src/llmq/signing_shares.cpp | 62 +++++++++++++++++++++---------------- src/llmq/signing_shares.h | 2 ++ 2 files changed, 37 insertions(+), 27 deletions(-) diff --git a/src/llmq/signing_shares.cpp b/src/llmq/signing_shares.cpp index 43bc21af4af9..b6098e7b6f99 100644 --- a/src/llmq/signing_shares.cpp +++ b/src/llmq/signing_shares.cpp @@ -1645,46 +1645,54 @@ void CSigSharesManager::SignPendingSigShares() } } -std::optional CSigSharesManager::CreateSigShare(const CQuorum& quorum, const 
uint256& id, const uint256& msgHash) const +std::optional CSigSharesManager::CreateSigShareForSingleMember(const CQuorum& quorum, const uint256& id, const uint256& msgHash) const { cxxtimer::Timer t(true); auto activeMasterNodeProTxHash = m_mn_activeman.GetProTxHash(); - if (!quorum.IsValidMember(activeMasterNodeProTxHash)) { + int memberIdx = quorum.GetMemberIndex(activeMasterNodeProTxHash); + if (memberIdx == -1) { + // this should really not happen (IsValidMember gave true) return std::nullopt; } - if (quorum.params.is_single_member()) { - int memberIdx = quorum.GetMemberIndex(activeMasterNodeProTxHash); - if (memberIdx == -1) { - // this should really not happen (IsValidMember gave true) - return std::nullopt; - } + CSigShare sigShare(quorum.params.type, quorum.qc->quorumHash, id, msgHash, uint16_t(memberIdx), {}); + uint256 signHash = sigShare.buildSignHash().Get(); - CSigShare sigShare(quorum.params.type, quorum.qc->quorumHash, id, msgHash, uint16_t(memberIdx), {}); - uint256 signHash = sigShare.buildSignHash().Get(); + // TODO: This one should be SIGN by QUORUM key, not by OPERATOR key + // see TODO in CDKGSession::FinalizeSingleCommitment for details + auto bls_scheme = bls::bls_legacy_scheme.load(); + sigShare.sigShare.Set(m_mn_activeman.Sign(signHash, bls_scheme), bls_scheme); - // TODO: This one should be SIGN by QUORUM key, not by OPERATOR key - // see TODO in CDKGSession::FinalizeSingleCommitment for details - auto bls_scheme = bls::bls_legacy_scheme.load(); - sigShare.sigShare.Set(m_mn_activeman.Sign(signHash, bls_scheme), bls_scheme); + if (!sigShare.sigShare.Get().IsValid()) { + LogPrintf("CSigSharesManager::%s -- failed to sign sigShare. signHash=%s, id=%s, msgHash=%s, time=%s\n", + __func__, signHash.ToString(), sigShare.getId().ToString(), sigShare.getMsgHash().ToString(), + t.count()); + return std::nullopt; + } - if (!sigShare.sigShare.Get().IsValid()) { - LogPrintf("CSigSharesManager::%s -- failed to sign sigShare. 
signHash=%s, id=%s, msgHash=%s, time=%s\n", - __func__, signHash.ToString(), sigShare.getId().ToString(), sigShare.getMsgHash().ToString(), - t.count()); - return std::nullopt; - } + sigShare.UpdateKey(); - sigShare.UpdateKey(); + LogPrint(BCLog::LLMQ_SIGS, /* Continued */ + "CSigSharesManager::%s -- created sigShare. signHash=%s, id=%s, msgHash=%s, llmqType=%d, quorum=%s, " + "time=%s\n", + __func__, signHash.ToString(), sigShare.getId().ToString(), sigShare.getMsgHash().ToString(), + ToUnderlying(quorum.params.type), quorum.qc->quorumHash.ToString(), t.count()); - LogPrint(BCLog::LLMQ_SIGS, /* Continued */ - "CSigSharesManager::%s -- created sigShare. signHash=%s, id=%s, msgHash=%s, llmqType=%d, quorum=%s, " - "time=%s\n", - __func__, signHash.ToString(), sigShare.getId().ToString(), sigShare.getMsgHash().ToString(), - ToUnderlying(quorum.params.type), quorum.qc->quorumHash.ToString(), t.count()); + return sigShare; +} - return sigShare; +std::optional CSigSharesManager::CreateSigShare(const CQuorum& quorum, const uint256& id, const uint256& msgHash) const +{ + cxxtimer::Timer t(true); + auto activeMasterNodeProTxHash = m_mn_activeman.GetProTxHash(); + + if (!quorum.IsValidMember(activeMasterNodeProTxHash)) { + return std::nullopt; + } + + if (quorum.params.is_single_member()) { + return CreateSigShareForSingleMember(quorum, id, msgHash); } const CBLSSecretKey& skShare = quorum.GetSkShare(); if (!skShare.IsValid()) { diff --git a/src/llmq/signing_shares.h b/src/llmq/signing_shares.h index d729fe31f442..24865fec98a8 100644 --- a/src/llmq/signing_shares.h +++ b/src/llmq/signing_shares.h @@ -452,6 +452,8 @@ class CSigSharesManager : public CRecoveredSigsListener void NotifyRecoveredSig(const std::shared_ptr& sig) const EXCLUSIVE_LOCKS_REQUIRED(!cs); private: + std::optional CreateSigShareForSingleMember(const CQuorum& quorum, const uint256& id, const uint256& msgHash) const; + // all of these return false when the currently processed message should be aborted (as 
each message actually contains multiple messages) bool ProcessMessageSigSesAnn(const CNode& pfrom, const CSigSesAnn& ann) EXCLUSIVE_LOCKS_REQUIRED(!cs); bool ProcessMessageSigSharesInv(const CNode& pfrom, const CSigSharesInv& inv) EXCLUSIVE_LOCKS_REQUIRED(!cs); From b78bf3eed9a75cd6b183d454b746cd104628b8c0 Mon Sep 17 00:00:00 2001 From: MarcoFalke Date: Wed, 23 Feb 2022 09:05:57 +0100 Subject: [PATCH 572/656] Merge bitcoin/bitcoin#24419: lint: remove no-longer used exceptions from lint-format-strings.py 5b8f2484baad451b5c24725bf3387c79213b0695 lint: remove no-longer used exceptions from lint-format-strings.py (fanquake) Pull request description: ACKs for top commit: laanwj: ACK 5b8f2484baad451b5c24725bf3387c79213b0695 if it passes CI hebasto: ACK 5b8f2484baad451b5c24725bf3387c79213b0695, I've verified that all of the remained false positive cases are valid. Tree-SHA512: 25c40714d271c57fb09c963a3372b62c7b4f2e9367517cdf5c73ea82527a9c4c477f8b7857e37adc7eb9feea1f0a37435059798ddf2195dee3522bed3a6eea44 --- test/lint/run-lint-format-strings.py | 2 -- 1 file changed, 2 deletions(-) diff --git a/test/lint/run-lint-format-strings.py b/test/lint/run-lint-format-strings.py index 985a1c5b6fd9..4e06a93f44d3 100755 --- a/test/lint/run-lint-format-strings.py +++ b/test/lint/run-lint-format-strings.py @@ -28,8 +28,6 @@ ("src/wallet/wallet.h", "LogPrintf((\"%s \" + fmt).c_str(), GetDisplayName(), parameters...)"), ("src/wallet/scriptpubkeyman.h", "WalletLogPrintf(std::string fmt, Params... parameters)"), ("src/wallet/scriptpubkeyman.h", "LogPrintf((\"%s \" + fmt).c_str(), m_storage.GetDisplayName(), parameters...)"), - ("src/logging.h", "LogPrintf(const char* fmt, const Args&... args)"), - ("src/wallet/scriptpubkeyman.h", "WalletLogPrintf(const std::string& fmt, const Params&... 
parameters)"), ] def parse_function_calls(function_name, source_code): From f89f8a1c61450d001669a6babcd949790b1a27a2 Mon Sep 17 00:00:00 2001 From: Andrew Chow Date: Mon, 18 Jul 2022 14:15:06 -0400 Subject: [PATCH 573/656] Merge bitcoin/bitcoin#25351: rpc, wallet: Scan mempool after import* - Second attempt MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit 1be796418934ae7370cb0ed501877db59e738106 test, wallet: Add mempool rescan test for import RPCs (Fabian Jahr) 833ce76df712932c19e99737e87b5569e2bca34b rpc, wallet: Document mempool rescan after importdescriptor, importwallet (Fabian Jahr) 0e396d1ba701c9ac6280a98bf37f53352167e724 rpc, wallet: Document mempool scan after importmulti (Fabian Jahr) e6d3ef85867545a5a66a211e35e818e8a1b166fa rpc, wallet: Document mempool scan after importpubkey (Fabian Jahr) 6d3db52e667474b6c0c2e4eeb9fb5b3ba4063205 rpc, wallet: Document and test mempool scan after importprivkey (João Barbosa) 3abdbbb90a4a8f2041fec37506268e66a0b3eb31 rpc, wallet: Document and test mempool scan after importaddress (João Barbosa) 236239bd40ae1175537fc932df5af27902326329 wallet: Rescan mempool for transactions as well (Fabian Jahr) Pull request description: This PR picks up the work from #18964 and closes #18954. 
It should incorporate all the unaddressed feedback from the PR: - Mempool rescan now expanded to all relevant import* RPCs - Added documentation in the help of each RPC - More tests ACKs for top commit: Sjors: re-utACK 1be796418934ae7370cb0ed501877db59e738106 (only a test change) achow101: ACK 1be796418934ae7370cb0ed501877db59e738106 w0xlt: reACK https://github.com/bitcoin/bitcoin/pull/25351/commits/1be796418934ae7370cb0ed501877db59e738106 Tree-SHA512: b62fed5f97c6c242b2af417b41c9696a1f18878483d9e1c9429791f9c05257f57a00540a9a84df23c49faf6a61c3109c22972de81540083f38b506217804fcc5 --- src/wallet/rpc/backup.cpp | 20 ++++--- src/wallet/test/wallet_tests.cpp | 8 +-- src/wallet/wallet.cpp | 7 ++- test/functional/rpc_wipewallettxes.py | 4 +- test/functional/wallet_balance.py | 23 ++++++++ test/functional/wallet_import_rescan.py | 59 +++++++++++++++++---- test/functional/wallet_importdescriptors.py | 6 ++- 7 files changed, 103 insertions(+), 24 deletions(-) diff --git a/src/wallet/rpc/backup.cpp b/src/wallet/rpc/backup.cpp index 055aceff5ea7..080b0f1e3b2c 100644 --- a/src/wallet/rpc/backup.cpp +++ b/src/wallet/rpc/backup.cpp @@ -91,11 +91,13 @@ RPCHelpMan importprivkey() "Hint: use importmulti to import more than one private key.\n" "\nNote: This call can take over an hour to complete if rescan is true, during that time, other rpc calls\n" "may report that the imported key exists but related transactions are still missing, leading to temporarily incorrect/bogus balances and unspent outputs until rescan completes.\n" + "The rescan parameter can be set to false if the key was never used to create transactions. If it is set to false,\n" + "but the key was used to create transactions, rescanblockchain needs to be called with the appropriate block range.\n" "Note: This command is only compatible with legacy wallets. 
Use \"importdescriptors\" with \"combo(X)\" for descriptor wallets.\n", { {"privkey", RPCArg::Type::STR, RPCArg::Optional::NO, "The private key (see dumpprivkey)"}, {"label", RPCArg::Type::STR, RPCArg::DefaultHint{"current label if address exists, otherwise \"\""}, "An optional label"}, - {"rescan", RPCArg::Type::BOOL, RPCArg::Default{true}, "Rescan the wallet for transactions"}, + {"rescan", RPCArg::Type::BOOL, RPCArg::Default{true}, "Scan the chain and mempool for wallet transactions."}, }, RPCResult{RPCResult::Type::NONE, "", ""}, RPCExamples{ @@ -208,6 +210,8 @@ RPCHelpMan importaddress() "\nAdds an address or script (in hex) that can be watched as if it were in your wallet but cannot be used to spend. Requires a new wallet backup.\n" "\nNote: This call can take over an hour to complete if rescan is true, during that time, other rpc calls\n" "may report that the imported address exists but related transactions are still missing, leading to temporarily incorrect/bogus balances and unspent outputs until rescan completes.\n" + "The rescan parameter can be set to false if the key was never used to create transactions. 
If it is set to false,\n" + "but the key was used to create transactions, rescanblockchain needs to be called with the appropriate block range.\n" "If you have the full public key, you should call importpubkey instead of this.\n" "Hint: use importmulti to import more than one address.\n" "\nNote: If you import a non-standard raw script in hex form, outputs sending to it will be treated\n" @@ -216,7 +220,7 @@ RPCHelpMan importaddress() { {"address", RPCArg::Type::STR, RPCArg::Optional::NO, "The Dash address (or hex-encoded script)"}, {"label", RPCArg::Type::STR, RPCArg::Default{""}, "An optional label"}, - {"rescan", RPCArg::Type::BOOL, RPCArg::Default{true}, "Rescan the wallet for transactions"}, + {"rescan", RPCArg::Type::BOOL, RPCArg::Default{true}, "Scan the chain and mempool for wallet transactions."}, {"p2sh", RPCArg::Type::BOOL, RPCArg::Default{false}, "Add the P2SH version of the script as well"}, }, RPCResult{RPCResult::Type::NONE, "", ""}, @@ -404,11 +408,13 @@ RPCHelpMan importpubkey() "Hint: use importmulti to import more than one public key.\n" "\nNote: This call can take over an hour to complete if rescan is true, during that time, other rpc calls\n" "may report that the imported pubkey exists but related transactions are still missing, leading to temporarily incorrect/bogus balances and unspent outputs until rescan completes.\n" + "The rescan parameter can be set to false if the key was never used to create transactions. If it is set to false,\n" + "but the key was used to create transactions, rescanblockchain needs to be called with the appropriate block range.\n" "Note: This command is only compatible with legacy wallets. 
Use \"importdescriptors\" with \"combo(X)\" for descriptor wallets.\n", { {"pubkey", RPCArg::Type::STR, RPCArg::Optional::NO, "The hex-encoded public key"}, {"label", RPCArg::Type::STR, RPCArg::Default{""}, "An optional label"}, - {"rescan", RPCArg::Type::BOOL, RPCArg::Default{true}, "Rescan the wallet for transactions"}, + {"rescan", RPCArg::Type::BOOL, RPCArg::Default{true}, "Scan the chain and mempool for wallet transactions."}, }, RPCResult{RPCResult::Type::NONE, "", ""}, RPCExamples{ @@ -484,7 +490,7 @@ RPCHelpMan importwallet() { return RPCHelpMan{"importwallet", "\nImports keys from a wallet dump file (see dumpwallet). Requires a new wallet backup to include imported keys.\n" - "Note: Use \"getwalletinfo\" to query the scanning progress.\n" + "Note: Blockchain and Mempool will be rescanned after a successful import. Use \"getwalletinfo\" to query the scanning progress.\n" "Note: This command is only compatible with legacy wallets.\n", { {"filename", RPCArg::Type::STR, RPCArg::Optional::NO, "The wallet file"}, @@ -1475,6 +1481,8 @@ RPCHelpMan importmulti() "\nImport addresses/scripts (with private or public keys, redeem script (P2SH)), optionally rescanning the blockchain from the earliest creation time of the imported scripts. Requires a new wallet backup.\n" "If an address/script is imported without all of the private keys required to spend from that address, it will be watchonly. The 'watchonly' option must be set to true in this case or a warning will be returned.\n" "Conversely, if all the private keys are provided and the address/script is spendable, the watchonly option must be set to false, or a warning will be returned.\n" + "The rescan parameter can be set to false if the key was never used to create transactions. 
If it is set to false,\n" + "but the key was used to create transactions, rescanblockchain needs to be called with the appropriate block range.\n" "\nNote: This call can take over an hour to complete if rescan is true, during that time, other rpc calls\n" "may report that the imported keys, addresses or scripts exists but related transactions are still missing.\n" "Note: This command is only compatible with legacy wallets. Use \"importdescriptors\" for descriptor wallets.\n", @@ -1518,7 +1526,7 @@ RPCHelpMan importmulti() "\"requests\""}, {"options", RPCArg::Type::OBJ, RPCArg::Optional::OMITTED_NAMED_ARG, "", { - {"rescan", RPCArg::Type::BOOL, RPCArg::Default{true}, "Stating if should rescan the blockchain after all imports"}, + {"rescan", RPCArg::Type::BOOL, RPCArg::Default{true}, "Scan the chain and mempool for wallet transactions after all imports."}, }, "\"options\""}, }, @@ -1830,7 +1838,7 @@ RPCHelpMan importdescriptors() { " Use the string \"now\" to substitute the current synced blockchain time.\n" " \"now\" can be specified to bypass scanning, for outputs which are known to never have been used, and\n" " 0 can be specified to scan the entire blockchain. Blocks up to 2 hours before the earliest timestamp\n" - " of all descriptors being imported will be scanned.", + " of all descriptors being imported will be scanned as well as the mempool.", /*oneline_description=*/"", {"timestamp | \"now\"", "integer / string"} }, {"internal", RPCArg::Type::BOOL, RPCArg::Default{false}, "Whether matching outputs should be treated as not incoming payments (e.g. 
change)"}, diff --git a/src/wallet/test/wallet_tests.cpp b/src/wallet/test/wallet_tests.cpp index 46f5d3c20773..a6dfad77ce47 100644 --- a/src/wallet/test/wallet_tests.cpp +++ b/src/wallet/test/wallet_tests.cpp @@ -65,9 +65,6 @@ static std::shared_ptr TestLoadWallet(WalletContext& context) AddWallet(context, wallet); } NotifyWalletLoaded(context, wallet); - if (context.chain) { - wallet->postInitProcess(); - } return wallet; } @@ -784,6 +781,7 @@ BOOST_FIXTURE_TEST_CASE(CreateWallet, TestChain100Setup) // being blocked wallet = TestLoadWallet(context); BOOST_CHECK(rescan_completed); + // AddToWallet events for block_tx and mempool_tx BOOST_CHECK_EQUAL(addtx_count, 2); { LOCK(wallet->cs_wallet); @@ -796,6 +794,8 @@ BOOST_FIXTURE_TEST_CASE(CreateWallet, TestChain100Setup) // transactionAddedToMempool events are processed promise.set_value(); SyncWithValidationInterfaceQueue(); + // AddToWallet events for block_tx and mempool_tx events are counted a + // second time as the notification queue is processed BOOST_CHECK_EQUAL(addtx_count, 4); TestUnloadWallet(context, std::move(wallet)); @@ -818,7 +818,7 @@ BOOST_FIXTURE_TEST_CASE(CreateWallet, TestChain100Setup) SyncWithValidationInterfaceQueue(); }); wallet = TestLoadWallet(context); - BOOST_CHECK_EQUAL(addtx_count, 4); + BOOST_CHECK_EQUAL(addtx_count, 2); { LOCK(wallet->cs_wallet); BOOST_CHECK_EQUAL(wallet->mapWallet.count(block_tx.GetHash()), 1U); diff --git a/src/wallet/wallet.cpp b/src/wallet/wallet.cpp index 24c4a2e5ff1d..583480930e9c 100644 --- a/src/wallet/wallet.cpp +++ b/src/wallet/wallet.cpp @@ -1685,7 +1685,8 @@ int64_t CWallet::RescanFromTime(int64_t startTime, const WalletRescanReserver& r /** * Scan the block chain (starting in start_block) for transactions * from or to us. If fUpdate is true, found transactions that already - * exist in the wallet will be updated. + * exist in the wallet will be updated. If max_height is not set, the + * mempool will be scanned as well. 
* * @param[in] start_block Scan starting block. If block is not on the active * chain, the scan will return SUCCESS immediately. @@ -1806,6 +1807,10 @@ CWallet::ScanResult CWallet::ScanForWalletTransactions(const uint256& start_bloc } } } + if (!max_height) { + WalletLogPrintf("Scanning current mempool transactions.\n"); + WITH_LOCK(cs_wallet, chain().requestMempoolTransactions(*this)); + } ShowProgress(strprintf("%s " + _("Rescanning…").translated, GetDisplayName()), 100); // hide progress dialog in GUI if (block_height && fAbortRescan) { WalletLogPrintf("Rescan aborted at block %d. Progress=%f\n", block_height, progress_current); diff --git a/test/functional/rpc_wipewallettxes.py b/test/functional/rpc_wipewallettxes.py index 5a6560bde07b..0a594f6288d7 100755 --- a/test/functional/rpc_wipewallettxes.py +++ b/test/functional/rpc_wipewallettxes.py @@ -32,9 +32,9 @@ def run_test(self): assert_equal(self.nodes[0].getwalletinfo()["txcount"], 104) self.nodes[0].wipewallettxes(True) assert_equal(self.nodes[0].getwalletinfo()["txcount"], 103) - self.nodes[0].rescanblockchain() - assert_equal(self.nodes[0].getwalletinfo()["txcount"], 103) assert_raises_rpc_error(-5, "Invalid or non-wallet transaction id", self.nodes[0].gettransaction, txid) + self.nodes[0].rescanblockchain() + assert_equal(self.nodes[0].getwalletinfo()["txcount"], 104) if __name__ == '__main__': diff --git a/test/functional/wallet_balance.py b/test/functional/wallet_balance.py index 0142f89158d0..ba9022bc7f7c 100755 --- a/test/functional/wallet_balance.py +++ b/test/functional/wallet_balance.py @@ -280,6 +280,29 @@ def test_balances(*, fee_node_1=0): self.generatetoaddress(self.nodes[1], 1, ADDRESS_WATCHONLY) assert_equal(self.nodes[0].getbalance(minconf=0), total_amount + 1) # The reorg recovered our fee of 1 coin + if not self.options.descriptors: + self.log.info('Check if mempool is taken into account after import*') + address = self.nodes[0].getnewaddress() + privkey = 
self.nodes[0].dumpprivkey(address) + self.nodes[0].sendtoaddress(address, 0.1) + self.nodes[0].unloadwallet('') + # check importaddress on fresh wallet + self.nodes[0].createwallet('w1', False, True) + self.nodes[0].importaddress(address) + assert_equal(self.nodes[0].getbalances()['mine']['untrusted_pending'], 0) + assert_equal(self.nodes[0].getbalances()['watchonly']['untrusted_pending'], Decimal('0.1')) + self.nodes[0].importprivkey(privkey) + assert_equal(self.nodes[0].getbalances()['mine']['untrusted_pending'], Decimal('0.1')) + self.log.info(f"balance: {self.nodes[0].getbalances()}") + # TODO: this code has diverged from bitcoin core, because we don't add `watchonly` in this particular case for 0 balance + if 'watchonly' in self.nodes[0].getbalances(): + assert_equal(self.nodes[0].getbalances()['watchonly']['untrusted_pending'], 0) + self.nodes[0].unloadwallet('w1') + # check importprivkey on fresh wallet + self.nodes[0].createwallet('w2', False, True) + self.nodes[0].importprivkey(privkey) + assert_equal(self.nodes[0].getbalances()['mine']['untrusted_pending'], Decimal('0.1')) + if __name__ == '__main__': WalletTest().main() diff --git a/test/functional/wallet_import_rescan.py b/test/functional/wallet_import_rescan.py index 12424c4d7c7f..1a42a8611918 100755 --- a/test/functional/wallet_import_rescan.py +++ b/test/functional/wallet_import_rescan.py @@ -20,7 +20,6 @@ """ from test_framework.test_framework import BitcoinTestFramework -from test_framework.governance import EXPECTED_STDERR_NO_GOV_PRUNE from test_framework.util import ( assert_equal, set_node_times, @@ -77,6 +76,7 @@ def check(self, txid=None, amount=None, confirmation_height=None): assert_equal(len(txs), self.expected_txs) addresses = self.node.listreceivedbyaddress(minconf=0, include_watchonly=True, address_filter=self.address['address']) + if self.expected_txs: assert_equal(len(addresses[0]["txids"]), self.expected_txs) @@ -88,13 +88,18 @@ def check(self, txid=None, amount=None, 
confirmation_height=None): assert_equal(tx["category"], "receive") assert_equal(tx["label"], self.label) assert_equal(tx["txid"], txid) - assert_equal(tx["confirmations"], 1 + current_height - confirmation_height) - assert "trusted" not in tx + + # If no confirmation height is given, the tx is still in the + # mempool. + confirmations = (1 + current_height - confirmation_height) if confirmation_height else 0 + assert_equal(tx["confirmations"], confirmations) + if confirmations: + assert "trusted" not in tx address, = [ad for ad in addresses if txid in ad["txids"]] assert_equal(address["address"], self.address["address"]) assert_equal(address["amount"], self.expected_balance) - assert_equal(address["confirmations"], 1 + current_height - confirmation_height) + assert_equal(address["confirmations"], confirmations) # Verify the transaction is correctly marked watchonly depending on # whether the transaction pays to an imported public key or # imported private key. The test setup ensures that transaction @@ -153,11 +158,12 @@ def setup_network(self): self.import_deterministic_coinbase_privkeys() self.stop_nodes() - self.start_nodes() + self.start_nodes(extra_args=[["-whitelist=noban@127.0.0.1"]] * self.num_nodes) for i in range(1, self.num_nodes): self.connect_nodes(i, 0) def run_test(self): + # Create one transaction on node 0 with a unique amount for # each possible type of wallet import RPC. for i, variant in enumerate(IMPORT_VARIANTS): @@ -195,7 +201,7 @@ def run_test(self): variant.check() # Create new transactions sending to each address. 
- for i, variant in enumerate(IMPORT_VARIANTS): + for variant in IMPORT_VARIANTS: variant.sent_amount = get_rand_amount() variant.sent_txid = self.nodes[0].sendtoaddress(variant.address["address"], variant.sent_amount) self.generate(self.nodes[0], 1) # Generate one block for each send @@ -210,10 +216,45 @@ def run_test(self): variant.expected_balance += variant.sent_amount variant.expected_txs += 1 variant.check(variant.sent_txid, variant.sent_amount, variant.confirmation_height) - for i, import_node in enumerate(IMPORT_NODES, 2): - if import_node.prune: - self.stop_node(i, expected_stderr=EXPECTED_STDERR_NO_GOV_PRUNE) + self.log.info('Test that the mempool is rescanned as well if the rescan parameter is set to true') + + # The late timestamp and pruned variants are not necessary when testing mempool rescan + mempool_variants = [variant for variant in IMPORT_VARIANTS if variant.rescan != Rescan.late_timestamp and not variant.prune] + # No further blocks are mined so the timestamp will stay the same + timestamp = self.nodes[0].getblockheader(self.nodes[0].getbestblockhash())["time"] + + # Create one transaction on node 0 with a unique amount for + # each possible type of wallet import RPC. + for i, variant in enumerate(mempool_variants): + variant.label = "mempool label {} {}".format(i, variant) + variant.address = self.nodes[1].getaddressinfo(self.nodes[1].getnewaddress( + label=variant.label, + )) + variant.key = self.nodes[1].dumpprivkey(variant.address["address"]) + variant.initial_amount = get_rand_amount() + variant.initial_txid = self.nodes[0].sendtoaddress(variant.address["address"], variant.initial_amount) + variant.confirmation_height = 0 + variant.timestamp = timestamp + + assert_equal(len(self.nodes[0].getrawmempool()), len(mempool_variants)) + self.sync_mempools() + + # For each variation of wallet key import, invoke the import RPC and + # check the results from getbalance and listtransactions. 
+ for variant in mempool_variants: + self.log.info('Run import for mempool variant {}'.format(variant)) + expect_rescan = variant.rescan == Rescan.yes + variant.node = self.nodes[2 + IMPORT_NODES.index(ImportNode(variant.prune, expect_rescan))] + variant.do_import(variant.timestamp) + if expect_rescan: + variant.expected_balance = variant.initial_amount + variant.expected_txs = 1 + variant.check(variant.initial_txid, variant.initial_amount) + else: + variant.expected_balance = 0 + variant.expected_txs = 0 + variant.check() if __name__ == "__main__": diff --git a/test/functional/wallet_importdescriptors.py b/test/functional/wallet_importdescriptors.py index 8c30b042ffb8..9d961c69885b 100755 --- a/test/functional/wallet_importdescriptors.py +++ b/test/functional/wallet_importdescriptors.py @@ -427,7 +427,7 @@ def run_test(self): assert_equal(wmulti_priv.getwalletinfo()['keypoolsize'], 1000) txid = w0.sendtoaddress(addr, 10) self.generate(self.nodes[0], 6) - wmulti_priv.sendtoaddress(w0.getnewaddress(), 8) # uses change 1 + send_txid = wmulti_priv.sendtoaddress(w0.getnewaddress(), 8) # uses change 1 self.sync_all() self.nodes[1].createwallet(wallet_name="wmulti_pub", disable_private_keys=True, blank=True, descriptors=True) @@ -454,7 +454,9 @@ def run_test(self): addr = wmulti_pub.getnewaddress() # uses receive 1 assert_equal(addr, '91cA4fLGaDCr6b9W2c5j1ph9PDpq9WbEhk') # Derived at m/84'/0'/0'/1 change_addr = wmulti_pub.getrawchangeaddress() # uses receive 2 - assert_equal(change_addr, '91WxMwg2NHD1PwHChhbAkeCN6nQ8ikdLEx') + assert_equal(change_addr, '8y2sLiPQnB81bAeiRvwbjozJXnCCNH2nHb') + assert(send_txid in self.nodes[0].getrawmempool(True)) + assert(send_txid in (x['txid'] for x in wmulti_pub.listunspent(0))) assert_equal(wmulti_pub.getwalletinfo()['keypoolsize'], 999) # generate some utxos for next tests From 3538700d65f4223130ee0a7103fb35ec24246923 Mon Sep 17 00:00:00 2001 From: MacroFake Date: Tue, 19 Jul 2022 09:15:55 +0200 Subject: [PATCH 574/656] Merge 
bitcoin/bitcoin#25466: ci: add unused-using-decls to clang-tidy a02f3f19f52e628248f81acc2410e67f3d49baf5 tidy: use misc-unused-using-decls (fanquake) d6787bc19b1032d3f46a60625105f30199c41b00 refactor: remove unused using directives (fanquake) 3617634324d647956c621db407db6d82a91b91ec validation: remove unused using directives (eugene) Pull request description: Adds https://clang.llvm.org/extra/clang-tidy/checks/misc/unused-using-decls.html to our clang-tidy. PR'd after the discussion in #25433 (which it includes). ACKs for top commit: jamesob: Github ACK https://github.com/bitcoin/bitcoin/pull/25466/commits/a02f3f19f52e628248f81acc2410e67f3d49baf5 Tree-SHA512: 2bb937c1cc90006e69054458d845fb54f287567f4309c773a3fc859f260558c32ff51fc1c2ce9b43207426f3547e7ce226c87186103d741d5efcca19cd355253 --- src/.clang-tidy | 2 ++ src/bench/wallet_loading.cpp | 2 -- src/qt/transactionrecord.cpp | 1 - src/rpc/evo.cpp | 2 -- src/rpc/masternode.cpp | 1 - src/rpc/output_script.cpp | 5 ----- src/test/coinstatsindex_tests.cpp | 3 --- src/test/miniscript_tests.cpp | 2 ++ 8 files changed, 4 insertions(+), 14 deletions(-) diff --git a/src/.clang-tidy b/src/.clang-tidy index 5ac571061309..cc4bb41710a8 100644 --- a/src/.clang-tidy +++ b/src/.clang-tidy @@ -1,6 +1,7 @@ Checks: ' -*, bugprone-argument-comment, +misc-unused-using-decls, modernize-use-default-member-init, modernize-use-nullptr, readability-const-return-type, @@ -9,6 +10,7 @@ readability-redundant-string-init, ' WarningsAsErrors: ' bugprone-argument-comment, +misc-unused-using-decls, modernize-use-default-member-init, modernize-use-nullptr, readability-redundant-declaration, diff --git a/src/bench/wallet_loading.cpp b/src/bench/wallet_loading.cpp index 3d8700d5592b..1a2bd7eceb32 100644 --- a/src/bench/wallet_loading.cpp +++ b/src/bench/wallet_loading.cpp @@ -19,8 +19,6 @@ using wallet::CWallet; using wallet::DatabaseFormat; using wallet::DatabaseOptions; -using wallet::ISMINE_SPENDABLE; -using wallet::MakeWalletDatabase; using 
wallet::TxStateInactive; using wallet::WALLET_FLAG_DESCRIPTORS; using wallet::WalletContext; diff --git a/src/qt/transactionrecord.cpp b/src/qt/transactionrecord.cpp index 0e550eb1bf9e..3814cd9c1999 100644 --- a/src/qt/transactionrecord.cpp +++ b/src/qt/transactionrecord.cpp @@ -13,7 +13,6 @@ #include -using wallet::ISMINE_ALL; using wallet::ISMINE_SPENDABLE; using wallet::ISMINE_WATCH_ONLY; using wallet::isminetype; diff --git a/src/rpc/evo.cpp b/src/rpc/evo.cpp index da14a2de4cac..44c6f4fda664 100644 --- a/src/rpc/evo.cpp +++ b/src/rpc/evo.cpp @@ -50,8 +50,6 @@ using node::NodeContext; using wallet::CWallet; #ifdef ENABLE_WALLET using wallet::CCoinControl; -using wallet::CoinType; -using wallet::COutput; using wallet::CRecipient; using wallet::DEFAULT_DISABLE_WALLET; using wallet::GetWalletForJSONRPCRequest; diff --git a/src/rpc/masternode.cpp b/src/rpc/masternode.cpp index 3ea189ebbc28..7a8537da3356 100644 --- a/src/rpc/masternode.cpp +++ b/src/rpc/masternode.cpp @@ -36,7 +36,6 @@ using node::ReadBlockFromDisk; #ifdef ENABLE_WALLET using wallet::CCoinControl; using wallet::CoinType; -using wallet::COutput; using wallet::CWallet; using wallet::GetWalletForJSONRPCRequest; #endif // ENABLE_WALLET diff --git a/src/rpc/output_script.cpp b/src/rpc/output_script.cpp index ad26dd77c168..ca5beef1d0f7 100644 --- a/src/rpc/output_script.cpp +++ b/src/rpc/output_script.cpp @@ -23,11 +23,6 @@ #include #include -namespace node { -struct NodeContext; -} -using node::NodeContext; - static RPCHelpMan validateaddress() { return RPCHelpMan{ diff --git a/src/test/coinstatsindex_tests.cpp b/src/test/coinstatsindex_tests.cpp index e3335373231f..b8f5a2b4b9f0 100644 --- a/src/test/coinstatsindex_tests.cpp +++ b/src/test/coinstatsindex_tests.cpp @@ -12,9 +12,6 @@ #include -using kernel::CCoinsStats; -using kernel::CoinStatsHashType; - BOOST_AUTO_TEST_SUITE(coinstatsindex_tests) BOOST_FIXTURE_TEST_CASE(coinstatsindex_initial_sync, TestChain100Setup) diff --git 
a/src/test/miniscript_tests.cpp b/src/test/miniscript_tests.cpp index 3877fea907e8..46ed47b1d480 100644 --- a/src/test/miniscript_tests.cpp +++ b/src/test/miniscript_tests.cpp @@ -116,6 +116,8 @@ struct KeyConverter { //! Singleton instance of KeyConverter. const KeyConverter CONVERTER{}; +// https://github.com/llvm/llvm-project/issues/53444 +// NOLINTNEXTLINE(misc-unused-using-decls) using miniscript::operator"" _mst; enum TestMode : int { From 03a8a7e676041b066027c7f595155703e03f31ec Mon Sep 17 00:00:00 2001 From: MacroFake Date: Tue, 30 Aug 2022 20:04:13 +0200 Subject: [PATCH 575/656] Merge bitcoin/bitcoin#25733: tidy: enable bugprone-use-after-move f345dc3960c2cf4d69ebbcc011e4e836205f0361 tidy: enable bugprone-use-after-move (fanquake) 94f2235f858bc4fdaf0ab0882599f6a228401cf5 test: work around bugprone-use-after-move warnings in util tests (fanquake) Pull request description: Would have caught #25640. Currently `// NOLINT`s around: ```bash test/util_tests.cpp:2513:34: error: 't2' used after it was moved [bugprone-use-after-move,-warnings-as-errors] BOOST_CHECK(v2[0].origin == &t2); ^ test/util_tests.cpp:2511:15: note: move occurred here auto v2 = Vector(std::move(t2)); ^ test/util_tests.cpp:2519:34: error: 't2' used after it was moved [bugprone-use-after-move,-warnings-as-errors] BOOST_CHECK(v3[1].origin == &t2); ^ test/util_tests.cpp:2516:15: note: move occurred here auto v3 = Vector(t1, std::move(t2)); ^ test/util_tests.cpp:2527:34: error: 't3' used after it was moved [bugprone-use-after-move,-warnings-as-errors] BOOST_CHECK(v4[2].origin == &t3); ^ test/util_tests.cpp:2523:15: note: move occurred here auto v4 = Vector(std::move(v3[0]), v3[1], std::move(t3)); ``` See: https://releases.llvm.org/14.0.0/tools/clang/tools/extra/docs/clang-tidy/checks/bugprone-use-after-move.html ACKs for top commit: ryanofsky: Code review ACK f345dc3960c2cf4d69ebbcc011e4e836205f0361. 
Only change since last review is switching to NOLINT directives Tree-SHA512: afadecbaf1069653f4be5d6e66a5800ffd975c0b1a960057abc6367b616c181cd518897a874a8f3fd5e5e1f45fcc165f7a9a3171136cd4deee641214c4b765b8 --- src/.clang-tidy | 2 ++ src/test/util_tests.cpp | 6 +++--- 2 files changed, 5 insertions(+), 3 deletions(-) diff --git a/src/.clang-tidy b/src/.clang-tidy index cc4bb41710a8..3096da6737f7 100644 --- a/src/.clang-tidy +++ b/src/.clang-tidy @@ -1,6 +1,7 @@ Checks: ' -*, bugprone-argument-comment, +bugprone-use-after-move, misc-unused-using-decls, modernize-use-default-member-init, modernize-use-nullptr, @@ -10,6 +11,7 @@ readability-redundant-string-init, ' WarningsAsErrors: ' bugprone-argument-comment, +bugprone-use-after-move, misc-unused-using-decls, modernize-use-default-member-init, modernize-use-nullptr, diff --git a/src/test/util_tests.cpp b/src/test/util_tests.cpp index ae2778ac5690..6d5c7be2c019 100644 --- a/src/test/util_tests.cpp +++ b/src/test/util_tests.cpp @@ -1304,13 +1304,13 @@ BOOST_AUTO_TEST_CASE(test_tracked_vector) auto v2 = Vector(std::move(t2)); BOOST_CHECK_EQUAL(v2.size(), 1U); - BOOST_CHECK(v2[0].origin == &t2); + BOOST_CHECK(v2[0].origin == &t2); // NOLINT(*-use-after-move) BOOST_CHECK_EQUAL(v2[0].copies, 0); auto v3 = Vector(t1, std::move(t2)); BOOST_CHECK_EQUAL(v3.size(), 2U); BOOST_CHECK(v3[0].origin == &t1); - BOOST_CHECK(v3[1].origin == &t2); + BOOST_CHECK(v3[1].origin == &t2); // NOLINT(*-use-after-move) BOOST_CHECK_EQUAL(v3[0].copies, 1); BOOST_CHECK_EQUAL(v3[1].copies, 0); @@ -1318,7 +1318,7 @@ BOOST_AUTO_TEST_CASE(test_tracked_vector) BOOST_CHECK_EQUAL(v4.size(), 3U); BOOST_CHECK(v4[0].origin == &t1); BOOST_CHECK(v4[1].origin == &t2); - BOOST_CHECK(v4[2].origin == &t3); + BOOST_CHECK(v4[2].origin == &t3); // NOLINT(*-use-after-move) BOOST_CHECK_EQUAL(v4[0].copies, 1); BOOST_CHECK_EQUAL(v4[1].copies, 1); BOOST_CHECK_EQUAL(v4[2].copies, 0); From 0e7d0e222043b9710587ceb0cc632ea602c3506d Mon Sep 17 00:00:00 2001 From: MacroFake 
Date: Fri, 19 Aug 2022 17:10:59 +0200 Subject: [PATCH 576/656] partial Merge bitcoin/bitcoin#25707: refactor: Make const references to avoid unnecessarily copying objects and enable two clang-tidy checks MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit BACKPORT NOTE: Somehow there's multiple warnings that are for backported code, mostly related to mempool, such as: rpc/mempool.cpp:549:33: error: loop variable is copied but only used as const reference; consider making it a const reference [performance-for-range-copy,-warnings-as-errors] 549 | for (CTxMemPool::txiter descendantIt : setDescendants) { | ^ | const & I can't find a way to fix it without big diversity of codebase, so, make as partial temporary ae7ae36d311a869b3bda41d29dc0e47fade77d28 tidy: Enable two clang-tidy checks (Aurèle Oulès) 081b0e53e3adca7ea57d23e5fcd9db4b86415a72 refactor: Make const refs vars where applicable (Aurèle Oulès) Pull request description: I added const references to some variables to avoid unnecessarily copying objects. Also added two clang-tidy checks : [performance-for-range-copy](https://releases.llvm.org/11.1.0/tools/clang/tools/extra/docs/clang-tidy/checks/performance-for-range-copy.html) and [performance-unnecessary-copy-initialization](https://releases.llvm.org/12.0.0/tools/clang/tools/extra/docs/clang-tidy/checks/performance-unnecessary-copy-initialization.html). 
ACKs for top commit: vasild: ACK ae7ae36d311a869b3bda41d29dc0e47fade77d28 MarcoFalke: review ACK ae7ae36d311a869b3bda41d29dc0e47fade77d28 Tree-SHA512: f6ac6b0cd0eee1e0c34d2f186484bc0f7ec6071451cccb33fa88a67d93d92b304e2fac378b88f087e94657745bca4e966dbc443759587400eb01b1f3061fde8c --- src/.clang-tidy | 3 +++ src/bench/load_external.cpp | 2 +- src/bitcoin-cli.cpp | 2 +- src/bitcoin-tx.cpp | 2 +- src/blockfilter.cpp | 2 +- src/bls/bls_worker.cpp | 2 +- src/coins.cpp | 2 +- src/core_read.cpp | 2 +- src/evo/creditpool.cpp | 2 +- src/evo/mnauth.cpp | 4 +--- src/init.cpp | 2 +- src/logging.cpp | 2 +- src/netbase.cpp | 6 +++--- src/node/blockstorage.cpp | 2 +- src/qt/governancelist.cpp | 10 +++++----- src/qt/splashscreen.cpp | 5 ++--- src/rpc/coinjoin.cpp | 2 +- src/rpc/mining.cpp | 6 +++--- src/rpc/rawtransaction_util.cpp | 4 ++-- src/rpc/util.cpp | 6 +++--- src/test/base58_tests.cpp | 4 ++-- src/test/blockfilter_tests.cpp | 2 +- src/test/evo_deterministicmns_tests.cpp | 3 ++- src/test/evo_trivialvalidation.cpp | 2 +- src/test/key_io_tests.cpp | 6 +++--- src/test/script_tests.cpp | 2 +- src/test/sighash_tests.cpp | 2 +- src/test/transaction_tests.cpp | 8 ++++---- src/test/validation_block_tests.cpp | 2 +- src/wallet/bdb.cpp | 2 +- src/wallet/receive.cpp | 2 +- src/wallet/scriptpubkeyman.cpp | 2 +- src/wallet/test/bip39_tests.cpp | 2 +- src/wallet/walletdb.cpp | 6 +++--- 34 files changed, 57 insertions(+), 56 deletions(-) diff --git a/src/.clang-tidy b/src/.clang-tidy index 3096da6737f7..42602b69749b 100644 --- a/src/.clang-tidy +++ b/src/.clang-tidy @@ -5,6 +5,8 @@ bugprone-use-after-move, misc-unused-using-decls, modernize-use-default-member-init, modernize-use-nullptr, +performance-for-range-copy, +performance-unnecessary-copy-initialization, readability-const-return-type, readability-redundant-declaration, readability-redundant-string-init, @@ -15,6 +17,7 @@ bugprone-use-after-move, misc-unused-using-decls, modernize-use-default-member-init, modernize-use-nullptr, 
+performance-unnecessary-copy-initialization, readability-redundant-declaration, readability-redundant-string-init, ' diff --git a/src/bench/load_external.cpp b/src/bench/load_external.cpp index 68d7895f49b4..2d691484a3bd 100644 --- a/src/bench/load_external.cpp +++ b/src/bench/load_external.cpp @@ -28,7 +28,7 @@ static void LoadExternalBlockFile(benchmark::Bench& bench) // block data) as a stream object. const fs::path blkfile{testing_setup.get()->m_path_root / "blk.dat"}; CDataStream ss(SER_DISK, 0); - auto params{Params()}; + const auto& params{Params()}; ss << params.MessageStart(); ss << static_cast(benchmark::data::block813851.size()); // We can't use the streaming serialization (ss << benchmark::data::block813851) diff --git a/src/bitcoin-cli.cpp b/src/bitcoin-cli.cpp index 3e94ea5a2689..2f3b807131e9 100644 --- a/src/bitcoin-cli.cpp +++ b/src/bitcoin-cli.cpp @@ -948,7 +948,7 @@ static void GetWalletBalances(UniValue& result) UniValue balances(UniValue::VOBJ); for (const UniValue& wallet : wallets.getValues()) { - const std::string wallet_name = wallet.get_str(); + const std::string& wallet_name = wallet.get_str(); const UniValue getbalances = ConnectAndCallRPC(&rh, "getbalances", /* args=*/{}, wallet_name); const UniValue& balance = getbalances.find_value("result")["mine"]["trusted"]; balances.pushKV(wallet_name, balance); diff --git a/src/bitcoin-tx.cpp b/src/bitcoin-tx.cpp index 9c9ccdae59dc..f256516e2c08 100644 --- a/src/bitcoin-tx.cpp +++ b/src/bitcoin-tx.cpp @@ -555,7 +555,7 @@ static void MutateTxSign(CMutableTransaction& tx, const std::string& flagStr) UniValue prevtxsObj = registers["prevtxs"]; { for (unsigned int previdx = 0; previdx < prevtxsObj.size(); previdx++) { - UniValue prevOut = prevtxsObj[previdx]; + const UniValue& prevOut = prevtxsObj[previdx]; if (!prevOut.isObject()) throw std::runtime_error("expected prevtxs internal object"); diff --git a/src/blockfilter.cpp b/src/blockfilter.cpp index f4638dabcc0d..364ed1c7ec72 100644 --- 
a/src/blockfilter.cpp +++ b/src/blockfilter.cpp @@ -170,7 +170,7 @@ const std::set& AllBlockFilterTypes() static std::once_flag flag; std::call_once(flag, []() { - for (auto entry : g_filter_types) { + for (const auto& entry : g_filter_types) { types.insert(entry.first); } }); diff --git a/src/bls/bls_worker.cpp b/src/bls/bls_worker.cpp index 1a65899be450..44f15e46c22b 100644 --- a/src/bls/bls_worker.cpp +++ b/src/bls/bls_worker.cpp @@ -15,7 +15,7 @@ template bool VerifyVectorHelper(Span vec) { std::set set; - for (auto item : vec) { + for (const auto& item : vec) { if (!item.IsValid()) return false; // check duplicates diff --git a/src/coins.cpp b/src/coins.cpp index 0aa3bf708484..d43e886b7603 100644 --- a/src/coins.cpp +++ b/src/coins.cpp @@ -356,7 +356,7 @@ bool CCoinsViewErrorCatcher::GetCoin(const COutPoint &outpoint, Coin &coin) cons try { return CCoinsViewBacked::GetCoin(outpoint, coin); } catch(const std::runtime_error& e) { - for (auto f : m_err_callbacks) { + for (const auto& f : m_err_callbacks) { f(); } LogPrintf("Error reading from database: %s\n", e.what()); diff --git a/src/core_read.cpp b/src/core_read.cpp index 9f7c198d3f9e..a6e362603aac 100644 --- a/src/core_read.cpp +++ b/src/core_read.cpp @@ -189,7 +189,7 @@ int ParseSighashString(const UniValue& sighash) {std::string("SINGLE"), int(SIGHASH_SINGLE)}, {std::string("SINGLE|ANYONECANPAY"), int(SIGHASH_SINGLE|SIGHASH_ANYONECANPAY)}, }; - std::string strHashType = sighash.get_str(); + const std::string& strHashType = sighash.get_str(); const auto& it = map_sighash_values.find(strHashType); if (it != map_sighash_values.end()) { hash_type = it->second; diff --git a/src/evo/creditpool.cpp b/src/evo/creditpool.cpp index 7a82653b4bc5..772ac70630ac 100644 --- a/src/evo/creditpool.cpp +++ b/src/evo/creditpool.cpp @@ -91,7 +91,7 @@ static std::optional GetCreditDataFromBlock(const gsl::n LogPrintf("%s: WARNING: No valid CbTx at height=%d\n", __func__, block_index->nHeight); return std::nullopt; } - for 
(CTransactionRef tx : block.vtx) { + for (const CTransactionRef& tx : block.vtx) { if (!tx->IsSpecialTxVersion() || tx->nType != TRANSACTION_ASSET_UNLOCK) continue; CAmount unlocked{0}; diff --git a/src/evo/mnauth.cpp b/src/evo/mnauth.cpp index 4d813ebbbf00..caba4172e567 100644 --- a/src/evo/mnauth.cpp +++ b/src/evo/mnauth.cpp @@ -36,9 +36,7 @@ void CMNAuth::PushMNAUTH(CNode& peer, CConnman& connman, const CActiveMasternode if (Params().NetworkIDString() != CBaseChainParams::MAIN && gArgs.IsArgSet("-pushversion")) { nOurNodeVersion = gArgs.GetIntArg("-pushversion", PROTOCOL_VERSION); } - auto pk = mn_activeman.GetPubKey(); - const CBLSPublicKey pubKey(pk); - const uint256 signHash{::SerializeHash(std::make_tuple(pubKey, receivedMNAuthChallenge, peer.IsInboundConn(), nOurNodeVersion))}; + const uint256 signHash{::SerializeHash(std::make_tuple(mn_activeman.GetPubKey(), receivedMNAuthChallenge, peer.IsInboundConn(), nOurNodeVersion))}; mnauth.proRegTxHash = mn_activeman.GetProTxHash(); diff --git a/src/init.cpp b/src/init.cpp index 642df953f34f..c831ed4986a8 100644 --- a/src/init.cpp +++ b/src/init.cpp @@ -1440,7 +1440,7 @@ bool AppInitParameterInteraction(const ArgsManager& args) static bool LockDataDirectory(bool probeOnly) { // Make sure only a single Dash Core process is using the data directory. 
- fs::path datadir = gArgs.GetDataDirNet(); + const fs::path& datadir = gArgs.GetDataDirNet(); if (!DirIsWritable(datadir)) { return InitError(strprintf(_("Cannot write to data directory '%s'; check permissions."), fs::PathToString(datadir))); } diff --git a/src/logging.cpp b/src/logging.cpp index 293ac4463d34..d59d1a86aa9b 100644 --- a/src/logging.cpp +++ b/src/logging.cpp @@ -462,7 +462,7 @@ void BCLog::Logger::LogPrintStr(const std::string& str, const std::string& loggi } if (m_log_threadnames && m_started_new_line) { - const auto threadname = util::ThreadGetInternalName(); + const auto& threadname = util::ThreadGetInternalName(); // 16 chars total, "dash-" is 5 of them and another 1 is a NUL terminator str_prefixed.insert(0, "[" + strprintf("%10s", (threadname.empty() ? "unknown" : threadname)) + "] "); } diff --git a/src/netbase.cpp b/src/netbase.cpp index 7ac84ad4e3d0..b1a4f7750076 100644 --- a/src/netbase.cpp +++ b/src/netbase.cpp @@ -390,7 +390,7 @@ bool Socks5(const std::string& strDest, uint16_t port, const ProxyCredentials* a return error("Error sending to proxy"); } uint8_t pchRet1[2]; - if ((recvr = InterruptibleRecv(pchRet1, 2, g_socks5_recv_timeout, sock)) != IntrRecvError::OK) { + if (InterruptibleRecv(pchRet1, 2, g_socks5_recv_timeout, sock) != IntrRecvError::OK) { LogPrintf("Socks5() connect to %s:%d failed: InterruptibleRecv() timeout or other failure\n", strDest, port); return false; } @@ -413,7 +413,7 @@ bool Socks5(const std::string& strDest, uint16_t port, const ProxyCredentials* a } LogPrint(BCLog::PROXY, "SOCKS5 sending proxy authentication %s:%s\n", auth->username, auth->password); uint8_t pchRetA[2]; - if ((recvr = InterruptibleRecv(pchRetA, 2, g_socks5_recv_timeout, sock)) != IntrRecvError::OK) { + if (InterruptibleRecv(pchRetA, 2, g_socks5_recv_timeout, sock) != IntrRecvError::OK) { return error("Error reading proxy authentication response"); } if (pchRetA[0] != 0x01 || pchRetA[1] != 0x00) { @@ -479,7 +479,7 @@ bool Socks5(const 
std::string& strDest, uint16_t port, const ProxyCredentials* a if (recvr != IntrRecvError::OK) { return error("Error reading from proxy"); } - if ((recvr = InterruptibleRecv(pchRet3, 2, g_socks5_recv_timeout, sock)) != IntrRecvError::OK) { + if (InterruptibleRecv(pchRet3, 2, g_socks5_recv_timeout, sock) != IntrRecvError::OK) { return error("Error reading from proxy"); } LogPrint(BCLog::NET, "SOCKS5 connected %s\n", strDest); diff --git a/src/node/blockstorage.cpp b/src/node/blockstorage.cpp index 707acd044c28..76fd09843ac7 100644 --- a/src/node/blockstorage.cpp +++ b/src/node/blockstorage.cpp @@ -448,7 +448,7 @@ void CleanupBlockRevFiles() // Remove the rev files immediately and insert the blk file paths into an // ordered map keyed by block file index. LogPrintf("Removing unusable blk?????.dat and rev?????.dat files for -reindex with -prune\n"); - fs::path blocksdir = gArgs.GetBlocksDirPath(); + const fs::path& blocksdir = gArgs.GetBlocksDirPath(); for (fs::directory_iterator it(blocksdir); it != fs::directory_iterator(); it++) { const std::string path = fs::PathToString(it->path().filename()); if (fs::is_regular_file(*it) && diff --git a/src/qt/governancelist.cpp b/src/qt/governancelist.cpp index 49c6abe6e628..37f61fca4a06 100644 --- a/src/qt/governancelist.cpp +++ b/src/qt/governancelist.cpp @@ -43,23 +43,23 @@ Proposal::Proposal(ClientModel* _clientModel, const CGovernanceObject& _govObj, { UniValue prop_data; if (prop_data.read(govObj.GetDataAsPlainString())) { - if (UniValue titleValue = prop_data.find_value("name"); titleValue.isStr()) { + if (const UniValue& titleValue = prop_data.find_value("name"); titleValue.isStr()) { m_title = QString::fromStdString(titleValue.get_str()); } - if (UniValue paymentStartValue = prop_data.find_value("start_epoch"); paymentStartValue.isNum()) { + if (const UniValue& paymentStartValue = prop_data.find_value("start_epoch"); paymentStartValue.isNum()) { m_startDate = QDateTime::fromSecsSinceEpoch(paymentStartValue.getInt()); } 
- if (UniValue paymentEndValue = prop_data.find_value("end_epoch"); paymentEndValue.isNum()) { + if (const UniValue& paymentEndValue = prop_data.find_value("end_epoch"); paymentEndValue.isNum()) { m_endDate = QDateTime::fromSecsSinceEpoch(paymentEndValue.getInt()); } - if (UniValue amountValue = prop_data.find_value("payment_amount"); amountValue.isNum()) { + if (const UniValue& amountValue = prop_data.find_value("payment_amount"); amountValue.isNum()) { m_paymentAmount = amountValue.get_real(); } - if (UniValue urlValue = prop_data.find_value("url"); urlValue.isStr()) { + if (const UniValue& urlValue = prop_data.find_value("url"); urlValue.isStr()) { m_url = QString::fromStdString(urlValue.get_str()); } } diff --git a/src/qt/splashscreen.cpp b/src/qt/splashscreen.cpp index cb92b6d9e2f5..f82f71c2c264 100644 --- a/src/qt/splashscreen.cpp +++ b/src/qt/splashscreen.cpp @@ -55,7 +55,7 @@ SplashScreen::SplashScreen(const NetworkStyle *networkStyle) : // define text to place QString titleText = PACKAGE_NAME; QString versionText = QString::fromStdString(FormatFullVersion()).remove(0, 1); - QString titleAddText = networkStyle->getTitleAddText(); + const QString& titleAddText = networkStyle->getTitleAddText(); QFont fontNormal = GUIUtil::getFontNormal(); QFont fontBold = GUIUtil::getFontBold(); @@ -116,8 +116,7 @@ SplashScreen::SplashScreen(const NetworkStyle *networkStyle) : int titleAddTextWidth = GUIUtil::TextWidth(fm, titleAddText); // Draw the badge background with the network-specific color QRect badgeRect = QRect(width - titleAddTextWidth - 20, 5, width, fm.height() + 10); - QColor badgeColor = networkStyle->getBadgeColor(); - pixPaint.fillRect(badgeRect, badgeColor); + pixPaint.fillRect(badgeRect, networkStyle->getBadgeColor()); // Draw the text itself using white color, regardless of the current theme pixPaint.setPen(QColor(255, 255, 255)); pixPaint.drawText(width - titleAddTextWidth - 10, paddingTop + 10, titleAddText); diff --git a/src/rpc/coinjoin.cpp 
b/src/rpc/coinjoin.cpp index 0c9b16bd634b..d3dd399dd35d 100644 --- a/src/rpc/coinjoin.cpp +++ b/src/rpc/coinjoin.cpp @@ -173,7 +173,7 @@ static RPCHelpMan coinjoin_status() } UniValue ret(UniValue::VARR); - for (auto str_status : cj_clientman->getSessionStatuses()) { + for (const auto& str_status : cj_clientman->getSessionStatuses()) { ret.push_back(str_status); } return ret; diff --git a/src/rpc/mining.cpp b/src/rpc/mining.cpp index 11e6cd906d45..6e372c75cea0 100644 --- a/src/rpc/mining.cpp +++ b/src/rpc/mining.cpp @@ -131,7 +131,7 @@ static bool GenerateBlock(ChainstateManager& chainman, CBlock& block, uint64_t& block_hash.SetNull(); block.hashMerkleRoot = BlockMerkleRoot(block); - CChainParams chainparams(Params()); + const CChainParams& chainparams(Params()); while (max_tries > 0 && block.nNonce < std::numeric_limits::max() && !CheckProofOfWork(block.GetHash(), block.nBits, chainparams.GetConsensus()) && !ShutdownRequested()) { ++block.nNonce; @@ -356,7 +356,7 @@ static RPCHelpMan generateblock() } } - CChainParams chainparams(Params()); + const CChainParams& chainparams(Params()); const LLMQContext& llmq_ctx = EnsureLLMQContext(node); ChainstateManager& chainman = EnsureChainman(node); @@ -755,7 +755,7 @@ static RPCHelpMan getblocktemplate() if (lpval.isStr()) { // Format: - std::string lpstr = lpval.get_str(); + const std::string& lpstr = lpval.get_str(); hashWatchedChain = ParseHashV(lpstr.substr(0, 64), "longpollid"); nTransactionsUpdatedLastLP = LocaleIndependentAtoi(lpstr.substr(64)); diff --git a/src/rpc/rawtransaction_util.cpp b/src/rpc/rawtransaction_util.cpp index e66a5b2d3aa9..8ab727004650 100644 --- a/src/rpc/rawtransaction_util.cpp +++ b/src/rpc/rawtransaction_util.cpp @@ -146,14 +146,14 @@ static void TxInErrorToJSON(const CTxIn& txin, UniValue& vErrorsRet, const std:: void ParsePrevouts(const UniValue& prevTxsUnival, FillableSigningProvider* keystore, std::map& coins) { if (!prevTxsUnival.isNull()) { - UniValue prevTxs = 
prevTxsUnival.get_array(); + const UniValue& prevTxs = prevTxsUnival.get_array(); for (unsigned int idx = 0; idx < prevTxs.size(); ++idx) { const UniValue& p = prevTxs[idx]; if (!p.isObject()) { throw JSONRPCError(RPC_DESERIALIZATION_ERROR, "expected object with {\"txid'\",\"vout\",\"scriptPubKey\"}"); } - UniValue prevOut = p.get_obj(); + const UniValue& prevOut = p.get_obj(); RPCTypeCheckObj(prevOut, { diff --git a/src/rpc/util.cpp b/src/rpc/util.cpp index da552c410f5f..3f3b852422f1 100644 --- a/src/rpc/util.cpp +++ b/src/rpc/util.cpp @@ -99,7 +99,7 @@ CAmount AmountFromValue(const UniValue& value, int decimals) uint256 ParseHashV(const UniValue& v, std::string strName) { - std::string strHex{v.get_str()}; + const std::string& strHex(v.get_str()); if (64 != strHex.length()) throw JSONRPCError(RPC_INVALID_PARAMETER, strprintf("%s must be of length %d (not %d, for '%s')", strName, 64, strHex.length(), strHex)); if (!IsHex(strHex)) // Note: IsHex("") is false @@ -126,7 +126,7 @@ std::vector ParseHexO(const UniValue& o, std::string strKey) int32_t ParseInt32V(const UniValue& v, const std::string &strName) { - std::string strNum = v.getValStr(); + const std::string& strNum = v.getValStr(); int32_t num; if (!ParseInt32(strNum, &num)) throw JSONRPCError(RPC_INVALID_PARAMETER, strName+" must be a 32bit integer (not '"+strNum+"')"); @@ -135,7 +135,7 @@ int32_t ParseInt32V(const UniValue& v, const std::string &strName) int64_t ParseInt64V(const UniValue& v, const std::string &strName) { - std::string strNum = v.getValStr(); + const std::string& strNum = v.getValStr(); int64_t num; if (!ParseInt64(strNum, &num)) throw JSONRPCError(RPC_INVALID_PARAMETER, strName+" must be a 64bit integer (not '"+strNum+"')"); diff --git a/src/test/base58_tests.cpp b/src/test/base58_tests.cpp index fbe873ef3237..2cd1ae9510d2 100644 --- a/src/test/base58_tests.cpp +++ b/src/test/base58_tests.cpp @@ -24,7 +24,7 @@ BOOST_AUTO_TEST_CASE(base58_EncodeBase58) { UniValue tests = 
read_json(std::string(json_tests::base58_encode_decode, json_tests::base58_encode_decode + sizeof(json_tests::base58_encode_decode))); for (unsigned int idx = 0; idx < tests.size(); idx++) { - UniValue test = tests[idx]; + const UniValue& test = tests[idx]; std::string strTest = test.write(); if (test.size() < 2) // Allow for extra stuff (useful for comments) { @@ -46,7 +46,7 @@ BOOST_AUTO_TEST_CASE(base58_DecodeBase58) std::vector result; for (unsigned int idx = 0; idx < tests.size(); idx++) { - UniValue test = tests[idx]; + const UniValue& test = tests[idx]; std::string strTest = test.write(); if (test.size() < 2) // Allow for extra stuff (useful for comments) { diff --git a/src/test/blockfilter_tests.cpp b/src/test/blockfilter_tests.cpp index 54ab568e7d3a..b33cca137738 100644 --- a/src/test/blockfilter_tests.cpp +++ b/src/test/blockfilter_tests.cpp @@ -144,7 +144,7 @@ BOOST_AUTO_TEST_CASE(blockfilters_json_test) const UniValue& tests = json.get_array(); for (unsigned int i = 0; i < tests.size(); i++) { - UniValue test = tests[i]; + const UniValue& test = tests[i]; std::string strTest = test.write(); if (test.size() == 1) { diff --git a/src/test/evo_deterministicmns_tests.cpp b/src/test/evo_deterministicmns_tests.cpp index c67d4dd40725..8d27e75aec0f 100644 --- a/src/test/evo_deterministicmns_tests.cpp +++ b/src/test/evo_deterministicmns_tests.cpp @@ -886,8 +886,9 @@ static void SmlCache(TestChainSetup& setup) CDeterministicMNList mn_list_1(emptyList); BOOST_CHECK(sml_empty == mn_list_1.to_sml()); + CDeterministicMNList mn_list_2; // Assigning list should return the same cached object - CDeterministicMNList mn_list_2 = emptyList; + mn_list_2 = emptyList; BOOST_CHECK(sml_empty == mn_list_2.to_sml()); auto dmn = create_mock_mn(1); diff --git a/src/test/evo_trivialvalidation.cpp b/src/test/evo_trivialvalidation.cpp index 55312417d9ef..df076eadca21 100644 --- a/src/test/evo_trivialvalidation.cpp +++ b/src/test/evo_trivialvalidation.cpp @@ -48,7 +48,7 @@ void 
trivialvalidation_runner(const CChain& active_chain, const std::string& jso const UniValue vectors = read_json(json); for (size_t idx = 1; idx < vectors.size(); idx++) { - UniValue test = vectors[idx]; + const UniValue& test = vectors[idx]; uint256 txHash; std::string txType; CMutableTransaction tx; diff --git a/src/test/key_io_tests.cpp b/src/test/key_io_tests.cpp index d01e4cb6fbe7..827c6cc71e18 100644 --- a/src/test/key_io_tests.cpp +++ b/src/test/key_io_tests.cpp @@ -28,7 +28,7 @@ BOOST_AUTO_TEST_CASE(key_io_valid_parse) SelectParams(CBaseChainParams::MAIN); for (unsigned int idx = 0; idx < tests.size(); idx++) { - UniValue test = tests[idx]; + const UniValue& test = tests[idx]; std::string strTest = test.write(); if (test.size() < 3) { // Allow for extra stuff (useful for comments) BOOST_ERROR("Bad test: " << strTest); @@ -86,7 +86,7 @@ BOOST_AUTO_TEST_CASE(key_io_valid_gen) UniValue tests = read_json(std::string(json_tests::key_io_valid, json_tests::key_io_valid + sizeof(json_tests::key_io_valid))); for (unsigned int idx = 0; idx < tests.size(); idx++) { - UniValue test = tests[idx]; + const UniValue& test = tests[idx]; std::string strTest = test.write(); if (test.size() < 3) // Allow for extra stuff (useful for comments) { @@ -126,7 +126,7 @@ BOOST_AUTO_TEST_CASE(key_io_invalid) CTxDestination destination; for (unsigned int idx = 0; idx < tests.size(); idx++) { - UniValue test = tests[idx]; + const UniValue& test = tests[idx]; std::string strTest = test.write(); if (test.size() < 1) // Allow for extra stuff (useful for comments) { diff --git a/src/test/script_tests.cpp b/src/test/script_tests.cpp index 06968ae7c5fd..100f5300724c 100644 --- a/src/test/script_tests.cpp +++ b/src/test/script_tests.cpp @@ -881,7 +881,7 @@ BOOST_AUTO_TEST_CASE(script_json_test) UniValue tests = read_json(std::string(json_tests::script_tests, json_tests::script_tests + sizeof(json_tests::script_tests))); for (unsigned int idx = 0; idx < tests.size(); idx++) { - UniValue test = 
tests[idx]; + const UniValue& test = tests[idx]; std::string strTest = test.write(); if (test.size() < 4) // Allow size > 3; extra stuff ignored (useful for comments) { diff --git a/src/test/sighash_tests.cpp b/src/test/sighash_tests.cpp index 2a62c16c90d4..f1d8e6d0ad90 100644 --- a/src/test/sighash_tests.cpp +++ b/src/test/sighash_tests.cpp @@ -161,7 +161,7 @@ BOOST_AUTO_TEST_CASE(sighash_from_data) UniValue tests = read_json(std::string(json_tests::sighash, json_tests::sighash + sizeof(json_tests::sighash))); for (unsigned int idx = 0; idx < tests.size(); idx++) { - UniValue test = tests[idx]; + const UniValue& test = tests[idx]; std::string strTest = test.write(); if (test.size() < 1) // Allow for extra stuff (useful for comments) { diff --git a/src/test/transaction_tests.cpp b/src/test/transaction_tests.cpp index 5a61438d9dd0..6e117ad5117b 100644 --- a/src/test/transaction_tests.cpp +++ b/src/test/transaction_tests.cpp @@ -153,7 +153,7 @@ BOOST_AUTO_TEST_CASE(tx_valid) UniValue tests = read_json(std::string(json_tests::tx_valid, json_tests::tx_valid + sizeof(json_tests::tx_valid))); for (unsigned int idx = 0; idx < tests.size(); idx++) { - UniValue test = tests[idx]; + const UniValue& test = tests[idx]; std::string strTest = test.write(); if (test[0].isArray()) { @@ -172,7 +172,7 @@ BOOST_AUTO_TEST_CASE(tx_valid) fValid = false; break; } - UniValue vinput = input.get_array(); + const UniValue& vinput = input.get_array(); if (vinput.size() != 3) { fValid = false; @@ -237,7 +237,7 @@ BOOST_AUTO_TEST_CASE(tx_invalid) UniValue tests = read_json(std::string(json_tests::tx_invalid, json_tests::tx_invalid + sizeof(json_tests::tx_invalid))); for (unsigned int idx = 0; idx < tests.size(); idx++) { - UniValue test = tests[idx]; + const UniValue& test = tests[idx]; std::string strTest = test.write(); if (test[0].isArray()) { @@ -256,7 +256,7 @@ BOOST_AUTO_TEST_CASE(tx_invalid) fValid = false; break; } - UniValue vinput = input.get_array(); + const UniValue& vinput = 
input.get_array(); if (vinput.size() != 3) { fValid = false; diff --git a/src/test/validation_block_tests.cpp b/src/test/validation_block_tests.cpp index 99a6109fee68..9e83bdf6d320 100644 --- a/src/test/validation_block_tests.cpp +++ b/src/test/validation_block_tests.cpp @@ -182,7 +182,7 @@ BOOST_AUTO_TEST_CASE(processnewblock_signals_ordering) } // to make sure that eventually we process the full chain - do it here - for (auto block : blocks) { + for (const auto& block : blocks) { if (block->vtx.size() == 1) { bool processed = Assert(m_node.chainman)->ProcessNewBlock(Params(), block, true, &ignored); assert(processed); diff --git a/src/wallet/bdb.cpp b/src/wallet/bdb.cpp index 65fb838f655c..c5c3870fd6fa 100644 --- a/src/wallet/bdb.cpp +++ b/src/wallet/bdb.cpp @@ -443,7 +443,7 @@ void BerkeleyEnvironment::ReloadDbEnv() }); std::vector filenames; - for (auto it : m_databases) { + for (const auto& it : m_databases) { filenames.push_back(it.first); } // Close the individual Db's diff --git a/src/wallet/receive.cpp b/src/wallet/receive.cpp index a5d0bcc59988..43bf3b633529 100644 --- a/src/wallet/receive.cpp +++ b/src/wallet/receive.cpp @@ -428,7 +428,7 @@ std::set< std::set > GetAddressGroupings(const CWallet& wallet) std::set< std::set* > uniqueGroupings; // a set of pointers to groups of addresses std::map< CTxDestination, std::set* > setmap; // map addresses to the unique group containing it - for (std::set _grouping : groupings) + for (const std::set& _grouping : groupings) { // make a set of all the groups hit by this new group std::set< std::set* > hits; diff --git a/src/wallet/scriptpubkeyman.cpp b/src/wallet/scriptpubkeyman.cpp index 947efc82715c..f08852e3207f 100644 --- a/src/wallet/scriptpubkeyman.cpp +++ b/src/wallet/scriptpubkeyman.cpp @@ -1926,7 +1926,7 @@ std::map DescriptorScriptPubKeyMan::GetKeys() const AssertLockHeld(cs_desc_man); if (m_storage.HasEncryptionKeys() && !m_storage.IsLocked(true)) { KeyMap keys; - for (auto key_pair : m_map_crypted_keys) 
{ + for (const auto& key_pair : m_map_crypted_keys) { const CPubKey& pubkey = key_pair.second.first; const std::vector& crypted_secret = key_pair.second.second; CKey key; diff --git a/src/wallet/test/bip39_tests.cpp b/src/wallet/test/bip39_tests.cpp index e15d30f8fc98..6c6ee703bcb8 100644 --- a/src/wallet/test/bip39_tests.cpp +++ b/src/wallet/test/bip39_tests.cpp @@ -25,7 +25,7 @@ BOOST_AUTO_TEST_CASE(bip39_vectors) for (unsigned int i = 0; i < tests.size(); i++) { // printf("%d\n", i); - UniValue test = tests[i]; + const UniValue& test = tests[i]; std::string strTest = test.write(); if (test.size() < 4) // Allow for extra stuff (useful for comments) { diff --git a/src/wallet/walletdb.cpp b/src/wallet/walletdb.cpp index e2e4452d64da..a850b16d236c 100644 --- a/src/wallet/walletdb.cpp +++ b/src/wallet/walletdb.cpp @@ -884,14 +884,14 @@ DBErrors WalletBatch::LoadWallet(CWallet* pwallet) } // Set the descriptor caches - for (auto desc_cache_pair : wss.m_descriptor_caches) { + for (const auto& desc_cache_pair : wss.m_descriptor_caches) { auto spk_man = pwallet->GetScriptPubKeyMan(desc_cache_pair.first); assert(spk_man); ((DescriptorScriptPubKeyMan*)spk_man)->SetCache(desc_cache_pair.second); } // Set the descriptor keys - for (auto desc_key_pair : wss.m_descriptor_keys) { + for (const auto& desc_key_pair : wss.m_descriptor_keys) { auto spk_man = pwallet->GetScriptPubKeyMan(desc_key_pair.first.first); auto it = wss.mnemonics.find(desc_key_pair.first); if (it == wss.mnemonics.end()) { @@ -901,7 +901,7 @@ DBErrors WalletBatch::LoadWallet(CWallet* pwallet) } } - for (auto desc_key_pair : wss.m_descriptor_crypt_keys) { + for (const auto& desc_key_pair : wss.m_descriptor_crypt_keys) { auto spk_man = pwallet->GetScriptPubKeyMan(desc_key_pair.first.first); auto it = wss.crypted_mnemonics.find(desc_key_pair.first); if (it == wss.crypted_mnemonics.end()) { From 887d7b31d35bc2c5d97d3e87fe9ed007a8200813 Mon Sep 17 00:00:00 2001 From: fanquake Date: Tue, 26 Jul 2022 10:03:58 +0100 
Subject: [PATCH 577/656] Merge bitcoin/bitcoin#25701: fix comment spellings from the codespell lint 850b0850ccacc4e4f7e82ce2291a111132eae756 fix comment spellings from the codespell lint (Greg Weber) Pull request description: test/lint/all-lint.py includes the codespell lint ACKs for top commit: aureleoules: ACK 850b0850ccacc4e4f7e82ce2291a111132eae756. Tree-SHA512: bf63690da2652886e705d6594903bab67ff0f35a0e5a5505f063827f5148ebce47681e541cbe0e52396baf1addb25d9fe50e5faa9176456f579a7cd2f1321c44 --- src/test/fuzz/txorphan.cpp | 2 +- src/wallet/test/wallet_tests.cpp | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/src/test/fuzz/txorphan.cpp b/src/test/fuzz/txorphan.cpp index 25244771dfa9..d89c06ba0a14 100644 --- a/src/test/fuzz/txorphan.cpp +++ b/src/test/fuzz/txorphan.cpp @@ -69,7 +69,7 @@ FUZZ_TARGET(txorphan, .init = initialize_orphanage) for (uint32_t i = 0; i < num_out; i++) { tx_mut.vout.emplace_back(CAmount{0}, CScript{}); } - // restore previously poped outpoints + // restore previously popped outpoints for (auto& in : tx_mut.vin) { outpoints.push_back(in.prevout); } diff --git a/src/wallet/test/wallet_tests.cpp b/src/wallet/test/wallet_tests.cpp index a6dfad77ce47..f219766a87bb 100644 --- a/src/wallet/test/wallet_tests.cpp +++ b/src/wallet/test/wallet_tests.cpp @@ -795,7 +795,7 @@ BOOST_FIXTURE_TEST_CASE(CreateWallet, TestChain100Setup) promise.set_value(); SyncWithValidationInterfaceQueue(); // AddToWallet events for block_tx and mempool_tx events are counted a - // second time as the notificaiton queue is processed + // second time as the notification queue is processed BOOST_CHECK_EQUAL(addtx_count, 4); TestUnloadWallet(context, std::move(wallet)); From fdddfc1fb0db6a1092936972000fe83ec31f5f81 Mon Sep 17 00:00:00 2001 From: fanquake Date: Wed, 31 Aug 2022 08:32:25 +0100 Subject: [PATCH 578/656] Merge bitcoin/bitcoin#25872: Fix issues when calling std::move(const&) BACKPORT NOTE: Some missing changes for TRDescriptor and for mempool_args 
But they will be caugth by linter ---- fa875349e22f2f0f9c2c98ee991372d08ff90318 Fix iwyu (MacroFake) faad673716cfbad1e715f1bdf8ac00938a055aea Fix issues when calling std::move(const&) (MacroFake) Pull request description: Passing a symbol to `std::move` that is marked `const` is a no-op, which can be fixed in two ways: * Remove the `const`, or * Remove the `std::move` ACKs for top commit: ryanofsky: Code review ACK fa875349e22f2f0f9c2c98ee991372d08ff90318. Looks good. Good for univalue to support c++11 move optimizations Tree-SHA512: 3dc5cad55b93cfa311abedfb811f35fc1b7f30a1c68561f15942438916c7de25e179c364be11881e01f844f9c2ccd71a3be55967ad5abd2f35b10bb7a882edea --- ci/dash/lint-tidy.sh | 1 + src/.clang-tidy | 5 +++++ src/evo/specialtxman.cpp | 2 +- src/instantsend/instantsend.cpp | 2 +- src/llmq/blockprocessor.cpp | 2 +- src/llmq/snapshot.cpp | 5 ++--- src/rpc/blockchain.cpp | 2 +- src/script/descriptor.cpp | 4 ++-- src/test/fuzz/txorphan.cpp | 1 - src/univalue/include/univalue.h | 8 ++++---- src/univalue/lib/univalue.cpp | 20 ++++++++++---------- src/util/message.cpp | 1 - src/util/strencodings.cpp | 1 - src/util/string.cpp | 3 +-- src/util/string.h | 1 - src/util/threadinterrupt.h | 1 + src/wallet/spend.cpp | 2 +- 17 files changed, 31 insertions(+), 30 deletions(-) diff --git a/ci/dash/lint-tidy.sh b/ci/dash/lint-tidy.sh index b186650db63e..c4d9ab5ed431 100755 --- a/ci/dash/lint-tidy.sh +++ b/ci/dash/lint-tidy.sh @@ -44,6 +44,7 @@ iwyu_tool.py \ "src/util/moneystr.cpp" \ "src/util/serfloat.cpp" \ "src/util/spanparsing.cpp" \ + "src/util/string.cpp" \ "src/util/strencodings.cpp" \ "src/util/syserror.cpp" \ "src/util/url.cpp" \ diff --git a/src/.clang-tidy b/src/.clang-tidy index 42602b69749b..76978140ecd7 100644 --- a/src/.clang-tidy +++ b/src/.clang-tidy @@ -6,6 +6,7 @@ misc-unused-using-decls, modernize-use-default-member-init, modernize-use-nullptr, performance-for-range-copy, +performance-move-const-arg, performance-unnecessary-copy-initialization, 
readability-const-return-type, readability-redundant-declaration, @@ -17,7 +18,11 @@ bugprone-use-after-move, misc-unused-using-decls, modernize-use-default-member-init, modernize-use-nullptr, +performance-move-const-arg, performance-unnecessary-copy-initialization, readability-redundant-declaration, readability-redundant-string-init, ' +CheckOptions: + - key: performance-move-const-arg.CheckTriviallyCopyableMove + value: false diff --git a/src/evo/specialtxman.cpp b/src/evo/specialtxman.cpp index f5bd3f810117..0852f97ce607 100644 --- a/src/evo/specialtxman.cpp +++ b/src/evo/specialtxman.cpp @@ -503,7 +503,7 @@ bool CSpecialTxProcessor::RebuildListFromBlock(const CBlock& block, gsl::not_nul newList.UpdateMN(dmn.proTxHash, newState); }); - mnListRet = std::move(newList); + mnListRet = newList; return true; } diff --git a/src/instantsend/instantsend.cpp b/src/instantsend/instantsend.cpp index 68766bf0c71f..495eefd68119 100644 --- a/src/instantsend/instantsend.cpp +++ b/src/instantsend/instantsend.cpp @@ -101,7 +101,7 @@ instantsend::PendingState CInstantSendManager::FetchPendingLocks() std::vector removed; removed.reserve(std::min(maxCount, pendingInstantSendLocks.size())); - for (const auto& [islockHash, nodeid_islptr_pair] : pendingInstantSendLocks) { + for (auto& [islockHash, nodeid_islptr_pair] : pendingInstantSendLocks) { // Check if we've reached max count if (ret.m_pending_is.size() >= maxCount) { ret.m_pending_work = true; diff --git a/src/llmq/blockprocessor.cpp b/src/llmq/blockprocessor.cpp index 0baf23fb9abf..4fc57aba77de 100644 --- a/src/llmq/blockprocessor.cpp +++ b/src/llmq/blockprocessor.cpp @@ -441,7 +441,7 @@ bool CQuorumBlockProcessor::GetCommitmentsFromBlock(const CBlock& block, gsl::no for (const auto& tx : block.vtx) { if (tx->nType == TRANSACTION_QUORUM_COMMITMENT) { - const auto opt_qc = GetTxPayload(*tx); + auto opt_qc = GetTxPayload(*tx); if (!opt_qc) { // should not happen as it was verified before processing the block LogPrint(BCLog::LLMQ, 
"CQuorumBlockProcessor::%s height=%d GetTxPayload fails\n", __func__, pindex->nHeight); diff --git a/src/llmq/snapshot.cpp b/src/llmq/snapshot.cpp index 123ad4c90194..cf744f6c781a 100644 --- a/src/llmq/snapshot.cpp +++ b/src/llmq/snapshot.cpp @@ -209,17 +209,16 @@ bool BuildQuorumRotationInfo(CDeterministicMNManager& dmnman, CQuorumSnapshotMan response.quorumSnapshotAtHMinus4C = std::move(snapshotHMinus4C.value()); } - CSimplifiedMNListDiff mn4c; if (!BuildSimplifiedMNListDiff(dmnman, chainman, qblockman, qman, GetLastBaseBlockHash(baseBlockIndexes, pWorkBlockHMinus4CIndex, use_legacy_construction), - pWorkBlockHMinus4CIndex->GetBlockHash(), mn4c, errorRet)) { + pWorkBlockHMinus4CIndex->GetBlockHash(), response.mnListDiffAtHMinus4C, errorRet)) { + response.mnListDiffAtHMinus4C = {}; return false; } if (!use_legacy_construction) { baseBlockIndexes.push_back(pWorkBlockHMinus4CIndex); } - response.mnListDiffAtHMinus4C = std::move(mn4c); } else { response.extraShare = false; } diff --git a/src/rpc/blockchain.cpp b/src/rpc/blockchain.cpp index fbb893e637f2..8401e186c750 100644 --- a/src/rpc/blockchain.cpp +++ b/src/rpc/blockchain.cpp @@ -2527,7 +2527,7 @@ static RPCHelpMan scantxoutset() for (const UniValue& scanobject : request.params[1].get_array().getValues()) { FlatSigningProvider provider; auto scripts = EvalDescriptorStringOrObject(scanobject, provider); - for (const auto& script : scripts) { + for (CScript& script : scripts) { std::string inferred = InferDescriptor(script, provider)->ToString(); needles.emplace(script); descriptors.emplace(std::move(script), std::move(inferred)); diff --git a/src/script/descriptor.cpp b/src/script/descriptor.cpp index 1b84c6f580e8..49d14db91562 100644 --- a/src/script/descriptor.cpp +++ b/src/script/descriptor.cpp @@ -547,7 +547,7 @@ class DescriptorImpl : public Descriptor if (pos++) ret += ","; std::string tmp; if (!scriptarg->ToStringHelper(arg, tmp, type, cache)) return false; - ret += std::move(tmp); + ret += tmp; } return 
true; } @@ -571,7 +571,7 @@ class DescriptorImpl : public Descriptor tmp = pubkey->ToString(); break; } - ret += std::move(tmp); + ret += tmp; } std::string subscript; if (!ToStringSubScriptHelper(arg, subscript, type, cache)) return false; diff --git a/src/test/fuzz/txorphan.cpp b/src/test/fuzz/txorphan.cpp index d89c06ba0a14..1a279b6bb74c 100644 --- a/src/test/fuzz/txorphan.cpp +++ b/src/test/fuzz/txorphan.cpp @@ -19,7 +19,6 @@ #include #include -#include #include #include #include diff --git a/src/univalue/include/univalue.h b/src/univalue/include/univalue.h index 076fcfa43277..d895b26af988 100644 --- a/src/univalue/include/univalue.h +++ b/src/univalue/include/univalue.h @@ -77,14 +77,14 @@ class UniValue { bool isArray() const { return (typ == VARR); } bool isObject() const { return (typ == VOBJ); } - void push_back(const UniValue& val); + void push_back(UniValue val); void push_backV(const std::vector& vec); template void push_backV(It first, It last); - void __pushKV(const std::string& key, const UniValue& val); - void pushKV(const std::string& key, const UniValue& val); - void pushKVs(const UniValue& obj); + void __pushKV(std::string key, UniValue val); + void pushKV(std::string key, UniValue val); + void pushKVs(UniValue obj); std::string write(unsigned int prettyIndent = 0, unsigned int indentLevel = 0) const; diff --git a/src/univalue/lib/univalue.cpp b/src/univalue/lib/univalue.cpp index 4c8066767a94..29660ceedc46 100644 --- a/src/univalue/lib/univalue.cpp +++ b/src/univalue/lib/univalue.cpp @@ -101,11 +101,11 @@ void UniValue::setObject() typ = VOBJ; } -void UniValue::push_back(const UniValue& val_) +void UniValue::push_back(UniValue val) { checkType(VARR); - values.push_back(val_); + values.push_back(std::move(val)); } void UniValue::push_backV(const std::vector& vec) @@ -115,32 +115,32 @@ void UniValue::push_backV(const std::vector& vec) values.insert(values.end(), vec.begin(), vec.end()); } -void UniValue::__pushKV(const std::string& key, const 
UniValue& val_) +void UniValue::__pushKV(std::string key, UniValue val) { checkType(VOBJ); - keys.push_back(key); - values.push_back(val_); + keys.push_back(std::move(key)); + values.push_back(std::move(val)); } -void UniValue::pushKV(const std::string& key, const UniValue& val_) +void UniValue::pushKV(std::string key, UniValue val) { checkType(VOBJ); size_t idx; if (findKey(key, idx)) - values[idx] = val_; + values[idx] = std::move(val); else - __pushKV(key, val_); + __pushKV(std::move(key), std::move(val)); } -void UniValue::pushKVs(const UniValue& obj) +void UniValue::pushKVs(UniValue obj) { checkType(VOBJ); obj.checkType(VOBJ); for (size_t i = 0; i < obj.keys.size(); i++) - __pushKV(obj.keys[i], obj.values.at(i)); + __pushKV(std::move(obj.keys.at(i)), std::move(obj.values.at(i))); } void UniValue::getObjMap(std::map& kv) const diff --git a/src/util/message.cpp b/src/util/message.cpp index 685093bb12da..2a723b3bf208 100644 --- a/src/util/message.cpp +++ b/src/util/message.cpp @@ -8,7 +8,6 @@ #include #include #include