From 4ce5c161d91c008247cb14b41478d8dcdae4ea69 Mon Sep 17 00:00:00 2001 From: Tom Holz Date: Sun, 6 Aug 2023 08:08:27 -0700 Subject: [PATCH 1/2] Throttles requests according to API rate limits Dynamically throttles API requests according the the rate limit policies in the HTTP reply headers. --- acquisition.pro | 2 + ratelimits.json | 14 + resources.qrc | 3 + src/itemsmanagerworker.cpp | 118 +++-- src/itemsmanagerworker.h | 18 +- src/mainwindow.h | 5 +- src/ratelimit.cpp | 961 +++++++++++++++++++++++++++++++++++++ src/ratelimit.h | 112 +++++ 8 files changed, 1173 insertions(+), 60 deletions(-) create mode 100644 ratelimits.json create mode 100644 src/ratelimit.cpp create mode 100644 src/ratelimit.h diff --git a/acquisition.pro b/acquisition.pro index 744abe752..2fd32e5c8 100644 --- a/acquisition.pro +++ b/acquisition.pro @@ -58,6 +58,7 @@ SOURCES += \ src/modsfilter.cpp \ src/porting.cpp \ src/replytimeout.cpp \ + src/ratelimit.cpp \ src/search.cpp \ src/shop.cpp \ src/steamlogindialog.cpp \ @@ -105,6 +106,7 @@ HEADERS += \ src/modsfilter.h \ src/porting.h \ src/rapidjson_util.h \ + src/ratelimit.h \ src/replytimeout.h \ src/search.h \ src/selfdestructingreply.h \ diff --git a/ratelimits.json b/ratelimits.json new file mode 100644 index 000000000..f63029d31 --- /dev/null +++ b/ratelimits.json @@ -0,0 +1,14 @@ +[ +{ + "policy-name" : "backend-character-request-limit", + "policy-endpoints" : [ + "/character-window/get-characters", + "/character-window/get-passive-skills"] +}, +{ + "policy-name" : "backend-item-request-limit", + "policy-endpoints" : [ + "/character-window/get-items", + "/character-window/get-stash-items"] +} +] \ No newline at end of file diff --git a/resources.qrc b/resources.qrc index d70bf2f83..4071f4031 100644 --- a/resources.qrc +++ b/resources.qrc @@ -1,4 +1,7 @@ + + ratelimits.json + assets/Socket_I.png assets/Socket_D.png diff --git a/src/itemsmanagerworker.cpp b/src/itemsmanagerworker.cpp index cbf87e65f..e08cdc0cb 100644 --- 
a/src/itemsmanagerworker.cpp +++ b/src/itemsmanagerworker.cpp @@ -42,6 +42,7 @@ #include "filesystem.h" #include "modlist.h" #include "network_info.h" +#include "ratelimit.h" const char *kStashItemsUrl = "https://www.pathofexile.com/character-window/get-stash-items"; const char *kCharacterItemsUrl = "https://www.pathofexile.com/character-window/get-items"; @@ -93,12 +94,14 @@ void ItemsManagerWorker::Init(){ updating_ = true; QNetworkRequest PoE_item_classes_request = QNetworkRequest(QUrl(QString(kRePoE_item_classes))); - QNetworkReply *PoE_item_classes_reply = network_manager_.get(PoE_item_classes_request); - connect(PoE_item_classes_reply, &QNetworkReply::finished, this, &ItemsManagerWorker::OnItemClassesReceived); + RateLimit::Init(&network_manager_, this); + RateLimit::Submit(PoE_item_classes_request, + [=](QNetworkReply* reply) { + OnItemClassesReceived(reply); + }); } -void ItemsManagerWorker::OnItemClassesReceived(){ - QNetworkReply *reply = qobject_cast(QObject::sender()); +void ItemsManagerWorker::OnItemClassesReceived(QNetworkReply *reply){ if (reply->error()) { QLOG_ERROR() << "Couldn't fetch RePoE Item Classes: " << reply->url().toDisplayString() @@ -109,12 +112,13 @@ void ItemsManagerWorker::OnItemClassesReceived(){ } QNetworkRequest PoE_item_base_types_request = QNetworkRequest(QUrl(QString(kRePoE_item_base_types))); - QNetworkReply *PoE_item_base_types_reply = network_manager_.get(PoE_item_base_types_request); - connect(PoE_item_base_types_reply, &QNetworkReply::finished, this, &ItemsManagerWorker::OnItemBaseTypesReceived); + RateLimit::Submit(PoE_item_base_types_request, + [=](QNetworkReply* reply) { + OnItemBaseTypesReceived(reply); + }); } -void ItemsManagerWorker::OnItemBaseTypesReceived(){ - QNetworkReply *reply = qobject_cast(QObject::sender()); +void ItemsManagerWorker::OnItemBaseTypesReceived(QNetworkReply* reply){ if (reply->error()) { QLOG_ERROR() << "Couldn't fetch RePoE Item Base Types: " << reply->url().toDisplayString() @@ -218,12 
+222,13 @@ void ItemsManagerWorker::UpdateModList(){ modsUpdating_ = true; QNetworkRequest PoE_stat_translations_request = QNetworkRequest(QUrl(QString(kRePoE_stat_translations))); - QNetworkReply *PoE_stats_reply = network_manager_.get(PoE_stat_translations_request); - connect(PoE_stats_reply, &QNetworkReply::finished, this, &ItemsManagerWorker::OnStatTranslationsReceived); + RateLimit::Submit(PoE_stat_translations_request, + [=](QNetworkReply* reply) { + OnStatTranslationsReceived(reply); + }); } -void ItemsManagerWorker::OnStatTranslationsReceived(){ - QNetworkReply *reply = qobject_cast(QObject::sender()); +void ItemsManagerWorker::OnStatTranslationsReceived(QNetworkReply* reply){ if (reply->error()) { QLOG_ERROR() << "Couldn't fetch RePoE Stat Translations: " << reply->url().toDisplayString() @@ -316,12 +321,13 @@ void ItemsManagerWorker::Update(TabSelection::Type type, const std::vector(QObject::sender()); +void ItemsManagerWorker::OnMainPageReceived(QNetworkReply* reply) { if (reply->error()) { QLOG_WARN() << "Couldn't fetch main page: " << reply->url().toDisplayString() << " due to error: " << reply->errorString(); @@ -337,14 +343,15 @@ void ItemsManagerWorker::OnMainPageReceived() { // now get character list QNetworkRequest characters_request = Request(QUrl(kGetCharactersUrl), ItemLocation(), TabCache::Refresh); characters_request.setHeader(QNetworkRequest::KnownHeaders::UserAgentHeader, USER_AGENT); - QNetworkReply *characters = network_manager_.get(characters_request); - connect(characters, &QNetworkReply::finished, this, &ItemsManagerWorker::OnCharacterListReceived); + RateLimit::Submit(characters_request, + [=](QNetworkReply* reply) { + OnCharacterListReceived(reply); + }); reply->deleteLater(); } -void ItemsManagerWorker::OnCharacterListReceived() { - QNetworkReply *reply = qobject_cast(QObject::sender()); +void ItemsManagerWorker::OnCharacterListReceived(QNetworkReply* reply) { QByteArray bytes = reply->readAll(); rapidjson::Document doc; 
doc.Parse(bytes.constData()); @@ -437,8 +444,13 @@ void ItemsManagerWorker::OnCharacterListReceived() { } } - QNetworkReply *first_tab = network_manager_.get(MakeTabRequest(tabToReq.get_tab_id(), ItemLocation(), true, true)); - connect(first_tab, SIGNAL(finished()), this, SLOT(OnFirstTabReceived())); + QNetworkRequest tab_request = MakeTabRequest(tabToReq.get_tab_id(), ItemLocation(), true, true); + + RateLimit::Submit(tab_request, + [=](QNetworkReply* reply) { + OnFirstTabReceived(reply); + }); + reply->deleteLater(); } @@ -517,12 +529,16 @@ void ItemsManagerWorker::FetchItems(int limit) { QNetworkRequest fetch_request = request.network_request; fetch_request.setHeader(QNetworkRequest::KnownHeaders::UserAgentHeader, USER_AGENT); - QNetworkReply *fetched = network_manager_.get(fetch_request); - signal_mapper_->setMapping(fetched, request.id); - connect(fetched, SIGNAL(finished()), signal_mapper_, SLOT(map())); + int id = request.id; + ItemLocation location = request.location; + RateLimit::Submit(fetch_request, + [=](QNetworkReply* reply) { + OnTabReceived(reply, id, location); + }); + ItemsReply reply; - reply.network_reply = fetched; + reply.network_reply = nullptr; reply.request = request; replies_[request.id] = reply; @@ -534,8 +550,7 @@ void ItemsManagerWorker::FetchItems(int limit) { cached_requests_completed_ = 0; } -void ItemsManagerWorker::OnFirstTabReceived() { - QNetworkReply *reply = qobject_cast(QObject::sender()); +void ItemsManagerWorker::OnFirstTabReceived(QNetworkReply* reply) { QByteArray bytes = reply->readAll(); rapidjson::Document doc; doc.Parse(bytes.constData()); @@ -615,7 +630,6 @@ void ItemsManagerWorker::OnFirstTabReceived() { FetchItems(kThrottleRequests - 1); - connect(signal_mapper_, SIGNAL(mapped(int)), this, SLOT(OnTabReceived(int))); reply->deleteLater(); } @@ -639,25 +653,21 @@ void ItemsManagerWorker::ParseItems(rapidjson::Value *value_ptr, ItemLocation ba } } -void ItemsManagerWorker::OnTabReceived(int request_id) { - if 
(!replies_.count(request_id)) { - QLOG_WARN() << "Received a reply for request" << request_id << "that was not requested."; - return; - } - - ItemsReply reply = replies_[request_id]; +void ItemsManagerWorker::OnTabReceived(QNetworkReply* network_reply, int request_id, ItemLocation location) { - bool reply_from_cache = reply.network_reply->attribute(QNetworkRequest::SourceIsFromCacheAttribute).toBool(); + bool reply_from_cache = network_reply->attribute(QNetworkRequest::SourceIsFromCacheAttribute).toBool(); if (reply_from_cache) { - QLOG_DEBUG() << "Received a cached reply for" << reply.request.location.GetHeader().c_str(); + QLOG_DEBUG() << "Received a cached reply for" << location.GetHeader().c_str(); ++cached_requests_completed_; ++total_cached_; } else { - QLOG_DEBUG() << "Received a reply for" << reply.request.location.GetHeader().c_str(); + + QLOG_DEBUG() << "Received a reply for" << location.GetHeader().c_str(); } - QByteArray bytes = reply.network_reply->readAll(); + + QByteArray bytes = network_reply->readAll(); rapidjson::Document doc; doc.Parse(bytes.constData()); @@ -674,23 +684,27 @@ void ItemsManagerWorker::OnTabReceived(int request_id) { // We index expected tabs and their locations as part of the first fetch. It's possible for users // to move or rename tabs during the update which will result in the item data being out-of-sync with // expected index/tab name map. We need to detect this case and abort the update. - if (!cancel_update_ && !error && (reply.request.location.get_type() == ItemLocationType::STASH)) { + + if (!cancel_update_ && !error && (location.get_type() == ItemLocationType::STASH)) { if (!doc.HasMember("tabs") || doc["tabs"].Size() == 0) { QLOG_ERROR() << "Full tab information missing from stash tab fetch. Cancelling update. 
Full fetch URL: " - << reply.request.network_request.url().toDisplayString(); + << network_reply->request().url().toDisplayString(); + cancel_update_ = true; } else { std::string tabs_as_string = Util::RapidjsonSerialize(doc["tabs"]); auto tabs_signature_current = CreateTabsSignatureVector(tabs_as_string); - auto tab_id = reply.request.location.get_tab_id(); + + auto tab_id = location.get_tab_id(); if (tabs_signature_[tab_id] != tabs_signature_current[tab_id]) { if (reply_from_cache) { // Here we unexpectedly are seeing a cached document that is out-of-sync with current tab state // This is not fatal but unexpected as we shouldn't get here if everything else is done right. // If we do see, set 'error' condition which causes us to flush from catch and re-fetch from server. QLOG_WARN() << "Unexpected hit on stale cached tab. Flushing and re-fetching request: " - << reply.request.network_request.url().toDisplayString(); + << network_reply->request().url().toDisplayString(); + error = true; // Isn't really cached since we're erroring out and replaying so fix up stats total_cached_--; @@ -710,7 +724,8 @@ void ItemsManagerWorker::OnTabReceived(int request_id) { QLOG_ERROR() << "You renamed or re-ordered tabs in game while acquisition was in the middle of the update," << " aborting to prevent synchronization problems and pricing data loss. Mismatch reason(s) -> " - << reason.c_str() << ". For request: " << reply.request.network_request.url().toDisplayString(); + << reason.c_str() << ". 
For request: " << network_reply->request().url().toDisplayString(); + cancel_update_ = true; } } @@ -721,8 +736,8 @@ void ItemsManagerWorker::OnTabReceived(int request_id) { if (error) { // We can 'cache' error response document so make sure we remove it // before reque - tab_cache_->remove(reply.request.network_request.url()); - QueueRequest(reply.request.network_request, reply.request.location); + tab_cache_->remove(network_reply->request().url()); + QueueRequest(network_reply->request(), location); } ++requests_completed_; @@ -762,7 +777,8 @@ void ItemsManagerWorker::OnTabReceived(int request_id) { if (error) return; - ParseItems(&doc["items"], reply.request.location, doc.GetAllocator()); + + ParseItems(&doc["items"], location, doc.GetAllocator()); if ((total_completed_ == total_needed_) && !cancel_update_) { // It's possible that we receive character vs stash tabs out of order, or users @@ -814,13 +830,15 @@ void ItemsManagerWorker::OnTabReceived(int request_id) { PreserveSelectedCharacter(); } - reply.network_reply->deleteLater(); + + network_reply->deleteLater(); } void ItemsManagerWorker::PreserveSelectedCharacter() { if (selected_character_.empty()) return; - network_manager_.get(MakeCharacterRequest(selected_character_, ItemLocation())); + QNetworkRequest character_request = MakeCharacterRequest(selected_character_, ItemLocation()); + RateLimit::Submit(character_request, [](QNetworkReply*) {}); } diff --git a/src/itemsmanagerworker.h b/src/itemsmanagerworker.h index 6a8d178a6..af2d2cec3 100644 --- a/src/itemsmanagerworker.h +++ b/src/itemsmanagerworker.h @@ -36,8 +36,8 @@ class QTimer; class BuyoutManager; class TabCache; -const int kThrottleRequests = 45; -const int kThrottleSleep = 60; +const int kThrottleRequests = 10000; +const int kThrottleSleep = 1; const int kMaxCacheSize = (1000*1024*1024); // 1GB struct ItemsRequest { @@ -63,10 +63,10 @@ public slots: void ParseItemMods(); void Update(TabSelection::Type type, const std::vector &tab_names = 
std::vector()); public slots: - void OnMainPageReceived(); - void OnCharacterListReceived(); - void OnFirstTabReceived(); - void OnTabReceived(int index); + void OnMainPageReceived(QNetworkReply* reply); + void OnCharacterListReceived(QNetworkReply* reply); + void OnFirstTabReceived(QNetworkReply* reply); + void OnTabReceived(QNetworkReply* reply, int index, ItemLocation location); /* * Makes 45 requests at once, should be called every minute. * These values are approximated (GGG throttles requests) @@ -76,9 +76,9 @@ public slots: void PreserveSelectedCharacter(); void Init(); - void OnStatTranslationsReceived(); - void OnItemClassesReceived(); - void OnItemBaseTypesReceived(); + void OnStatTranslationsReceived(QNetworkReply* reply); + void OnItemClassesReceived(QNetworkReply* reply); + void OnItemBaseTypesReceived(QNetworkReply* reply); signals: void ItemsRefreshed(const Items &items, const std::vector &tabs, bool initial_refresh); void StatusUpdate(const CurrentStatusUpdate &status); diff --git a/src/mainwindow.h b/src/mainwindow.h index 2edc56ad9..4c974b730 100755 --- a/src/mainwindow.h +++ b/src/mainwindow.h @@ -71,12 +71,15 @@ enum class ProgramState { ShopSubmitting, ShopCompleted, UpdateCancelled, - ItemsRetrieved + ItemsRetrieved, + RateLimitPause, + RateLimitViolation }; struct CurrentStatusUpdate { ProgramState state; int progress{}, total{}, cached{}; + QString message; }; class MainWindow : public QMainWindow { diff --git a/src/ratelimit.cpp b/src/ratelimit.cpp new file mode 100644 index 000000000..5ac6f5501 --- /dev/null +++ b/src/ratelimit.cpp @@ -0,0 +1,961 @@ +/* + Copyright 2023 Tom Holz + + This file is part of Acquisition. + + Acquisition is free software: you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation, either version 3 of the License, or + (at your option) any later version. 
+ + Acquisition is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with Acquisition. If not, see . +*/ + +#include "ratelimit.h" + +#include +#include + +#include +#include +#include +#include +#include +#include +#include + +#include "rapidjson/document.h" +#include "rapidjson/error/en.h" + +#include "mainwindow.h" +#include "itemsmanagerworker.h" +#include "QsLog.h" + +using namespace RateLimit; + +//========================================================================================= +// INTERNALS +//========================================================================================= + +// Everything used internally by the rate limiter is defined in an anoynmous +// namespace to make it inaccesible to the rest of the application. This is +// to avoid bugs and accidental coupling. + +namespace { + + // This HTTP status code means there was a rate limit violation. + const int RATE_LIMIT_VIOLATION_STATUS = 429; + + // A delay added to requests to make sure we don't get a violation. + const int VIOLATION_AVOIDANCE_MSEC = 50; + + // When there is a violation, add this much time to how long we + // wait just to make sure we don't trigger another violation. 
+ const int EXTRA_RATE_VIOLATION_MSEC = 250; + + // Return the name of the policy from a network reply + QByteArray GetPolicy(QNetworkReply* const reply) { + assert(reply->hasRawHeader("X-Rate-Limit-Policy")); + return reply->rawHeader("X-Rate-Limit-Policy"); + }; + + // Return the name(s) of the rule(s) from a network reply + QByteArrayList GetRules(QNetworkReply* const reply) { + assert(reply->hasRawHeader("X-Rate-Limit-Rules")); + QByteArrayList names = reply->rawHeader("X-Rate-Limit-Rules").split(','); + assert(names.size() > 0); + return names; + } + + // Return a list of one or more items that define a rule's limits. + QByteArrayList GetLimits(QNetworkReply* const reply, const QByteArray& rule) { + const QByteArray header = "X-Rate-Limit-" + rule; + assert(reply->hasRawHeader(header)); + QByteArrayList things = reply->rawHeader(header).split(','); + assert(things.size() > 0); + return things; + } + + // Return a list of one or more items that define a rule's current state. + QByteArrayList GetStates(QNetworkReply* const reply, const QByteArray& rule) { + const QByteArray header = "X-Rate-Limit-" + rule + "-State"; + assert(reply->hasRawHeader(header)); + QByteArrayList things = reply->rawHeader(header).split(','); + assert(things.size() > 0); + return things; + } + + // Return the date from the HTTP reply header. + QDateTime GetDate(QNetworkReply* const reply) { + assert(reply->hasRawHeader("Date")); + QString timestamp = QString(reply->rawHeader("Date")); + if (timestamp.endsWith("GMT")) { + // It was necessary to replace GMT with UTC for QDateTime::fromString() to + // correctly parse the timestamp. There is probably a better way to handle this, + // because the current implementation *might* break if the responses start + // including timezone information in a slightly different format, like "GMT-0". 
+ timestamp.chop(3); + timestamp.append("UTC"); + }; + const QDateTime date = QDateTime::fromString(timestamp, "ddd, dd MMM yyyy hh:mm:ss t"); + assert(date.isValid()); + return date; + } + + // Return the "endpoint" for a given URL. Since this code assumes the only requests + // we care about are those to pathofexile.com, the endpoint is effectively defined + // by the url's path. + // + // For urls which are not rate limited, the string "" is returned. + // This includes requests to the Path of Exile main page, which have a path of "/" + // and do not have a rate limit policy... yet? + QString GetEndpoint(const QUrl& url) { + const QString& host = url.host(); + const QString& path = url.path(); + if (((host == "www.pathofexile.com") || (host == "pathofexile.com")) && (path != "/")) { + return path; + } else { + return ""; + }; + } + + //------------------------------------------------------------------------------ + // Next, declarations for the classes that represent a rate-limit policy + //------------------------------------------------------------------------------ + // + // Each API response has a rate-limit policy that applies to it. + // Those responses are present in the HTTP reply headers. Here's + // how they are concieved, briefly: + // + // Every endpoint only has one applicable policy. + // Different endpoints may share the same rate limit policy. + // + // A policy has a name. + // A policy has one or more rules. + // A policy applies to one or more endpoints. + // + // Each rule has a name. + // Each rule has one or more limitation items. + // + // Each limitation item defines one set of limits + // Each limitation item comes with information on the state of that limitation. + // + // For any request against a rate-limited endpoint, only one policy applies, but + // all of limitations for each item of every rule within that policy are checked. 
+ + struct RateLimitElement { + RateLimitElement(int hits_, int period_, int restriction_); + int hits; + int period; + int restriction; + }; + + // Both RateLimitItem and RateLimitRule have default constructors because + // they are the two policy-related classes where it's useful to preallocate + // arrays of known size. + // + // Specifically, this is used by RateLimit::Init(), by the RateLimitPolicy + // constructor which parses a network reply. + // + // I avoided using default arguments because I find them confusing and + // more likely to result in unintended consequences. + + struct RateLimitItem { + RateLimitItem(); + RateLimitItem(int hits, int period, int restriction); + RateLimitElement limit; + RateLimitElement state; + }; + + struct RateLimitRule { + RateLimitRule(); + RateLimitRule(QString name_, std::vector items_); + QString name; + std::vector items; + }; + + struct RateLimitPolicy { + RateLimitPolicy(QString name_, QStringList endpoints_); + RateLimitPolicy(QNetworkReply* const reply); + QString name; + QDateTime timestamp; + QStringList endpoints; + std::vector rules; + }; + + // The ACTIVE_WAITING state is used to update the status bar. The others aren't used yet. + enum class RequestState {CREATED, QUEUED, ACTIVE, ACTIVE_WAITING, SENT, REPLIED, DISPATCHED, FINISHED}; + + struct RateLimitedRequest { + + // Construct a new rate-limited request. + RateLimitedRequest(const QNetworkRequest request, const Callback callback); + + // The state of the request. + RequestState state; + + // Unique identified for each request, even through different requests can be + // routed to different policy managers based on different endpoints. + const unsigned long id; + + // A copy of this request's API endpoint, if any. + const QString endpoint; + + // A copy of the network request that's going to be sent. + const QNetworkRequest network_request; + + // A pointer where the reply can be found after this request is sent. 
QNetworkReply* network_reply; + + // The function to be called when a non-rate-limit reply is received. + const Callback worker_callback; + + // The time a reply was made based on the reply's HTTP Date header. + QDateTime reply_time; + + // The HTTP status of the reply. + int reply_status; + + private: + // Total number of requests that have ever been constructed. + static unsigned long request_count; + }; + + //------------------------------------------------------------------------------ + // Each rate-limit policy has its own manager, defined here + //------------------------------------------------------------------------------ + + class RateLimitManager { + //------------------------------------------------------------------------- + // RateLimitManager - instance members and methods + //------------------------------------------------------------------------- + public: + // Construct a rate limit manager with the specified policy. The constructor must be + // public because it's called by std::make_unique(). + RateLimitManager(std::unique_ptr); + + private: + // Each rate limit manager has a unique id. This was mainly used in + // initial debugging because I am new to unique_ptr and very inexperienced + // with C++ concepts like memory management and reference copying. + const unsigned long id; + + // Keeps track of how many requests this policy manager has sent. + unsigned long requests_sent; + + // Returns true if this policy applies to the endpoint. + bool IsApplicable(const QString& endpoint); + + // Move a request into this manager's queue. + void QueueRequest(std::unique_ptr request); + + // This is called whenever the policy is updated, which happens either + // at construction inside Init() or when a QNetworkReply with a + // X-Rate-Limit-Policy header is received. + void OnPolicyUpdate(); + + // This sends the currently active request.
The + // active request is stored as a member variable so that it doesn't have + // to be passed around in lambda functors, which is tough to do with + // unique pointers, because they are fussy about being copied. + void SendActiveRequest(); + + // This is called when a reply has been received. Like the unique pointer + // to the active request, the network reply is stored as a member. + void ReceiveReply(); + + // Resend the active request after the delay specified in the active reply. + void ResendAfterViolation(); + + // Keep a unique_ptr to the policy associated with this manager, + // which will be updated whenever a reply with the X-Rate-Limit-Policy + // header is received. + std::unique_ptr policy; + + // Simple flag to check if the active_request is in use. It's possible + // to detect this by checking the status of the active request pointer, + // but using a separate boolean flag is clearer. + bool busy; + + // Keep track of whether a violation was detected. Currently this is + // only used to update the program status so the user knows whether + // a reply is being delayed due to a violation or not. + bool violation; + + // This timer sends the active request. + QTimer active_request_timer; + + // Unique pointer to the currently active request. + std::unique_ptr active_request; + + // Other requests in this list have been queued up. When an active request + // has been successfully sent and replied, the next request will be taken + // from this queue. + std::list> request_queue; + + // When a reply is received and the policy state has been updated or a + // rate violation has been detected, the next possible send time is calculated + // and stored here. + QDateTime next_send; + + // We use a history of the received reply times so that we can calculate + // when the next safe send time will be. This allows us to calculate the + // least delay necessary to stay compliant. 
+ // + // A circular buffer is used because it's fast to access, and the number + // of items we have to store only changes when a rate limit policy + // changes, which should not happen regularly, but we handle that case, too. + boost::circular_buffer known_reply_times; + + //------------------------------------------------------------------------- + // RateLimitManager - static members and methods + //------------------------------------------------------------------------- + public: + // Must be called before submitting any requests. + static void Init(QNetworkAccessManager* network_manager, ItemsManagerWorker* items_worker); + + // Submits a request to the appropriate rate limit manager. + static void Submit(QNetworkRequest network_request, Callback request_callback); + + private: + // When a request has been successfully replied-to, then it's ready to be + // dispatched. There's a special function for this because replies may come + // back in a different order than they were submitted. This function + // keeps track of which replies have been recieved and triggers callback + // in order, so the calling code doesn't have to worry about order. + static void Dispatch(std::unique_ptr request); + + // Used to send network requests. + static QNetworkAccessManager* network_manager; + + // Used to update the program state during throttling. + static ItemsManagerWorker* items_worker; + + // Flag to make sure Init() is only called once. + static bool initialized; + + // Total number of policy managers that have been constructed. + static unsigned long manager_count; + + // This timer updates the program status bar. + static QTimer status_updater; + + // Update the program status while we wait to send the next request + // so the user sees something happening. + static void UpdateStatus(); + + // One manager to handle requests that none of the existing policies + // apply to, including non-API requests. 
+ static std::unique_ptr default_manager; + + // One manager for each rate limit policy. + static std::vector> managers; + + // Move finished requests into their own list so they can be reordered by + // request id, which is how we guarantee that request callbacks will be + // dispatched in order. + static std::list> finished_requests; + + // Request id of the next request that should be sent back to the application + static unsigned long next_request_to_send; + }; +} + +RateLimitElement::RateLimitElement(int hits_, int period_, int restriction_) : + hits(hits_), + period(period_), + restriction(restriction_) {}; + +RateLimitItem::RateLimitItem() : + limit(0, 0, 0), + state(0, 0, 0) {}; + +RateLimitItem::RateLimitItem(int hits, int period, int restriction) : + limit(hits, period, restriction), + state(0, 0, 0) {}; + +RateLimitRule::RateLimitRule() {}; + +RateLimitRule::RateLimitRule(QString name_, std::vector items_) : + name(name_), + items(items_) {}; + +RateLimitPolicy::RateLimitPolicy(QString name_, QStringList endpoints_) : + name(name_), + endpoints(endpoints_), + timestamp(QDateTime::currentDateTime()) {}; + +RateLimitPolicy::RateLimitPolicy(QNetworkReply* const reply) : + name(GetPolicy(reply)), + timestamp(GetDate(reply)) +{ + + const QByteArrayList rule_names = GetRules(reply); + const int rule_count = rule_names.size(); + + // Allocate a new vector of rules for this policy. + rules = std::vector(rule_count); + + // Iterate over all the rule names expected. + for (int i = 0; i < rule_count; ++i) { + + // Get information from this rule. + const QByteArray& rule_name = rule_names[i]; + const QByteArrayList limits = GetLimits(reply, rule_name); + const QByteArrayList states = GetStates(reply, rule_name); + const int item_count = limits.size(); + assert(item_count == states.size()); + + // Set the rule name. + RateLimitRule& rule = rules[i]; + rule.name = rule_name; + + // Allocate a new vector of items for this rule. 
+ rule.items = std::vector(item_count); + + for (int j = 0; j < item_count; ++j) { + + const QByteArrayList limit_parts = limits[j].split(':'); + const QByteArrayList state_parts = states[j].split(':'); + assert(limit_parts.size() == 3); + assert(state_parts.size() == 3); + + RateLimitItem& item = rule.items[j]; + + item.limit.hits = limit_parts[0].toInt(); + item.limit.period = limit_parts[1].toInt(); + item.limit.restriction = limit_parts[2].toInt(); + + item.state.hits = state_parts[0].toInt(); + item.state.period = state_parts[1].toInt(); + item.state.restriction = state_parts[2].toInt(); + + assert(item.limit.period == item.state.period); + }; + }; +} + +//------------------------------------------------------------------------------ +// Local class that represents a single rate-limit request +//------------------------------------------------------------------------------ + +// Create a new rate-limited request. +RateLimitedRequest::RateLimitedRequest(const QNetworkRequest request, const RateLimit::Callback callback) : + id(++request_count), + state(RequestState::CREATED), + endpoint(GetEndpoint(request.url())), + network_request(request), + network_reply(nullptr), + reply_time(QDateTime()), + reply_status(-1), + worker_callback(callback) +{}; + +unsigned long RateLimitedRequest::request_count = 0; + +//------------------------------------------------------------------------------ +// Each rate-limit policy has it's own manager, defined here +//------------------------------------------------------------------------------ + +QNetworkAccessManager* RateLimitManager::network_manager = nullptr; +ItemsManagerWorker* RateLimitManager::items_worker = nullptr; +bool RateLimitManager::initialized = false; + +// In the beginning, there were zero rate limit managers. +unsigned long RateLimitManager::manager_count = 0; + +// Used to update the program status. +QTimer RateLimitManager::status_updater; + +// Default manager for non-rate-limited requests. 
+std::unique_ptr RateLimitManager::default_manager = nullptr; + +// All the rate limit managers that have been created so far. +std::vector> RateLimitManager::managers = {}; + +// All the requests that have finished and are ready for callback. +std::list> RateLimitManager::finished_requests = {}; + +// Request id of the next request we expect to dispatch. +unsigned long RateLimitManager::next_request_to_send = 1; + +// Initalized the rate limit manager. +void RateLimitManager::Init(QNetworkAccessManager* manager, ItemsManagerWorker* worker) { + + // Make sure we haven't been initialized before, e.g. by another thread + // or a different copy of the application or something. I'm not sure if + // this would ever happen, but it feels like a safe thing to check. + assert(initialized == false); + + // Save the pointers we need to send requests and update program status. + network_manager = manager; + items_worker = worker; + + // Setup the program status update timer. + status_updater.setSingleShot(false); + status_updater.setInterval(1000); + QObject::connect(&status_updater, &QTimer::timeout, [=]() { UpdateStatus(); }); + + // Construct the default manager for requests without a rate-limited endpoint + default_manager = std::make_unique( + std::make_unique( + QString(""), + QStringList({ "" }))); + + // Read ratelimits.json + //QDir jsondir(QCoreApplication::applicationDirPath()); + //QString jsonpath = jsondir.filePath("ratelimits.json"); + QFile jsonfile(":/ratelimits.json"); + if (!jsonfile.open(QIODevice::ReadOnly | QFile::Text)) { + QLOG_ERROR() << "Could not open ratelimits.json:" << jsonfile.errorString(); + throw std::runtime_error("Could not open ratelimits.json"); + }; + QByteArray jsoncontents = jsonfile.readAll(); + jsonfile.close(); + + // Parse ratelimits.json + rapidjson::Document document; + document.Parse(jsoncontents.data()); + if (document.HasParseError()) { + QLOG_ERROR() << "Malformed ratelimits.json file:" + << 
rapidjson::GetParseError_En(document.GetParseError()); + throw std::runtime_error("Could not parse ratelimits.json"); + }; + assert(document.IsArray()); + + // Build policy managers for each of the policies in the rate limits json document. + for (rapidjson::SizeType i = 0; i < document.Size(); ++i) { + + const rapidjson::Value& item = document[i]; + assert(item.HasMember("policy-name")); + assert(item.HasMember("policy-endpoints")); + + const rapidjson::Value& json_policy = item["policy-name"]; + const rapidjson::Value& json_endpoints = item["policy-endpoints"]; + assert(json_policy.IsString()); + assert(json_endpoints.IsArray()); + + QString policy_name = json_policy.GetString(); + QStringList policy_endpoints = {}; + for (rapidjson::SizeType j = 0; j < json_endpoints.Size(); ++j) { + const rapidjson::Value& json_endpoint = json_endpoints[j]; + assert(json_endpoint.IsString()); + QString endpoint = json_endpoints[j].GetString(); + policy_endpoints.push_back(endpoint); + }; + std::unique_ptr p = std::make_unique(policy_name, policy_endpoints); + std::unique_ptr pm = std::make_unique(std::move(p)); + managers.push_back(std::move(pm)); + }; + + initialized = true; +} + +void RateLimitManager::Submit(QNetworkRequest network_request, Callback request_callback) +{ + std::unique_ptr request = std::make_unique(network_request, request_callback); + + // Use the default manager if the endpoint is unknown. + if (request->endpoint == "") { + default_manager->QueueRequest(std::move(request)); + } else { + // Otherwise use the manager that knows this endpoint + for (auto& manager : managers) { + if (manager->IsApplicable(request->endpoint)) { + manager->QueueRequest(std::move(request)); + return; + }; + }; + // At this point, if none of the managers matched, then it's likely + // that policies have changed and ratelimits.json needs updating. 
+ throw std::runtime_error("no policy matches the request endpoint: " + request->endpoint.toStdString()); + }; +} + +void RateLimitManager::Dispatch(std::unique_ptr request) +{ + // First, insert this request into the queue of waiting + // items so that the queue is always ordered based on + // request id. + if (finished_requests.empty()) { + + // The queue is empty. + finished_requests.push_back(std::move(request)); + + } else if (request->id > finished_requests.back()->id) { + + // The request belongs at the end. + finished_requests.push_back(std::move(request)); + + } else { + + // Find where in the queue this request fits. + for (auto pos = finished_requests.begin(); pos != finished_requests.end(); ++pos) { + + // Order by request id. + if (request->id < pos->get()->id) { + finished_requests.insert(pos, std::move(request)); + break; + }; + }; + }; + + // Second, check to see if we can send one or more + // requests back. + while (finished_requests.empty() == false) { + + // Stop if the next request isn't the one we are waiting for. + if (next_request_to_send != finished_requests.front()->id) { + break; + }; + + // Take this request off the front of the queue. + std::unique_ptr request = std::move(finished_requests.front()); + finished_requests.pop_front(); + ++next_request_to_send; + + // Trigger the callback for this request now. + request->worker_callback(request->network_reply); + request->network_reply->deleteLater(); + request = nullptr; + }; +} + +// Create a new rate limit manager based on an existing policy. +RateLimitManager::RateLimitManager(std::unique_ptr policy_) : + id(++manager_count), + requests_sent(0), + policy(std::move(policy_)), + busy(false), + violation(false), + next_send(QDateTime::currentDateTime()) +{ + active_request_timer.setSingleShot(true); + QObject::connect(&active_request_timer, &QTimer::timeout, [=]() { SendActiveRequest(); }); +} + +// Update this policy manager when the policy has changed. 
This means updating +// the number of reply times we keep around if the policy's limits have changed, +// and figuring out the next time a request can be sent without violating this +// policy. +void RateLimitManager::OnPolicyUpdate() { + + // Need to know the current number of items in the reply + // history so we don't try to read past them. + const int current_history_size = known_reply_times.size(); + + // Determine how many hits we need to keep track of + // for this policy. + int new_history_capacity = 0; + + for (const auto& rule : policy->rules) { + + for (const auto& item : rule.items) { + + const int& current_hits = item.state.hits; + const int& maximum_hits = item.limit.hits; + const int& period_tested = item.limit.period; + + // Average time (in seconds) that requests can be submitted + // and meet the limitation imposed by this item. + const float t = float(item.limit.period) / float(item.limit.hits); + + // First, update the maximum history capacity if needed. + if (maximum_hits > new_history_capacity) { + new_history_capacity = maximum_hits; + }; + + // Second, check to see if we are at (or past) the current + // rate limit policy's maximum. If that's the case, we need + // to update the next time it will be safe to send a request. + // + // For example, if a limitation allows up to 10 requests in a + // 60 second period, then if there have already been 10 hits + // against that limitation, we cannot make another until the + // first of those 10 hits falls out of the 60 second period. + // + // This is why we store a history of reply times. + // + // However, it's possible we hit a rate limit policy on the + // very first request the application makes. This can happen + // if the application was just restarted after a prior rate + // limit violation. 
+ // + // Therefore, if there have been 10 hits in the last 60 seconds + // against the example policy, but the application only knows + // about 4 of them, then the best we can do is go back to + // the earliest of those 4 replies and add the restriction + // to that request's timestamp. + // + // This means the only time a real rate limit violation + // should occur is if the application's very first request + // is restricted. + if (current_hits >= maximum_hits) { + + // Determine how far back into the history we can look. + int n = current_hits; + if (n > current_history_size) { + n = current_history_size; + }; + + // Start with the timestamp of the earliest known + // reply relevant to this limitation. + QDateTime starting_time; + if (n < 1) { + starting_time = QDateTime::currentDateTime(); + } else { + starting_time = known_reply_times[n - 1]; + }; + + // Calculate the next time it will be safe to send a request. + const QDateTime next_safe_time = starting_time.addSecs(period_tested); + assert(next_safe_time.isValid()); + + // Update this manager's send time only if it's later + // than the manager thinks we need to wait. + if (next_safe_time > next_send) { + next_send = next_safe_time; + }; + }; + }; + }; + + // Grow the history capacity if needed. + if (known_reply_times.capacity() < new_history_capacity) { + QLOG_WARN() << policy->name + << "increasing history capacity" + << "from" << known_reply_times.capacity() + << "to" << new_history_capacity; + known_reply_times.set_capacity(new_history_capacity); + }; +} + +// Return true if this manager's policy applies to the endpoint. +bool RateLimitManager::IsApplicable(const QString& endpoint) { + return policy->endpoints.contains(endpoint); +} + +// If the rate limit manager is busy, the request will be queued. +// Otherwise, the request will be sent immediately, making the +// manager busy and causing subsequent requests to be queued. 
+void RateLimitManager::QueueRequest(std::unique_ptr request) { + if (busy) { + request->state = RequestState::QUEUED; + QLOG_TRACE() << policy->name << "queuing request" << request->id; + request_queue.push_back(std::move(request)); + } else { + busy = true; + active_request = std::move(request); + active_request->state = RequestState::ACTIVE; + SendActiveRequest(); + }; +} + +// Send the active request at the next time it will be safe to do so +// without violating the rate limit policy. +void RateLimitManager::SendActiveRequest() { + + assert(next_send.isValid()); + int msec_delay = QDateTime::currentDateTime().msecsTo(next_send); + int request_id = active_request->id; + if (msec_delay > 0) { + // Need to wait and rerun this function when it's safe to send. + QLOG_TRACE() << policy->name + << "waiting" << (msec_delay / 1000) + << "seconds to send active request" << active_request->id + << "at" << next_send.toLocalTime().toString(); + + // Update the request and start updating the program status with the timer. + active_request->state = RequestState::ACTIVE_WAITING; + active_request_timer.setInterval(msec_delay); + active_request_timer.start(); + + // Make sure the program status is being updated. + if (status_updater.isActive() == false) { + // The timer is running in another thread, so we have to use invokeMethod. + QMetaObject::invokeMethod(&status_updater, "start", Qt::QueuedConnection); + }; + + } else { + // Send immediately. 
+ QLOG_TRACE() << policy->name + << "sending active request" << active_request->id + << "to" << active_request->endpoint + << "via" << active_request->network_request.url().toString(); + assert(active_request->network_reply == nullptr); + QNetworkReply* reply = network_manager->get(active_request->network_request); + active_request->state = RequestState::SENT; + active_request->network_reply = reply; + QObject::connect(reply, &QNetworkReply::finished, [=]() { ReceiveReply(); }); + }; +} + +// Called when the active request's network_reply is finished. +void RateLimitManager::ReceiveReply() { + + QNetworkReply* reply = active_request->network_reply; + active_request->state = RequestState::REPLIED; + active_request->reply_time = GetDate(reply); + active_request->reply_status = reply->attribute(QNetworkRequest::HttpStatusCodeAttribute).toInt(); + + QLOG_TRACE() << policy->name + << "received reply for request" << active_request->id + << "with status code" << active_request->reply_status; + + if (reply->hasRawHeader("X-Rate-Limit-Policy")) { + + // Get the rate limit policy name from the header. + const QString reply_policy = reply->rawHeader("X-Rate-Limit-Policy"); + assert(reply_policy == policy->name); + + // This reply came with a policy update, so save the reply time. + known_reply_times.push_front(active_request->reply_time); + + // Update the new policy with all of the endpoints already + // associated with the existing policy, because that *should* + // never change. + QStringList current_endpoints = policy->endpoints; + if (current_endpoints.contains(active_request->endpoint) == false) { + current_endpoints.push_back(active_request->endpoint); + }; + policy = std::make_unique(reply); + policy->endpoints = current_endpoints; + + // Now examine the new policy and update ourselves accordingly. 
+ OnPolicyUpdate(); + }; + + // Check for errors before dispatching the request + if (active_request->reply_status == RATE_LIMIT_VIOLATION_STATUS) { + + // There was a rate limit violation. + ResendAfterViolation(); + + } else if (active_request->network_reply->error() != QNetworkReply::NoError) { + + // Some other HTTP error was encountered. + QLOG_ERROR() << policy->name + << "request" << active_request->id + << "reply status was " << active_request->reply_status + << "and error was" << reply->error(); + throw std::runtime_error("unhandled http error"); + + } else { + + // No errors or violations, so move this request to the dispatch queue. + violation = false; + Dispatch(std::move(active_request)); + if (request_queue.empty()) { + busy = false; + } else { + // Stay busy and activate the next request in the queue. + active_request = std::move(request_queue.front()); + request_queue.pop_front(); + SendActiveRequest(); + }; + }; +} + +// A violation was detected, so we need to wait to resend the active request. +void RateLimitManager::ResendAfterViolation() +{ + // Set the violation flag now. It will be unset when a reply is received that doesn't + // indicate a violation. + violation = true; + + // Determine how long we need to wait. + const int delay_sec = active_request->network_reply->rawHeader("Retry-After").toInt(); + const int delay_msec = (delay_sec * 1000) + EXTRA_RATE_VIOLATION_MSEC; + QLOG_ERROR() << policy->name + << "RATE LIMIT VIOLATION on request" << active_request->id << "of" << delay_sec << "seconds"; + + // Update the time it will be safe to send again. + next_send = active_request->reply_time.addMSecs(delay_msec); + assert(next_send.isValid()); + + // Reset this request before resending it, which means + // letting Qt know the associated reply can be deleted. 
+ active_request->network_reply->deleteLater(); + active_request->network_reply = nullptr; + active_request->reply_time = QDateTime(); + active_request->reply_status = -1; + SendActiveRequest(); +} + +// Update the program status based on how long we will have to wait for the next send. +void RateLimitManager::UpdateStatus() +{ + // Loop over all the managers and see which ones have an active request + // that is waiting to be sent. Of these, keep the details of the one + // that is the closest to being sent. + // + // Since multiple requests can be active and waiting at once, we just + // want to show how much time is left until whichever is going to be + // sent soonest. + bool found_one = false; + QDateTime time = QDateTime(); + QString policy_name = QString(); + bool violation = false; + + for (auto& manager : managers) { + // Only check managers that have a valid active request. + // Only check managers where the active request is waiting to be sent. + if ((manager->active_request != nullptr) && + (manager->active_request->state == RequestState::ACTIVE_WAITING)) { + // Save some details from this active request if it's the first + // waiting request we've encountered, or if it's going to be + // sent sooner than the last one we found. + if ((found_one == false) || (manager->next_send <= time)) { + time = manager->next_send; + policy_name = manager->policy->name; + violation = manager->violation; + found_one = true; + }; + }; + }; + + // Either update the status bar if we found an actively waiting request, + // or stop the timer. + if (found_one) { + // Update the program status bar. 
+ CurrentStatusUpdate status; + if (violation) { + status.state = ProgramState::RateLimitViolation; + status.message = "RATE LIMIT VIOLATION: retrying in %1 seconds at %2 due to %3"; + } else { + status.state = ProgramState::RateLimitPause; + status.message = "RATE LIMIT THROTTLE: waiting %1 seconds until %2 due to %3"; + }; + status.message = status.message + .arg(QDateTime::currentDateTime().secsTo(time)) + .arg(time.toLocalTime().toString()) + .arg(policy_name); + emit items_worker->StatusUpdate(status); + } else { + // No active requests appear to be waiting. Because the timer is running + // in another thread, we use invokeMethod here. + QMetaObject::invokeMethod(&status_updater, "stop", Qt::QueuedConnection); + } +} + +//========================================================================================= +// CONNECT PUBLIC FUNCTIONS TO INTERNAL IMPLEMENTATION +//========================================================================================= + +// Construct a new rate limiter +void RateLimit::Init(QNetworkAccessManager* manager, ItemsManagerWorker* worker) +{ + RateLimitManager::Init(manager, worker); +} + +// Submit a new request. +void RateLimit::Submit(QNetworkRequest network_request, Callback request_callback) +{ + RateLimitManager::Submit(network_request, request_callback); +} diff --git a/src/ratelimit.h b/src/ratelimit.h new file mode 100644 index 000000000..7c97bd1a4 --- /dev/null +++ b/src/ratelimit.h @@ -0,0 +1,112 @@ +/* + Copyright 2023 Tom Holz + + This file is part of Acquisition. + + Acquisition is free software: you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation, either version 3 of the License, or + (at your option) any later version. + + Acquisition is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with Acquisition. If not, see . +*/ + +#pragma once + +// This header defines the RateLimiter class that ItemsManagerWorker +// will use to limit how often API requests are sent to pathofexile.com. +// +// The source file ratelimit.cpp defines a number of companion classes +// that are used to implement the rate limiting behavior, but there +// is no need to expose any of that to the rest of the application, so +// it's all hidden behind a PIMPL design. +// +// See https://herbsutter.com/gotw/_100/ for details on PIMPL in C++11 + +#include +#include +#include + +class ItemsManagerWorker; + +//-------------------------------------------------------------------------- +// Introduction to GGG's API Rate Limits +//-------------------------------------------------------------------------- +// +// As of August 2023, GGG has implemented "new" rate limit policies that are +// incompatible with how acquisition used to throttle requests. There was no +// obvious way to fix the network request code used by the ItemsManagerWorker. +// Instead, what I've done is create a wrapper that accepts network requests +// from the ItemsManagerWorker and hide the implementation details. +// +// See https://www.pathofexile.com/developer/docs for more details on the rate +// limit information that is included in the HTTP headers of the network reply +// to every API request. +// +// Those rate limit policies can change at any time, from one network +// call to the next. For example, GGG might decide to temporarily tighten rate +// limitations around a league launch. +// +// This means that any solution that hard-codes things like delays will +// eventually break. +// +// What this means for us is that we have to check the reply to every +// network request for the details of rate-limitation policy that were applied. 
+// +// Each policy can have multiple rules that apply to it. Each rule +// can have multiple elements. Each element defines the specific number of +// API hits that are allowed, the period within which those hits are measured, +// and the timeout restriction if that item is violated. +// +// The RateLimiter class defined below takes care of all of that. +// +// Specifically, there are a number of helper functions and classes in +// ratelimit.cpp that are used to keep track of everything, limit network +// requests as little as possible, and do all of that transparently, so that +// ItemsManagerWorker or the rest of the application don't need to be changed. +// +// More specifically, the RateLimiter::Submit method takes a network request +// and a callback function. See the code in itemsmanagerworker.cpp for +// examples of how this is used. +// +// Submitted requests are sent out serially. No request is sent until a reply +// to the previous request is received. If a rate limit violation is detected, +// a request will be resent after the required delay. This allows the wrapper +// to monitor the exact state of all the rate-limit policies and inject delays +// as necessary to avoid violating rate limit policies. +// +// This approach also allows us to forgo hardcoding anything about the rate +// limits in the source code. Instead, everything about the rate limits is +// taken from the relevant HTTP headers in each network reply. +// +// WARNINGS: +// +// Rate limit violations may occur due to bugs, but they can also happen +// on application startup. For example, someone runs into a rate limit +// violation, then restarts the program to try again. If the restart +// happens before the restriction expires, the new application instance +// could hit the rate limit on its very first request. +// +// This code is "fragile". It uses asserts and throws exceptions when +// some errors are detected. 
As a result, this code won't call the +// requested callback function unless an error-freeresponse was received. +// +// This code may also break where there are other sources of rate-limited +// API requests. Capturing all the requests coming from one computer would +// require hooking into the network at a deeper level than a single +// application, but there's probably very few users with use case like this. + +namespace RateLimit +{ + typedef std::function Callback; + + void Init(QNetworkAccessManager* manager, ItemsManagerWorker* worker); + + void Submit(QNetworkRequest request, Callback callback); +} \ No newline at end of file From d1f3a8d117c434ec2a2b0ef3ecc1e7170f9642d9 Mon Sep 17 00:00:00 2001 From: Tom Holz Date: Sun, 6 Aug 2023 08:36:08 -0700 Subject: [PATCH 2/2] fixed program status update --- src/mainwindow.cpp | 12 ++++++++++++ src/ratelimit.cpp | 5 ++--- 2 files changed, 14 insertions(+), 3 deletions(-) diff --git a/src/mainwindow.cpp b/src/mainwindow.cpp index ab47cefc8..baa8904b2 100755 --- a/src/mainwindow.cpp +++ b/src/mainwindow.cpp @@ -405,6 +405,18 @@ void MainWindow::OnStatusUpdate(const CurrentStatusUpdate &status) { case ProgramState::ItemsRetrieved: title = QString("Parsing item mods in tabs, %1/%2").arg(status.progress).arg(status.total); break; + case ProgramState::RateLimitPause: + case ProgramState::RateLimitViolation: + { + title = status_bar_label_->text(); + const qsizetype ratelimit_loc = title.indexOf(" ... RATE LIMIT "); + if (ratelimit_loc >= 0) + { + title.truncate(ratelimit_loc); + }; + title.append(" ... 
RATE LIMIT " + status.message); + }; + break; default: title = "Unknown"; } diff --git a/src/ratelimit.cpp b/src/ratelimit.cpp index 5ac6f5501..221f7e31b 100644 --- a/src/ratelimit.cpp +++ b/src/ratelimit.cpp @@ -927,14 +927,13 @@ void RateLimitManager::UpdateStatus() CurrentStatusUpdate status; if (violation) { status.state = ProgramState::RateLimitViolation; - status.message = "RATE LIMIT VIOLATION: retrying in %1 seconds at %2 due to %3"; + status.message = "VIOLATION: retrying in %1 seconds due to %2"; } else { status.state = ProgramState::RateLimitPause; - status.message = "RATE LIMIT THROTTLE: waiting %1 seconds until %2 due to %3"; + status.message = "THROTTLE: waiting %1 seconds due to %2"; }; status.message = status.message .arg(QDateTime::currentDateTime().secsTo(time)) - .arg(time.toLocalTime().toString()) .arg(policy_name); emit items_worker->StatusUpdate(status); } else {