From 43ef5f2b689216df98a20925e28aded618b4dfcb Mon Sep 17 00:00:00 2001 From: patnorris Date: Wed, 29 Oct 2025 12:33:43 +0100 Subject: [PATCH 01/14] Add functions to move out cycles --- src/mAIner/src/Main.mo | 150 +++++++++++++++++++++++++++++++++++++++++ 1 file changed, 150 insertions(+) diff --git a/src/mAIner/src/Main.mo b/src/mAIner/src/Main.mo index 7f60011..c2b2188 100644 --- a/src/mAIner/src/Main.mo +++ b/src/mAIner/src/Main.mo @@ -270,6 +270,156 @@ actor class MainerAgentCtrlbCanister() = this { }; }; + // Share Service: flag to decide whether cycles should be sent to LLMs automatically as part of flow + stable var SEND_CYCLES_TO_LLM : Bool = true; + + public shared (msg) func toggleSendCyclesToLlmFlagAdmin() : async Types.AuthRecordResult { + if (Principal.isAnonymous(msg.caller)) { + return #Err(#Unauthorized); + }; + if (not Principal.isController(msg.caller)) { + return #Err(#Unauthorized); + }; + SEND_CYCLES_TO_LLM := not SEND_CYCLES_TO_LLM; + let authRecord = { auth = "You set the flag to " # debug_show(SEND_CYCLES_TO_LLM) }; + return #Ok(authRecord); + }; + + public query (msg) func getSendCyclesToLlmFlagAdmin() : async Types.FlagResult { + if (Principal.isAnonymous(msg.caller)) { + return #Err(#Unauthorized); + }; + if (not Principal.isController(msg.caller)) { + return #Err(#Unauthorized); + }; + + return #Ok({ flag = SEND_CYCLES_TO_LLM }); + }; + + // Share Service: Move cycles to operator's wallet (e.g. 
onicai) + stable let OPERATOR_WALLET_ADDRESS : Text = ""; + stable var cyclesTransactionsStorage : List.List = List.nil(); + + public query (msg) func getCyclesTransactionsAdmin() : async Types.CyclesTransactionsResult { + if (Principal.isAnonymous(msg.caller)) { + return #Err(#Unauthorized); + }; + if (not Principal.isController(msg.caller)) { + return #Err(#Unauthorized); + }; + return #Ok(List.toArray(cyclesTransactionsStorage)); + }; + + stable var MIN_CYCLES_BALANCE : Nat = 30 * Constants.CYCLES_TRILLION; + stable var CYCLES_AMOUNT_TO_OPERATOR : Nat = 10 * Constants.CYCLES_TRILLION; + + public shared (msg) func sendCyclesToOperatorAdmin() : async Types.AddCyclesResult { + if (Principal.isAnonymous(msg.caller)) { + return #Err(#Unauthorized); + }; + if (not Principal.isController(msg.caller)) { + return #Err(#Unauthorized); + }; + let currentCyclesBalance : Nat = Cycles.balance(); + try { + // Only move cycles if cycles balance is big enough + if (currentCyclesBalance - CYCLES_AMOUNT_TO_OPERATOR < MIN_CYCLES_BALANCE) { + D.print("Challenger: sendCyclesToGameStateCanister - requested cycles transaction but balance is not big enough: " # debug_show(currentCyclesBalance) # debug_show(msg)); + return #Err(#Unauthorized); + }; + + let gameStateCanisterActor = actor (GAME_STATE_CANISTER_ID) : Types.GameStateCanister_Actor; + D.print("Challenger: sendCyclesToGameStateCanister gameStateCanisterActor = " # Principal.toText(Principal.fromActor(gameStateCanisterActor))); + D.print("Challenger: sendCyclesToGameStateCanister - CYCLES_AMOUNT_TO_OPERATOR: " # debug_show(CYCLES_AMOUNT_TO_OPERATOR)); + Cycles.add(CYCLES_AMOUNT_TO_OPERATOR); + + D.print("Challenger: sendCyclesToGameStateCanister - calling gameStateCanisterActor.addCycles"); + let addCyclesResponse = await gameStateCanisterActor.addCycles(); + D.print("Challenger: sendCyclesToGameStateCanister - addCyclesResponse: " # debug_show(addCyclesResponse)); + switch (addCyclesResponse) { + case (#Err(error)) { + 
D.print("Challenger: sendCyclesToGameStateCanister - addCyclesResponse FailedOperation: " # debug_show(error)); + // Store the failed attempt + let transactionEntry : Types.CyclesTransaction = { + amountAdded : Nat = CYCLES_AMOUNT_TO_OPERATOR; + newOfficialCycleBalance : Nat = Cycles.balance(); + creationTimestamp : Nat64 = Nat64.fromNat(Int.abs(Time.now())); + sentBy : Principal = msg.caller; + succeeded : Bool = false; + previousCyclesBalance : Nat = currentCyclesBalance; + }; + cyclesTransactionsStorage := List.push(transactionEntry, cyclesTransactionsStorage); + return #Err(#FailedOperation); + }; + case (#Ok(addCyclesResult)) { + D.print("Challenger: sendCyclesToGameStateCanister - addCyclesResult: " # debug_show(addCyclesResult)); + // Store the transaction + let transactionEntry : Types.CyclesTransaction = { + amountAdded : Nat = CYCLES_AMOUNT_TO_OPERATOR; + newOfficialCycleBalance : Nat = Cycles.balance(); + creationTimestamp : Nat64 = Nat64.fromNat(Int.abs(Time.now())); + sentBy : Principal = msg.caller; + succeeded : Bool = true; + previousCyclesBalance : Nat = currentCyclesBalance; + }; + cyclesTransactionsStorage := List.push(transactionEntry, cyclesTransactionsStorage); + return addCyclesResponse; + }; + }; + } catch (e) { + D.print("Challenger: sendCyclesToGameStateCanister - Failed to send cycles to Game State: " # Error.message(e)); + // Store the failed attempt + let transactionEntry : Types.CyclesTransaction = { + amountAdded : Nat = CYCLES_AMOUNT_TO_OPERATOR; + newOfficialCycleBalance : Nat = Cycles.balance(); + creationTimestamp : Nat64 = Nat64.fromNat(Int.abs(Time.now())); + sentBy : Principal = msg.caller; + succeeded : Bool = false; + previousCyclesBalance : Nat = currentCyclesBalance; + }; + cyclesTransactionsStorage := List.push(transactionEntry, cyclesTransactionsStorage); + return #Err(#Other("Challenger: sendCyclesToGameStateCanister - Failed to send cycles to Game State: " # Error.message(e))); + }; + }; + + public shared (msg) func 
setMinCyclesBalanceAdmin(newCyclesBalance : Nat) : async Types.StatusCodeRecordResult { + if (not Principal.isController(msg.caller)) { + return #Err(#StatusCode(401)); + }; + if (newCyclesBalance < 20 * Constants.CYCLES_TRILLION) { + return #Err(#StatusCode(401)); + }; + MIN_CYCLES_BALANCE := newCyclesBalance; + return #Ok({ status_code = 200 }); + }; + + public query (msg) func getMinCyclesBalanceAdmin() : async Nat { + if (not Principal.isController(msg.caller)) { + return 0; + }; + + return MIN_CYCLES_BALANCE; + }; + + public shared (msg) func setCyclesToSendToOperatorAdmin(newValue : Nat) : async Types.StatusCodeRecordResult { + if (not Principal.isController(msg.caller)) { + return #Err(#StatusCode(401)); + }; + if (newValue > 100 * Constants.CYCLES_TRILLION) { + return #Err(#StatusCode(401)); + }; + CYCLES_AMOUNT_TO_OPERATOR := newValue; + return #Ok({ status_code = 200 }); + }; + + public query (msg) func getCyclesToSendToOperatorAdmin() : async Nat { + if (not Principal.isController(msg.caller)) { + return 0; + }; + + return CYCLES_AMOUNT_TO_OPERATOR; + }; + // -------------------------------------------------------------------------- // Orthogonal Persisted Data storage From a7b883d74801ee860d4452a18cb11348fa07f379 Mon Sep 17 00:00:00 2001 From: patnorris Date: Wed, 29 Oct 2025 15:29:19 +0100 Subject: [PATCH 02/14] Add check whether cycles should be sent to LLMs --- src/mAIner/src/Main.mo | 101 ++++++++++++++++++----------------------- 1 file changed, 44 insertions(+), 57 deletions(-) diff --git a/src/mAIner/src/Main.mo b/src/mAIner/src/Main.mo index c2b2188..83afe17 100644 --- a/src/mAIner/src/Main.mo +++ b/src/mAIner/src/Main.mo @@ -297,7 +297,7 @@ actor class MainerAgentCtrlbCanister() = this { }; // Share Service: Move cycles to operator's wallet (e.g. 
onicai) - stable let OPERATOR_WALLET_ADDRESS : Text = ""; + stable let OPERATOR_WALLET_ADDRESS : Text = "jh35u-eqaaa-aaaag-abf3a-cai"; stable var cyclesTransactionsStorage : List.List = List.nil(); public query (msg) func getCyclesTransactionsAdmin() : async Types.CyclesTransactionsResult { @@ -324,50 +324,35 @@ actor class MainerAgentCtrlbCanister() = this { try { // Only move cycles if cycles balance is big enough if (currentCyclesBalance - CYCLES_AMOUNT_TO_OPERATOR < MIN_CYCLES_BALANCE) { - D.print("Challenger: sendCyclesToGameStateCanister - requested cycles transaction but balance is not big enough: " # debug_show(currentCyclesBalance) # debug_show(msg)); + D.print("Challenger: sendCyclesToOperatorAdmin - requested cycles transaction but balance is not big enough: " # debug_show(currentCyclesBalance) # debug_show(msg)); return #Err(#Unauthorized); }; - let gameStateCanisterActor = actor (GAME_STATE_CANISTER_ID) : Types.GameStateCanister_Actor; - D.print("Challenger: sendCyclesToGameStateCanister gameStateCanisterActor = " # Principal.toText(Principal.fromActor(gameStateCanisterActor))); - D.print("Challenger: sendCyclesToGameStateCanister - CYCLES_AMOUNT_TO_OPERATOR: " # debug_show(CYCLES_AMOUNT_TO_OPERATOR)); + D.print("Challenger: sendCyclesToOperatorAdmin - OPERATOR_WALLET_ADDRESS: " # debug_show(OPERATOR_WALLET_ADDRESS)); + D.print("Challenger: sendCyclesToOperatorAdmin - CYCLES_AMOUNT_TO_OPERATOR: " # debug_show(CYCLES_AMOUNT_TO_OPERATOR)); Cycles.add(CYCLES_AMOUNT_TO_OPERATOR); - - D.print("Challenger: sendCyclesToGameStateCanister - calling gameStateCanisterActor.addCycles"); - let addCyclesResponse = await gameStateCanisterActor.addCycles(); - D.print("Challenger: sendCyclesToGameStateCanister - addCyclesResponse: " # debug_show(addCyclesResponse)); - switch (addCyclesResponse) { - case (#Err(error)) { - D.print("Challenger: sendCyclesToGameStateCanister - addCyclesResponse FailedOperation: " # debug_show(error)); - // Store the failed attempt - let 
transactionEntry : Types.CyclesTransaction = { - amountAdded : Nat = CYCLES_AMOUNT_TO_OPERATOR; - newOfficialCycleBalance : Nat = Cycles.balance(); - creationTimestamp : Nat64 = Nat64.fromNat(Int.abs(Time.now())); - sentBy : Principal = msg.caller; - succeeded : Bool = false; - previousCyclesBalance : Nat = currentCyclesBalance; - }; - cyclesTransactionsStorage := List.push(transactionEntry, cyclesTransactionsStorage); - return #Err(#FailedOperation); - }; - case (#Ok(addCyclesResult)) { - D.print("Challenger: sendCyclesToGameStateCanister - addCyclesResult: " # debug_show(addCyclesResult)); - // Store the transaction - let transactionEntry : Types.CyclesTransaction = { - amountAdded : Nat = CYCLES_AMOUNT_TO_OPERATOR; - newOfficialCycleBalance : Nat = Cycles.balance(); - creationTimestamp : Nat64 = Nat64.fromNat(Int.abs(Time.now())); - sentBy : Principal = msg.caller; - succeeded : Bool = true; - previousCyclesBalance : Nat = currentCyclesBalance; - }; - cyclesTransactionsStorage := List.push(transactionEntry, cyclesTransactionsStorage); - return addCyclesResponse; - }; + // Send via system API + D.print("Challenger: sendCyclesToOperatorAdmin - calling system API to send cycles"); + let deposit_cycles_args = { canister_id : Principal = Principal.fromText(OPERATOR_WALLET_ADDRESS); }; + let _ = ignore IC0.deposit_cycles(deposit_cycles_args); + D.print("Challenger: sendCyclesToOperatorAdmin - deposit_cycles successful"); + // Store the transaction + let transactionEntry : Types.CyclesTransaction = { + amountAdded : Nat = CYCLES_AMOUNT_TO_OPERATOR; + newOfficialCycleBalance : Nat = Cycles.balance(); + creationTimestamp : Nat64 = Nat64.fromNat(Int.abs(Time.now())); + sentBy : Principal = msg.caller; + succeeded : Bool = true; + previousCyclesBalance : Nat = currentCyclesBalance; }; + cyclesTransactionsStorage := List.push(transactionEntry, cyclesTransactionsStorage); + let addCyclesResponse : Types.AddCyclesRecord = { + added : Bool = true; + amount : Nat = 
CYCLES_AMOUNT_TO_OPERATOR; + }; + return #Ok(addCyclesResponse); } catch (e) { - D.print("Challenger: sendCyclesToGameStateCanister - Failed to send cycles to Game State: " # Error.message(e)); + D.print("Challenger: sendCyclesToOperatorAdmin - Failed to send cycles: " # Error.message(e)); // Store the failed attempt let transactionEntry : Types.CyclesTransaction = { amountAdded : Nat = CYCLES_AMOUNT_TO_OPERATOR; @@ -378,7 +363,7 @@ actor class MainerAgentCtrlbCanister() = this { previousCyclesBalance : Nat = currentCyclesBalance; }; cyclesTransactionsStorage := List.push(transactionEntry, cyclesTransactionsStorage); - return #Err(#Other("Challenger: sendCyclesToGameStateCanister - Failed to send cycles to Game State: " # Error.message(e))); + return #Err(#Other("Challenger: sendCyclesToOperatorAdmin - Failed to send cycles: " # Error.message(e))); }; }; @@ -1367,26 +1352,28 @@ actor class MainerAgentCtrlbCanister() = this { }; }; - // First send cycles to the LLM - var cyclesAdded : Nat = challengeQueueInput.cyclesGenerateResponseSsctrlSsllm; - if (MAINER_AGENT_CANISTER_TYPE == #Own) { - cyclesAdded := challengeQueueInput.cyclesGenerateResponseOwnctrlOwnllmHIGH; // TODO: adjust for mAIners with setting LOW or MEDIUM - }; - try { - D.print("mAIner (" # debug_show(MAINER_AGENT_CANISTER_TYPE) # "): respondToChallengeDoIt_ - calling Cycles.add for = " # debug_show(cyclesAdded) # " Cycles"); - Cycles.add(cyclesAdded); + // First send cycles to the LLM, if enabled + if (SEND_CYCLES_TO_LLM) { + var cyclesAdded : Nat = challengeQueueInput.cyclesGenerateResponseSsctrlSsllm; + if (MAINER_AGENT_CANISTER_TYPE == #Own) { + cyclesAdded := challengeQueueInput.cyclesGenerateResponseOwnctrlOwnllmHIGH; // TODO: adjust for mAIners with setting LOW or MEDIUM + }; + try { + D.print("mAIner (" # debug_show(MAINER_AGENT_CANISTER_TYPE) # "): respondToChallengeDoIt_ - calling Cycles.add for = " # debug_show(cyclesAdded) # " Cycles"); + Cycles.add(cyclesAdded); - D.print("mAIner (" # 
debug_show(MAINER_AGENT_CANISTER_TYPE) # "): respondToChallengeDoIt_ - calling IC0.deposit_cycles for LLM " # debug_show(llmCanisterPrincipal)); - let deposit_cycles_args = { canister_id : Principal = llmCanisterPrincipal; }; - let _ = await IC0.deposit_cycles(deposit_cycles_args); + D.print("mAIner (" # debug_show(MAINER_AGENT_CANISTER_TYPE) # "): respondToChallengeDoIt_ - calling IC0.deposit_cycles for LLM " # debug_show(llmCanisterPrincipal)); + let deposit_cycles_args = { canister_id : Principal = llmCanisterPrincipal; }; + let _ = await IC0.deposit_cycles(deposit_cycles_args); - D.print("mAIner (" # debug_show(MAINER_AGENT_CANISTER_TYPE) # "): respondToChallengeDoIt_ - Successfully deposited " # debug_show(cyclesAdded) # " cycles to LLM canister " # debug_show(llmCanisterPrincipal) ); - } catch (e) { - D.print("mAIner (" # debug_show(MAINER_AGENT_CANISTER_TYPE) # "): respondToChallengeDoIt_ - Failed to deposit " # debug_show(cyclesAdded) # " cycles to LLM canister " # debug_show(llmCanisterPrincipal)); - D.print("mAIner (" # debug_show(MAINER_AGENT_CANISTER_TYPE) # "): respondToChallengeDoIt_ - Failed to deposit error is" # Error.message(e)); + D.print("mAIner (" # debug_show(MAINER_AGENT_CANISTER_TYPE) # "): respondToChallengeDoIt_ - Successfully deposited " # debug_show(cyclesAdded) # " cycles to LLM canister " # debug_show(llmCanisterPrincipal) ); + } catch (e) { + D.print("mAIner (" # debug_show(MAINER_AGENT_CANISTER_TYPE) # "): respondToChallengeDoIt_ - Failed to deposit " # debug_show(cyclesAdded) # " cycles to LLM canister " # debug_show(llmCanisterPrincipal)); + D.print("mAIner (" # debug_show(MAINER_AGENT_CANISTER_TYPE) # "): respondToChallengeDoIt_ - Failed to deposit error is" # Error.message(e)); - return #Err(#FailedOperation); - }; + return #Err(#FailedOperation); + }; + }; let generationId : Text = await Utils.newRandomUniqueId(); From 2b7314352285816631b7acf8a3ac8dace461638d Mon Sep 17 00:00:00 2001 From: patnorris Date: Thu, 30 Oct 2025 
18:32:10 +0100 Subject: [PATCH 03/14] Fix typos --- src/mAIner/src/Main.mo | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/src/mAIner/src/Main.mo b/src/mAIner/src/Main.mo index 83afe17..ec80792 100644 --- a/src/mAIner/src/Main.mo +++ b/src/mAIner/src/Main.mo @@ -324,18 +324,18 @@ actor class MainerAgentCtrlbCanister() = this { try { // Only move cycles if cycles balance is big enough if (currentCyclesBalance - CYCLES_AMOUNT_TO_OPERATOR < MIN_CYCLES_BALANCE) { - D.print("Challenger: sendCyclesToOperatorAdmin - requested cycles transaction but balance is not big enough: " # debug_show(currentCyclesBalance) # debug_show(msg)); + D.print("ShareService: sendCyclesToOperatorAdmin - requested cycles transaction but balance is not big enough: " # debug_show(currentCyclesBalance) # debug_show(msg)); return #Err(#Unauthorized); }; - D.print("Challenger: sendCyclesToOperatorAdmin - OPERATOR_WALLET_ADDRESS: " # debug_show(OPERATOR_WALLET_ADDRESS)); - D.print("Challenger: sendCyclesToOperatorAdmin - CYCLES_AMOUNT_TO_OPERATOR: " # debug_show(CYCLES_AMOUNT_TO_OPERATOR)); + D.print("ShareService: sendCyclesToOperatorAdmin - OPERATOR_WALLET_ADDRESS: " # debug_show(OPERATOR_WALLET_ADDRESS)); + D.print("ShareService: sendCyclesToOperatorAdmin - CYCLES_AMOUNT_TO_OPERATOR: " # debug_show(CYCLES_AMOUNT_TO_OPERATOR)); Cycles.add(CYCLES_AMOUNT_TO_OPERATOR); // Send via system API - D.print("Challenger: sendCyclesToOperatorAdmin - calling system API to send cycles"); + D.print("ShareService: sendCyclesToOperatorAdmin - calling system API to send cycles"); let deposit_cycles_args = { canister_id : Principal = Principal.fromText(OPERATOR_WALLET_ADDRESS); }; let _ = ignore IC0.deposit_cycles(deposit_cycles_args); - D.print("Challenger: sendCyclesToOperatorAdmin - deposit_cycles successful"); + D.print("ShareService: sendCyclesToOperatorAdmin - deposit_cycles successful"); // Store the transaction let transactionEntry : Types.CyclesTransaction = { amountAdded : 
Nat = CYCLES_AMOUNT_TO_OPERATOR; @@ -352,7 +352,7 @@ actor class MainerAgentCtrlbCanister() = this { }; return #Ok(addCyclesResponse); } catch (e) { - D.print("Challenger: sendCyclesToOperatorAdmin - Failed to send cycles: " # Error.message(e)); + D.print("ShareService: sendCyclesToOperatorAdmin - Failed to send cycles: " # Error.message(e)); // Store the failed attempt let transactionEntry : Types.CyclesTransaction = { amountAdded : Nat = CYCLES_AMOUNT_TO_OPERATOR; @@ -363,7 +363,7 @@ actor class MainerAgentCtrlbCanister() = this { previousCyclesBalance : Nat = currentCyclesBalance; }; cyclesTransactionsStorage := List.push(transactionEntry, cyclesTransactionsStorage); - return #Err(#Other("Challenger: sendCyclesToOperatorAdmin - Failed to send cycles: " # Error.message(e))); + return #Err(#Other("ShareService: sendCyclesToOperatorAdmin - Failed to send cycles: " # Error.message(e))); }; }; From 229d5786c0b2bb571c092e830e01fe88b0638d05 Mon Sep 17 00:00:00 2001 From: patnorris Date: Thu, 30 Oct 2025 18:37:50 +0100 Subject: [PATCH 04/14] Update print statements --- src/mAIner/src/Main.mo | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/src/mAIner/src/Main.mo b/src/mAIner/src/Main.mo index ec80792..927c5de 100644 --- a/src/mAIner/src/Main.mo +++ b/src/mAIner/src/Main.mo @@ -332,10 +332,10 @@ actor class MainerAgentCtrlbCanister() = this { D.print("ShareService: sendCyclesToOperatorAdmin - CYCLES_AMOUNT_TO_OPERATOR: " # debug_show(CYCLES_AMOUNT_TO_OPERATOR)); Cycles.add(CYCLES_AMOUNT_TO_OPERATOR); // Send via system API - D.print("ShareService: sendCyclesToOperatorAdmin - calling system API to send cycles"); + D.print("ShareService: sendCyclesToOperatorAdmin - calling system API's deposit_cycles to send cycles"); let deposit_cycles_args = { canister_id : Principal = Principal.fromText(OPERATOR_WALLET_ADDRESS); }; let _ = ignore IC0.deposit_cycles(deposit_cycles_args); - D.print("ShareService: sendCyclesToOperatorAdmin - deposit_cycles 
successful"); + D.print("ShareService: sendCyclesToOperatorAdmin - called deposit_cycles with ignore"); // Store the transaction let transactionEntry : Types.CyclesTransaction = { amountAdded : Nat = CYCLES_AMOUNT_TO_OPERATOR; @@ -346,6 +346,7 @@ actor class MainerAgentCtrlbCanister() = this { previousCyclesBalance : Nat = currentCyclesBalance; }; cyclesTransactionsStorage := List.push(transactionEntry, cyclesTransactionsStorage); + D.print("ShareService: sendCyclesToOperatorAdmin - stored transactionEntry: " # debug_show(transactionEntry)); let addCyclesResponse : Types.AddCyclesRecord = { added : Bool = true; amount : Nat = CYCLES_AMOUNT_TO_OPERATOR; From bfb73ccd17f206e90f78b8f55c23d31542c46ac2 Mon Sep 17 00:00:00 2001 From: patnorris Date: Fri, 31 Oct 2025 10:56:47 +0100 Subject: [PATCH 05/14] Add dedicated folder for ShareService to decouple code from mAIners --- src/ShareService/.gitignore | 1 + src/ShareService/README.md | 54 + src/ShareService/canister_ids.json | 10 + src/ShareService/dfx.json | 52 + src/ShareService/mops.toml | 6 + src/ShareService/scripts/deploy.sh | 135 + src/ShareService/scripts/register-check.sh | 69 + .../scripts/register-game-state.sh | 110 + .../register-llms-or-mainer-service.sh | 221 ++ src/ShareService/scripts/top-off.sh | 77 + src/ShareService/src/Main.mo | 2479 +++++++++++++++++ src/ShareService/src/Utils.mo | 126 + src/ShareService/test/__init__.py | 0 src/ShareService/test/conftest.py | 9 + src/ShareService/test/test_apis.py | 92 + 15 files changed, 3441 insertions(+) create mode 100644 src/ShareService/.gitignore create mode 100644 src/ShareService/README.md create mode 100644 src/ShareService/canister_ids.json create mode 100644 src/ShareService/dfx.json create mode 100644 src/ShareService/mops.toml create mode 100755 src/ShareService/scripts/deploy.sh create mode 100755 src/ShareService/scripts/register-check.sh create mode 100755 src/ShareService/scripts/register-game-state.sh create mode 100755 
src/ShareService/scripts/register-llms-or-mainer-service.sh create mode 100755 src/ShareService/scripts/top-off.sh create mode 100644 src/ShareService/src/Main.mo create mode 100644 src/ShareService/src/Utils.mo create mode 100644 src/ShareService/test/__init__.py create mode 100644 src/ShareService/test/conftest.py create mode 100644 src/ShareService/test/test_apis.py diff --git a/src/ShareService/.gitignore b/src/ShareService/.gitignore new file mode 100644 index 0000000..495afe5 --- /dev/null +++ b/src/ShareService/.gitignore @@ -0,0 +1 @@ +.mops \ No newline at end of file diff --git a/src/ShareService/README.md b/src/ShareService/README.md new file mode 100644 index 0000000..dd3ebef --- /dev/null +++ b/src/ShareService/README.md @@ -0,0 +1,54 @@ +# ShareService +This canister serves as a Generative AI service to mAIners and thus has multiple LLM canisters attached. + +## Reset a mAIner +```bash +NETWORK=prd +USER_ID=... +# Get the correct mAIner canister id, by running from funnAI folder: +scripts/get_mainers.sh --network $NETWORK --user $USER_ID + +# Verify that the canister id is in the canister_ids.json in this folder. 
+# If it is not, do not edit it yourself, but run from funnAI folder: +scripts/get_mainers.sh --network $NETWORK + +# Then, upgrade the mAIner from this folder (mAIner): +# Get the mainer_ctrlb_canister_## from the canister_ids.json and run: +MAINER=mainer_ctrlb_canister_## + +# verify logs and make sure it is Ok to upgrade (nothing in the queue) +dfx canister --network $NETWORK logs $MAINER --follow +dfx canister --network $NETWORK call $MAINER getChallengeQueueAdmin --output json + +# toggle maintenance flag +dfx canister --network $NETWORK call $MAINER getMaintenanceFlag +dfx canister --network $NETWORK call $MAINER toggleMaintenanceFlagAdmin # if needed + +# stop & snapshot & start +dfx canister --network $NETWORK stop $MAINER +dfx canister --network $NETWORK snapshot create $MAINER +dfx canister --network $NETWORK start $MAINER + +# Upgrade & start Timer & toggle maintenance flag +# IMPORTANT: make sure the correct branch is checked out !!!!!!!!!!!!!! +dfx deploy --network $NETWORK $MAINER --mode upgrade +dfx canister --network $NETWORK call $MAINER startTimerExecutionAdmin +dfx canister --network $NETWORK call $MAINER getMaintenanceFlag +dfx canister --network $NETWORK call $MAINER toggleMaintenanceFlagAdmin # if needed + +# verify everything looks good (timer should have been restarted) +dfx canister --network $NETWORK logs $MAINER + +# if it does not look good, restore the snapshot +dfx canister --network $NETWORK snapshot list $MAINER +dfx canister --network $NETWORK stop $MAINER +dfx canister --network $NETWORK snapshot load $MAINER +dfx canister --network $NETWORK start $MAINER +dfx canister --network $NETWORK call $MAINER startTimerExecutionAdmin +dfx canister --network $NETWORK call $MAINER getMaintenanceFlag +dfx canister --network $NETWORK call $MAINER toggleMaintenanceFlagAdmin # if needed + +# if it looks good, delete the snapshot +dfx canister --network $NETWORK snapshot list $MAINER +dfx canister --network $NETWORK snapshot delete $MAINER +``` \ 
No newline at end of file diff --git a/src/ShareService/canister_ids.json b/src/ShareService/canister_ids.json new file mode 100644 index 0000000..aba0a10 --- /dev/null +++ b/src/ShareService/canister_ids.json @@ -0,0 +1,10 @@ +{ + "mainer_service_canister": { + "development": "ecpt4-ayaaa-aaaad-qhk4a-cai", + "ic": "", + "local": "", + "testing": "vtebo-riaaa-aaaam-qdxgq-cai", + "demo": "4xoqq-waaaa-aaaaj-a2bvq-cai", + "prd": "rilmv-caaaa-aaaaa-qandq-cai" + } +} \ No newline at end of file diff --git a/src/ShareService/dfx.json b/src/ShareService/dfx.json new file mode 100644 index 0000000..598d011 --- /dev/null +++ b/src/ShareService/dfx.json @@ -0,0 +1,52 @@ +{ + "version": 1, + "canisters": { + "mainer_service_canister": { + "main": "src/Main.mo" + } + }, + "defaults": { + "build": { + "packtool": "mops sources" + } + }, + "networks": { + "development": { + "providers": [ + "https://icp0.io" + ], + "type": "persistent" + }, + "testing": { + "providers": [ + "https://icp0.io" + ], + "type": "persistent" + }, + "prd": { + "providers": [ + "https://icp0.io" + ], + "type": "persistent" + }, + "demo": { + "providers": [ + "https://icp0.io" + ], + "type": "persistent" + }, + "ic": { + "providers": [ + "https://icp0.io" + ], + "type": "persistent" + }, + "backup": { + "providers": [ + "https://icp0.io" + ], + "type": "persistent" + } + }, + "output_env_file": ".env" +} \ No newline at end of file diff --git a/src/ShareService/mops.toml b/src/ShareService/mops.toml new file mode 100644 index 0000000..d9ffed7 --- /dev/null +++ b/src/ShareService/mops.toml @@ -0,0 +1,6 @@ +[dependencies] +base = "0.13.5" +accountid = "https://github.com/aviate-labs/principal.mo#main" +hex = "https://github.com/letmejustputthishere/motoko-hex#master" +canistergeek = "https://github.com/usergeek/canistergeek-ic-motoko#v0.0.3" +uuid = "https://github.com/aviate-labs/uuid.mo#v0.2.0" diff --git a/src/ShareService/scripts/deploy.sh b/src/ShareService/scripts/deploy.sh new file mode 100755 index 
0000000..5d44b83 --- /dev/null +++ b/src/ShareService/scripts/deploy.sh @@ -0,0 +1,135 @@ +#!/bin/bash + +####################################################################### +# run from parent folder as: +# scripts/deploy.sh --network [local|ic] +####################################################################### + +# Default network type is local +NETWORK_TYPE="local" +DEPLOY_MODE="install" + +NUM_MAINERS_DEPLOYED=3 +MAINER_CANISTER_TYPES=("ShareAgent" "ShareAgent" "Own" ) + +# When deploying to IC, we deploy to a specific subnet +# none will not use subnet parameter in deploy to ic +# SUBNET="none" +SUBNET="qdvhd-os4o2-zzrdw-xrcv4-gljou-eztdp-bj326-e6jgr-tkhuc-ql6v2-yqe" + +# Parse command line arguments for network type +while [ $# -gt 0 ]; do + case "$1" in + --network) + shift + if [ "$1" = "local" ] || [ "$1" = "ic" ] || [ "$1" = "testing" ] || [ "$1" = "development" ] || [ "$1" = "demo" ] || [ "$1" = "prd" ]; then + NETWORK_TYPE=$1 + else + echo "Invalid network type: $1. Use 'local' or 'ic' or 'testing." + exit 1 + fi + shift + ;; + --mode) + shift + if [ "$1" = "install" ] || [ "$1" = "reinstall" ] || [ "$1" = "upgrade" ]; then + DEPLOY_MODE=$1 + else + echo "Invalid mode: $1. Use 'install', 'reinstall' or 'upgrade'." 
+ exit 1 + fi + shift + ;; + *) + echo "Unknown argument: $1" + echo "Usage: $0 --network [local|ic|testing|development|demo|prd]" + exit 1 + ;; + esac +done + +echo "Using network type: $NETWORK_TYPE" + +####################################################################### +echo " " +echo "===================================================" +MAINER="mainer_service_canister" +echo "Deploying the protocol's $MAINER" +if [ "$NETWORK_TYPE" = "ic" ] || [ "$NETWORK_TYPE" = "testing" ] || [ "$NETWORK_TYPE" = "development" ] || [ "$NETWORK_TYPE" = "demo" ] || [ "$NETWORK_TYPE" = "prd" ]; then + if [ "$SUBNET" = "none" ]; then + dfx deploy $MAINER --mode $DEPLOY_MODE --yes --network $NETWORK_TYPE + else + dfx deploy $MAINER --mode $DEPLOY_MODE --yes --network $NETWORK_TYPE --subnet $SUBNET + fi +else + dfx deploy $MAINER --mode $DEPLOY_MODE --yes --network $NETWORK_TYPE +fi + +echo " " +echo "--------------------------------------------------" +dfx canister call $MAINER setMainerCanisterType '(variant {ShareService} )' --network $NETWORK_TYPE +echo "verify getMainerCanisterType: " +dfx canister call $MAINER getMainerCanisterType --network $NETWORK_TYPE + +echo " " +echo "--------------------------------------------------" +echo "Checking health endpoint" +output=$(dfx canister call $MAINER health --network $NETWORK_TYPE) + +if [ "$output" != "(variant { Ok = record { status_code = 200 : nat16 } })" ]; then + echo "$MAINER is not healthy. Exiting." + exit 1 +else + echo "$MAINER is healthy." 
+fi + +####################################################################### +echo " " +echo "===================================================" +echo "Deploying $NUM_MAINERS_DEPLOYED mainer Agents" +mainer_id_start=0 +mainer_id_end=$((NUM_MAINERS_DEPLOYED - 1)) + +for m in $(seq $mainer_id_start $mainer_id_end) +do + + MAINER="mainer_ctrlb_canister_$m" + + echo " " + echo "--------------------------------------------------" + echo "Deploying $MAINER" + + if [ "$NETWORK_TYPE" = "ic" ] || [ "$NETWORK_TYPE" = "testing" ] || [ "$NETWORK_TYPE" = "development" ] || [ "$NETWORK_TYPE" = "demo" ] || [ "$NETWORK_TYPE" = "prd" ]; then + if [ "$SUBNET" = "none" ]; then + dfx deploy $MAINER --mode $DEPLOY_MODE --yes --network $NETWORK_TYPE + else + dfx deploy $MAINER --mode $DEPLOY_MODE --yes --network $NETWORK_TYPE --subnet $SUBNET + fi + else + dfx deploy $MAINER --mode $DEPLOY_MODE --yes --network $NETWORK_TYPE + fi + + echo " " + echo "--------------------------------------------------" + echo "setMainerCanisterType to ${MAINER_CANISTER_TYPES[$m]}" + dfx canister call $MAINER setMainerCanisterType "(variant {${MAINER_CANISTER_TYPES[$m]}} )" --network $NETWORK_TYPE + echo "verify getMainerCanisterType: " + dfx canister call $MAINER getMainerCanisterType --network $NETWORK_TYPE + + echo " " + echo "--------------------------------------------------" + echo "Checking health endpoint" + output=$(dfx canister call $MAINER health --network $NETWORK_TYPE) + + if [ "$output" != "(variant { Ok = record { status_code = 200 : nat16 } })" ]; then + echo "$MAINER is not healthy. Exiting." + exit 1 + else + echo "$MAINER is healthy." 
+ fi +done + +echo " " +echo "--------------------------------------------------" +echo "Generating bindings for a frontend" +dfx generate \ No newline at end of file diff --git a/src/ShareService/scripts/register-check.sh b/src/ShareService/scripts/register-check.sh new file mode 100755 index 0000000..989305a --- /dev/null +++ b/src/ShareService/scripts/register-check.sh @@ -0,0 +1,69 @@ +#!/bin/bash + +####################################################################### +# run from parent folder as: +# scripts/register-check.sh --network [local|ic] +####################################################################### + +# Default network type is local +NETWORK_TYPE="local" +NUM_MAINERS_DEPLOYED=3 +MAINER_CANISTER_TYPES=("ShareAgent" "ShareAgent" "Own" ) + +# Parse command line arguments for network type +while [ $# -gt 0 ]; do + case "$1" in + --network) + shift + if [ "$1" = "local" ] || [ "$1" = "ic" ] || [ "$1" = "testing" ] || [ "$1" = "development" ] || [ "$1" = "demo" ] || [ "$1" = "prd" ]; then + NETWORK_TYPE=$1 + else + echo "Invalid network type: $1. Use 'local' or 'ic' or 'testing' or 'development' or 'demo' or 'prd'." + exit 1 + fi + shift + ;; + *) + echo "Unknown argument: $1" + echo "Usage: $0 --network [local|ic]" + exit 1 + ;; + esac +done + +echo "Using network type: $NETWORK_TYPE" + +############################################################################ +MAINER="mainer_service_canister" +echo " " +echo "Checking if $MAINER is a controller of its registered LLM canisters" +output=$(dfx canister call $MAINER checkAccessToLLMs --network $NETWORK_TYPE) + +if [ "$output" != "(variant { Ok = record { status_code = 200 : nat16 } })" ]; then + echo "ERROR: $MAINER is not a controller of all its LLMs. Make sure to update the LLMs." + exit 1 +else + echo "$MAINER is a controller of all its LLMs." 
+fi + + +mainer_id_start=0 +mainer_id_end=$((NUM_MAINERS_DEPLOYED - 1)) + +for m in $(seq $mainer_id_start $mainer_id_end) +do + MAINER="mainer_ctrlb_canister_$m" + MAINER_CANISTER_TYPE=${MAINER_CANISTER_TYPES[$m]} + if ($MAINER_CANISTER_TYPE == "Own"); then + echo " " + echo "Checking if $MAINER is a controller of its registered LLM canisters" + output=$(dfx canister call $MAINER checkAccessToLLMs --network $NETWORK_TYPE) + + if [ "$output" != "(variant { Ok = record { status_code = 200 : nat16 } })" ]; then + echo "ERROR: $MAINER is not a controller of all its LLMs. Make sure to update the LLMs." + exit 1 + else + echo "$MAINER is a controller of all its LLMs." + fi + fi +done \ No newline at end of file diff --git a/src/ShareService/scripts/register-game-state.sh b/src/ShareService/scripts/register-game-state.sh new file mode 100755 index 0000000..cbefad8 --- /dev/null +++ b/src/ShareService/scripts/register-game-state.sh @@ -0,0 +1,110 @@ +#!/bin/bash + +####################################################################### +# run from parent folder as: +# scripts/register-game-state.sh --network [local|ic] +####################################################################### + +# Default network type is local +NETWORK_TYPE="local" + +NUM_MAINERS_DEPLOYED=3 + +# When deploying local, use canister IDs from .env +# Use this when deploying from funnAI +source ../../../.env +# Use this when deploying from PoAIW +# source ../GameState/.env + +# none will not use subnet parameter in deploy to ic +SUBNET="none" + +# Parse command line arguments for network type +while [ $# -gt 0 ]; do + case "$1" in + --network) + shift + if [ "$1" = "local" ] || [ "$1" = "ic" ] || [ "$1" = "testing" ] || [ "$1" = "development" ] || [ "$1" = "demo" ] || [ "$1" = "prd" ]; then + NETWORK_TYPE=$1 + if [ "$NETWORK_TYPE" = "ic" ]; then + CANISTER_ID_GAME_STATE_CANISTER='xzpy6-hiaaa-aaaaj-az4pq-cai' + fi + else + echo "Invalid network type: $1. 
Use 'local' or 'ic' or 'testing' or 'development' or 'demo' or 'prd'." + exit 1 + fi + shift + ;; + *) + echo "Unknown argument: $1" + echo "Usage: $0 --network [local|ic]" + exit 1 + ;; + esac +done + +echo "Using network type: $NETWORK_TYPE" + +####################################################################### +echo " " +echo "===================================================" +MAINER="mainer_service_canister" +echo " " +echo "--------------------------------------------------" +echo "Checking health endpoint for $MAINER" +output=$(dfx canister call $MAINER health --network $NETWORK_TYPE) + +if [ "$output" != "(variant { Ok = record { status_code = 200 : nat16 } })" ]; then + echo "$MAINER is not healthy. Exiting." + exit 1 +else + echo "$MAINER is healthy." +fi + +echo " " +echo "--------------------------------------------------" +echo "Registering GameState with the $MAINER" +output=$(dfx canister call $MAINER setGameStateCanisterId "(\"$CANISTER_ID_GAME_STATE_CANISTER\")" --network $NETWORK_TYPE) + +if [ "$output" != "(variant { Ok = record { status_code = 200 : nat16 } })" ]; then + echo "Error calling setGameStateCanisterId for GameState $CANISTER_ID_GAME_STATE_CANISTER." + exit 1 +else + echo "Successfully called setGameStateCanisterId for GameState $CANISTER_ID_GAME_STATE_CANISTER." +fi + +####################################################################### +echo " " +echo "===================================================" +echo "We have $NUM_MAINERS_DEPLOYED mainers" +mainer_id_start=0 +mainer_id_end=$((NUM_MAINERS_DEPLOYED - 1)) + +for m in $(seq $mainer_id_start $mainer_id_end) +do + MAINER="mainer_ctrlb_canister_$m" + + echo " " + echo "--------------------------------------------------" + echo "Checking health endpoint for $MAINER" + output=$(dfx canister call $MAINER health --network $NETWORK_TYPE) + + if [ "$output" != "(variant { Ok = record { status_code = 200 : nat16 } })" ]; then + echo "$MAINER is not healthy. Exiting." 
+ exit 1 + else + echo "$MAINER is healthy." + fi + + echo " " + echo "--------------------------------------------------" + echo "Registering GameState with the $MAINER" + output=$(dfx canister call $MAINER setGameStateCanisterId "(\"$CANISTER_ID_GAME_STATE_CANISTER\")" --network $NETWORK_TYPE) + + if [ "$output" != "(variant { Ok = record { status_code = 200 : nat16 } })" ]; then + echo "Error calling setGameStateCanisterId for GameState $CANISTER_ID_GAME_STATE_CANISTER." + exit 1 + else + echo "Successfully called setGameStateCanisterId for GameState $CANISTER_ID_GAME_STATE_CANISTER." + fi +done \ No newline at end of file diff --git a/src/ShareService/scripts/register-llms-or-mainer-service.sh b/src/ShareService/scripts/register-llms-or-mainer-service.sh new file mode 100755 index 0000000..6807ad9 --- /dev/null +++ b/src/ShareService/scripts/register-llms-or-mainer-service.sh @@ -0,0 +1,221 @@ +#!/bin/bash + +####################################################################### +# run from parent folder as: +# scripts/register-llms-or-mainer-service.sh --network [local|ic] +####################################################################### + +# Default network type is local +NETWORK_TYPE="local" +NUM_MAINERS_DEPLOYED=3 # Total number of mainers deployed +MAINER_CANISTER_TYPES=("ShareAgent" "ShareAgent" "Own" ) +NUM_LLMS_PER_MAINER_OWN=1 +NUM_LLMS_PER_MAINER_SHARE_SERVICE=1 +NUM_LLMS_ROUND_ROBIN=1 # how many registered LLMs per mainer/service we actually use + +# When deploying local, use canister IDs from .env +source ../../llms/mAIner/.env +source .env # to get CANISTER_ID_MAINER_SERVICE_CANISTER + +# none will not use subnet parameter in deploy to ic +SUBNET="none" + +# Parse command line arguments for network type +while [ $# -gt 0 ]; do + case "$1" in + --network) + shift + if [ "$1" = "local" ] || [ "$1" = "ic" ] || [ "$1" = "testing" ] || [ "$1" = "development" ] || [ "$1" = "demo" ] || [ "$1" = "prd" ]; then + NETWORK_TYPE=$1 + if [ 
"$NETWORK_TYPE" = "ic" ]; then + CANISTER_ID_MAINER_SERVICE_CANISTER='TODO' + CANISTER_ID_LLM_0='xflcp-qiaaa-aaaaj-az4nq-cai' + CANISTER_ID_LLM_1='xqmtc-raaaa-aaaaj-az4oa-cai' + elif [ "$NETWORK_TYPE" = "testing" ]; then + CANISTER_ID_MAINER_SERVICE_CANISTER='TODO: testing CANISTER_ID_MAINER_SERVICE_CANISTER' + CANISTER_ID_LLM_0='TODO: testing CANISTER_ID_LLM_0' + CANISTER_ID_LLM_1='TODO: testing CANISTER_ID_LLM_1' + fi + else + echo "Invalid network type: $1. Use 'local' or 'ic' or 'testing' or 'development' or 'demo' or 'prd'." + exit 1 + fi + shift + ;; + *) + echo "Unknown argument: $1" + echo "Usage: $0 --network [local|ic]" + exit 1 + ;; + esac +done + +echo "Using network type: $NETWORK_TYPE" + +CANISTER_ID_MAINER_CTRLB_CANISTERS=( + $CANISTER_ID_MAINER_CTRLB_CANISTER_0 + $CANISTER_ID_MAINER_CTRLB_CANISTER_1 + $CANISTER_ID_MAINER_CTRLB_CANISTER_2 + $CANISTER_ID_MAINER_CTRLB_CANISTER_3 + $CANISTER_ID_MAINER_CTRLB_CANISTER_4 + $CANISTER_ID_MAINER_CTRLB_CANISTER_5 + $CANISTER_ID_MAINER_CTRLB_CANISTER_6 + $CANISTER_ID_MAINER_CTRLB_CANISTER_7 + $CANISTER_ID_MAINER_CTRLB_CANISTER_8 + $CANISTER_ID_MAINER_CTRLB_CANISTER_9 + $CANISTER_ID_MAINER_CTRLB_CANISTER_10 + $CANISTER_ID_MAINER_CTRLB_CANISTER_11 +) + +CANISTER_ID_LLMS=( + $CANISTER_ID_LLM_0 + $CANISTER_ID_LLM_1 + $CANISTER_ID_LLM_2 + $CANISTER_ID_LLM_3 + $CANISTER_ID_LLM_4 + $CANISTER_ID_LLM_5 + $CANISTER_ID_LLM_6 + $CANISTER_ID_LLM_7 + $CANISTER_ID_LLM_8 + $CANISTER_ID_LLM_9 + $CANISTER_ID_LLM_10 + $CANISTER_ID_LLM_11 +) + +i=0 # LLM index + +####################################################################### +echo " " +MAINER="mainer_service_canister" +MAINER_CANISTER_TYPE="ShareService" + +echo " " +echo "--------------------------------------------------" +echo "$MAINER of type $MAINER_CANISTER_TYPE with id $CANISTER_ID_MAINER_SERVICE_CANISTER" + +echo " " +echo "Checking health endpoint for $MAINER" +output=$(dfx canister call $MAINER health --network $NETWORK_TYPE) + +if [ "$output" != "(variant { Ok 
= record { status_code = 200 : nat16 } })" ]; then + echo "$MAINER is not healthy. Exiting." + exit 1 +else + echo "$MAINER is healthy." +fi + +echo " " +echo "Calling reset_llm_canisters." +output=$(dfx canister call $MAINER reset_llm_canisters --network $NETWORK_TYPE) + +if [ "$output" != "(variant { Ok = record { status_code = 200 : nat16 } })" ]; then + echo "Error calling reset_llm_canisters. Exiting." + exit 1 +fi + +for n in $(seq 0 $((NUM_LLMS_PER_MAINER_SHARE_SERVICE - 1))); +do + CANISTER_ID_LLM=${CANISTER_ID_LLMS[$i]} + echo " " + echo "registering LLM $i ($CANISTER_ID_LLM) with $MAINER" + output=$(dfx canister call $MAINER add_llm_canister "(record { canister_id = \"$CANISTER_ID_LLM\" })" --network $NETWORK_TYPE) + + if [ "$output" != "(variant { Ok = record { status_code = 200 : nat16 } })" ]; then + echo "Error calling add_llm_canister for $CANISTER_ID_LLM. Exiting." + exit 1 + fi + ((i++)) # next LLM +done + +echo " " +echo "Setting NUM_LLMS_ROUND_ROBIN to $NUM_LLMS_ROUND_ROBIN for $MAINER" +output=$(dfx canister call $MAINER setRoundRobinLLMs "($NUM_LLMS_ROUND_ROBIN)" --network $NETWORK_TYPE) + +if [ "$output" != "(variant { Ok = record { status_code = 200 : nat16 } })" ]; then + echo "setRoundRobinLLMs call failed. Exiting." 
+ exit 1 +fi + +####################################################################### +echo " " +echo "===================================================" +echo "We have $NUM_MAINERS_DEPLOYED mAIner Agents" +mainer_id_start=0 +mainer_id_end=$((NUM_MAINERS_DEPLOYED - 1)) + +for m in $(seq $mainer_id_start $mainer_id_end) +do + MAINER="mainer_ctrlb_canister_$m" + MAINER_CANISTER_ID=${CANISTER_ID_MAINER_CTRLB_CANISTERS[$m]} + MAINER_CANISTER_TYPE=${MAINER_CANISTER_TYPES[$m]} + + echo " " + echo "--------------------------------------------------" + echo "$MAINER ($MAINER_CANISTER_ID) of type $MAINER_CANISTER_TYPE" + + echo " " + echo "Checking health endpoint for $MAINER" + output=$(dfx canister call $MAINER health --network $NETWORK_TYPE) + + if [ "$output" != "(variant { Ok = record { status_code = 200 : nat16 } })" ]; then + echo "$MAINER is not healthy. Exiting." + exit 1 + else + echo "$MAINER is healthy." + fi + + if [ "$MAINER_CANISTER_TYPE" = "Own" ]; then + echo "Calling reset_llm_canisters." + output=$(dfx canister call $MAINER reset_llm_canisters --network $NETWORK_TYPE) + + if [ "$output" != "(variant { Ok = record { status_code = 200 : nat16 } })" ]; then + echo "Error calling reset_llm_canisters. Exiting." + exit 1 + fi + + for n in $(seq 0 $((NUM_LLMS_PER_MAINER_OWN - 1))); + do + CANISTER_ID_LLM=${CANISTER_ID_LLMS[$i]} + echo "registering LLM $i ($CANISTER_ID_LLM) with $MAINER" + output=$(dfx canister call $MAINER add_llm_canister "(record { canister_id = \"$CANISTER_ID_LLM\" })" --network $NETWORK_TYPE) + + if [ "$output" != "(variant { Ok = record { status_code = 200 : nat16 } })" ]; then + echo "Error calling add_llm_canister for $CANISTER_ID_LLM. Exiting." 
+ exit 1 + fi + ((i++)) # next LLM + done + + echo " " + echo "Setting NUM_LLMS_ROUND_ROBIN to $NUM_LLMS_ROUND_ROBIN for $MAINER" + output=$(dfx canister call $MAINER setRoundRobinLLMs "($NUM_LLMS_ROUND_ROBIN)" --network $NETWORK_TYPE) + + if [ "$output" != "(variant { Ok = record { status_code = 200 : nat16 } })" ]; then + echo "setRoundRobinLLMs call failed. Exiting." + exit 1 + fi + elif [ "$MAINER_CANISTER_TYPE" = "ShareAgent" ]; then + # ------------ + echo "Registering mainer_service_canister ($CANISTER_ID_MAINER_SERVICE_CANISTER) with $MAINER" + output=$(dfx canister call $MAINER setShareServiceCanisterId "(\"$CANISTER_ID_MAINER_SERVICE_CANISTER\")" --network $NETWORK_TYPE) + + if [ "$output" != "(variant { Ok = record { status_code = 200 : nat16 } })" ]; then + echo "Error calling setShareServiceCanisterId for $CANISTER_ID_MAINER_SERVICE_CANISTER. Exiting." + exit 1 + fi + + # ------------ + echo "Registering $MAINER ($MAINER_CANISTER_ID) with the mainer_service_canister" + MYPRINCIPAL=$(dfx identity get-principal | tr -d '\n') + output=$(dfx canister call mainer_service_canister addMainerShareAgentCanisterAdmin "(record { address = \"$MAINER_CANISTER_ID\"; canisterType = variant {MainerAgent}; ownedBy = principal \"$MYPRINCIPAL\" })" --network $NETWORK_TYPE) + + if [[ "$output" != *"Ok = record"* ]]; then + if [[ "$output" != "(variant { Err = variant { Other = \"Canister entry already exists\" } })" ]]; then + echo "Error calling addMainerShareAgentCanisterAdmin for mAIner $MAINER_CANISTER_ID." + echo $output + else + echo "$MAINER ($MAINER_CANISTER_ID) is already registered with the game_state_canister." 
+ fi + fi + fi +done \ No newline at end of file diff --git a/src/ShareService/scripts/top-off.sh b/src/ShareService/scripts/top-off.sh new file mode 100755 index 0000000..ba43ad9 --- /dev/null +++ b/src/ShareService/scripts/top-off.sh @@ -0,0 +1,77 @@ +#!/bin/bash + +####################################################################### +# run from parent folder as: +# scripts/top-off.sh --network [local|ic] +####################################################################### + +# Default network type is local +NETWORK_TYPE="local" +NUM_MAINERS_DEPLOYED=3 # Total number of mainers deployed + +# Parse command line arguments for network type +while [ $# -gt 0 ]; do + case "$1" in + --network) + shift + if [ "$1" = "local" ] || [ "$1" = "ic" ] || [ "$1" = "testing" ] || [ "$1" = "development" ] || [ "$1" = "demo" ] || [ "$1" = "prd" ]; then + NETWORK_TYPE=$1 + else + echo "Invalid network type: $1. Use 'local' or 'ic' or 'testing' or 'development' or 'demo' or 'prd'." + exit 1 + fi + shift + ;; + *) + echo "Unknown argument: $1" + echo "Usage: $0 --network [local|ic]" + exit 1 + ;; + esac +done + +####################################################################### +# Define the threshold balance in TCycles (When to top off) +TOPPED_OFF_BALANCE_TRESHOLD_TC=1 +TOPPED_OFF_BALANCE_TRESHOLD=$(echo "$TOPPED_OFF_BALANCE_TRESHOLD_TC * 1000000000000" | bc) +TOPPED_OFF_BALANCE_TRESHOLD=$(printf "%.0f" $TOPPED_OFF_BALANCE_TRESHOLD) + +# Define the target balance in TCycles (To top off too) +TOPPED_OFF_BALANCE_TARGET_TC=3 +TOPPED_OFF_BALANCE_TARGET=$(echo "$TOPPED_OFF_BALANCE_TARGET_TC * 1000000000000" | bc) +TOPPED_OFF_BALANCE_TARGET=$(printf "%.0f" $TOPPED_OFF_BALANCE_TARGET) + + +# top off cycles for mAIner Service canister +MAINER="mainer_service_canister" +CURRENT_BALANCE=$(dfx canister --network $NETWORK_TYPE status $MAINER 2>&1 | grep "Balance:" | awk '{gsub("_", ""); print $2}') +NEED_CYCLES_THRESHOLD=$(echo "$TOPPED_OFF_BALANCE_TRESHOLD - $CURRENT_BALANCE" | 
bc) +NEED_CYCLES_TARGET=$(echo "$TOPPED_OFF_BALANCE_TARGET - $CURRENT_BALANCE" | bc) +if [ $(echo "$NEED_CYCLES_THRESHOLD > 0" | bc) -eq 1 ]; then + CANISTER_ID=$(dfx canister --network $NETWORK_TYPE id $MAINER) + echo "Sending $NEED_CYCLES_TARGET cycles to $MAINER" + dfx wallet send $CANISTER_ID $NEED_CYCLES_TARGET --network $NETWORK_TYPE +else + echo "No need to send cycles to $MAINER. Balance = $(echo "scale=2; $CURRENT_BALANCE / 1000000000000" | bc) TCycles" +fi + + +# top off cycles for mAIner Agent canisters +mainer_id_start=0 +mainer_id_end=$((NUM_MAINERS_DEPLOYED - 1)) + +i=0 # LLM index +for m in $(seq $mainer_id_start $mainer_id_end) +do + MAINER="mainer_ctrlb_canister_$m" + CURRENT_BALANCE=$(dfx canister --network $NETWORK_TYPE status $MAINER 2>&1 | grep "Balance:" | awk '{gsub("_", ""); print $2}') + NEED_CYCLES_THRESHOLD=$(echo "$TOPPED_OFF_BALANCE_TRESHOLD - $CURRENT_BALANCE" | bc) + NEED_CYCLES_TARGET=$(echo "$TOPPED_OFF_BALANCE_TARGET - $CURRENT_BALANCE" | bc) + if [ $(echo "$NEED_CYCLES_THRESHOLD > 0" | bc) -eq 1 ]; then + CANISTER_ID=$(dfx canister --network $NETWORK_TYPE id $MAINER) + echo "Sending $NEED_CYCLES_TARGET cycles to $MAINER" + dfx wallet send $CANISTER_ID $NEED_CYCLES_TARGET --network $NETWORK_TYPE + else + echo "No need to send cycles to $MAINER. 
Balance = $(echo "scale=2; $CURRENT_BALANCE / 1000000000000" | bc) TCycles" + fi +done \ No newline at end of file diff --git a/src/ShareService/src/Main.mo b/src/ShareService/src/Main.mo new file mode 100644 index 0000000..927c5de --- /dev/null +++ b/src/ShareService/src/Main.mo @@ -0,0 +1,2479 @@ +import Buffer "mo:base/Buffer"; +import Blob "mo:base/Blob"; +import D "mo:base/Debug"; +import Error "mo:base/Error"; +import Principal "mo:base/Principal"; +import Text "mo:base/Text"; +import Nat "mo:base/Nat"; +import Nat8 "mo:base/Nat8"; +import Nat32 "mo:base/Nat32"; +import Nat64 "mo:base/Nat64"; +import Bool "mo:base/Bool"; +import HashMap "mo:base/HashMap"; +import List "mo:base/List"; +import Int "mo:base/Int"; +import Time "mo:base/Time"; +import Iter "mo:base/Iter"; +import Float "mo:base/Float"; +import Cycles "mo:base/ExperimentalCycles"; +import { setTimer; recurringTimer } = "mo:base/Timer"; +import Timer "mo:base/Timer"; +import Random "mo:base/Random"; + +import Types "../../common/Types"; +import Constants "../../common/Constants"; +import ICManagementCanister "../../common/ICManagementCanister"; +import TimerRegularity "../../common/TimerRegularity"; +import Utils "Utils"; + +actor class MainerAgentCtrlbCanister() = this { + + let IC0 : ICManagementCanister.IC_Management = actor ("aaaaa-aa"); + + stable var MAINER_AGENT_CANISTER_TYPE : Types.MainerAgentCanisterType = #Own; + + public shared (msg) func setMainerCanisterType(_mainer_agent_canister_type : Types.MainerAgentCanisterType) : async Types.StatusCodeRecordResult { + if (not Principal.isController(msg.caller)) { + return #Err(#StatusCode(401)); + }; + MAINER_AGENT_CANISTER_TYPE := _mainer_agent_canister_type; + + // Avoid wrong timers from running when changing mainer canister type + D.print("mAIner (" # debug_show(MAINER_AGENT_CANISTER_TYPE) # "): setMainerCanisterType - Stopping Timers"); + let result = await stopTimerExecution(); + D.print("mAIner (" # debug_show(MAINER_AGENT_CANISTER_TYPE) 
# "): setMainerCanisterType - " # debug_show(result)); + + return #Ok({ status_code = 200 }); + }; + + public query (msg) func getMainerCanisterType() : async Types.MainerAgentCanisterTypeResult { + if (not Principal.isController(msg.caller)) { + return #Err(#StatusCode(401)); + }; + + return #Ok(MAINER_AGENT_CANISTER_TYPE); + }; + + // ------------------------------- + stable var GAME_STATE_CANISTER_ID : Text = "r5m5y-diaaa-aaaaa-qanaa-cai"; // prd + + public shared (msg) func setGameStateCanisterId(_game_state_canister_id : Text) : async Types.StatusCodeRecordResult { + if (not Principal.isController(msg.caller)) { + return #Err(#StatusCode(401)); + }; + GAME_STATE_CANISTER_ID := _game_state_canister_id; + return #Ok({ status_code = 200 }); + }; + + public query (msg) func getGameStateCanisterId() : async Text { + if (not Principal.isController(msg.caller)) { + return "#Err(#StatusCode(401))"; + }; + + return GAME_STATE_CANISTER_ID; + }; + + // Flag to pause mAIner for maintenance + stable var MAINTENANCE : Bool = true; + + public shared (msg) func toggleMaintenanceFlagAdmin() : async Types.AuthRecordResult { + if (Principal.isAnonymous(msg.caller)) { + return #Err(#Unauthorized); + }; + if (not Principal.isController(msg.caller)) { + return #Err(#Unauthorized); + }; + MAINTENANCE := not MAINTENANCE; + let authRecord = { auth = "You set the flag to " # debug_show(MAINTENANCE) }; + return #Ok(authRecord); + }; + + public query func getMaintenanceFlag() : async Types.FlagResult { + return #Ok({ flag = MAINTENANCE }); + }; + + // Official cycle balance + stable var officialCyclesBalance : Nat = Cycles.balance(); // TODO - Implementation: ensure this picks up the cycles the mAIner receives during creation + stable var officialCycleTopUpsStorage : List.List = List.nil(); + + public shared (msg) func addCycles() : async Types.AddCyclesResult { + if (Principal.isAnonymous(msg.caller)) { + return #Err(#Unauthorized); + }; + // Accept the cycles the call is charged with + 
let cyclesAdded = Cycles.accept(Cycles.available()); + D.print("mAIner (" # debug_show(MAINER_AGENT_CANISTER_TYPE) # "): addCycles - Accepted " # Nat.toText(cyclesAdded) # " Cycles from caller " # Principal.toText(msg.caller)); + + // Unpause the mAIner if it was paused due to low cycle balance + PAUSED_DUE_TO_LOW_CYCLE_BALANCE := false; + + // Add to official cycle balance and store all official top ups + if (Principal.equal(msg.caller, Principal.fromText(GAME_STATE_CANISTER_ID))) { + // Game State can make official top ups (via its top up flow) + officialCyclesBalance := officialCyclesBalance + cyclesAdded; + let topUpEntry : Types.OfficialMainerCycleTopUp = { + amountAdded : Nat = cyclesAdded; + newOfficialCycleBalance : Nat = officialCyclesBalance; + creationTimestamp : Nat64 = Nat64.fromNat(Int.abs(Time.now())); + sentBy : Principal = msg.caller; + }; + officialCycleTopUpsStorage := List.push(topUpEntry, officialCycleTopUpsStorage); + }; + + return #Ok({ + added : Bool = true; + amount : Nat = cyclesAdded; + }); + }; + + // ------------------------------- + stable var SHARE_SERVICE_CANISTER_ID : Text = "bkyz2-fmaaa-aaaaa-qaaaq-cai"; // Dummy value; Only used by ShareAgent + stable var shareServiceCanisterActor = actor (SHARE_SERVICE_CANISTER_ID) : Types.MainerCanister_Actor; + + public shared (msg) func setShareServiceCanisterId(_share_service_canister_id : Text) : async Types.StatusCodeRecordResult { + if (not Principal.isController(msg.caller)) { + return #Err(#StatusCode(401)); + }; + SHARE_SERVICE_CANISTER_ID := _share_service_canister_id; + shareServiceCanisterActor := actor (SHARE_SERVICE_CANISTER_ID); + return #Ok({ status_code = 200 }); + }; + + public query (msg) func getShareServiceCanisterId() : async Text { + if (not Principal.isController(msg.caller)) { + return "#Err(#StatusCode(401))"; + }; + + return SHARE_SERVICE_CANISTER_ID; + }; + + // -------------------------------------------------------------------------- +// Storage & functions used by 
SharedService mAiner canister to manage SharedAgent mAIner canisters + + // Official mAIner Creator canisters + stable var mainerCreatorCanistersStorageStable : [(Text, Types.OfficialProtocolCanister)] = []; + var mainerCreatorCanistersStorage : HashMap.HashMap = HashMap.HashMap(0, Text.equal, Text.hash); + + private func putMainerCreatorCanister(canisterAddress : Text, canisterEntry : Types.OfficialProtocolCanister) : Bool { + mainerCreatorCanistersStorage.put(canisterAddress, canisterEntry); + return true; + }; + + private func getMainerCreatorCanister(canisterAddress : Text) : ?Types.OfficialProtocolCanister { + switch (mainerCreatorCanistersStorage.get(canisterAddress)) { + case (null) { return null; }; + case (?canisterEntry) { return ?canisterEntry; }; + }; + }; + + private func removeMainerCreatorCanister(canisterAddress : Text) : Bool { + switch (mainerCreatorCanistersStorage.get(canisterAddress)) { + case (null) { return false; }; + case (?canisterEntry) { + let removeResult = mainerCreatorCanistersStorage.remove(canisterAddress); + return true; + }; + }; + }; + + private func getNextMainerCreatorCanisterEntry() : ?Types.OfficialProtocolCanister { + return mainerCreatorCanistersStorage.vals().next(); + }; + + // ShareAgent Registry: Official ShareAgent canisters (owned by users) + stable var shareAgentCanistersStorageStable : [(Text, Types.OfficialMainerAgentCanister)] = []; + var shareAgentCanistersStorage : HashMap.HashMap = HashMap.HashMap(0, Text.equal, Text.hash); + stable var userToShareAgentsStorageStable : [(Principal, List.List)] = []; + var userToShareAgentsStorage : HashMap.HashMap> = HashMap.HashMap(0, Principal.equal, Principal.hash); + + private func putShareAgentCanister(canisterAddress : Text, canisterEntry : Types.OfficialMainerAgentCanister) : Types.MainerAgentCanisterResult { + switch (getShareAgentCanister(canisterAddress)) { + case (null) { + shareAgentCanistersStorage.put(canisterAddress, canisterEntry); + switch 
(putUserShareAgent(canisterEntry)) { + case (false) { + return #Err(#Other("Error in putUserShareAgent")); + }; + case (true) { + return #Ok(canisterEntry); + }; + }; + }; + case (?canisterEntry) { + //existing entry + D.print("GameState: putShareAgentCanister - canisterEntry already exists -" # debug_show(canisterEntry)); + return #Err(#Other("Canister entry already exists")); + }; + }; + }; + + private func getShareAgentCanister(canisterAddress : Text) : ?Types.OfficialMainerAgentCanister { + switch (shareAgentCanistersStorage.get(canisterAddress)) { + case (null) { return null; }; + case (?canisterEntry) { return ?canisterEntry; }; + }; + }; + + private func removeShareAgentCanister(canisterAddress : Text) : Bool { + switch (shareAgentCanistersStorage.get(canisterAddress)) { + case (null) { return false; }; + case (?canisterEntry) { + let removeResult = shareAgentCanistersStorage.remove(canisterAddress); + // TODO - Implementation: remove from userToShareAgentsStorage + return true; + }; + }; + }; + + private func putUserShareAgent(canisterEntry : Types.OfficialMainerAgentCanister) : Bool { + switch (getUserShareAgents(canisterEntry.ownedBy)) { + case (null) { + // first entry + let userCanistersList : List.List = List.make(canisterEntry); + userToShareAgentsStorage.put(canisterEntry.ownedBy, userCanistersList); + return true; + }; + case (?userCanistersList) { + // existing list, add entry to it + let updatedUserCanistersList : List.List = List.push(canisterEntry, userCanistersList); + userToShareAgentsStorage.put(canisterEntry.ownedBy, updatedUserCanistersList); + return true; + }; + }; + }; + + private func getUserShareAgents(userId : Principal) : ?List.List { + switch (userToShareAgentsStorage.get(userId)) { + case (null) { return null; }; + case (?userCanistersList) { return ?userCanistersList; }; + }; + }; + + // Caution: function that returns all ShareAgent canisters (TODO - Security: decide if needed) + private func getShareAgents() : 
[Types.OfficialMainerAgentCanister] { + var shareAgents : List.List = List.nil(); + for (userShareAgentsList in userToShareAgentsStorage.vals()) { + shareAgents := List.append(userShareAgentsList, shareAgents); + }; + return List.toArray(shareAgents); + }; + + private func removeUserShareAgent(canisterEntry : Types.OfficialMainerAgentCanister) : Bool { + switch (getUserShareAgents(canisterEntry.ownedBy)) { + case (null) { return false; }; + case (?userCanistersList) { + //existing list, remove entry from it + let updatedUserCanistersList : List.List = List.filter(userCanistersList, func(listEntry: Types.OfficialMainerAgentCanister) : Bool { listEntry.address != canisterEntry.address }); + userToShareAgentsStorage.put(canisterEntry.ownedBy, updatedUserCanistersList); + return true; + }; + }; + }; + + // Share Service: flag to decide whether cycles should be sent to LLMs automatically as part of flow + stable var SEND_CYCLES_TO_LLM : Bool = true; + + public shared (msg) func toggleSendCyclesToLlmFlagAdmin() : async Types.AuthRecordResult { + if (Principal.isAnonymous(msg.caller)) { + return #Err(#Unauthorized); + }; + if (not Principal.isController(msg.caller)) { + return #Err(#Unauthorized); + }; + SEND_CYCLES_TO_LLM := not SEND_CYCLES_TO_LLM; + let authRecord = { auth = "You set the flag to " # debug_show(SEND_CYCLES_TO_LLM) }; + return #Ok(authRecord); + }; + + public query (msg) func getSendCyclesToLlmFlagAdmin() : async Types.FlagResult { + if (Principal.isAnonymous(msg.caller)) { + return #Err(#Unauthorized); + }; + if (not Principal.isController(msg.caller)) { + return #Err(#Unauthorized); + }; + + return #Ok({ flag = SEND_CYCLES_TO_LLM }); + }; + + // Share Service: Move cycles to operator's wallet (e.g. 
onicai) + stable let OPERATOR_WALLET_ADDRESS : Text = "jh35u-eqaaa-aaaag-abf3a-cai"; + stable var cyclesTransactionsStorage : List.List = List.nil(); + + public query (msg) func getCyclesTransactionsAdmin() : async Types.CyclesTransactionsResult { + if (Principal.isAnonymous(msg.caller)) { + return #Err(#Unauthorized); + }; + if (not Principal.isController(msg.caller)) { + return #Err(#Unauthorized); + }; + return #Ok(List.toArray(cyclesTransactionsStorage)); + }; + + stable var MIN_CYCLES_BALANCE : Nat = 30 * Constants.CYCLES_TRILLION; + stable var CYCLES_AMOUNT_TO_OPERATOR : Nat = 10 * Constants.CYCLES_TRILLION; + + public shared (msg) func sendCyclesToOperatorAdmin() : async Types.AddCyclesResult { + if (Principal.isAnonymous(msg.caller)) { + return #Err(#Unauthorized); + }; + if (not Principal.isController(msg.caller)) { + return #Err(#Unauthorized); + }; + let currentCyclesBalance : Nat = Cycles.balance(); + try { + // Only move cycles if cycles balance is big enough + if (currentCyclesBalance - CYCLES_AMOUNT_TO_OPERATOR < MIN_CYCLES_BALANCE) { + D.print("ShareService: sendCyclesToOperatorAdmin - requested cycles transaction but balance is not big enough: " # debug_show(currentCyclesBalance) # debug_show(msg)); + return #Err(#Unauthorized); + }; + + D.print("ShareService: sendCyclesToOperatorAdmin - OPERATOR_WALLET_ADDRESS: " # debug_show(OPERATOR_WALLET_ADDRESS)); + D.print("ShareService: sendCyclesToOperatorAdmin - CYCLES_AMOUNT_TO_OPERATOR: " # debug_show(CYCLES_AMOUNT_TO_OPERATOR)); + Cycles.add(CYCLES_AMOUNT_TO_OPERATOR); + // Send via system API + D.print("ShareService: sendCyclesToOperatorAdmin - calling system API's deposit_cycles to send cycles"); + let deposit_cycles_args = { canister_id : Principal = Principal.fromText(OPERATOR_WALLET_ADDRESS); }; + let _ = ignore IC0.deposit_cycles(deposit_cycles_args); + D.print("ShareService: sendCyclesToOperatorAdmin - called deposit_cycles with ignore"); + // Store the transaction + let transactionEntry : 
// (continuation) success path of sendCyclesToOperatorAdmin: record the completed
// cycles transfer in the stable transaction log.
// NOTE(review): generic type arguments appear stripped throughout this chunk
// (e.g. "List.List", "Buffer.Buffer" without <T>) - verify against the repo.
Types.CyclesTransaction = {
  amountAdded : Nat = CYCLES_AMOUNT_TO_OPERATOR;
  newOfficialCycleBalance : Nat = Cycles.balance();
  creationTimestamp : Nat64 = Nat64.fromNat(Int.abs(Time.now()));
  sentBy : Principal = msg.caller;
  succeeded : Bool = true;
  previousCyclesBalance : Nat = currentCyclesBalance;
};
cyclesTransactionsStorage := List.push(transactionEntry, cyclesTransactionsStorage);
D.print("ShareService: sendCyclesToOperatorAdmin - stored transactionEntry: " # debug_show(transactionEntry));
let addCyclesResponse : Types.AddCyclesRecord = {
  added : Bool = true;
  amount : Nat = CYCLES_AMOUNT_TO_OPERATOR;
};
return #Ok(addCyclesResponse);
} catch (e) {
  // The inter-canister call failed: log it and record the failed attempt
  // in the same stable transaction log so admins can audit it.
  D.print("ShareService: sendCyclesToOperatorAdmin - Failed to send cycles: " # Error.message(e));
  // Store the failed attempt
  let transactionEntry : Types.CyclesTransaction = {
    amountAdded : Nat = CYCLES_AMOUNT_TO_OPERATOR;
    newOfficialCycleBalance : Nat = Cycles.balance();
    creationTimestamp : Nat64 = Nat64.fromNat(Int.abs(Time.now()));
    sentBy : Principal = msg.caller;
    succeeded : Bool = false;
    previousCyclesBalance : Nat = currentCyclesBalance;
  };
  cyclesTransactionsStorage := List.push(transactionEntry, cyclesTransactionsStorage);
  return #Err(#Other("ShareService: sendCyclesToOperatorAdmin - Failed to send cycles: " # Error.message(e)));
};
};

// Admin: set the minimum cycles balance this canister must retain.
// Rejects values below 20T cycles. Caller must be a controller.
// NOTE(review): the validation failure reuses #StatusCode(401) (an auth code)
// rather than 400 - confirm whether callers rely on this.
public shared (msg) func setMinCyclesBalanceAdmin(newCyclesBalance : Nat) : async Types.StatusCodeRecordResult {
  if (not Principal.isController(msg.caller)) {
    return #Err(#StatusCode(401));
  };
  if (newCyclesBalance < 20 * Constants.CYCLES_TRILLION) {
    return #Err(#StatusCode(401));
  };
  MIN_CYCLES_BALANCE := newCyclesBalance;
  return #Ok({ status_code = 200 });
};

// Admin query: current minimum cycles balance.
// NOTE(review): returns 0 for non-controllers, which is indistinguishable
// from a genuine 0 setting - confirm callers treat 0 as "unauthorized".
public query (msg) func getMinCyclesBalanceAdmin() : async Nat {
  if (not Principal.isController(msg.caller)) {
    return 0;
  };

  return MIN_CYCLES_BALANCE;
};

// Admin: set how many cycles each operator transfer sends.
// Capped at 100T cycles per transfer.
public shared (msg) func setCyclesToSendToOperatorAdmin(newValue : Nat) : async Types.StatusCodeRecordResult {
  if (not Principal.isController(msg.caller)) {
    return #Err(#StatusCode(401));
  };
  if (newValue > 100 * Constants.CYCLES_TRILLION) {
    return #Err(#StatusCode(401));
  };
  CYCLES_AMOUNT_TO_OPERATOR := newValue;
  return #Ok({ status_code = 200 });
};

// Admin query: current per-transfer cycles amount (0 when unauthorized).
public query (msg) func getCyclesToSendToOperatorAdmin() : async Nat {
  if (not Principal.isController(msg.caller)) {
    return 0;
  };

  return CYCLES_AMOUNT_TO_OPERATOR;
};

// --------------------------------------------------------------------------
// Orthogonal Persisted Data storage

// The minimum cycle balance we want to maintain
stable let CYCLE_BALANCE_MINIMUM = 250 * Constants.CYCLES_BILLION;

// A flag for the frontend to pick up and display a message to the user
stable var PAUSED_DUE_TO_LOW_CYCLE_BALANCE : Bool = false;

// Internal functions to check if the canister has enough cycles
// True when the balance covers this challenge's submission fee, the
// type-specific generation fees, and the CYCLE_BALANCE_MINIMUM reserve.
private func sufficientCyclesToProcessChallenge(challenge : Types.Challenge) : Bool {
  // The ShareService canister does not Queue or Submit
  if (MAINER_AGENT_CANISTER_TYPE == #ShareService) {
    return true;
  };

  let availableCycles = Cycles.balance();
  var requiredCycles = challenge.cyclesSubmitResponse + CYCLE_BALANCE_MINIMUM;
  if (MAINER_AGENT_CANISTER_TYPE == #ShareAgent) {
    requiredCycles := requiredCycles + challenge.cyclesGenerateResponseSactrlSsctrl;
  };
  if (MAINER_AGENT_CANISTER_TYPE == #Own) {
    // TODO: do calculation based on actual setting for LOW, MEDIUM, HIGH
    // Currently assumes the worst case (HIGH) generation cost.
    requiredCycles := requiredCycles + challenge.cyclesGenerateResponseOwnctrlGs + challenge.cyclesGenerateResponseOwnctrlOwnllmHIGH;
  };
  if (availableCycles < requiredCycles) {
    D.print("mAIner (" # debug_show(MAINER_AGENT_CANISTER_TYPE) # "): CYCLE BALANCE TOO LOW TO PROCESS CHALLENGE:");
    D.print("mAIner (" # debug_show(MAINER_AGENT_CANISTER_TYPE) # "): requiredCycles = " # debug_show(requiredCycles));
    D.print("mAIner (" # debug_show(MAINER_AGENT_CANISTER_TYPE)
// (continuation of sufficientCyclesToProcessChallenge's low-balance diagnostics)
# "): availableCycles = " # debug_show(availableCycles));
    return false;
  };
  return true;
};

// True when the balance covers the GameState submission fee for one response
// while keeping CYCLE_BALANCE_MINIMUM in reserve.
private func sufficientCyclesToSubmit(cyclesSubmitResponse : Nat) : Bool {
  // The ShareService canister does not submit
  if (MAINER_AGENT_CANISTER_TYPE == #ShareService) {
    return true;
  };

  let availableCycles = Cycles.balance();
  let requiredCycles = cyclesSubmitResponse + CYCLE_BALANCE_MINIMUM;
  if (availableCycles < requiredCycles) {
    D.print("mAIner (" # debug_show(MAINER_AGENT_CANISTER_TYPE) # "): CYCLE BALANCE TOO LOW TO SUBMIT RESPONSE TO GAMESTATE:");
    D.print("mAIner (" # debug_show(MAINER_AGENT_CANISTER_TYPE) # "): requiredCycles = " # debug_show(requiredCycles));
    D.print("mAIner (" # debug_show(MAINER_AGENT_CANISTER_TYPE) # "): availableCycles = " # debug_show(availableCycles));
    return false;
  };
  return true;
};

// Admin query: issue flags (currently only the low-cycle-balance pause flag).
public query (msg) func getIssueFlagsAdmin() : async Types.IssueFlagsRetrievalResult {
  if (not Principal.isController(msg.caller)) {
    return #Err(#Unauthorized);
  };
  let response : Types.IssueFlagsRecord = {
    lowCycleBalance = PAUSED_DUE_TO_LOW_CYCLE_BALANCE;
  };
  return #Ok(response);
};

// Statistics
// TODO - Implementation: set based on cycles flow data calculated in GameState
stable var TOTAL_MAINER_CYCLES_BURNT : Nat = 100 * Constants.CYCLES_BILLION; // Initial value represents costs for creating this canister

// TODO - Implementation: ensure all relevant events for cycle buring are captured and adjust cycle burning numbers below to actual values
// Accumulate burnt cycles into the running total; always returns true.
private func increaseTotalCyclesBurnt(cyclesBurntToAdd : Nat) : Bool {
  TOTAL_MAINER_CYCLES_BURNT := TOTAL_MAINER_CYCLES_BURNT + cyclesBurntToAdd;
  return true;
};

// TODO - Implementation: set based on cycles flow data calculated in GameState
stable let CYCLES_BURNT_RESPONSE_GENERATION : Nat = 200 * Constants.CYCLES_BILLION;

// This is just a placeholder to be used until the startTimerExecution is called.
stable let CYCLES_BURN_RATE_DEFAULT : Types.CyclesBurnRate = {
  cycles : Nat = 1 * Constants.CYCLES_TRILLION;
  timeInterval : Types.TimeInterval = #Daily;
};

// Admin query: total burnt cycles, current balance, and the active burn rate.
public query (msg) func getMainerStatisticsAdmin() : async Types.StatisticsRetrievalResult {
  if (not Principal.isController(msg.caller)) {
    return #Err(#Unauthorized);
  };
  var cyclesBurnRateToReturn : Types.CyclesBurnRate = CYCLES_BURN_RATE_DEFAULT;
  switch (getCurrentAgentSettings()) {
    case (null) {};
    case (?agentSettings) {
      // NOTE(review): this arm binds `agentSettings` but assigns
      // `cyclesBurnRateFromGameState` (declared elsewhere in the file) -
      // confirm it should not derive the rate from `agentSettings` instead.
      cyclesBurnRateToReturn := cyclesBurnRateFromGameState;
    };
  };
  let response : Types.StatisticsRecord = {
    totalCyclesBurnt = TOTAL_MAINER_CYCLES_BURNT;
    cycleBalance = Cycles.balance();
    cyclesBurnRate = cyclesBurnRateToReturn;
  };
  return #Ok(response);
};

// timer IDs for reporting purposes (actual stopping uses the buffers)
// Note: they're stable for historical reasons; could be transient because timers do not survive upgrades
// is ok, because startTimer & stopTimer functions will reset them
stable var initialTimerId1 : ?Timer.TimerId = null; // For reporting only
stable var recurringTimerId1 : ?Timer.TimerId = null;
stable var recurringTimerId2 : ?Timer.TimerId = null;

// Configurable buffer max size for timer IDs
stable var TIMER_BUFFER_MAX_SIZE : Nat = 4;

// Non-stable buffers to track timer IDs created since last upgrade
// These reset to empty after each upgrade, which is the desired behavior
// FIFO buffers with configurable max length
// NOTE(review): element type of Buffer.Buffer looks stripped in this chunk - confirm.
transient let bufferTimerId1 = Buffer.Buffer(TIMER_BUFFER_MAX_SIZE);
transient let bufferTimerId2 = Buffer.Buffer(TIMER_BUFFER_MAX_SIZE);

// Helper function to add timer ID using FIFO approach with configurable max length
// New IDs go to the front, so the entry at the back is the oldest one.
private func addTimerToBuffer(buffer : Buffer.Buffer, timerId : Timer.TimerId) : () {
  if (buffer.size() >= TIMER_BUFFER_MAX_SIZE) {
    // Remove the oldest entry (FIFO)
    ignore buffer.removeLast();
  };
  // Add new timer ID to the beginning
buffer.insert(0, timerId);
};

// Record of settings
// Newest-first list; the head is the currently active settings record.
stable var agentSettings : List.List = List.nil();

// Prepend a settings record so the head of the list is always the newest entry.
private func putAgentSettings(entry : Types.MainerAgentSettings) : Bool {
  agentSettings := List.push(entry, agentSettings);
  true
};

// Newest settings entry, or null when none have been stored yet.
private func getCurrentAgentSettings() : ?Types.MainerAgentSettings {
  List.get(agentSettings, 0)
};

// Admin query: the active settings record, or #Err when none exist.
public shared query (msg) func getCurrentAgentSettingsAdmin() : async Types.MainerAgentSettingsResult {
  if (Principal.isAnonymous(msg.caller) or not Principal.isController(msg.caller)) {
    return #Err(#Unauthorized);
  };
  switch (getCurrentAgentSettings()) {
    case (?settings) { #Ok(settings) };
    case (null) { #Err(#Other("No agent settings found")) };
  }
};

// Admin query: the full settings history, newest first.
public shared query (msg) func getAgentSettingsAdmin() : async Types.MainerAgentSettingsListResult {
  if (Principal.isAnonymous(msg.caller) or not Principal.isController(msg.caller)) {
    return #Err(#Unauthorized);
  };
  #Ok(List.toArray(agentSettings))
};

// Record of timers since last upgrade
transient var agentTimers : List.List = List.nil();

// Prepend a timers record; the head of the list is the newest entry.
private func putAgentTimers(entry : Types.MainerAgentTimers) : Bool {
  agentTimers := List.push(entry, agentTimers);
  true
};

// Newest timers entry, or null when none have been recorded yet.
private func getCurrentAgentTimers() : ?Types.MainerAgentTimers {
  List.get(agentTimers, 0)
};

// Admin query: the current timers record, or #Err when none exist.
public shared query (msg) func getCurrentAgentTimersAdmin() : async Types.MainerAgentTimersResult {
  if (Principal.isAnonymous(msg.caller) or not Principal.isController(msg.caller)) {
    return #Err(#Unauthorized);
  };
  switch (getCurrentAgentTimers()) {
    case (?timers) { #Ok(timers) };
    case (null) { #Err(#Other("No agent timers found")) };
  }
};

// Admin query: the full timers history since the last upgrade, newest first.
public shared query (msg) func getAgentTimersAdmin() : async Types.MainerAgentTimersListResult {
  if (Principal.isAnonymous(msg.caller) or not Principal.isController(msg.caller)) {
    return #Err(#Unauthorized);
  };
  #Ok(List.toArray(agentTimers))
};

// FIFO queue of challenges: retrieved from GameState; to be processed
stable var MAX_CHALLENGES_IN_QUEUE : Nat = 5;
stable var challengeQueue : List.List = List.nil();

// Enqueue a challenge (stored newest-first; consumers pop from the head).
private func pushChallengeQueue(input : Types.ChallengeQueueInput) : Bool {
  challengeQueue := List.push(input, challengeQueue);
  true
};

// Pop the head of the queue; null when the queue is empty.
private func popChallengeQueue() : ?Types.ChallengeQueueInput {
  let popped = List.pop(challengeQueue);
  challengeQueue := popped.1;
  popped.0
};

// Look up a queued challenge by its queue id.
private func getChallengeQueueFromId(challengeQueuedId : Text) : ?Types.ChallengeQueueInput {
  List.find(challengeQueue, func(entry : Types.ChallengeQueueInput) : Bool { entry.challengeQueuedId == challengeQueuedId })
};

// Drop every queue entry carrying the given queue id.
private func removeChallengeQueue(challengeQueuedId : Text) : Bool {
  challengeQueue := List.filter(challengeQueue, func(entry : Types.ChallengeQueueInput) : Bool { entry.challengeQueuedId != challengeQueuedId });
  true
};

// Admin query: the queued challenges as an array.
public query (msg) func getChallengeQueueAdmin() : async Types.ChallengeQueueInputsResult {
  if (not Principal.isController(msg.caller)) {
    return #Err(#Unauthorized);
  };
  #Ok(List.toArray(challengeQueue))
};

// Admin: discard every queued challenge.
public shared (msg) func resetChallengeQueueAdmin() : async Types.StatusCodeRecordResult {
  if (not Principal.isController(msg.caller)) {
    return #Err(#StatusCode(401));
  };
  challengeQueue := List.nil();
  #Ok({ status_code = 200 })
};

// Record of generated responses
stable var generatedResponses : List.List = List.nil();

// Store a generated response (newest first).
private func putGeneratedResponse(entry : Types.ChallengeResponseSubmissionInput) : Bool {
  generatedResponses := List.push(entry, generatedResponses);
  true
};

// Find a generated response by its challenge id.
private func getGeneratedResponse(challengeId : Text) : ?Types.ChallengeResponseSubmissionInput {
  List.find(generatedResponses, func(entry : Types.ChallengeResponseSubmissionInput) : Bool { entry.challengeId == challengeId })
};

// All generated responses as an array, newest first.
private func getGeneratedResponses() : [Types.ChallengeResponseSubmissionInput] {
  List.toArray(generatedResponses)
};

// Drop the generated response(s) for the given challenge id.
private func removeGeneratedResponse(challengeId : Text) : Bool {
  generatedResponses := List.filter(generatedResponses, func(entry : Types.ChallengeResponseSubmissionInput) : Bool { entry.challengeId != challengeId });
  true
};

// Record of submitted responses
stable var submittedResponses : List.List = List.nil();

// Store a submitted response (newest first).
private func putSubmittedResponse(entry : Types.ChallengeResponseSubmission) : Bool {
  submittedResponses := List.push(entry, submittedResponses);
  true
};

// Find a submitted response by its submission id.
private func getSubmittedResponse(submissionId : Text) : ?Types.ChallengeResponseSubmission {
  List.find(submittedResponses, func(entry : Types.ChallengeResponseSubmission) : Bool { entry.submissionId == submissionId })
};

// All submitted responses as an array, newest first.
private func getSubmittedResponses() : [Types.ChallengeResponseSubmission] {
  List.toArray(submittedResponses)
};

// The most recent submissions, up to numberToRetrieve of them.
private func getLastSubmittedResponses(numberToRetrieve : Nat) : [Types.ChallengeResponseSubmission] {
  List.toArray(List.take(submittedResponses, numberToRetrieve))
};

// Drop the submission carrying the given submission id from the record.
private func removeSubmittedResponse(submissionId : Text) : Bool {
  submittedResponses := List.filter(submittedResponses, func(entry : Types.ChallengeResponseSubmission) : Bool { entry.submissionId != submissionId });
  true
};

// Admin query: all responses submitted to GameState, newest first.
public query (msg) func getSubmittedResponsesAdmin() : async Types.ChallengeResponseSubmissionsResult {
  if (not Principal.isController(msg.caller)) {
    return #Err(#Unauthorized);
  };
  let submissions : [Types.ChallengeResponseSubmission] = getSubmittedResponses();
  return #Ok(submissions);
};

// Admin query: the 5 most recently submitted responses.
public query (msg) func getRecentSubmittedResponsesAdmin() : async Types.ChallengeResponseSubmissionsResult {
  if (not Principal.isController(msg.caller)) {
    return #Err(#Unauthorized);
  };
  let submissions : [Types.ChallengeResponseSubmission] = getLastSubmittedResponses(5);
  return #Ok(submissions);
};

// -------------------------------------------------------------------------------
// The C++ LLM canisters that can be called

// Stable mirror of the llm canister list - presumably synced in
// pre/post-upgrade hooks outside this chunk; TODO confirm.
stable var llmCanistersStable : [Text] = [];
private var llmCanisters : Buffer.Buffer = Buffer.fromArray([]);

// Round-robin load balancer for LLM canisters to call
private var roundRobinIndex : Nat = 0;
private var roundRobinUseAll : Bool = true;
private var roundRobinLLMs : Nat = 0; // Only used when roundRobinUseAll is false

// Admin query: registered LLM canister ids plus the round-robin configuration.
public shared query (msg) func get_llm_canisters() : async Types.LlmCanistersRecordResult {
  if (Principal.isAnonymous(msg.caller)) {
    return #Err(#StatusCode(401));
  };
  if (not Principal.isController(msg.caller)) {
    return #Err(#StatusCode(401));
  };
  let llmCanisterIds : [Types.CanisterAddress] = Buffer.toArray(
    Buffer.map(llmCanisters, func (llm : Types.LLMCanister) : Text {
      Principal.toText(Principal.fromActor(llm))
    })
  );
  return #Ok({
    llmCanisterIds = llmCanisterIds;
    roundRobinUseAll = roundRobinUseAll;
    roundRobinLLMs = roundRobinLLMs;
  });
};

// Admin: drop every registered LLM canister and reset the round-robin state.
public shared (msg) func reset_llm_canisters() : async Types.StatusCodeRecordResult {
  if (Principal.isAnonymous(msg.caller)) {
    return #Err(#StatusCode(401));
  };
  if (not Principal.isController(msg.caller)) {
    return #Err(#StatusCode(401));
  };
  D.print("mAIner (" # debug_show(MAINER_AGENT_CANISTER_TYPE) # "): reset_llm_canisters - Resetting all LLM canisters & round-robin state");
  llmCanisters.clear();
  resetRoundRobinLLMs_();
  return #Ok({ status_code = 200 });
};

// Admin: register another LLM canister by its canister id.
public shared (msg) func add_llm_canister(llmCanisterIdRecord : Types.CanisterIDRecord) : async Types.StatusCodeRecordResult {
  if (Principal.isAnonymous(msg.caller)) {
    return #Err(#StatusCode(401));
  };
  if (not Principal.isController(msg.caller)) {
    return #Err(#StatusCode(401));
  };
  D.print("mAIner (" # debug_show(MAINER_AGENT_CANISTER_TYPE) # "): add_llm_canister - Adding llm: " # llmCanisterIdRecord.canister_id);
  let llmCanister = actor (llmCanisterIdRecord.canister_id) : Types.LLMCanister;
  llmCanisters.add(llmCanister);
  return #Ok({ status_code = 200 });
};

// Admin: unregister the LLM canister with the given id (404 when absent).
public shared (msg) func remove_llm_canister(llmCanisterIdRecord : Types.CanisterIDRecord) : async Types.StatusCodeRecordResult {
  if (Principal.isAnonymous(msg.caller)) {
    return #Err(#StatusCode(401));
  };
  if (not Principal.isController(msg.caller)) {
    return #Err(#StatusCode(401));
  };

  let targetCanisterText = llmCanisterIdRecord.canister_id;

  // Remove the LLM canister if found
  // Note: Iter.range includes its upper bound, so this visits one index past
  // the end; getOpt returns null there, which the `case null` arm skips.
  for (i in Iter.range(0, llmCanisters.size())) {
    let existing = llmCanisters.getOpt(i);
    switch (existing) {
      case (?item) {
        let principalText = Principal.toText(Principal.fromActor(item));
        if (principalText == targetCanisterText) {
          ignore llmCanisters.remove(i);

          // For safety against out-of-bounds, reset roundRobinIndex
          roundRobinIndex := 0;

          D.print("mAIner (" # debug_show(MAINER_AGENT_CANISTER_TYPE) # "): remove_llm_canister - Removed llm: " # targetCanisterText);
          return #Ok({ status_code = 200 });
        };
      };
      case null {}; // Skip if none
    };
  };
  D.print("mAIner (" # debug_show(MAINER_AGENT_CANISTER_TYPE) # "): remove_llm_canister - Cannot find llm in the list: " # targetCanisterText);
  return #Err(#StatusCode(404)); // Not found
};


// Admin function to reset roundRobinLLMs
// Admin function to reset roundRobinLLMs:
// restores the default round-robin behavior (rotate over every registered LLM).
public shared (msg) func resetRoundRobinLLMs() : async Types.StatusCodeRecordResult {
  if (Principal.isAnonymous(msg.caller) or not Principal.isController(msg.caller)) {
    return #Err(#StatusCode(401));
  };
  resetRoundRobinLLMs_();
  #Ok({ status_code = 200 })
};

// Reset the round-robin state: use all LLMs, starting from the first.
private func resetRoundRobinLLMs_() {
  roundRobinUseAll := true;
  roundRobinLLMs := 0; // Use all LLMs
  roundRobinIndex := 0;
};

// Admin function to restrict the round-robin rotation to a fixed number of LLMs.
public shared (msg) func setRoundRobinLLMs(_roundRobinLLMs : Nat) : async Types.StatusCodeRecordResult {
  if (Principal.isAnonymous(msg.caller) or not Principal.isController(msg.caller)) {
    return #Err(#StatusCode(401));
  };
  roundRobinUseAll := false;
  roundRobinLLMs := _roundRobinLLMs;
  roundRobinIndex := 0;

  #Ok({ status_code = 200 })
};

// Echo the caller's principal.
public shared query (msg) func whoami() : async Principal {
  msg.caller
};

// Function to verify that canister is up & running
public shared query func health() : async Types.StatusCodeRecordResult {
  if (MAINTENANCE) {
    return #Err(#Other("mAIner is under maintenance"));
  };
  #Ok({ status_code = 200 })
};

// Function to verify that canister is ready for inference:
// every registered LLM canister must report ready.
public shared (msg) func ready() : async Types.StatusCodeRecordResult {
  if (not Principal.isController(msg.caller)) {
    return #Err(#StatusCode(401));
  };
  for (llm in llmCanisters.vals()) {
    try {
      let status : Types.StatusCodeRecordResult = await llm.ready();
      switch (status) {
        // Propagate the first failure; otherwise keep checking the rest.
        case (#Err(_)) { return status };
        case (_) {};
      };
    } catch (_) {
      // Handle errors, such as llm canister not responding
      return #Err(#Other("Failed to call ready endpoint of llm canister = " # Principal.toText(Principal.fromActor(llm))));
    };
  };
  #Ok({ status_code = 200 })
};

// Admin function to verify that caller is a controller of this canister
public shared query (msg) func amiController() : async Types.StatusCodeRecordResult {
  if (not Principal.isController(msg.caller)) {
    return #Err(#StatusCode(401));
  };
  #Ok({ status_code = 200 })
};

// Admin function to verify that mainer_ctrlb_canister is a controller of all the llm canisters
public shared (msg) func checkAccessToLLMs() : async Types.StatusCodeRecordResult {
  if (not Principal.isController(msg.caller)) {
    return #Err(#StatusCode(401));
  };

  // Call all the llm canisters to verify that mainer_ctrlb_canister is a controller
  for (llm in llmCanisters.vals()) {
    try {
      let status : Types.StatusCodeRecordResult = await llm.check_access();
      switch (status) {
        // Propagate the first failure; otherwise keep checking the rest.
        case (#Err(_)) { return status };
        case (_) {};
      };
    } catch (_) {
      // Handle errors, such as llm canister not responding
      return #Err(#Other("Call failed to llm canister = " # Principal.toText(Principal.fromActor(llm))));
    };
  };
  #Ok({ status_code = 200 })
};

// TODO: deprecate this function - use get_llm_canisters instead
public query (msg) func getLLMCanisterIds() : async Types.CanisterAddressesResult {
  if (not Principal.isController(msg.caller)) {
    return #Err(#StatusCode(401));
  };

  var ids : List.List = List.nil();

  for (llm in llmCanisters.vals()) {
    try {
      ids := List.push(Principal.toText(Principal.fromActor(llm)), ids);
    } catch (error : Error) {
      return #Err(#Other("Call failed to load llm canisters = " # Principal.toText(Principal.fromActor(llm)) # Error.message(error)));
    };
  };

  #Ok(List.toArray(ids))
};

// Settings
// True when agent settings may be updated: either none exist yet,
// or the newest settings entry is older than 24 hours.
private func areAgentSettingsUpdateable() : Bool {
  switch (getCurrentAgentSettings()) {
    case (null) {
      // first update, so all good
      return true;
    };
    case (?agentSettings) {
      // Check that last update was more than a day ago (one update per day is allowed)
      // NOTE(review): Nat64 subtraction traps if the stored timestamp were ever
      // ahead of the current time - assumes well-ordered timestamps.
      let currentTime = Nat64.fromNat(Int.abs(Time.now()));
      let oneDayNanos : Nat64 = 86_400_000_000_000; // 24h in nanoseconds

      if (currentTime - agentSettings.creationTimestamp < oneDayNanos) {
        return false;
      };
      return true;
    };
  };
};

// Admin: report whether updateAgentSettings would currently be accepted.
public shared (msg) func canAgentSettingsBeUpdated() : async Types.StatusCodeRecordResult {
  if (not Principal.isController(msg.caller)) {
    return #Err(#StatusCode(401));
  };
  switch (areAgentSettingsUpdateable()) {
    case (true) {
      return #Ok({ status_code = 200 });
    };
    case (false) {
      return #Err(#Other("Last update is not yet 24h ago."));
    };
  };
};

// Admin: nanoseconds remaining until the next settings update is allowed (0 = now).
public shared (msg) func timeToNextAgentSettingsUpdate() : async Types.NatResult {
  if (not Principal.isController(msg.caller)) {
    return #Err(#StatusCode(401));
  };
  switch (getCurrentAgentSettings()) {
    case (null) {
      // first update, so all good
      return #Ok(0);
    };
    case (?agentSettings) {
      // one update per day is allowed
      let currentTime = Nat64.fromNat(Int.abs(Time.now()));
      let oneDayNanos : Nat64 = 86_400_000_000_000; // 24h in nanoseconds

      if (currentTime - agentSettings.creationTimestamp >= oneDayNanos) {
        return #Ok(0); // last update was more than a day, so may be updated now
      };
      let remainingTime = oneDayNanos - (currentTime - agentSettings.creationTimestamp);
      return #Ok(Nat64.toNat(remainingTime));
    };
  };
};

// Admin: store a new settings record (rate-limited to one update per 24h)
// and restart the timers so the new settings take effect.
public shared (msg) func updateAgentSettings(settingsInput : Types.MainerAgentSettingsInput) : async Types.StatusCodeRecordResult {
  if (not Principal.isController(msg.caller)) {
    return #Err(#StatusCode(401));
  };
  // Validate the requested burn rate; #Custom is declared but not yet supported.
  switch (settingsInput.cyclesBurnRate) {
    case (#Low) {
      // continue
    };
    case (#Mid) {
      // continue
    };
    case (#High) {
      // continue
    };
    case (#VeryHigh) {
      // continue
    };
    case (#Custom(customCyclesBurnRate)) {
      // currently not supported
      return #Err(#StatusCode(400));
    };
    // NOTE(review): with the five cases above, this wildcard arm looks
    // unreachable unless the variant has further tags - confirm.
    case (_) {
      return #Err(#StatusCode(400));
    };
  };
  switch (areAgentSettingsUpdateable()) {
    case (true) {
      // continue
    };
    case (false) {
      return #Err(#Other("Last update is not yet 24h ago."));
    };
  };

  // NOTE(review): the field annotation uses Types.CyclesBurnRateDefault while
  // other sites use Types.CyclesBurnRate - confirm the intended type name.
  let settingsEntry : Types.MainerAgentSettings = {
    cyclesBurnRate : Types.CyclesBurnRateDefault = settingsInput.cyclesBurnRate;
    creationTimestamp : Nat64 = Nat64.fromNat(Int.abs(Time.now()));
    createdBy : Principal = msg.caller;
  };
  D.print("mAIner (" # debug_show(MAINER_AGENT_CANISTER_TYPE) # "): updateAgentSettings - settingsEntry = " # debug_show(settingsEntry));
  let putResult = putAgentSettings(settingsEntry);
  if (not putResult) {
    return #Err(#StatusCode(500));
  };

  // Restart the timers to apply the new settings
  // NOTE(review): stopResult is bound but never inspected - confirm whether a
  // failed stop should abort the restart.
  let stopResult = await stopTimerExecution();
  ignore startTimerExecution(msg.caller, "updateAgentSettings");

  return #Ok({ status_code = 200 });
};

// Respond to challenges

// Fetch a random open challenge from the GameState canister.
private func getChallengeFromGameStateCanister() : async Types.ChallengeResult {
  let gameStateCanisterActor = actor (GAME_STATE_CANISTER_ID) : Types.GameStateCanister_Actor;
  D.print("mAIner (" # debug_show(MAINER_AGENT_CANISTER_TYPE) # "): calling getRandomOpenChallenge of gameStateCanisterActor = " # Principal.toText(Principal.fromActor(gameStateCanisterActor)));
  let result : Types.ChallengeResult = await gameStateCanisterActor.getRandomOpenChallenge();
  D.print("mAIner (" # debug_show(MAINER_AGENT_CANISTER_TYPE) # "): getRandomOpenChallenge returned.");
  return result;
};

private func processRespondingToChallenge(challengeQueueInput : Types.ChallengeQueueInput) : async () {
  // Generate the response for the challengeQueueInput and:
  // (-) 'Own' canister submits it to GameState
  // (-) 'ShareService' canister sends it back to the 'ShareAgent' canister which submits it to GameState
  // (cont.) ... the 'ShareAgent' canister which submits it to GameState

  D.print("mAIner (" # debug_show(MAINER_AGENT_CANISTER_TYPE) # "): processRespondingToChallenge - calling respondToChallengeDoIt_");
  let respondingResult : Types.ChallengeResponseResult = await respondToChallengeDoIt_(challengeQueueInput);
  D.print("mAIner (" # debug_show(MAINER_AGENT_CANISTER_TYPE) # "): processRespondingToChallenge - returned from respondToChallengeDoIt_");
  D.print("mAIner (" # debug_show(MAINER_AGENT_CANISTER_TYPE) # "): respondingResult = " # debug_show (respondingResult));

  switch (respondingResult) {
    case (#Err(error)) {
      D.print("mAIner (" # debug_show(MAINER_AGENT_CANISTER_TYPE) # "): processRespondingToChallenge error" # debug_show (error));
      D.print("mAIner (" # debug_show(MAINER_AGENT_CANISTER_TYPE) # "): WARNING - ShareService is likely broken & admin must call resetChallengeQueueAdmin of the ShareAgent " # debug_show(challengeQueueInput.challengeQueuedBy) # " once the ShareService is fixed");
      // TODO - Error Handling
      // TODO - Design: in case of ShareService, do we refund the cycles to the ShareAgent?
      // NOTE:
      // - We are NOT sending anything back to the ShareAgent.
      // - This is the safest approach to avoid sucking all cycles out of the ShareAgent in case the ShareService is not working
      // - The ShareAgent's challengeQueue will simply fill up with challenges that cannot be processed
      //
      // -> Admin must run a script to reset the challengeQueue of all the ShareAgent caniseters once the ShareService is fixed
    };
    case (#Ok(respondingOutput : Types.ChallengeResponse)) {
      D.print("mAIner (" # debug_show(MAINER_AGENT_CANISTER_TYPE) # "): processRespondingToChallenge - calling putGeneratedResponse");
      D.print("mAIner (" # debug_show(MAINER_AGENT_CANISTER_TYPE) # "): respondingOutput = " # debug_show (respondingOutput));
      // TODO - Implementation: adapt cycles burnt stats
      ignore increaseTotalCyclesBurnt(CYCLES_BURNT_RESPONSE_GENERATION);

      var submittedBy : Principal = Principal.fromActor(this);
      if (MAINER_AGENT_CANISTER_TYPE == #ShareService) {
        // Prefill this, so the ShareAgent canister can submit it with the correct Principal
        submittedBy := challengeQueueInput.challengeQueuedBy;
      };
      // Copy the queued challenge into a submission record, attaching the
      // generated answer, its seed, and the submitting principal.
      let challengeResponseSubmissionInput : Types.ChallengeResponseSubmissionInput = {
        challengeTopic : Text = challengeQueueInput.challengeTopic;
        challengeTopicId : Text = challengeQueueInput.challengeTopicId;
        challengeTopicCreationTimestamp : Nat64 = challengeQueueInput.challengeTopicCreationTimestamp;
        challengeTopicStatus : Types.ChallengeTopicStatus = challengeQueueInput.challengeTopicStatus;
        cyclesGenerateChallengeGsChctrl : Nat = challengeQueueInput.cyclesGenerateChallengeGsChctrl;
        cyclesGenerateChallengeChctrlChllm : Nat = challengeQueueInput.cyclesGenerateChallengeChctrlChllm;
        challengeQuestion : Text = challengeQueueInput.challengeQuestion;
        challengeQuestionSeed : Nat32 = challengeQueueInput.challengeQuestionSeed;
        mainerPromptId : Text = challengeQueueInput.mainerPromptId;
        mainerMaxContinueLoopCount : Nat = challengeQueueInput.mainerMaxContinueLoopCount;
        mainerNumTokens : Nat64 = challengeQueueInput.mainerNumTokens;
        mainerTemp : Float = challengeQueueInput.mainerTemp;
        judgePromptId : Text = challengeQueueInput.judgePromptId;
        challengeId : Text = challengeQueueInput.challengeId;
        challengeCreationTimestamp : Nat64 = challengeQueueInput.challengeCreationTimestamp;
        challengeCreatedBy : Types.CanisterAddress = challengeQueueInput.challengeCreatedBy;
        challengeStatus : Types.ChallengeStatus = challengeQueueInput.challengeStatus;
        challengeClosedTimestamp : ?Nat64 = challengeQueueInput.challengeClosedTimestamp;
        cyclesSubmitResponse : Nat = challengeQueueInput.cyclesSubmitResponse;
        protocolOperationFeesCut : Nat = challengeQueueInput.protocolOperationFeesCut;
        cyclesGenerateResponseSactrlSsctrl : Nat = challengeQueueInput.cyclesGenerateResponseSactrlSsctrl;
        cyclesGenerateResponseSsctrlGs : Nat = challengeQueueInput.cyclesGenerateResponseSsctrlGs;
        cyclesGenerateResponseSsctrlSsllm : Nat = challengeQueueInput.cyclesGenerateResponseSsctrlSsllm;
        cyclesGenerateResponseOwnctrlGs : Nat = challengeQueueInput.cyclesGenerateResponseOwnctrlGs;
        cyclesGenerateResponseOwnctrlOwnllmLOW : Nat = challengeQueueInput.cyclesGenerateResponseOwnctrlOwnllmLOW;
        cyclesGenerateResponseOwnctrlOwnllmMEDIUM : Nat = challengeQueueInput.cyclesGenerateResponseOwnctrlOwnllmMEDIUM;
        cyclesGenerateResponseOwnctrlOwnllmHIGH : Nat = challengeQueueInput.cyclesGenerateResponseOwnctrlOwnllmHIGH;
        challengeQueuedId : Text = challengeQueueInput.challengeQueuedId;
        challengeQueuedBy : Principal = challengeQueueInput.challengeQueuedBy;
        challengeQueuedTo : Principal = challengeQueueInput.challengeQueuedTo;
        challengeQueuedTimestamp : Nat64 = challengeQueueInput.challengeQueuedTimestamp;
        challengeAnswer : Text = respondingOutput.generatedResponseText;
        challengeAnswerSeed : Nat32 = respondingOutput.generationSeed;
        submittedBy : Principal = submittedBy;
      };

      if (MAINER_AGENT_CANISTER_TYPE == #ShareService) {
        // Send the response back to the ShareAgent canister
        // (ignore: deliberately not awaited here)
        ignore sendResponseToShareAgent(challengeResponseSubmissionInput);
      } else {
        ignore storeAndSubmitResponse(challengeResponseSubmissionInput);
      };
    };
  };
  // NOTE(review): a closing "};" for processRespondingToChallenge appears to be
  // missing at this point in this chunk - verify brace balance against the repo.
  };

// Forward a generated response to the originating ShareAgent canister,
// which is responsible for submitting it to GameState.
private func sendResponseToShareAgent(challengeResponseSubmissionInput : Types.ChallengeResponseSubmissionInput) : async () {
  let shareAgentCanisterActor = actor (Principal.toText(challengeResponseSubmissionInput.challengeQueuedBy)) : Types.MainerCanister_Actor;
  D.print("mAIner (" # debug_show(MAINER_AGENT_CANISTER_TYPE) # "): sendResponseToShareAgent- calling addChallengeResponseToShareAgent of shareAgentCanisterActor = " # Principal.toText(Principal.fromActor(shareAgentCanisterActor)));
  let result : Types.StatusCodeRecordResult = await shareAgentCanisterActor.addChallengeResponseToShareAgent(challengeResponseSubmissionInput);
  D.print("mAIner (" # debug_show(MAINER_AGENT_CANISTER_TYPE) # "): sendResponseToShareAgent - returned from addChallengeResponseToShareAgent.challengeResponseSubmissionInput with result = " # debug_show(result));
};

// Callback function of ShareAgent canister to receive the challengeResponseSubmissionInput from the ShareService canister
// Validates the caller (must be the configured ShareService) and the record
// (queuedBy = this canister, queuedTo = caller, queue entry must exist),
// then removes the queue entry and stores/submits the response.
public shared (msg) func addChallengeResponseToShareAgent(challengeResponseSubmissionInput : Types.ChallengeResponseSubmissionInput) : async Types.StatusCodeRecordResult {
  if (Principal.isAnonymous(msg.caller)) {
    return #Err(#Unauthorized);
  };

  // Only ShareAgent can handle this call
  if (MAINER_AGENT_CANISTER_TYPE != #ShareAgent) {
    return #Err(#Unauthorized);
  };

  // Only the ShareService canister may call this
  if (Principal.toText(msg.caller) != SHARE_SERVICE_CANISTER_ID) {
    D.print("mAIner (" # debug_show(MAINER_AGENT_CANISTER_TYPE) # "): addChallengeResponseToShareAgent - caller is not a ShareService");
    return #Err(#Unauthorized);
  };
  // Check that the record looks correct

  // queuedBy must be this canister
  if (challengeResponseSubmissionInput.challengeQueuedBy != Principal.fromActor(this)) {
    D.print("mAIner (" # debug_show(MAINER_AGENT_CANISTER_TYPE) # "): addChallengeResponseToShareAgent - challengeQueuedBy error");
    return #Err(#Unauthorized);
  };

  // queuedTo must be the caller
  if (challengeResponseSubmissionInput.challengeQueuedTo != msg.caller) {
    D.print("mAIner (" # debug_show(MAINER_AGENT_CANISTER_TYPE) # "): addChallengeResponseToShareAgent - challengeQueuedTo error");
    return #Err(#Unauthorized);
  };

  // The entry must exist in the ShareAgent canisters own ChallengeQueue
  let challengeQueuedId = challengeResponseSubmissionInput.challengeQueuedId;
  let challengeQueueInput : ?Types.ChallengeQueueInput = getChallengeQueueFromId(challengeQueuedId);
  if (challengeQueueInput == null) {
    D.print("mAIner (" # debug_show(MAINER_AGENT_CANISTER_TYPE) # "): addChallengeResponseToShareAgent - challengeQueuedId error");
    return #Err(#Unauthorized);
  };

  // Ok, all looks kosher
  let _ = removeChallengeQueue(challengeQueuedId);
  ignore storeAndSubmitResponse(challengeResponseSubmissionInput);

  return #Ok({ status_code = 200 });
};

// Store a generated response locally, then (cycles permitting) submit it to GameState.
private func storeAndSubmitResponse(challengeResponseSubmissionInput : Types.ChallengeResponseSubmissionInput) : async () {
  // Store the generated response
  let storeResult : Bool = putGeneratedResponse(challengeResponseSubmissionInput);
  D.print("mAIner (" # debug_show(MAINER_AGENT_CANISTER_TYPE) # "): storeAndSubmitResponse - returned from putGeneratedResponse");

  switch (storeResult) {
    case (false) {
      D.print("mAIner (" # debug_show(MAINER_AGENT_CANISTER_TYPE) # "): storeAndSubmitResponse - storeResult error");
      // TODO - Error Handling
    };
    case (true) {
      // Check if the canister still has enough cycles to submit it
      // Check against the number sent by the GameState for this particular Challenge
      if (not sufficientCyclesToSubmit(challengeResponseSubmissionInput.cyclesSubmitResponse)) {
        // Note: do not pause, to avoid blocking the canister in case
of a single challenge with a really high cycle requirement + D.print("mAIner (" # debug_show(MAINER_AGENT_CANISTER_TYPE) # "): storeAndSubmitResponse - insufficientCyclesToSubmit"); + return; + }; + + // Check if there were any unofficial cycle top ups and if so pay the appropriate fee for the Protocol's operational expenses + var cyclesToSend = challengeResponseSubmissionInput.cyclesSubmitResponse; + let currentCyclesBalance = Cycles.balance(); + if (officialCyclesBalance < currentCyclesBalance) { + // Unofficial top ups were made, thus pay the fee for these top ups to Game State now as a share of the balances difference + // Use protocolOperationFeesCut that was sent by the GameState canister with the Challenge + D.print("mAIner (" # debug_show(MAINER_AGENT_CANISTER_TYPE) # "): storeAndSubmitResponse - Unofficial top ups were made"); + try { + let DIRECT_CYCLES_TOPUP_MULTIPLIER : Nat = 3; + let cyclesForOperationalExpenses = (currentCyclesBalance - officialCyclesBalance) * (challengeResponseSubmissionInput.protocolOperationFeesCut * DIRECT_CYCLES_TOPUP_MULTIPLIER) / 100; + D.print("mAIner (" # debug_show(MAINER_AGENT_CANISTER_TYPE) # "): storeAndSubmitResponse - Increasing cycles for operational expenses = " # debug_show(cyclesForOperationalExpenses)); + cyclesToSend := cyclesToSend + cyclesForOperationalExpenses; + } catch (error : Error) { + // Continue nevertheless + D.print("mAIner (" # debug_show(MAINER_AGENT_CANISTER_TYPE) # "): storeAndSubmitResponse - catch error when calculating fee to pay for unofficial top ups : "); + D.print("mAIner (" # debug_show(MAINER_AGENT_CANISTER_TYPE) # "): storeAndSubmitResponse - error: " # Error.message(error)); + }; + }; + + // Add the required amount of cycles + D.print("mAIner (" # debug_show(MAINER_AGENT_CANISTER_TYPE) # "): storeAndSubmitResponse - calling Cycles.add for = " # debug_show(cyclesToSend) # " Cycles"); + Cycles.add(cyclesToSend); + + let gameStateCanisterActor = actor (GAME_STATE_CANISTER_ID) : 
Types.GameStateCanister_Actor; + D.print("mAIner (" # debug_show(MAINER_AGENT_CANISTER_TYPE) # "): storeAndSubmitResponse - calling submitChallengeResponse of gameStateCanisterActor = " # Principal.toText(Principal.fromActor(gameStateCanisterActor))); + let submitMetadaResult : Types.ChallengeResponseSubmissionMetadataResult = await gameStateCanisterActor.submitChallengeResponse(challengeResponseSubmissionInput); + D.print("mAIner (" # debug_show(MAINER_AGENT_CANISTER_TYPE) # "): storeAndSubmitResponse - returned from gameStateCanisterActor.submitChallengeResponse"); + switch (submitMetadaResult) { + case (#Err(error)) { + D.print("mAIner (" # debug_show(MAINER_AGENT_CANISTER_TYPE) # "): storeAndSubmitResponse - submitMetada error"); + D.print(debug_show (error)); + // TODO - Error Handling + }; + case (#Ok(submitMetada : Types.ChallengeResponseSubmissionMetadata)) { + // Successfully submitted to Game State + let challengeResponseSubmission : Types.ChallengeResponseSubmission = { + challengeTopic : Text = challengeResponseSubmissionInput.challengeTopic; + challengeTopicId : Text = challengeResponseSubmissionInput.challengeTopicId; + challengeTopicCreationTimestamp : Nat64 = challengeResponseSubmissionInput.challengeTopicCreationTimestamp; + challengeTopicStatus : Types.ChallengeTopicStatus = challengeResponseSubmissionInput.challengeTopicStatus; + cyclesGenerateChallengeGsChctrl : Nat = challengeResponseSubmissionInput.cyclesGenerateChallengeGsChctrl; + cyclesGenerateChallengeChctrlChllm : Nat = challengeResponseSubmissionInput.cyclesGenerateChallengeChctrlChllm; + challengeQuestion : Text = challengeResponseSubmissionInput.challengeQuestion; + challengeQuestionSeed : Nat32 = challengeResponseSubmissionInput.challengeQuestionSeed; + mainerPromptId : Text = challengeResponseSubmissionInput.mainerPromptId; + mainerMaxContinueLoopCount : Nat = challengeResponseSubmissionInput.mainerMaxContinueLoopCount; + mainerNumTokens : Nat64 = 
challengeResponseSubmissionInput.mainerNumTokens; + mainerTemp : Float = challengeResponseSubmissionInput.mainerTemp; + judgePromptId : Text = challengeResponseSubmissionInput.judgePromptId; + challengeId : Text = challengeResponseSubmissionInput.challengeId; + challengeCreationTimestamp : Nat64 = challengeResponseSubmissionInput.challengeCreationTimestamp; + challengeCreatedBy : Types.CanisterAddress = challengeResponseSubmissionInput.challengeCreatedBy; + challengeStatus : Types.ChallengeStatus = challengeResponseSubmissionInput.challengeStatus; + challengeClosedTimestamp : ?Nat64 = challengeResponseSubmissionInput.challengeClosedTimestamp; + cyclesSubmitResponse : Nat = challengeResponseSubmissionInput.cyclesSubmitResponse; + protocolOperationFeesCut : Nat = challengeResponseSubmissionInput.protocolOperationFeesCut; + cyclesGenerateResponseSactrlSsctrl : Nat = challengeResponseSubmissionInput.cyclesGenerateResponseSactrlSsctrl; + cyclesGenerateResponseSsctrlGs : Nat = challengeResponseSubmissionInput.cyclesGenerateResponseSsctrlGs; + cyclesGenerateResponseSsctrlSsllm : Nat = challengeResponseSubmissionInput.cyclesGenerateResponseSsctrlSsllm; + cyclesGenerateResponseOwnctrlGs : Nat = challengeResponseSubmissionInput.cyclesGenerateResponseOwnctrlGs; + cyclesGenerateResponseOwnctrlOwnllmLOW : Nat = challengeResponseSubmissionInput.cyclesGenerateResponseOwnctrlOwnllmLOW; + cyclesGenerateResponseOwnctrlOwnllmMEDIUM : Nat = challengeResponseSubmissionInput.cyclesGenerateResponseOwnctrlOwnllmMEDIUM; + cyclesGenerateResponseOwnctrlOwnllmHIGH : Nat = challengeResponseSubmissionInput.cyclesGenerateResponseOwnctrlOwnllmHIGH; + challengeQueuedId : Text = challengeResponseSubmissionInput.challengeQueuedId; + challengeQueuedBy : Principal = challengeResponseSubmissionInput.challengeQueuedBy; + challengeQueuedTo : Principal = challengeResponseSubmissionInput.challengeQueuedTo; + challengeQueuedTimestamp : Nat64 = challengeResponseSubmissionInput.challengeQueuedTimestamp; + 
challengeAnswer : Text = challengeResponseSubmissionInput.challengeAnswer; + challengeAnswerSeed : Nat32 = challengeResponseSubmissionInput.challengeAnswerSeed; + submittedBy : Principal = challengeResponseSubmissionInput.submittedBy; + submissionId : Text = submitMetada.submissionId; + submittedTimestamp : Nat64 = submitMetada.submittedTimestamp; + submissionStatus : Types.ChallengeResponseSubmissionStatus = submitMetada.submissionStatus; + cyclesGenerateScoreGsJuctrl : Nat = submitMetada.cyclesGenerateScoreGsJuctrl; + cyclesGenerateScoreJuctrlJullm : Nat = submitMetada.cyclesGenerateScoreJuctrlJullm; + }; + // Update official cycles balance after the successful submission + // Any outstanding top up fees were paid and it's reflected in cyclesToSend + officialCyclesBalance := currentCyclesBalance - cyclesToSend; + // Sanity check + let newCyclesBalance = Cycles.balance(); + if (officialCyclesBalance < newCyclesBalance) { + D.print("mAIner storeAndSubmitResponse - after updating the official cycles balance, it is still smaller than the actual balance"); + D.print("mAIner storeAndSubmitResponse - officialCyclesBalance: " # debug_show(officialCyclesBalance)); + D.print("mAIner storeAndSubmitResponse - newCyclesBalance: " # debug_show(newCyclesBalance)); + }; + + D.print("mAIner (" # debug_show(MAINER_AGENT_CANISTER_TYPE) # "): storeAndSubmitResponse - calling putSubmittedResponse"); + let putResult = putSubmittedResponse(challengeResponseSubmission); + D.print("mAIner (" # debug_show(MAINER_AGENT_CANISTER_TYPE) # "): storeAndSubmitResponse - return from putSubmittedResponse"); + switch (putResult) { + case (false) { + D.print("mAIner (" # debug_show(MAINER_AGENT_CANISTER_TYPE) # "): storeAndSubmitResponse - putResult error"); + // TODO - Error Handling + }; + case (true) { + // TODO - Implementation: adapt cycles burnt stats - also, check we're not counting double... 
ignore increaseTotalCyclesBurnt(CYCLES_BURNT_RESPONSE_GENERATION);
                            };
                        };
                    };
                };
            };
        };
    };

    /// Generates a response for one queued challenge by driving an LLM canister:
    /// 1) fetch prompt info from the GameState canister,
    /// 2) health-check a round-robin-selected LLM canister and (optionally) forward cycles to it,
    /// 3) restore or download+upload the prompt cache file,
    /// 4) run new_chat plus a bounded run_update loop to ingest the prompt and generate text,
    /// 5) clean up the prompt cache and return the generated response.
    /// Any failure along the way returns an #Err; on success returns a Types.ChallengeResponse.
    private func respondToChallengeDoIt_(challengeQueueInput : Types.ChallengeQueueInput) : async Types.ChallengeResponseResult {
        let maxContinueLoopCount : Nat = challengeQueueInput.mainerMaxContinueLoopCount; // After this many calls to run_update, we stop.
        let num_tokens : Nat64 = challengeQueueInput.mainerNumTokens; // Mostly we stop after maxContinueLoopCount update calls & this is never actually used
        let temp : Float = challengeQueueInput.mainerTemp;

        // --------------------------------------------------------
        // var promptRepetitive : Text = "<|im_start|>user\nAnswer the following question as brief as possible. This is the question: ";
        // var prompt : Text = promptRepetitive # challengeQueueInput.challengeQuestion # "\n<|im_end|>\n<|im_start|>assistant\n";
        let mainerPromptId : Text = challengeQueueInput.mainerPromptId;
        let gameStateCanisterActor = actor (GAME_STATE_CANISTER_ID) : Types.GameStateCanister_Actor;
        D.print("mAIner (" # debug_show(MAINER_AGENT_CANISTER_TYPE) # "): calling getMainerPromptInfo of gameStateCanisterActor = " # Principal.toText(Principal.fromActor(gameStateCanisterActor)));
        let mainerPromptInfoResult : Types.MainerPromptInfoResult = await gameStateCanisterActor.getMainerPromptInfo(mainerPromptId);
        D.print("mAIner (" # debug_show(MAINER_AGENT_CANISTER_TYPE) # "): getMainerPromptInfo returned.");
        var prompt : Text = "";
        var promptCacheSha256 : Text = "";
        var promptSaveCache : Text = ""; // We will upload this into the LLM canister
        var promptCacheNumberOfChunks : Nat = 0;
        switch (mainerPromptInfoResult) {
            case (#Err(error)) {
                D.print("mAIner (" # debug_show(MAINER_AGENT_CANISTER_TYPE) # "): getMainerPromptInfo error " # debug_show (error));
                return #Err(error);
            };
            case (#Ok(mainerPromptInfo)) {
                prompt := mainerPromptInfo.promptText;
                promptCacheSha256 := mainerPromptInfo.promptCacheSha256;
                promptSaveCache := mainerPromptInfo.promptCacheFilename;
                promptCacheNumberOfChunks := mainerPromptInfo.promptCacheNumberOfChunks;
            };
        };

        // --------------------------------------------------------
        let llmCanister = _getRoundRobinCanister();
        let llmCanisterPrincipal : Principal = Principal.fromActor(llmCanister);

        D.print("mAIner (" # debug_show(MAINER_AGENT_CANISTER_TYPE) # "): respondToChallengeDoIt_ - llmCanister = " # Principal.toText(Principal.fromActor(llmCanister)));

        // Check health of llmCanister
        // D.print("mAIner (" # debug_show(MAINER_AGENT_CANISTER_TYPE) # "): respondToChallengeDoIt_ - calling health endpoint of LLM");
        let statusCodeRecordResult : Types.StatusCodeRecordResult = await llmCanister.health();
        // D.print("mAIner (" # debug_show(MAINER_AGENT_CANISTER_TYPE) # "): respondToChallengeDoIt_ - returned from health endpoint of LLM with : ");
        // D.print("mAIner (" # debug_show(MAINER_AGENT_CANISTER_TYPE) # "): respondToChallengeDoIt_ - statusCodeRecordResult: " # debug_show (statusCodeRecordResult));
        switch (statusCodeRecordResult) {
            case (#Err(error)) {
                return #Err(error);
            };
            case (#Ok(_statusCodeRecord)) {
                D.print("mAIner (" # debug_show(MAINER_AGENT_CANISTER_TYPE) # "): respondToChallengeDoIt_ - LLM is healthy");
            };
        };

        // First send cycles to the LLM, if enabled
        if (SEND_CYCLES_TO_LLM) {
            var cyclesAdded : Nat = challengeQueueInput.cyclesGenerateResponseSsctrlSsllm;
            if (MAINER_AGENT_CANISTER_TYPE == #Own) {
                cyclesAdded := challengeQueueInput.cyclesGenerateResponseOwnctrlOwnllmHIGH; // TODO: adjust for mAIners with setting LOW or MEDIUM
            };
            try {
                D.print("mAIner (" # debug_show(MAINER_AGENT_CANISTER_TYPE) # "): respondToChallengeDoIt_ - calling Cycles.add for = " # debug_show(cyclesAdded) # " Cycles");
                Cycles.add(cyclesAdded);

                D.print("mAIner (" # debug_show(MAINER_AGENT_CANISTER_TYPE) # "): respondToChallengeDoIt_ - calling IC0.deposit_cycles for LLM " # debug_show(llmCanisterPrincipal));
                let deposit_cycles_args = { canister_id : Principal = llmCanisterPrincipal; };
                let _ = await IC0.deposit_cycles(deposit_cycles_args);

                D.print("mAIner (" # debug_show(MAINER_AGENT_CANISTER_TYPE) # "): respondToChallengeDoIt_ - Successfully deposited " # debug_show(cyclesAdded) # " cycles to LLM canister " # debug_show(llmCanisterPrincipal) );
            } catch (e) {
                D.print("mAIner (" # debug_show(MAINER_AGENT_CANISTER_TYPE) # "): respondToChallengeDoIt_ - Failed to deposit " # debug_show(cyclesAdded) # " cycles to LLM canister " # debug_show(llmCanisterPrincipal));
                D.print("mAIner (" # debug_show(MAINER_AGENT_CANISTER_TYPE) # "): respondToChallengeDoIt_ - Failed to deposit error is" # Error.message(e));

                return #Err(#FailedOperation);
            };
        };

        let generationId : Text = await Utils.newRandomUniqueId();

        // Use the generationId to create a highly variable seed for the LLM
        let seed : Nat32 = Utils.getRandomLlmSeed(generationId);
        D.print("mAIner (" # debug_show(MAINER_AGENT_CANISTER_TYPE) # "): respondToChallengeDoIt_ - seed = " # debug_show(seed));

        var generationOutput : Text = "";
        let generationPrompt : Text = prompt;

        // The prompt cache file
        let promptCache : Text = generationId # ".cache";

        // Start the generation for this challengeQueueInput
        var num_update_calls : Nat64 = 0;

        // data returned from new_chat
        var status_code : Nat16 = 0;
        var output : Text = "";
        var conversation : Text = "";
        var error : Text = "";
        var prompt_remaining : Text = "";
        var generated_eog : Bool = false;

        // ----------------------------------------------------------------------
        // Step 0
        // Restore a previously saved prompt cache file
        // let promptSaveCache : Text = Nat32.toText(Text.hash(promptRepetitive)) # ".cache";
        // We will check if the one from the Challenger is already in this LLM
        var foundPromptSaveCache : Bool = false;

        try {
            let copyPromptCacheInputRecord : Types.CopyPromptCacheInputRecord = {
                from = promptSaveCache;
                to = promptCache
            };
            D.print("mAIner (" # debug_show(MAINER_AGENT_CANISTER_TYPE) # "): calling copy_prompt_cache to restore a previously saved promptCache if it exists. promptSaveCache: " # promptSaveCache);
            num_update_calls += 1;
            let statusCodeRecordResult : Types.StatusCodeRecordResult = await llmCanister.copy_prompt_cache(copyPromptCacheInputRecord);
            D.print("mAIner (" # debug_show(MAINER_AGENT_CANISTER_TYPE) # "): returned from copy_prompt_cache with statusCodeRecordResult: " # debug_show (statusCodeRecordResult));
            switch (statusCodeRecordResult) {
                case (#Err(_)) {
                    // Not an error per se: the saved cache simply is not present in this LLM.
                    foundPromptSaveCache := false;
                };
                case (#Ok(_)) {
                    foundPromptSaveCache := true;
                    D.print("mAIner (" # debug_show(MAINER_AGENT_CANISTER_TYPE) # "): respondToChallengeDoIt_ - foundPromptSaveCache ! (no need to get it again from Gamestate.) " # debug_show(promptCache));
                };
            };
        } catch (error : Error) {
            // Handle errors, such as llm canister not responding
            D.print("mAIner (" # debug_show(MAINER_AGENT_CANISTER_TYPE) # "): catch error when calling copy_prompt_cache : ");
            D.print("mAIner (" # debug_show(MAINER_AGENT_CANISTER_TYPE) # "): error: " # Error.message(error));
            return #Err(
                #Other(
                    "Failed call to copy_prompt_cache of " # Principal.toText(Principal.fromActor(llmCanister)) #
                    " with error: " # Error.message(error)
                )
            );
        };

        if (not foundPromptSaveCache) {
            // Cache miss: download the prompt cache from GameState chunk-by-chunk (with retries),
            // then upload it into the LLM canister chunk-by-chunk.
            D.print("mAIner (" # debug_show(MAINER_AGENT_CANISTER_TYPE) # "): respondToChallengeDoIt_ - Did not find promptCache (will get it from Gamestate & upload to LLM) " # debug_show(promptCache));
            let mainerPromptCacheBuffer : Buffer.Buffer = Buffer.Buffer(0);
            for (i in Iter.range(0, promptCacheNumberOfChunks - 1)) {
                var delay : Nat = 2_000_000_000; // 2 seconds
                let maxAttempts : Nat = 8;
                let downloadMainerPromptCacheBytesChunkInput : Types.DownloadMainerPromptCacheBytesChunkInput = {
                    mainerPromptId = mainerPromptId;
                    chunkID = i;
                };
                let downloadMainerPromptCacheBytesChunkRecordResult: Types.DownloadMainerPromptCacheBytesChunkRecordResult = await retryGameStateMainerPromptCacheChunkDownloadWithDelay(gameStateCanisterActor, downloadMainerPromptCacheBytesChunkInput, maxAttempts, delay);
                switch (downloadMainerPromptCacheBytesChunkRecordResult) {
                    case (#Err(error)) {
                        D.print("mAIner (" # debug_show(MAINER_AGENT_CANISTER_TYPE) # ") - ERROR during download of mAIner prompt cache chunk - statusCodeRecordResult:" # debug_show (statusCodeRecordResult));
                        return #Err(error);
                    };
                    case (#Ok(downloadMainerPromptCacheBytesChunkRecord)) {
                        D.print("mAIner (" # debug_show(MAINER_AGENT_CANISTER_TYPE) # ") - download of mAIner prompt cache chunk successful - chunkID: " # debug_show (downloadMainerPromptCacheBytesChunkRecord.chunkID));
                        mainerPromptCacheBuffer.add(downloadMainerPromptCacheBytesChunkRecord.bytesChunk);
                    };
                };
            };

            // ---------------------------------------------------------
            // Upload prompt cache file
            var chunkSize : Nat = 0;
            var offset : Nat = 0;
            var nextChunk : [Nat8] = [];

            // For progress reporting
            var modelUploadProgress : Nat8 = 0;
            let modelUploadProgressInterval : Nat = 10; // 10% progress interval

            D.print("mAIner (" # debug_show(MAINER_AGENT_CANISTER_TYPE) # "): respondToChallengeDoIt_ - Downloaded the promptCache from Gamestate. Will now upload to LLM - " # debug_show(promptCache));
            var chunkCount : Nat = 0;
            let totalChunks : Nat = mainerPromptCacheBuffer.size();
            var nextProgressThreshold : Nat = 0;

            var fileUploadRecordResult : Types.FileUploadRecordResult = #Ok({ filename = promptCache; filesha256 = ""; filesize = 0 }); // Placeholder
            for (chunk in mainerPromptCacheBuffer.vals()) {
                var progress : Nat = (chunkCount * 100) / totalChunks; // Integer division rounds down
                if (chunkCount + 1 == totalChunks) {
                    progress := 100; // Set to 100% for the last chunk
                };
                if (progress >= nextProgressThreshold) {
                    modelUploadProgress := Nat8.fromNat(nextProgressThreshold); // Set to 0, 10, 20, ..., 100
                    D.print("mAIner (" # debug_show(MAINER_AGENT_CANISTER_TYPE) # "): respondToChallengeDoIt_ - uploading promptCache chunk " # debug_show (chunkCount) # "(modelUploadProgress = " # debug_show (modelUploadProgress) # "%)");
                    nextProgressThreshold += modelUploadProgressInterval;
                };
                chunkCount := chunkCount + 1;

                nextChunk := Blob.toArray(chunk);
                chunkSize := nextChunk.size();
                let uploadChunk : Types.UploadPromptCacheInputRecord = {
                    promptcache = promptCache;
                    chunk = nextChunk;
                    chunksize = Nat64.fromNat(chunkSize);
                    offset = Nat64.fromNat(offset);
                };

                var delay : Nat = 2_000_000_000; // 2 seconds
                let maxAttempts : Nat = 8;
                fileUploadRecordResult := await retryLlmPrompCacheChunkUploadWithDelay(llmCanister, uploadChunk, maxAttempts, delay);
                switch (fileUploadRecordResult) {
                    case (#Err(error)) {
                        D.print("mAIner (" # debug_show(MAINER_AGENT_CANISTER_TYPE) # "): respondToChallengeDoIt_ - ERROR uploading a promptCache chunk - uploadModelFileResult:");
                        D.print(debug_show (fileUploadRecordResult));
                        return #Err(error);
                    };
                    case (#Ok(_)) {
                        // all good, continue with next chunk
                        D.print("mAIner (" # debug_show(MAINER_AGENT_CANISTER_TYPE) # "): respondToChallengeDoIt_ - success uploading a promptCache chunk - fileUploadRecordResult = " # debug_show (fileUploadRecordResult));
                        offset := offset + chunkSize;
                    };
                };
            };

            D.print("mAIner (" # debug_show(MAINER_AGENT_CANISTER_TYPE) # "): respondToChallengeDoIt_ - after prompt cache upload -- checking filesha256.");
            switch (fileUploadRecordResult) {
                case (#Err(error)) {
                    D.print("mAIner (" # debug_show(MAINER_AGENT_CANISTER_TYPE) # "): respondToChallengeDoIt_ - ERROR - fileUploadRecordResult:" # debug_show (fileUploadRecordResult));
                    return #Err(error);
                };
                case (#Ok(fileUploadRecordResult)) {
                    D.print("mAIner (" # debug_show(MAINER_AGENT_CANISTER_TYPE) # "): respondToChallengeDoIt_ - fileUploadRecordResult" # debug_show (fileUploadRecordResult));
                    // Check the sha256
                    let filesha256 : Text = fileUploadRecordResult.filesha256;
                    let expectedSha256 : Text = promptCacheSha256;

                    // NOTE(review): a sha256 mismatch is only logged, not treated as an error —
                    // see the TODO below (Challenger must set promptCacheSha256 first).
                    if (not (filesha256 == expectedSha256)) {
                        D.print("mAIner (" # debug_show(MAINER_AGENT_CANISTER_TYPE) # "): respondToChallengeDoIt_ - ERROR: filesha256 = " # debug_show (filesha256) # "does not match expectedSha256 = " # debug_show (expectedSha256));
                        D.print("mAIner (" # debug_show(MAINER_AGENT_CANISTER_TYPE) # "): respondToChallengeDoIt_ - THIS IS A TODO FOR THE CHALLENGER !!!");
                        // TODO - Challenger must set the promptCacheSha256
                        // return #Err(#Other("The sha256 of the uploaded llm file is " # filesha256 # ", which does not match the expected value of " # expectedSha256));
                    } else {
                        D.print("mAIner (" # debug_show(MAINER_AGENT_CANISTER_TYPE) # "): respondToChallengeDoIt_ - filesha256 matches expectedSha256 = " # debug_show (expectedSha256));
                    };
                };
            };

            // -----
            // Save the prompt cache for reuse with next submission using the same prompt
            try {
                let copyPromptCacheInputRecord : Types.CopyPromptCacheInputRecord = {
                    from = promptCache;
                    to = promptSaveCache
                };
                D.print("mAIner (" # debug_show(MAINER_AGENT_CANISTER_TYPE) # "): calling copy_prompt_cache to save the uploaded promptCache (" # promptCache # ") to promptSaveCache: " # promptSaveCache);
                let statusCodeRecordResult : Types.StatusCodeRecordResult = await llmCanister.copy_prompt_cache(copyPromptCacheInputRecord);
                D.print("mAIner (" # debug_show(MAINER_AGENT_CANISTER_TYPE) # "): returned from copy_prompt_cache with statusCodeRecordResult: " # debug_show (statusCodeRecordResult));
                switch (statusCodeRecordResult) {
                    case (#Err(error)) {
                        D.print("mAIner (" # debug_show(MAINER_AGENT_CANISTER_TYPE) # "): respondToChallengeDoIt_ - ERROR - statusCodeRecordResult:" # debug_show (fileUploadRecordResult));
                        return #Err(error);
                    };
                    case (#Ok(_)) {
                        foundPromptSaveCache := true;
                    };
                };
            } catch (error : Error) {
                // Handle errors, such as llm canister not responding
                D.print("mAIner (" # debug_show(MAINER_AGENT_CANISTER_TYPE) # "): catch error when calling copy_prompt_cache : " # Error.message(error));
                return #Err(
                    #Other(
                        "Failed call to copy_prompt_cache of " # Principal.toText(Principal.fromActor(llmCanister)) #
                        " with error: " # Error.message(error)
                    )
                );
            };
        };

        // ----------------------------------------------------------------------
        // Step 1
        // Call new_chat
        try {
            let args : [Text] = [
                "--prompt-cache",
                promptCache,
            ];
            let inputRecord : Types.InputRecord = { args = args };
            D.print("mAIner (" # debug_show(MAINER_AGENT_CANISTER_TYPE) # "): respondToChallengeDoIt_ - calling new_chat...");
            // D.print(debug_show (args));
            num_update_calls += 1;
            let outputRecordResult : Types.OutputRecordResult = await llmCanister.new_chat(inputRecord);
            // D.print("mAIner (" # debug_show(MAINER_AGENT_CANISTER_TYPE) # "): respondToChallengeDoIt_ - returned from new_chat with outputRecordResult: ");
            // D.print(debug_show (outputRecordResult));

            switch (outputRecordResult) {
                case (#Err(error)) {
                    return #Err(error);
                };
                case (#Ok(outputRecord)) {
                    // the generated tokens
                    status_code := outputRecord.status_code;
                    output := outputRecord.output;
                    conversation := outputRecord.conversation;
                    error := outputRecord.error;
                    prompt_remaining := outputRecord.prompt_remaining;
                    generated_eog := outputRecord.generated_eog;
                    // D.print("mAIner (" # debug_show(MAINER_AGENT_CANISTER_TYPE) # "): respondToChallengeDoIt_ - status_code : " # debug_show (status_code));
                    D.print("mAIner (" # debug_show(MAINER_AGENT_CANISTER_TYPE) # "): respondToChallengeDoIt_ - output : " # debug_show (output));
                    // D.print("mAIner (" # debug_show(MAINER_AGENT_CANISTER_TYPE) # "): respondToChallengeDoIt_ - conversation : " # debug_show (conversation));
                    // D.print("mAIner (" # debug_show(MAINER_AGENT_CANISTER_TYPE) # "): respondToChallengeDoIt_ - error : " # debug_show (error));
                    // D.print("mAIner (" # debug_show(MAINER_AGENT_CANISTER_TYPE) # "): respondToChallengeDoIt_ - prompt_remaining : " # debug_show (prompt_remaining));
                    // D.print("mAIner (" # debug_show(MAINER_AGENT_CANISTER_TYPE) # "): respondToChallengeDoIt_ - generated_eog : " # debug_show (generated_eog));
                };
            };
        } catch (error : Error) {
            // Handle errors, such as llm canister not responding
            D.print("mAIner (" # debug_show(MAINER_AGENT_CANISTER_TYPE) # "): respondToChallengeDoIt_ - catch error when calling new_chat : ");
            D.print("mAIner (" # debug_show(MAINER_AGENT_CANISTER_TYPE) # "): respondToChallengeDoIt_ - error: " # Error.message(error));
            return #Err(
                #Other(
                    "Failed call to new_chat of " # Principal.toText(Principal.fromActor(llmCanister)) #
                    " with error: " # Error.message(error)
                )
            );
        };

        // ----------------------------------------------------------------------
        // Step 2
        // (A) Ingest the prompt into the prompt-cache, using multiple update calls
        //     (-) Repeat call with full prompt until `prompt_remaining` in the response is empty.
        //     (-) The first part of the challengeQueueInput will be generated too.
        // (B) Generate rest of challengeQueueInput, using multiple update calls
        //     (-) Repeat call with empty prompt until `generated_eog` in the response is `true`.
        //     (-) The rest of the challengeQueueInput will be generated.

        // Avoid endless loop by limiting the number of iterations
        var continueLoopCount : Nat = 0;
        label continueLoop while (continueLoopCount < maxContinueLoopCount) {
            try {
                let args = [
                    "--prompt-cache",
                    promptCache,
                    "--prompt-cache-all",
                    "--simple-io",
                    "--no-display-prompt", // only return generated text
                    "-n",
                    Nat64.toText(num_tokens),
                    "--seed",
                    Nat32.toText(seed),
                    "--temp",
                    Float.toText(temp),
                    "-p",
                    prompt,
                ];
                let inputRecord : Types.InputRecord = { args = args };
                D.print("mAIner (" # debug_show(MAINER_AGENT_CANISTER_TYPE) # "): respondToChallengeDoIt_ - calling run_update...");
                // D.print(debug_show (args));
                num_update_calls += 1;
                // Hard safety cap (independent of maxContinueLoopCount): counts ALL update calls
                // made so far in this function, not just generation steps.
                if (num_update_calls > 30) {
                    D.print("mAIner (" # debug_show(MAINER_AGENT_CANISTER_TYPE) # "): respondToChallengeDoIt_ - too many calls run_update - Breaking out of loop...");
                    break continueLoop; // Protective break for endless loop.
                };
                let outputRecordResult : Types.OutputRecordResult = await llmCanister.run_update(inputRecord);
                // D.print("mAIner (" # debug_show(MAINER_AGENT_CANISTER_TYPE) # "): respondToChallengeDoIt_ - INGESTING PROMPT:returned from run_update with outputRecordResult: ");
                // D.print(debug_show (outputRecordResult));

                switch (outputRecordResult) {
                    case (#Err(error)) {
                        return #Err(error);
                    };
                    case (#Ok(outputRecord)) {
                        // the generated tokens
                        status_code := outputRecord.status_code;
                        output := outputRecord.output;
                        conversation := outputRecord.conversation;
                        error := outputRecord.error;
                        prompt_remaining := outputRecord.prompt_remaining;
                        generated_eog := outputRecord.generated_eog;
                        // D.print("mAIner (" # debug_show(MAINER_AGENT_CANISTER_TYPE) # "): respondToChallengeDoIt_ - status_code : " # debug_show (status_code));
                        D.print("mAIner (" # debug_show(MAINER_AGENT_CANISTER_TYPE) # "): respondToChallengeDoIt_ - output : " # debug_show (output));
                        // D.print("mAIner (" # debug_show(MAINER_AGENT_CANISTER_TYPE) # "): respondToChallengeDoIt_ - conversation : " # debug_show (conversation));
                        // D.print("mAIner (" # debug_show(MAINER_AGENT_CANISTER_TYPE) # "): respondToChallengeDoIt_ - error : " # debug_show (error));
                        // D.print("mAIner (" # debug_show(MAINER_AGENT_CANISTER_TYPE) # "): respondToChallengeDoIt_ - prompt_remaining : " # debug_show (prompt_remaining));
                        // D.print("mAIner (" # debug_show(MAINER_AGENT_CANISTER_TYPE) # "): respondToChallengeDoIt_ - generated_eog : " # debug_show (generated_eog));

                        generationOutput := generationOutput # output;
                        // D.print("mAIner (" # debug_show(MAINER_AGENT_CANISTER_TYPE) # "): respondToChallengeDoIt_ - generationOutput : " # debug_show (generationOutput));

                        if (prompt_remaining == "") {
                            prompt := ""; // Send empty prompt - the prompt ingestion is done.
                            continueLoopCount += 1; // We count the actual generation steps
                            // NO LONGER NEEDED - WE leave it here for now in case want to restore the logic in future
                            // // -----
                            // // Prompt ingestion is finished. If it was not yet there, save the prompt cache for reuse with next submission
                            // if (not foundPromptSaveCache) {
                            //     try {
                            //         let copyPromptCacheInputRecord : Types.CopyPromptCacheInputRecord = {
                            //             from = promptCache;
                            //             to = promptSaveCache
                            //         };
                            //         D.print("mAIner (" # debug_show(MAINER_AGENT_CANISTER_TYPE) # "): calling copy_prompt_cache to save the promptCache to promptSaveCache: " # promptSaveCache);
                            //         num_update_calls += 1;
                            //         let statusCodeRecordResult : Types.StatusCodeRecordResult = await llmCanister.copy_prompt_cache(copyPromptCacheInputRecord);
                            //         D.print("mAIner (" # debug_show(MAINER_AGENT_CANISTER_TYPE) # "): returned from copy_prompt_cache with statusCodeRecordResult: " # debug_show (statusCodeRecordResult));
                            //         // We do not care what the result is, as it is just a possible optimization operation
                            //     } catch (error : Error) {
                            //         // Handle errors, such as llm canister not responding
                            //         D.print("mAIner (" # debug_show(MAINER_AGENT_CANISTER_TYPE) # "): catch error when calling copy_prompt_cache : ");
                            //         D.print("mAIner (" # debug_show(MAINER_AGENT_CANISTER_TYPE) # "): error: " # Error.message(error));
                            //         return #Err(
                            //             #Other(
                            //                 "Failed call to copy_prompt_cache of " # Principal.toText(Principal.fromActor(llmCanister)) #
                            //                 " with error: " # Error.message(error)
                            //             )
                            //         );
                            //     };
                            // };
                        };
                        if (generated_eog) {
                            break continueLoop; // Exit the loop - the mAIner response is generated.
                        };
                    };
                };
            } catch (error : Error) {
                // Handle errors, such as llm canister not responding
                // NOTE(review): this log line says "new_chat" but this catch wraps run_update;
                // the #Other message below is correct ("run_update").
                D.print("mAIner (" # debug_show(MAINER_AGENT_CANISTER_TYPE) # "): respondToChallengeDoIt_ - catch error when calling new_chat : ");
                D.print("mAIner (" # debug_show(MAINER_AGENT_CANISTER_TYPE) # "): respondToChallengeDoIt_ - error: " # Error.message(error));
                return #Err(
                    #Other(
                        "Failed call to run_update of " # Principal.toText(Principal.fromActor(llmCanister)) #
                        " with error: " # Error.message(error)
                    )
                );
            };
        };

        // Delete the prompt cache in the LLM
        try {
            let args : [Text] = [
                "--prompt-cache",
                promptCache,
            ];
            let inputRecord : Types.InputRecord = { args = args };
            // D.print("mAIner (" # debug_show(MAINER_AGENT_CANISTER_TYPE) # "): respondToChallengeDoIt_ - calling remove_prompt_cache with args: ");
            // D.print(debug_show (args));
            num_update_calls += 1;
            let outputRecordResult : Types.OutputRecordResult = await llmCanister.remove_prompt_cache(inputRecord);
            // D.print("mAIner (" # debug_show(MAINER_AGENT_CANISTER_TYPE) # "): respondToChallengeDoIt_ - returned from remove_prompt_cache with outputRecordResult: ");
            // D.print(debug_show (outputRecordResult));

        } catch (error : Error) {
            // Handle errors, such as llm canister not responding
            D.print("mAIner (" # debug_show(MAINER_AGENT_CANISTER_TYPE) # "): respondToChallengeDoIt_ - catch error when calling remove_prompt_cache : ");
            D.print("mAIner (" # debug_show(MAINER_AGENT_CANISTER_TYPE) # "): respondToChallengeDoIt_ - error: " # Error.message(error));
            return #Err(
                #Other(
                    "Failed call to remove_prompt_cache of " # Principal.toText(Principal.fromActor(llmCanister)) #
                    " with error: " # Error.message(error)
                )
            );
        };

        // Return the generated response
        let responseOutput : Types.ChallengeResponse = {
            challengeId : Text = challengeQueueInput.challengeId;
            generationId : Text = generationId;
            generationSeed : Nat32 = seed;
            generatedTimestamp : Nat64 =
Nat64.fromNat(Int.abs(Time.now())); + generatedByLlmId : Text = Principal.toText(Principal.fromActor(llmCanister)); + generationPrompt : Text = generationPrompt; + generatedResponseText : Text = generationOutput; + }; + return #Ok(responseOutput); + }; + + // Downloads a chunk of the mAIner prompt cache file from the GameState canister + private func retryGameStateMainerPromptCacheChunkDownloadWithDelay(gameStateCanisterActor : Types.GameStateCanister_Actor, downloadMainerPromptCacheBytesChunkInput : Types.DownloadMainerPromptCacheBytesChunkInput, attempts : Nat, delay : Nat) : async Types.DownloadMainerPromptCacheBytesChunkRecordResult { + if (attempts > 0) { + try { + D.print("mAIner (" # debug_show(MAINER_AGENT_CANISTER_TYPE) # "): - retryGameStateMainerPromptCacheChunkDownloadWithDelay - calling gameStateCanisterActor.downloadMainerPromptCacheBytesChunk for mainerPromptId, chunkID = " # debug_show (downloadMainerPromptCacheBytesChunkInput.mainerPromptId) # ", " # debug_show (downloadMainerPromptCacheBytesChunkInput.chunkID)); + let downloadMainerPromptCacheBytesChunkRecordResult : Types.DownloadMainerPromptCacheBytesChunkRecordResult = await gameStateCanisterActor.downloadMainerPromptCacheBytesChunk(downloadMainerPromptCacheBytesChunkInput); + return downloadMainerPromptCacheBytesChunkRecordResult; + + } catch (e) { + D.print("mAIner (" # debug_show(MAINER_AGENT_CANISTER_TYPE) # "): - retryGameStateMainerPromptCacheChunkDownloadWithDelay - gameStateCanisterActor.uploadMainerPromptCacheBytesChunk failed with catch error " # Error.message(e) # ", retrying in " # debug_show(delay) # " nanoseconds"); + + // TODO - Implementation: introduce a delay using a timer... 
+ // Just retry immediately with decremented attempts + return await retryGameStateMainerPromptCacheChunkDownloadWithDelay(gameStateCanisterActor, downloadMainerPromptCacheBytesChunkInput, attempts - 1, delay); + }; + } else { + D.print("mAIner (" # debug_show(MAINER_AGENT_CANISTER_TYPE) # "): - retryGameStateMainerPromptCacheChunkDownloadWithDelay - Max retry attempts reached"); + return #Err(#Other("Max retry attempts reached")); + }; + }; + + // Uploads a chunk of the promptCache file to the LLM canister + private func retryLlmPrompCacheChunkUploadWithDelay(llmCanisterActor : Types.LLMCanister, uploadChunk : Types.UploadPromptCacheInputRecord, attempts : Nat, delay : Nat) : async Types.FileUploadRecordResult { + if (attempts > 0) { + try { + D.print("mAIner (" # debug_show(MAINER_AGENT_CANISTER_TYPE) # "): - retryLlmPrompCacheChunkUploadWithDelay - calling upload_prompt_cache_chunk for chunksize, offset = " # debug_show (uploadChunk.chunksize) # ", " # debug_show (uploadChunk.offset)); + let uploadModelFileResult : Types.FileUploadRecordResult = await llmCanisterActor.upload_prompt_cache_chunk(uploadChunk); + return uploadModelFileResult; + + } catch (e) { + D.print("mAIner (" # debug_show(MAINER_AGENT_CANISTER_TYPE) # "): - retryLlmPrompCacheChunkUploadWithDelay - LLM upload_prompt_cache_chunk failed with catch error " # Error.message(e) # ", retrying in " # debug_show(delay) # " nanoseconds"); + + // TODO - Implementation: introduce a delay using a timer... 
+                // Just retry immediately with decremented attempts
+                return await retryLlmPrompCacheChunkUploadWithDelay(llmCanisterActor, uploadChunk, attempts - 1, delay);
+            };
+        } else {
+            D.print("mAIner (" # debug_show(MAINER_AGENT_CANISTER_TYPE) # "): - retryLlmPrompCacheChunkUploadWithDelay - Max retry attempts reached");
+            return #Err(#Other("Max retry attempts reached"));
+        };
+    };
+
+    // Triggered by timer 1: get the next challenge from the GameState and add it to the queue (only for Own and ShareAgent mAIners)
+    private func pullNextChallenge() : async () {
+        D.print("mAIner (" # debug_show(MAINER_AGENT_CANISTER_TYPE) # "): pullNextChallenge - entered");
+
+        if (MAINER_AGENT_CANISTER_TYPE == #ShareService) {
+            // This should never happen, but still protect against it
+            D.print("mAIner (" # debug_show(MAINER_AGENT_CANISTER_TYPE) # "): pullNextChallenge - Something is wrong. pullNextChallenge should not be called by a ShareService.");
+            return;
+        };
+
+        // -----------------------------------------------------
+        // Check if the queue already has enough challenges
+        if (List.size(challengeQueue) >= MAX_CHALLENGES_IN_QUEUE) {
+            D.print("mAIner (" # debug_show(MAINER_AGENT_CANISTER_TYPE) # "): pullNextChallenge - Already have enough Challenges in the queue. 
Not adding more."); + return; + }; + + // ----------------------------------------------------- + // Get the next challenge from GameState canister + D.print("mAIner (" # debug_show(MAINER_AGENT_CANISTER_TYPE) # "): pullNextChallenge - calling getChallengeFromGameStateCanister."); + let challengeResult : Types.ChallengeResult = await getChallengeFromGameStateCanister(); + D.print("mAIner (" # debug_show(MAINER_AGENT_CANISTER_TYPE) # "): pullNextChallenge - received challengeResult from getChallengeFromGameStateCanister: " # debug_show (challengeResult)); + switch (challengeResult) { + case (#Err(error)) { + D.print("mAIner (" # debug_show(MAINER_AGENT_CANISTER_TYPE) # "): pullNextChallenge - challengeResult error : " # debug_show (error)); + // TODO - Error Handling + }; + case (#Ok(challenge : Types.Challenge)) { + D.print("mAIner (" # debug_show(MAINER_AGENT_CANISTER_TYPE) # "): pullNextChallenge - challenge = " # debug_show (challenge)); + + if (not sufficientCyclesToProcessChallenge(challenge)) { + D.print("mAIner (" # debug_show(MAINER_AGENT_CANISTER_TYPE) # "): pullNextChallenge - PAUSING RESPONSE GENERATION DUE TO LOW CYCLE BALANCE"); + PAUSED_DUE_TO_LOW_CYCLE_BALANCE := true; + return; + }; + // Ok,the canister has enough cycles + PAUSED_DUE_TO_LOW_CYCLE_BALANCE := false; + + // Add the challenge to the queue + let challengeQueuedId : Text = await Utils.newRandomUniqueId(); + let challengeQueuedBy : Principal = Principal.fromActor(this); + let challengeQueuedTo : Principal = Principal.fromActor(shareServiceCanisterActor); + + var challengeQueueInput : Types.ChallengeQueueInput = { + challengeTopic : Text = challenge.challengeTopic; + challengeTopicId : Text = challenge.challengeTopicId; + challengeTopicCreationTimestamp : Nat64 = challenge.challengeTopicCreationTimestamp; + challengeTopicStatus : Types.ChallengeTopicStatus = challenge.challengeTopicStatus; + cyclesGenerateChallengeGsChctrl : Nat = challenge.cyclesGenerateChallengeGsChctrl; + 
cyclesGenerateChallengeChctrlChllm : Nat = challenge.cyclesGenerateChallengeChctrlChllm; + challengeQuestion : Text = challenge.challengeQuestion; + challengeQuestionSeed : Nat32 = challenge.challengeQuestionSeed; + mainerPromptId : Text = challenge.mainerPromptId; + mainerMaxContinueLoopCount : Nat = challenge.mainerMaxContinueLoopCount; + mainerNumTokens : Nat64 = challenge.mainerNumTokens; + mainerTemp : Float = challenge.mainerTemp; + judgePromptId : Text = challenge.judgePromptId; + challengeId : Text = challenge.challengeId; + challengeCreationTimestamp : Nat64 = challenge.challengeCreationTimestamp; + challengeCreatedBy : Types.CanisterAddress = challenge.challengeCreatedBy; + challengeStatus : Types.ChallengeStatus = challenge.challengeStatus; + challengeClosedTimestamp : ?Nat64 = challenge.challengeClosedTimestamp; + cyclesSubmitResponse : Nat = challenge.cyclesSubmitResponse; + protocolOperationFeesCut : Nat = challenge.protocolOperationFeesCut; + cyclesGenerateResponseSactrlSsctrl : Nat = challenge.cyclesGenerateResponseSactrlSsctrl; + cyclesGenerateResponseSsctrlGs : Nat = challenge.cyclesGenerateResponseSsctrlGs; + cyclesGenerateResponseSsctrlSsllm : Nat = challenge.cyclesGenerateResponseSsctrlSsllm; + cyclesGenerateResponseOwnctrlGs : Nat = challenge.cyclesGenerateResponseOwnctrlGs; + cyclesGenerateResponseOwnctrlOwnllmLOW : Nat = challenge.cyclesGenerateResponseOwnctrlOwnllmLOW; + cyclesGenerateResponseOwnctrlOwnllmMEDIUM : Nat = challenge.cyclesGenerateResponseOwnctrlOwnllmMEDIUM; + cyclesGenerateResponseOwnctrlOwnllmHIGH : Nat = challenge.cyclesGenerateResponseOwnctrlOwnllmHIGH; + challengeQueuedId : Text = challengeQueuedId; + challengeQueuedBy : Principal = challengeQueuedBy; + challengeQueuedTo : Principal = challengeQueuedTo; + challengeQueuedTimestamp : Nat64 = Nat64.fromNat(Int.abs(Time.now())); + }; + + // A ShareAgent canister first sends the challenge to the Shared mAIner Service to be put in that canisters queue + if 
(MAINER_AGENT_CANISTER_TYPE == #ShareAgent) { + // Add the cycles required for the ShareService queue (We already checked there is enough) + D.print("mAIner (" # debug_show(MAINER_AGENT_CANISTER_TYPE) # "): pullNextChallenge - calling Cycles.add for = " # debug_show(challenge.cyclesGenerateResponseSactrlSsctrl) # " Cycles"); + Cycles.add(challenge.cyclesGenerateResponseSactrlSsctrl); + + D.print("mAIner (" # debug_show(MAINER_AGENT_CANISTER_TYPE) # "): pullNextChallenge - calling addChallengeToShareServiceQueue of shareServiceCanisterActor = " # Principal.toText(Principal.fromActor(shareServiceCanisterActor))); + let challegeQueueInputResult = await shareServiceCanisterActor.addChallengeToShareServiceQueue(challengeQueueInput); + + switch (challegeQueueInputResult) { + case (#Err(error)) { + D.print("mAIner (" # debug_show(MAINER_AGENT_CANISTER_TYPE) # "): pullNextChallenge - addChallengeToShareServiceQueue returned with error : " # debug_show (error)); + // Do not store it in the queue + return; + }; + case (#Ok(challengeQueueInput_)) { + D.print("mAIner (" # debug_show(MAINER_AGENT_CANISTER_TYPE) # "): pullNextChallenge - addChallengeToShareServiceQueue returned successfully : "); + challengeQueueInput := challengeQueueInput_; + }; + }; + }; + + let _pushResult_ = pushChallengeQueue(challengeQueueInput); + + return; + }; + }; + }; + + // Function of ShareService canister to add new challenge to the ShareService canisters queue + public shared (msg) func addChallengeToShareServiceQueue(challengeQueueInput : Types.ChallengeQueueInput) : async Types.ChallengeQueueInputResult { + if (Principal.isAnonymous(msg.caller)) { + return #Err(#Unauthorized); + }; + + if (MAINER_AGENT_CANISTER_TYPE != #ShareService) { + return #Err(#Unauthorized); + }; + + // Only registered ShareAgent canisters may call this + switch (getShareAgentCanister(Principal.toText(msg.caller))) { + case (null) { return #Err(#Unauthorized); }; + case (?_shareAgentEntry) { + // Check that the record 
looks correct + if (challengeQueueInput.challengeQueuedBy != msg.caller) { + return #Err(#Unauthorized); + }; + + // TODO: make sure the cycles are sufficient + // Accept required cycles for queue input + let cyclesAcceptedForShareServiceQueue = Cycles.accept(challengeQueueInput.cyclesGenerateResponseSactrlSsctrl); + D.print("mAIner (" # debug_show(MAINER_AGENT_CANISTER_TYPE) # "): addChallengeToShareServiceQueue - cyclesAcceptedForShareServiceQueue = " # Nat.toText(cyclesAcceptedForShareServiceQueue) # " from caller " # Principal.toText(msg.caller)); + + // Store it in the queue + let _pushResult_ = pushChallengeQueue(challengeQueueInput); + return #Ok(challengeQueueInput); + }; + }; + }; + + private func processNextChallenge() : async () { + D.print("mAIner (" # debug_show(MAINER_AGENT_CANISTER_TYPE) # "): processNextChallenge - entered"); + + if (MAINER_AGENT_CANISTER_TYPE == #ShareAgent) { + // This should never happen, but still protect against it + D.print("mAIner (" # debug_show(MAINER_AGENT_CANISTER_TYPE) # "): processNextChallenge - Something is wrong. processNextChallenge should not be called by a ShareAgent."); + return; + }; + + // Process the next challenge in the challengeQueue + switch (popChallengeQueue()) { + case (null) { + D.print("mAIner (" # debug_show(MAINER_AGENT_CANISTER_TYPE) # "): processNextChallenge - Queue is empty. Nothing to do."); + return; + }; + case (?challengeQueueInput) { + D.print("mAIner (" # debug_show(MAINER_AGENT_CANISTER_TYPE) # "): processNextChallenge - challengeQueueInput" # debug_show (challengeQueueInput)); + + // Check if the canister has enough cycles for this particular Challenge + if (not sufficientCyclesToProcessChallenge(challengeQueueInput)) { + // Note: do not set pause flag + D.print("mAIner (" # debug_show(MAINER_AGENT_CANISTER_TYPE) # "): processNextChallenge - Not enough cycles to process challenge. 
Pushing it back on the queue to try later."); + // Push the challenge back to the queue to try again later + let _pushResult_ = pushChallengeQueue(challengeQueueInput); + return; + }; + + // Process the challenge + // Sanity checks + if (challengeQueueInput.challengeId == "" or challengeQueueInput.mainerPromptId == "") { + return; + }; + switch (challengeQueueInput.challengeStatus) { + case (#Open) { + // continue + }; + case (_) { return }; + }; + switch (challengeQueueInput.challengeClosedTimestamp) { + case (null) { + // continue + }; + case (_) { return }; + }; + + ignore processRespondingToChallenge(challengeQueueInput); + return; + }; + }; + }; + + public shared query (msg) func getRoundRobinCanister() : async Types.CanisterIDRecordResult { + if (not Principal.isController(msg.caller)) { + return #Err(#StatusCode(401)); + }; + let canisterIDRecord : Types.CanisterIDRecord = { + canister_id = Principal.toText(Principal.fromActor(_getRoundRobinCanister())); + }; + return #Ok(canisterIDRecord); + }; + + private func _getRoundRobinCanister() : Types.LLMCanister { + D.print("mAIner (" # debug_show(MAINER_AGENT_CANISTER_TYPE) # "): _getRoundRobinCanister: using roundRobinIndex " # Nat.toText(roundRobinIndex)); + + // Protect against invalid roundRobinIndex + if (roundRobinIndex >= llmCanisters.size()) { + roundRobinIndex := 0; + }; + + let canister = llmCanisters.get(roundRobinIndex); + roundRobinIndex += 1; + + var roundRobinIndexTurn = llmCanisters.size(); + if (roundRobinUseAll == false) { + roundRobinIndexTurn := Utils.minNat(roundRobinIndexTurn, roundRobinLLMs); + }; + + if (roundRobinIndex >= roundRobinIndexTurn) { + roundRobinIndex := 0; + }; + + return canister; + }; + + // Function for mAIner Agent Creator canister to add new mAIner ShareAgent canister to a mAIner ShareService canister + public shared (msg) func addMainerShareAgentCanister(canisterEntryToAdd : Types.OfficialMainerAgentCanister) : async Types.MainerAgentCanisterResult { + if (not 
Principal.isController(msg.caller)) { + return #Err(#Unauthorized); + }; + switch (canisterEntryToAdd.canisterType) { + case (#MainerAgent(_)) { + // continue + }; + case (_) { return #Err(#Other("Unsupported canisterType")); } + }; + + // This check does not apply because the mAIner Creator creates the ShareService canister + // Just verifying that only a controller can call this is enough, and also all we can do. + + // TODO - Security: Only official mAIner Creator canisters may call this + // switch (getMainerCreatorCanister(Principal.toText(msg.caller))) { + // case (null) { return #Err(#Unauthorized); }; + // case (?mainerCreatorEntry) { + let canisterEntry : Types.OfficialMainerAgentCanister = { + address : Text = canisterEntryToAdd.address; + subnet : Text = canisterEntryToAdd.subnet; + canisterType: Types.ProtocolCanisterType = canisterEntryToAdd.canisterType; + creationTimestamp : Nat64 = canisterEntryToAdd.creationTimestamp; + createdBy : Principal = canisterEntryToAdd.createdBy; + ownedBy : Principal = canisterEntryToAdd.ownedBy; + status : Types.CanisterStatus = canisterEntryToAdd.status; + mainerConfig : Types.MainerConfigurationInput = canisterEntryToAdd.mainerConfig; + }; + putShareAgentCanister(canisterEntryToAdd.address, canisterEntry); + // }; + // }; + }; + + // TODO - Testing: remove; admin Function to add new mAIner ShareAgent for testing + public shared (msg) func addMainerShareAgentCanisterAdmin(canisterEntryToAdd : Types.OfficialMainerAgentCanister) : async Types.MainerAgentCanisterResult { + if (Principal.isAnonymous(msg.caller)) { + return #Err(#Unauthorized); + }; + if (not Principal.isController(msg.caller)) { + return #Err(#Unauthorized); + }; + switch (canisterEntryToAdd.canisterType) { + case (#MainerAgent(_)) { + // continue + }; + case (_) { return #Err(#Other("Unsupported")); } + }; + let canisterEntry : Types.OfficialMainerAgentCanister = { + address : Text = canisterEntryToAdd.address; + subnet : Text = canisterEntryToAdd.subnet; 
+ canisterType: Types.ProtocolCanisterType = canisterEntryToAdd.canisterType; + creationTimestamp : Nat64 = canisterEntryToAdd.creationTimestamp; + createdBy : Principal = canisterEntryToAdd.createdBy; + ownedBy : Principal = canisterEntryToAdd.ownedBy; + status : Types.CanisterStatus = canisterEntryToAdd.status; + mainerConfig : Types.MainerConfigurationInput = canisterEntryToAdd.mainerConfig; + }; + putShareAgentCanister(canisterEntryToAdd.address, canisterEntry); + }; + +// Timers + + // This variable is just for reporting purposes, so an Admin can quickly check the currently used timer regularity + // It is recalculated each time the timer is started + stable var action1RegularityInSeconds = 0; // Timer is not yet set + + // ---------------------------------------------------------- + // How often Own and ShareService mAIners wake up to process the next challenge from the queue + // TODO: revisit for #Own mAiners... + stable var action2RegularityInSeconds = 5; + + stable var cyclesBurnRateFromGameState = CYCLES_BURN_RATE_DEFAULT; // Just set it to some default value. 
The actual value is retrieved from the GameState in startTimerExecution() + + public shared (msg) func setTimerAction2RegularityInSecondsAdmin(_action2RegularityInSeconds : Nat) : async Types.StatusCodeRecordResult { + if (Principal.isAnonymous(msg.caller)) { + return #Err(#StatusCode(401)); + }; + if (not Principal.isController(msg.caller)) { + return #Err(#StatusCode(401)); + }; + action2RegularityInSeconds := _action2RegularityInSeconds; + // Restart the timer with the new regularity + let _ = await startTimerExecution(msg.caller, "setTimerAction2RegularityInSecondsAdmin"); + return #Ok({ status_code = 200 }); + }; + + public shared query (msg) func getTimerActionRegularityInSecondsAdmin() : async Types.MainerTimersResult { + if (not Principal.isController(msg.caller)) { + return #Err(#StatusCode(401)); + }; + return #Ok({ + action1RegularityInSeconds = action1RegularityInSeconds; + action2RegularityInSeconds = action2RegularityInSeconds; + }); + }; + // ---------------------------------------------------------- + + private func triggerRecurringAction1() : async () { + D.print("mAIner (" # debug_show(MAINER_AGENT_CANISTER_TYPE) # "): Recurring action 1 was triggered"); + let result = await pullNextChallenge(); + D.print("mAIner (" # debug_show(MAINER_AGENT_CANISTER_TYPE) # "): Recurring action 1 result"); + D.print(debug_show (result)); + D.print("mAIner (" # debug_show(MAINER_AGENT_CANISTER_TYPE) # "): Recurring action 1 result"); + }; + + private func triggerRecurringAction2() : async () { + D.print("mAIner (" # debug_show(MAINER_AGENT_CANISTER_TYPE) # "): Recurring action 2 was triggered"); + let result = await processNextChallenge(); + D.print("mAIner (" # debug_show(MAINER_AGENT_CANISTER_TYPE) # "): Recurring action 2 result"); + D.print(debug_show (result)); + D.print("mAIner (" # debug_show(MAINER_AGENT_CANISTER_TYPE) # "): Recurring action 2 result"); + }; + + + private func startTimerExecution(callerPrincipal : Principal, calledFromEndpoint : Text) : 
async Types.AuthRecordResult { + D.print("mAIner (" # debug_show(MAINER_AGENT_CANISTER_TYPE) # "): startTimerExecution - entered" # ", calledFromEndpoint = " # calledFromEndpoint # ", callerPrincipal = " # Principal.toText(callerPrincipal)); + D.print("mAIner (" # debug_show(MAINER_AGENT_CANISTER_TYPE) # "): startTimerExecution - initialTimerId1 = " # debug_show(initialTimerId1) # ", recurringTimerId1 = " # debug_show(recurringTimerId1) # ", bufferTimerId1 size = " # Nat.toText(bufferTimerId1.size())); + D.print("mAIner (" # debug_show(MAINER_AGENT_CANISTER_TYPE) # "): startTimerExecution - recurringTimerId2 = " # debug_show(recurringTimerId2) # ", bufferTimerId2 size = " # Nat.toText(bufferTimerId2.size())); + + var res = "You started the timers: "; + let TIMER_REGULARITY_DEFAULT = 5; // TODO - Implementation: move to common file + var timerRegularity = TIMER_REGULARITY_DEFAULT; + + // Calculate timer regularity based on cycles burn rate for user's mAIner + if (MAINER_AGENT_CANISTER_TYPE == #Own or MAINER_AGENT_CANISTER_TYPE == #ShareAgent) { + let gameStateCanisterActor = actor (GAME_STATE_CANISTER_ID) : Types.GameStateCanister_Actor; + switch (getCurrentAgentSettings()) { + case (null) { + D.print("mAIner (" # debug_show(MAINER_AGENT_CANISTER_TYPE) # "): startTimerExecution - No agentSettings found, using default cyclesBurnRateFromGameState = " # debug_show(cyclesBurnRateFromGameState)); + // use default + }; + case (?agentSettings) { + let cyclesBurnRateResult : Types.CyclesBurnRateResult = await gameStateCanisterActor.getCyclesBurnRate(agentSettings.cyclesBurnRate); + switch (cyclesBurnRateResult) { + case (#Err(error)) { + D.print("mAIner (" # debug_show(MAINER_AGENT_CANISTER_TYPE) # "): startTimerExecution - gamestate.getCyclesBurnRate returned error: " # debug_show(error)); + // we leave timer + }; + case (#Ok(cyclesBurnRateFromGameState_)) { + cyclesBurnRateFromGameState := cyclesBurnRateFromGameState_; + D.print("mAIner (" # 
debug_show(MAINER_AGENT_CANISTER_TYPE) # "): startTimerExecution - cyclesBurnRate retrieved from gamestate.getCyclesBurnRate = " # debug_show(cyclesBurnRateFromGameState) ); + }; + }; + }; + }; + // Get the cycles used per response from GameState to calculate the timer regularity + D.print("mAIner (" # debug_show(MAINER_AGENT_CANISTER_TYPE) # "): startTimerExecution - calling getMainerCyclesUsedPerResponse of gameStateCanisterActor"); + let cyclesUsedResult : Types.NatResult = await gameStateCanisterActor.getMainerCyclesUsedPerResponse(); + switch (cyclesUsedResult) { + case (#Err(error)) { + D.print("mAIner (" # debug_show(MAINER_AGENT_CANISTER_TYPE) # "): startTimerExecution - getMainerCyclesUsedPerResponse error: " # debug_show(error)); + // we leave timer + }; + case (#Ok(cyclesUsed)) { + D.print("mAIner (" # debug_show(MAINER_AGENT_CANISTER_TYPE) # "): startTimerExecution - cyclesBurnRateFromGameState = " # debug_show(cyclesBurnRateFromGameState)); + D.print("mAIner (" # debug_show(MAINER_AGENT_CANISTER_TYPE) # "): startTimerExecution - cyclesUsed per response = " # debug_show(cyclesUsed)); + timerRegularity := TimerRegularity.getTimerRegularityForCyclesBurnRate(cyclesBurnRateFromGameState, cyclesUsed); + D.print("mAIner (" # debug_show(MAINER_AGENT_CANISTER_TYPE) # "): startTimerExecution - timerRegularity = " # debug_show(timerRegularity) # ", cyclesBurnRateFromGameState = " # debug_show(cyclesBurnRateFromGameState) # ", cyclesUsed (per response) = " # debug_show(cyclesUsed)); + }; + }; + }; + + D.print("mAIner (" # debug_show(MAINER_AGENT_CANISTER_TYPE) # "): startTimerExecution - timerRegularity = " # Nat.toText(timerRegularity) # " seconds"); + + if (MAINER_AGENT_CANISTER_TYPE == #Own or MAINER_AGENT_CANISTER_TYPE == #ShareAgent) { + res := res # " 1, "; + var randomInitialTimer = 3000; // Default + try { + let random = Random.Finite(await Random.blob()); + let randomValueResult = random.range(6); // Uniformly distributes outcomes in the numeric range [0 
.. 2^6 - 1] = [0 .. 63] + switch (randomValueResult) { + case (?randomValue) { + randomInitialTimer := (randomValue + 1) * 2 * 60; // i.e. range for randomInitialTimer is between 120 and 7680 seconds (2 and 128 minutes) + }; + case (_) { + // Something went wrong with the random generation, use default + }; + }; + } catch (error : Error) { + D.print("mAIner startTimerExecution error in generating randomInitialTimer: " # Error.message(error)); + // Some error occurred, use default + }; + // First stop an existing timer if it exists + let _ = await stopTimerExecution(); + + // Now start the timer + let initialTimerId = setTimer(#seconds randomInitialTimer, + func () : async () { + D.print("mAIner (" # debug_show(MAINER_AGENT_CANISTER_TYPE) # "): startTimerExecution - setTimer 1"); + let id = recurringTimer(#seconds timerRegularity, triggerRecurringAction1); + D.print("mAIner (" # debug_show(MAINER_AGENT_CANISTER_TYPE) # "): startTimerExecution - Successfully start timer 1 with id = " # debug_show (id)); + recurringTimerId1 := ?id; + addTimerToBuffer(bufferTimerId1, id); + // Clear initialTimerId1 since it has fired + initialTimerId1 := null; + + // Record this timer creation (recurring timer 1) + let timersEntry : Types.MainerAgentTimers = { + action1RegularityInSeconds = action1RegularityInSeconds; + action2RegularityInSeconds = action2RegularityInSeconds; + initialTimerId1 = null; + randomInitialTimer1InSeconds = null; + recurringTimerId1 = ?id; + recurringTimerId2 = null; + creationTimestamp = Nat64.fromNat(Int.abs(Time.now())); + createdBy = callerPrincipal; + calledFromEndpoint = calledFromEndpoint; + }; + ignore putAgentTimers(timersEntry); + + await triggerRecurringAction1(); + }); + // Store the initial timer ID for reporting and cancellation + initialTimerId1 := ?initialTimerId; + addTimerToBuffer(bufferTimerId1, initialTimerId); + + // Record this timer creation (initial timer 1) + let initialTimersEntry : Types.MainerAgentTimers = { + 
action1RegularityInSeconds = timerRegularity; + action2RegularityInSeconds = action2RegularityInSeconds; + initialTimerId1 = ?initialTimerId; + randomInitialTimer1InSeconds = ?randomInitialTimer; + recurringTimerId1 = null; + recurringTimerId2 = null; + creationTimestamp = Nat64.fromNat(Int.abs(Time.now())); + createdBy = callerPrincipal; + calledFromEndpoint = calledFromEndpoint; + }; + ignore putAgentTimers(initialTimersEntry); + + // For reporting purposes + action1RegularityInSeconds := timerRegularity; + D.print("mAIner (" # debug_show(MAINER_AGENT_CANISTER_TYPE) # "): startTimerExecution - setTimer 1 with regularity = " # Nat.toText(timerRegularity) # " seconds, randomInitialTimer = " # Nat.toText(randomInitialTimer)); + }; + + if (MAINER_AGENT_CANISTER_TYPE == #Own or MAINER_AGENT_CANISTER_TYPE == #ShareService) { + res := res # " 2"; + D.print("mAIner (" # debug_show(MAINER_AGENT_CANISTER_TYPE) # "): startTimerExecution - setTimer 2"); + let id = recurringTimer(#seconds action2RegularityInSeconds, triggerRecurringAction2); + D.print("mAIner (" # debug_show(MAINER_AGENT_CANISTER_TYPE) # "): startTimerExecution - Successfully start timer 2 with id = " # debug_show (id) # ", regularity = " # Nat.toText(action2RegularityInSeconds) # " seconds"); + recurringTimerId2 := ?id; + addTimerToBuffer(bufferTimerId2, id); + + // Record this timer creation (recurring timer 2) + let timersEntry : Types.MainerAgentTimers = { + action1RegularityInSeconds = action1RegularityInSeconds; + action2RegularityInSeconds = action2RegularityInSeconds; + initialTimerId1 = null; + randomInitialTimer1InSeconds = null; + recurringTimerId1 = null; + recurringTimerId2 = ?id; + creationTimestamp = Nat64.fromNat(Int.abs(Time.now())); + createdBy = callerPrincipal; + calledFromEndpoint = calledFromEndpoint; + }; + ignore putAgentTimers(timersEntry); + + // Trigger it right away. 
Without this, the first action would be delayed by the recurring timer regularity + await triggerRecurringAction2(); + }; + + D.print("mAIner (" # debug_show(MAINER_AGENT_CANISTER_TYPE) # "): startTimerExecution - leaving..."); + D.print("mAIner (" # debug_show(MAINER_AGENT_CANISTER_TYPE) # "): startTimerExecution - initialTimerId1 = " # debug_show(initialTimerId1) # ", recurringTimerId1 = " # debug_show(recurringTimerId1) # ", bufferTimerId1 size = " # Nat.toText(bufferTimerId1.size())); + D.print("mAIner (" # debug_show(MAINER_AGENT_CANISTER_TYPE) # "): startTimerExecution - recurringTimerId2 = " # debug_show(recurringTimerId2) # ", bufferTimerId2 size = " # Nat.toText(bufferTimerId2.size())); + + let authRecord = { auth = res }; + return #Ok(authRecord); + }; + + private func stopTimerExecution() : async Types.AuthRecordResult { + D.print("mAIner (" # debug_show(MAINER_AGENT_CANISTER_TYPE) # "): stopTimerExecution - entered"); + D.print("mAIner (" # debug_show(MAINER_AGENT_CANISTER_TYPE) # "): stopTimerExecution - initialTimerId1 = " # debug_show(initialTimerId1) # ", recurringTimerId1 = " # debug_show(recurringTimerId1) # ", bufferTimerId1 size = " # Nat.toText(bufferTimerId1.size())); + D.print("mAIner (" # debug_show(MAINER_AGENT_CANISTER_TYPE) # "): stopTimerExecution - recurringTimerId2 = " # debug_show(recurringTimerId2) # ", bufferTimerId2 size = " # Nat.toText(bufferTimerId2.size())); + + var res = "You stopped the timers: "; + + // Cancel all timers in buffer 1 + var hasActiveTimer1 = false; + for (i in Iter.range(0, bufferTimerId1.size() - 1)) { + let timerId = bufferTimerId1.get(i); + D.print("mAIner (" # debug_show(MAINER_AGENT_CANISTER_TYPE) # "): stopTimerExecution - Cancelling timer 1 with id = " # debug_show(timerId)); + Timer.cancelTimer(timerId); + // Only report if we're cancelling an active timer (either initial or recurring) + if ((initialTimerId1 != null and initialTimerId1 == ?timerId) or + (recurringTimerId1 != null and recurringTimerId1 
== ?timerId)) { + hasActiveTimer1 := true; + }; + }; + if (hasActiveTimer1) { + res := res # " 1, "; + }; + // Clear the running timer IDs we track for reporting purposes, but do NOT clear the buffer for additional robustness + // NOT clearing bufferTimerId1 on purpose, to handle the case if Timer.cancelTimer did not actually cancel the timer + initialTimerId1 := null; + recurringTimerId1 := null; + + // Cancel all timers in buffer 2 + var hasActiveTimer2 = false; + for (i in Iter.range(0, bufferTimerId2.size() - 1)) { + let timerId = bufferTimerId2.get(i); + D.print("mAIner (" # debug_show(MAINER_AGENT_CANISTER_TYPE) # "): stopTimerExecution - Cancelling timer 2 with id = " # debug_show(timerId)); + Timer.cancelTimer(timerId); + // Only report if we're cancelling an active timer (recurring only for timer 2) + if (recurringTimerId2 != null and recurringTimerId2 == ?timerId) { + hasActiveTimer2 := true; + }; + }; + if (hasActiveTimer2) { + res := res # " 2, "; + }; + // Clear the running timer IDs we track for reporting purposes, but do NOT clear the buffer for additional robustness + // NOT clearing bufferTimerId2 on purpose, to handle the case if Timer.cancelTimer did not actually cancel the timer + recurringTimerId2 := null; + + if (res == "You stopped the timers: ") { + res := "No timers were running"; + }; + + D.print("mAIner (" # debug_show(MAINER_AGENT_CANISTER_TYPE) # "): stopTimerExecution - leaving..."); + D.print("mAIner (" # debug_show(MAINER_AGENT_CANISTER_TYPE) # "): stopTimerExecution - initialTimerId1 = " # debug_show(initialTimerId1) # ", recurringTimerId1 = " # debug_show(recurringTimerId1) # ", bufferTimerId1 size = " # Nat.toText(bufferTimerId1.size())); + D.print("mAIner (" # debug_show(MAINER_AGENT_CANISTER_TYPE) # "): stopTimerExecution - recurringTimerId2 = " # debug_show(recurringTimerId2) # ", bufferTimerId2 size = " # Nat.toText(bufferTimerId2.size())); + + return #Ok({ auth = res }); + }; + + public shared (msg) func 
startTimerExecutionAdmin() : async Types.AuthRecordResult { + if (Principal.isAnonymous(msg.caller)) { + return #Err(#StatusCode(401)); + }; + if (not Principal.isController(msg.caller)) { + return #Err(#StatusCode(401)); + }; + await startTimerExecution(msg.caller, "startTimerExecutionAdmin"); + }; + + public shared (msg) func stopTimerExecutionAdmin() : async Types.AuthRecordResult { + if (Principal.isAnonymous(msg.caller)) { + return #Err(#StatusCode(401)); + }; + if (not Principal.isController(msg.caller)) { + return #Err(#StatusCode(401)); + }; + await stopTimerExecution(); + }; + + public shared query (msg) func getTimerBuffersAdmin() : async Types.MainerTimerBuffersResult { + if (Principal.isAnonymous(msg.caller)) { + return #Err(#Unauthorized); + }; + if (not Principal.isController(msg.caller)) { + return #Err(#Unauthorized); + }; + + // Convert buffers to arrays + let buffer1Array = Buffer.toArray(bufferTimerId1); + let buffer2Array = Buffer.toArray(bufferTimerId2); + + let timerBuffers : Types.MainerTimerBuffers = { + bufferTimerId1 = buffer1Array; + bufferTimerId2 = buffer2Array; + }; + + return #Ok(timerBuffers); + }; + + public shared (msg) func setTimerBufferMaxSizeAdmin(maxSize: Nat) : async Types.StatusCodeRecordResult { + if (Principal.isAnonymous(msg.caller)) { + return #Err(#StatusCode(401)); + }; + if (not Principal.isController(msg.caller)) { + return #Err(#StatusCode(401)); + }; + + TIMER_BUFFER_MAX_SIZE := maxSize; + + return #Ok({ status_code = 200 }); + }; + + public shared query (msg) func getTimerBufferMaxSizeAdmin() : async Types.NatResult { + if (Principal.isAnonymous(msg.caller)) { + return #Err(#Unauthorized); + }; + if (not Principal.isController(msg.caller)) { + return #Err(#Unauthorized); + }; + + return #Ok(TIMER_BUFFER_MAX_SIZE); + }; + + // Testing function for admin for ShareService + public shared (msg) func triggerChallengeResponseAdmin() : async Types.AuthRecordResult { + if (not Principal.isController(msg.caller)) { + 
return #Err(#StatusCode(401)); + }; + /* if (MAINER_AGENT_CANISTER_TYPE != #ShareService) { + // execute the timer 1 action + D.print("mAIner (" # debug_show(MAINER_AGENT_CANISTER_TYPE) # "): triggerChallengeResponseAdmin - (timer 1 action) calling pullNextChallenge"); + await pullNextChallenge(); + }; + if (MAINER_AGENT_CANISTER_TYPE != #ShareAgent) { + // execute timer 2 action + D.print("mAIner (" # debug_show(MAINER_AGENT_CANISTER_TYPE) # "): triggerChallengeResponseAdmin - (timer 2 action) calling processNextChallenge"); + await processNextChallenge(); + }; */ + if (MAINER_AGENT_CANISTER_TYPE == #ShareService) { + // execute timer 2 action + D.print("mAIner (" # debug_show(MAINER_AGENT_CANISTER_TYPE) # "): triggerChallengeResponseAdmin - (timer 2 action) calling processNextChallenge"); + await processNextChallenge(); + let authRecord = { auth = "You triggered the response generation." }; + return #Ok(authRecord); + } else { + return #Err(#StatusCode(401)); + }; + }; + + // Upgrade Hooks + system func preupgrade() { + mainerCreatorCanistersStorageStable := Iter.toArray(mainerCreatorCanistersStorage.entries()); + shareAgentCanistersStorageStable := Iter.toArray(shareAgentCanistersStorage.entries()); + userToShareAgentsStorageStable := Iter.toArray(userToShareAgentsStorage.entries()); + + // Convert Buffer to [Text] for stable storage + let llmCanisterIds = Buffer.Buffer(llmCanisters.size()); + for (llmCanister in llmCanisters.vals()) { + llmCanisterIds.add(Principal.toText(Principal.fromActor(llmCanister))); + }; + llmCanistersStable := Buffer.toArray(llmCanisterIds); + }; + + system func postupgrade() { + mainerCreatorCanistersStorage := HashMap.fromIter(Iter.fromArray(mainerCreatorCanistersStorageStable), mainerCreatorCanistersStorageStable.size(), Text.equal, Text.hash); + mainerCreatorCanistersStorageStable := []; + shareAgentCanistersStorage := HashMap.fromIter(Iter.fromArray(shareAgentCanistersStorageStable), shareAgentCanistersStorageStable.size(), 
Text.equal, Text.hash); + shareAgentCanistersStorageStable := []; + userToShareAgentsStorage := HashMap.fromIter(Iter.fromArray(userToShareAgentsStorageStable), userToShareAgentsStorageStable.size(), Principal.equal, Principal.hash); + userToShareAgentsStorageStable := []; + + // Reconstruct Buffer from [Text] + llmCanisters := Buffer.Buffer(llmCanistersStable.size()); + for (canisterId in llmCanistersStable.vals()) { + let llmCanister = actor (canisterId) : Types.LLMCanister; + llmCanisters.add(llmCanister); + }; + llmCanistersStable := []; + + // Reset reporting variable for timer + action1RegularityInSeconds := 0; // Timer is not yet set (They don't persist across upgrades) + }; +}; diff --git a/src/ShareService/src/Utils.mo b/src/ShareService/src/Utils.mo new file mode 100644 index 0000000..1c3442e --- /dev/null +++ b/src/ShareService/src/Utils.mo @@ -0,0 +1,126 @@ +import Result "mo:base/Result"; +import Debug "mo:base/Debug"; +import Iter "mo:base/Iter"; +import Nat8 "mo:base/Nat8"; +import Prelude "mo:base/Prelude"; +import UUID "mo:uuid/UUID"; +import Source "mo:uuid/async/SourceV4"; +import Text "mo:base/Text"; +import Blob "mo:base/Blob"; +import Array "mo:base/Array"; +import HashMap "mo:base/HashMap"; +import Nat32 "mo:base/Nat32"; +import Nat "mo:base/Nat"; + +module { + type Result = Result.Result; + + /// Returns the value of the result and traps if there isn't any value to return. + public func get_ok(result : Result) : T { + switch result { + case (#ok value) value; + case (#err _) Debug.trap("pattern failed"); + }; + }; + + /// Returns the value of the result and traps with a custom message if there isn't any value to return. + public func get_ok_except(result : Result, expect : Text) : T { + switch result { + case (#ok value) value; + case (#err _) { + Debug.print("pattern failed"); + Debug.trap(expect); + }; + }; + }; + + /// Unwraps the value of the option. 
+ public func unwrap(option : ?T) : T { + switch option { + case (?value) value; + case null Prelude.unreachable(); + }; + }; + + // Returns the hexadecimal representation of a `Nat8` considered as a `Nat4`. + func nat4ToText(nat4 : Nat8) : Text { + Text.fromChar( + switch nat4 { + case 0 '0'; + case 1 '1'; + case 2 '2'; + case 3 '3'; + case 4 '4'; + case 5 '5'; + case 6 '6'; + case 7 '7'; + case 8 '8'; + case 9 '9'; + case 10 'a'; + case 11 'b'; + case 12 'c'; + case 13 'd'; + case 14 'e'; + case 15 'f'; + case _ Prelude.unreachable(); + } + ); + }; + + /// Returns the hexadecimal representation of a `Nat8`. + func nat8ToText(byte : Nat8) : Text { + let leftNat4 = byte >> 4; + let rightNat4 = byte & 15; + nat4ToText(leftNat4) # nat4ToText(rightNat4); + }; + + /// Returns the hexadecimal representation of a byte array. + public func bytesToText(bytes : [Nat8]) : Text { + Text.join("", Iter.map(Iter.fromArray(bytes), func(n) { nat8ToText(n) })); + }; + + public func btcTxIdToText(txid : Blob) : Text { + let reversedArray = Array.reverse(Blob.toArray(txid)); + let txidText = bytesToText(reversedArray); + return txidText; + }; + + public func newRandomUniqueId() : async Text { + let g = Source.Source(); + UUID.toText(await g.new()); + }; + + // Generalized function to debug print the HashMap + public func printHashMap(map : HashMap.HashMap, toText : T -> Text) { + var output : Text = ""; + for ((key, value) in map.entries()) { + output := output # key # ": " # toText(value) # ", "; + }; + Debug.print(output); + }; + + public func minNat(a : Nat, b : Nat) : Nat { + if (a < b) { + return a; + } else { + return b; + }; + }; + + // Convert a random string to a seed for the LLM model, with wide variability + public func getRandomLlmSeed(randomString : Text) : Nat32 { + let value : Nat32 = Text.hash(randomString); + + let selector = value % 6; // Use last bits to select range between 0-5 + + switch (selector) { + case 0 { value % 10 }; // 0-9 + case 1 { value % 100 }; // 
0-99 + case 2 { value % 1000 }; // 0-999 + case 3 { value % 1_000_000 }; // 0-999,999 + case 4 { value % 1_000_000_000 }; // 0-999,999,999 + case _ { value }; // Full range + }; + }; + +}; diff --git a/src/ShareService/test/__init__.py b/src/ShareService/test/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/src/ShareService/test/conftest.py b/src/ShareService/test/conftest.py new file mode 100644 index 0000000..00b5fbe --- /dev/null +++ b/src/ShareService/test/conftest.py @@ -0,0 +1,9 @@ +"""The pytest fixtures + https://docs.pytest.org/en/latest/fixture.html +""" + +# pylint: disable=missing-function-docstring, unused-import, wildcard-import, unused-wildcard-import +import pytest +from icpp.conftest_base import * # pytest fixtures provided by icpp + +# Define your pytest fixtures below diff --git a/src/ShareService/test/test_apis.py b/src/ShareService/test/test_apis.py new file mode 100644 index 0000000..0d2d53c --- /dev/null +++ b/src/ShareService/test/test_apis.py @@ -0,0 +1,92 @@ +"""Test canister APIs + + First deploy the canister, then run: + + $ pytest --network=[local/ic] test_apis.py + +""" + +# pylint: disable=unused-argument, missing-function-docstring, unused-import, wildcard-import, unused-wildcard-import, line-too-long, invalid-name + +from pathlib import Path +import pytest +from icpp.smoketest import call_canister_api + +# Path to the dfx.json file +DFX_JSON_PATH = Path(__file__).parent / "../dfx.json" + +# Canister in the dfx.json file we want to test +# TODO: put this in a loop over all mAIner control canisters +CANISTER_NAME = "mainer_ctrlb_canister_0" +# CANISTER_NAME = "mainer_ctrlb_canister_1" + + +def test__whoami_anonymous(identity_anonymous: dict[str, str], network: str) -> None: + response = call_canister_api( + dfx_json_path=DFX_JSON_PATH, + canister_name=CANISTER_NAME, + canister_method="whoami", + canister_argument="()", + network=network, + timeout_seconds=10, + ) + expected_response = f'(principal 
"{identity_anonymous["principal"]}")' + assert response == expected_response + + +def test__whoami_default(identity_default: dict[str, str], network: str) -> None: + response = call_canister_api( + dfx_json_path=DFX_JSON_PATH, + canister_name=CANISTER_NAME, + canister_method="whoami", + canister_argument="()", + network=network, + timeout_seconds=10, + ) + expected_response = f'(principal "{identity_default["principal"]}")' + assert response == expected_response + + +def test__amiWhitelisted_anonymous( + identity_anonymous: dict[str, str], network: str +) -> None: + response = call_canister_api( + dfx_json_path=DFX_JSON_PATH, + canister_name=CANISTER_NAME, + canister_method="amiWhitelisted", + canister_argument="()", + network=network, + timeout_seconds=10, + ) + expected_response = "(variant { Err = variant { Unauthorized } })" + assert response == expected_response + + +def test__amiWhitelisted(network: str) -> None: + response = call_canister_api( + dfx_json_path=DFX_JSON_PATH, + canister_name=CANISTER_NAME, + canister_method="amiWhitelisted", + canister_argument="()", + network=network, + timeout_seconds=10, + ) + expected_response = ( + '(variant { Ok = record { auth = "You are a controller of this canister.";} })' + ) + assert response == expected_response + + +def test__isControllerLogicOk(network: str) -> None: + response = call_canister_api( + dfx_json_path=DFX_JSON_PATH, + canister_name=CANISTER_NAME, + canister_method="isControllerLogicOk", + canister_argument="()", + network=network, + timeout_seconds=10, + ) + expected_response = ( + '(variant { Ok = record { auth = "You are a controller of this canister.";} })' + ) + assert response == expected_response \ No newline at end of file From 12260f27b9803a9a94b360aee2a082d43eee9145 Mon Sep 17 00:00:00 2001 From: patnorris Date: Fri, 31 Oct 2025 11:02:06 +0100 Subject: [PATCH 06/14] Remove ShareService changes from mAIner --- src/mAIner/src/Main.mo | 174 +++++------------------------------------ 1 file 
changed, 18 insertions(+), 156 deletions(-) diff --git a/src/mAIner/src/Main.mo b/src/mAIner/src/Main.mo index 927c5de..e72e2e2 100644 --- a/src/mAIner/src/Main.mo +++ b/src/mAIner/src/Main.mo @@ -270,142 +270,6 @@ actor class MainerAgentCtrlbCanister() = this { }; }; - // Share Service: flag to decide whether cycles should be sent to LLMs automatically as part of flow - stable var SEND_CYCLES_TO_LLM : Bool = true; - - public shared (msg) func toggleSendCyclesToLlmFlagAdmin() : async Types.AuthRecordResult { - if (Principal.isAnonymous(msg.caller)) { - return #Err(#Unauthorized); - }; - if (not Principal.isController(msg.caller)) { - return #Err(#Unauthorized); - }; - SEND_CYCLES_TO_LLM := not SEND_CYCLES_TO_LLM; - let authRecord = { auth = "You set the flag to " # debug_show(SEND_CYCLES_TO_LLM) }; - return #Ok(authRecord); - }; - - public query (msg) func getSendCyclesToLlmFlagAdmin() : async Types.FlagResult { - if (Principal.isAnonymous(msg.caller)) { - return #Err(#Unauthorized); - }; - if (not Principal.isController(msg.caller)) { - return #Err(#Unauthorized); - }; - - return #Ok({ flag = SEND_CYCLES_TO_LLM }); - }; - - // Share Service: Move cycles to operator's wallet (e.g. 
onicai) - stable let OPERATOR_WALLET_ADDRESS : Text = "jh35u-eqaaa-aaaag-abf3a-cai"; - stable var cyclesTransactionsStorage : List.List = List.nil(); - - public query (msg) func getCyclesTransactionsAdmin() : async Types.CyclesTransactionsResult { - if (Principal.isAnonymous(msg.caller)) { - return #Err(#Unauthorized); - }; - if (not Principal.isController(msg.caller)) { - return #Err(#Unauthorized); - }; - return #Ok(List.toArray(cyclesTransactionsStorage)); - }; - - stable var MIN_CYCLES_BALANCE : Nat = 30 * Constants.CYCLES_TRILLION; - stable var CYCLES_AMOUNT_TO_OPERATOR : Nat = 10 * Constants.CYCLES_TRILLION; - - public shared (msg) func sendCyclesToOperatorAdmin() : async Types.AddCyclesResult { - if (Principal.isAnonymous(msg.caller)) { - return #Err(#Unauthorized); - }; - if (not Principal.isController(msg.caller)) { - return #Err(#Unauthorized); - }; - let currentCyclesBalance : Nat = Cycles.balance(); - try { - // Only move cycles if cycles balance is big enough - if (currentCyclesBalance - CYCLES_AMOUNT_TO_OPERATOR < MIN_CYCLES_BALANCE) { - D.print("ShareService: sendCyclesToOperatorAdmin - requested cycles transaction but balance is not big enough: " # debug_show(currentCyclesBalance) # debug_show(msg)); - return #Err(#Unauthorized); - }; - - D.print("ShareService: sendCyclesToOperatorAdmin - OPERATOR_WALLET_ADDRESS: " # debug_show(OPERATOR_WALLET_ADDRESS)); - D.print("ShareService: sendCyclesToOperatorAdmin - CYCLES_AMOUNT_TO_OPERATOR: " # debug_show(CYCLES_AMOUNT_TO_OPERATOR)); - Cycles.add(CYCLES_AMOUNT_TO_OPERATOR); - // Send via system API - D.print("ShareService: sendCyclesToOperatorAdmin - calling system API's deposit_cycles to send cycles"); - let deposit_cycles_args = { canister_id : Principal = Principal.fromText(OPERATOR_WALLET_ADDRESS); }; - let _ = ignore IC0.deposit_cycles(deposit_cycles_args); - D.print("ShareService: sendCyclesToOperatorAdmin - called deposit_cycles with ignore"); - // Store the transaction - let transactionEntry : 
Types.CyclesTransaction = { - amountAdded : Nat = CYCLES_AMOUNT_TO_OPERATOR; - newOfficialCycleBalance : Nat = Cycles.balance(); - creationTimestamp : Nat64 = Nat64.fromNat(Int.abs(Time.now())); - sentBy : Principal = msg.caller; - succeeded : Bool = true; - previousCyclesBalance : Nat = currentCyclesBalance; - }; - cyclesTransactionsStorage := List.push(transactionEntry, cyclesTransactionsStorage); - D.print("ShareService: sendCyclesToOperatorAdmin - stored transactionEntry: " # debug_show(transactionEntry)); - let addCyclesResponse : Types.AddCyclesRecord = { - added : Bool = true; - amount : Nat = CYCLES_AMOUNT_TO_OPERATOR; - }; - return #Ok(addCyclesResponse); - } catch (e) { - D.print("ShareService: sendCyclesToOperatorAdmin - Failed to send cycles: " # Error.message(e)); - // Store the failed attempt - let transactionEntry : Types.CyclesTransaction = { - amountAdded : Nat = CYCLES_AMOUNT_TO_OPERATOR; - newOfficialCycleBalance : Nat = Cycles.balance(); - creationTimestamp : Nat64 = Nat64.fromNat(Int.abs(Time.now())); - sentBy : Principal = msg.caller; - succeeded : Bool = false; - previousCyclesBalance : Nat = currentCyclesBalance; - }; - cyclesTransactionsStorage := List.push(transactionEntry, cyclesTransactionsStorage); - return #Err(#Other("ShareService: sendCyclesToOperatorAdmin - Failed to send cycles: " # Error.message(e))); - }; - }; - - public shared (msg) func setMinCyclesBalanceAdmin(newCyclesBalance : Nat) : async Types.StatusCodeRecordResult { - if (not Principal.isController(msg.caller)) { - return #Err(#StatusCode(401)); - }; - if (newCyclesBalance < 20 * Constants.CYCLES_TRILLION) { - return #Err(#StatusCode(401)); - }; - MIN_CYCLES_BALANCE := newCyclesBalance; - return #Ok({ status_code = 200 }); - }; - - public query (msg) func getMinCyclesBalanceAdmin() : async Nat { - if (not Principal.isController(msg.caller)) { - return 0; - }; - - return MIN_CYCLES_BALANCE; - }; - - public shared (msg) func setCyclesToSendToOperatorAdmin(newValue : Nat) : 
async Types.StatusCodeRecordResult { - if (not Principal.isController(msg.caller)) { - return #Err(#StatusCode(401)); - }; - if (newValue > 100 * Constants.CYCLES_TRILLION) { - return #Err(#StatusCode(401)); - }; - CYCLES_AMOUNT_TO_OPERATOR := newValue; - return #Ok({ status_code = 200 }); - }; - - public query (msg) func getCyclesToSendToOperatorAdmin() : async Nat { - if (not Principal.isController(msg.caller)) { - return 0; - }; - - return CYCLES_AMOUNT_TO_OPERATOR; - }; - // -------------------------------------------------------------------------- // Orthogonal Persisted Data storage @@ -1353,28 +1217,26 @@ actor class MainerAgentCtrlbCanister() = this { }; }; - // First send cycles to the LLM, if enabled - if (SEND_CYCLES_TO_LLM) { - var cyclesAdded : Nat = challengeQueueInput.cyclesGenerateResponseSsctrlSsllm; - if (MAINER_AGENT_CANISTER_TYPE == #Own) { - cyclesAdded := challengeQueueInput.cyclesGenerateResponseOwnctrlOwnllmHIGH; // TODO: adjust for mAIners with setting LOW or MEDIUM - }; - try { - D.print("mAIner (" # debug_show(MAINER_AGENT_CANISTER_TYPE) # "): respondToChallengeDoIt_ - calling Cycles.add for = " # debug_show(cyclesAdded) # " Cycles"); - Cycles.add(cyclesAdded); + // First send cycles to the LLM + var cyclesAdded : Nat = challengeQueueInput.cyclesGenerateResponseSsctrlSsllm; + if (MAINER_AGENT_CANISTER_TYPE == #Own) { + cyclesAdded := challengeQueueInput.cyclesGenerateResponseOwnctrlOwnllmHIGH; // TODO: adjust for mAIners with setting LOW or MEDIUM + }; + try { + D.print("mAIner (" # debug_show(MAINER_AGENT_CANISTER_TYPE) # "): respondToChallengeDoIt_ - calling Cycles.add for = " # debug_show(cyclesAdded) # " Cycles"); + Cycles.add(cyclesAdded); - D.print("mAIner (" # debug_show(MAINER_AGENT_CANISTER_TYPE) # "): respondToChallengeDoIt_ - calling IC0.deposit_cycles for LLM " # debug_show(llmCanisterPrincipal)); - let deposit_cycles_args = { canister_id : Principal = llmCanisterPrincipal; }; - let _ = await 
IC0.deposit_cycles(deposit_cycles_args); + D.print("mAIner (" # debug_show(MAINER_AGENT_CANISTER_TYPE) # "): respondToChallengeDoIt_ - calling IC0.deposit_cycles for LLM " # debug_show(llmCanisterPrincipal)); + let deposit_cycles_args = { canister_id : Principal = llmCanisterPrincipal; }; + let _ = await IC0.deposit_cycles(deposit_cycles_args); - D.print("mAIner (" # debug_show(MAINER_AGENT_CANISTER_TYPE) # "): respondToChallengeDoIt_ - Successfully deposited " # debug_show(cyclesAdded) # " cycles to LLM canister " # debug_show(llmCanisterPrincipal) ); - } catch (e) { - D.print("mAIner (" # debug_show(MAINER_AGENT_CANISTER_TYPE) # "): respondToChallengeDoIt_ - Failed to deposit " # debug_show(cyclesAdded) # " cycles to LLM canister " # debug_show(llmCanisterPrincipal)); - D.print("mAIner (" # debug_show(MAINER_AGENT_CANISTER_TYPE) # "): respondToChallengeDoIt_ - Failed to deposit error is" # Error.message(e)); + D.print("mAIner (" # debug_show(MAINER_AGENT_CANISTER_TYPE) # "): respondToChallengeDoIt_ - Successfully deposited " # debug_show(cyclesAdded) # " cycles to LLM canister " # debug_show(llmCanisterPrincipal) ); + } catch (e) { + D.print("mAIner (" # debug_show(MAINER_AGENT_CANISTER_TYPE) # "): respondToChallengeDoIt_ - Failed to deposit " # debug_show(cyclesAdded) # " cycles to LLM canister " # debug_show(llmCanisterPrincipal)); + D.print("mAIner (" # debug_show(MAINER_AGENT_CANISTER_TYPE) # "): respondToChallengeDoIt_ - Failed to deposit error is" # Error.message(e)); - return #Err(#FailedOperation); - }; - }; + return #Err(#FailedOperation); + }; let generationId : Text = await Utils.newRandomUniqueId(); @@ -2476,4 +2338,4 @@ actor class MainerAgentCtrlbCanister() = this { // Reset reporting variable for timer action1RegularityInSeconds := 0; // Timer is not yet set (They don't persist across upgrades) }; -}; +}; \ No newline at end of file From b2c9bfc93e3fee3f2c2b3636768e18af8b47ef1e Mon Sep 17 00:00:00 2001 From: patnorris Date: Mon, 10 Nov 2025 
17:33:25 +0100 Subject: [PATCH 07/14] Add initial setup --- src/common/Types.mo | 17 +++++ src/mAIner/src/Main.mo | 158 +++++++++++++++++++++++++++++++++++++---- 2 files changed, 161 insertions(+), 14 deletions(-) diff --git a/src/common/Types.mo b/src/common/Types.mo index 63df14e..78982a5 100644 --- a/src/common/Types.mo +++ b/src/common/Types.mo @@ -833,6 +833,23 @@ module Types { public type ChallengeResponseResult = Result; + public type MainerStatus = { + #Active; + #Inactive; + #Collapsing; + #Blackholed; + #Maintenance; + #Other : Text; + }; + + public type MainerStatusEntry = { + status : MainerStatus; + timestamp : Nat64; + currentCyclesBalance : Nat; + note : Text; + previousStatus : MainerStatus; + }; + // Judge public type ScoredResponseInput = ChallengeResponseSubmission and { judgedBy: Principal; diff --git a/src/mAIner/src/Main.mo b/src/mAIner/src/Main.mo index 31c3a9d..c13b4d6 100644 --- a/src/mAIner/src/Main.mo +++ b/src/mAIner/src/Main.mo @@ -105,7 +105,7 @@ actor class MainerAgentCtrlbCanister() = this { D.print("mAIner (" # debug_show(MAINER_AGENT_CANISTER_TYPE) # "): addCycles - Accepted " # Nat.toText(cyclesAdded) # " Cycles from caller " # Principal.toText(msg.caller)); // Unpause the mAIner if it was paused due to low cycle balance - PAUSED_DUE_TO_LOW_CYCLE_BALANCE := false; + let statusUpdateResult = handleMainerStatusAfterTopup(); // Add to official cycle balance and store all official top ups if (Principal.equal(msg.caller, Principal.fromText(GAME_STATE_CANISTER_ID))) { @@ -273,12 +273,46 @@ actor class MainerAgentCtrlbCanister() = this { // -------------------------------------------------------------------------- // Orthogonal Persisted Data storage + // Functionality to handle the mAIner's lifecycle + stable var mainerStatus : Types.MainerStatus = #Active; + + stable var mainerLastActiveEntry : ?Types.MainerStatusEntry = null; + + stable var mainerInactiveEntries : List.List = List.nil(); + stable var previousMainerInactiveEntries : 
List.List = List.nil(); + + stable var mainerCollapsingEntries : List.List = List.nil(); + stable var previousMainerCollapsingEntries : List.List = List.nil(); + + private func handleMainerStatusAfterTopup() : Bool { + switch (mainerStatus) { + case (#Active) { return true; }; + case (#Inactive) { + // mAIner is currently inactive and has to be switched back to active thanks to the topup + // update status, archive inactive entries and reset them + mainerStatus := #Active; + previousMainerInactiveEntries := mainerInactiveEntries; + mainerInactiveEntries := List.nil(); + return true; + }; + case (#Collapsing) { return false; }; // Topups don't affect collapsing mAIners, they need to be reanimated via a dedicated flow + case (#Blackholed) { return false; }; // Blackholed mAIners aren't affected by topups (or by anything else) + case (#Maintenance) { return false; }; // Nothing to do + case (#Other(_)) { return false; }; // Nothing to do + }; + }; + // The minimum cycle balance we want to maintain stable let CYCLE_BALANCE_MINIMUM = 250 * Constants.CYCLES_BILLION; - // A flag for the frontend to pick up and display a message to the user - stable var PAUSED_DUE_TO_LOW_CYCLE_BALANCE : Bool = false; + // A flag for the frontend to pick up and display a message to the user (now calculated based on the mAIner's status) + private func getPausedDueToLowCycleBalanceFlagValue() : Bool { + if (mainerStatus == #Active) { + return false; + }; + return true; + }; // Internal functions to check if the canister has enough cycles private func sufficientCyclesToProcessChallenge(challenge : Types.Challenge) : Bool { @@ -296,13 +330,112 @@ actor class MainerAgentCtrlbCanister() = this { // TODO: do calculation based on actual setting for LOW, MEDIUM, HIGH requiredCycles := requiredCycles + challenge.cyclesGenerateResponseOwnctrlGs + challenge.cyclesGenerateResponseOwnctrlOwnllmHIGH; }; - if (availableCycles < requiredCycles) { - D.print("mAIner (" # debug_show(MAINER_AGENT_CANISTER_TYPE) 
# "): CYCLE BALANCE TOO LOW TO PROCESS CHALLENGE:"); - D.print("mAIner (" # debug_show(MAINER_AGENT_CANISTER_TYPE) # "): requiredCycles = " # debug_show(requiredCycles)); - D.print("mAIner (" # debug_show(MAINER_AGENT_CANISTER_TYPE) # "): availableCycles = " # debug_show(availableCycles)); - return false; + + switch (mainerStatus) { + case (#Active) { + if (availableCycles < requiredCycles) { + D.print("mAIner (" # debug_show(MAINER_AGENT_CANISTER_TYPE) # "): CYCLE BALANCE TOO LOW TO PROCESS CHALLENGE:"); + D.print("mAIner (" # debug_show(MAINER_AGENT_CANISTER_TYPE) # "): requiredCycles = " # debug_show(requiredCycles)); + D.print("mAIner (" # debug_show(MAINER_AGENT_CANISTER_TYPE) # "): availableCycles = " # debug_show(availableCycles)); + // mAIner needs to be switched to inactive + mainerStatus := #Inactive; + mainerInactiveEntries := List.nil(); // Should not be necessary but as a precaution + let newEntry : Types.MainerStatusEntry = { + status : Types.MainerStatus = #Inactive; + timestamp : Nat64 = Nat64.fromNat(Int.abs(Time.now())); + currentCyclesBalance : Nat = availableCycles; + note : Text = "Switched to inactive due to low cycles balance"; + previousStatus : Types.MainerStatus = #Active; + }; + mainerInactiveEntries := List.push(newEntry, mainerInactiveEntries); + return false; + }; + // Update last active entry to now + let newEntry : Types.MainerStatusEntry = { + status : Types.MainerStatus = #Active; + timestamp : Nat64 = Nat64.fromNat(Int.abs(Time.now())); + currentCyclesBalance : Nat = availableCycles; + note : Text = ""; + previousStatus : Types.MainerStatus = #Active; + }; + mainerLastActiveEntry := ?newEntry; + return true; + }; + case (#Inactive) { + // Keep track of how long the mAIner has been in the Inactive state + // Add a new entry + let timestampNow : Nat64 = Nat64.fromNat(Int.abs(Time.now())); + let newEntry : Types.MainerStatusEntry = { + status : Types.MainerStatus = #Inactive; + timestamp : Nat64 = timestampNow; + currentCyclesBalance 
: Nat = availableCycles; + note : Text = "Still inactive"; + previousStatus : Types.MainerStatus = #Inactive; + }; + mainerInactiveEntries := List.push(newEntry, mainerInactiveEntries); + // Check the oldest entry and if too old (i.e. grace period over), change the status to Collapsing + switch (List.last(mainerInactiveEntries)) { + case (null) {}; // Continue + case (?earliestInactiveEntry) { + let mainerInactivityStateGracePeriod : Nat64 = 7 * 24 * 60 * 60 * 1_000_000_000; // 7 days in nanoseconds + if (timestampNow > earliestInactiveEntry.timestamp + mainerInactivityStateGracePeriod) { + // Grace period is over + mainerStatus := #Collapsing; + previousMainerInactiveEntries := mainerInactiveEntries; + mainerInactiveEntries := List.nil(); + let firstCollapsingEntry : Types.MainerStatusEntry = { + status : Types.MainerStatus = #Collapsing; + timestamp : Nat64 = timestampNow; + currentCyclesBalance : Nat = availableCycles; + note : Text = "Switched from inactive to collapsing"; + previousStatus : Types.MainerStatus = #Inactive; + }; + mainerCollapsingEntries := List.nil(); // Should not be necessary but as a precaution + mainerCollapsingEntries := List.push(firstCollapsingEntry, mainerCollapsingEntries); + }; + }; + }; + return false; + }; + case (#Collapsing) { + // Collapsing mAIners never work + // Keep track of how long the mAIner has been in the Collapsing state + // Add a new entry + let timestampNow : Nat64 = Nat64.fromNat(Int.abs(Time.now())); + let newEntry : Types.MainerStatusEntry = { + status : Types.MainerStatus = #Collapsing; + timestamp : Nat64 = timestampNow; + currentCyclesBalance : Nat = availableCycles; + note : Text = "Still collapsing"; + previousStatus : Types.MainerStatus = #Collapsing; + }; + mainerCollapsingEntries := List.push(newEntry, mainerCollapsingEntries); + // Check the oldest entry and if too old (i.e. 
grace period over), change the status to Blackholed + switch (List.last(mainerCollapsingEntries)) { + case (null) {}; // Continue + case (?earliestCollapsingEntry) { + let mainerCollapsingStateGracePeriod : Nat64 = 14 * 24 * 60 * 60 * 1_000_000_000; // 14 days in nanoseconds + if (timestampNow > earliestCollapsingEntry.timestamp + mainerCollapsingStateGracePeriod) { + // Grace period is over + mainerStatus := #Blackholed; + let lastCollapsingEntry : Types.MainerStatusEntry = { + status : Types.MainerStatus = #Blackholed; + timestamp : Nat64 = timestampNow; + currentCyclesBalance : Nat = availableCycles; + note : Text = "Switched from collapsing to blackholed"; + previousStatus : Types.MainerStatus = #Collapsing; + }; + mainerCollapsingEntries := List.push(lastCollapsingEntry, mainerCollapsingEntries); + previousMainerCollapsingEntries := mainerCollapsingEntries; + }; + }; + }; + return false; + }; + case (#Blackholed) { return false; }; // Blackholed mAIners never work again + case (#Maintenance) { return false; }; + case (#Other(_)) { return false; }; }; - return true; }; private func sufficientCyclesToSubmit(cyclesSubmitResponse : Nat) : Bool { @@ -327,7 +460,7 @@ actor class MainerAgentCtrlbCanister() = this { return #Err(#Unauthorized); }; let response : Types.IssueFlagsRecord = { - lowCycleBalance = PAUSED_DUE_TO_LOW_CYCLE_BALANCE; + lowCycleBalance = getPausedDueToLowCycleBalanceFlagValue(); }; return #Ok(response); }; @@ -1700,12 +1833,9 @@ actor class MainerAgentCtrlbCanister() = this { if (not sufficientCyclesToProcessChallenge(challenge)) { D.print("mAIner (" # debug_show(MAINER_AGENT_CANISTER_TYPE) # "): pullNextChallenge - PAUSING RESPONSE GENERATION DUE TO LOW CYCLE BALANCE"); - PAUSED_DUE_TO_LOW_CYCLE_BALANCE := true; return; }; - // Ok,the canister has enough cycles - PAUSED_DUE_TO_LOW_CYCLE_BALANCE := false; - + // Ok,the canister has enough cycles // Add the challenge to the queue let challengeQueuedId : Text = await Utils.newRandomUniqueId(); let 
challengeQueuedBy : Principal = Principal.fromActor(this); From 34347b674e09e7e497c6a7028c74e48605116a69 Mon Sep 17 00:00:00 2001 From: patnorris Date: Tue, 11 Nov 2025 15:15:37 +0100 Subject: [PATCH 08/14] Add function for game state to stop collapsing mAIner --- src/common/Types.mo | 3 +++ src/mAIner/src/Main.mo | 46 +++++++++++++++++++++++++++++++++++++----- 2 files changed, 44 insertions(+), 5 deletions(-) diff --git a/src/common/Types.mo b/src/common/Types.mo index 78982a5..23cbb49 100644 --- a/src/common/Types.mo +++ b/src/common/Types.mo @@ -850,6 +850,8 @@ module Types { previousStatus : MainerStatus; }; + public type MainerStatusEntryResult = Result; + // Judge public type ScoredResponseInput = ChallengeResponseSubmission and { judgedBy: Principal; @@ -1281,6 +1283,7 @@ module Types { addMainerShareAgentCanister: (OfficialMainerAgentCanister) -> async MainerAgentCanisterResult; startTimerExecutionAdmin: () -> async AuthRecordResult; addCycles: () -> async AddCyclesResult; + stopMainerFromCollapsing: () -> async MainerStatusEntryResult; }; public type LLMCanister = actor { diff --git a/src/mAIner/src/Main.mo b/src/mAIner/src/Main.mo index c13b4d6..98eee4f 100644 --- a/src/mAIner/src/Main.mo +++ b/src/mAIner/src/Main.mo @@ -104,12 +104,9 @@ actor class MainerAgentCtrlbCanister() = this { let cyclesAdded = Cycles.accept(Cycles.available()); D.print("mAIner (" # debug_show(MAINER_AGENT_CANISTER_TYPE) # "): addCycles - Accepted " # Nat.toText(cyclesAdded) # " Cycles from caller " # Principal.toText(msg.caller)); - // Unpause the mAIner if it was paused due to low cycle balance - let statusUpdateResult = handleMainerStatusAfterTopup(); - - // Add to official cycle balance and store all official top ups + // Game State can make official top ups (via its top up flow) if (Principal.equal(msg.caller, Principal.fromText(GAME_STATE_CANISTER_ID))) { - // Game State can make official top ups (via its top up flow) + // Add to official cycle balance and store all official 
top ups officialCyclesBalance := officialCyclesBalance + cyclesAdded; let topUpEntry : Types.OfficialMainerCycleTopUp = { amountAdded : Nat = cyclesAdded; @@ -118,6 +115,8 @@ actor class MainerAgentCtrlbCanister() = this { sentBy : Principal = msg.caller; }; officialCycleTopUpsStorage := List.push(topUpEntry, officialCycleTopUpsStorage); + // Unpause the mAIner if it was inactive due to low cycles balance + let statusUpdateResult = handleMainerStatusAfterTopup(); }; return #Ok({ @@ -465,6 +464,43 @@ actor class MainerAgentCtrlbCanister() = this { return #Ok(response); }; + public shared (msg) func stopMainerFromCollapsing() : async Types.MainerStatusEntryResult { + if (Principal.isAnonymous(msg.caller)) { + return #Err(#Unauthorized); + }; + if (not Principal.equal(msg.caller, Principal.fromText(GAME_STATE_CANISTER_ID))) { + D.print("mAIner (" # debug_show(MAINER_AGENT_CANISTER_TYPE) # "): stopMainerFromCollapsing - Called by unauthorized caller " # Principal.toText(msg.caller)); + return #Err(#Unauthorized); + }; + D.print("mAIner (" # debug_show(MAINER_AGENT_CANISTER_TYPE) # "): stopMainerFromCollapsing - Called by game state " # GAME_STATE_CANISTER_ID); + switch (mainerStatus) { + case (#Collapsing) { + // mAIner is currently collapsing and has to be switched back to inactive + // update status, archive and reset collapsing entries, and create first inactive entry + mainerStatus := #Inactive; + let newEntry : Types.MainerStatusEntry = { + status : Types.MainerStatus = #Inactive; + timestamp : Nat64 = Nat64.fromNat(Int.abs(Time.now())); + currentCyclesBalance : Nat = Cycles.balance(); + note : Text = "Switched from collapsing to inactive"; + previousStatus : Types.MainerStatus = #Collapsing; + }; + mainerCollapsingEntries := List.push(newEntry, mainerCollapsingEntries); + previousMainerCollapsingEntries := mainerCollapsingEntries; + mainerCollapsingEntries := List.nil(); + mainerInactiveEntries := List.nil(); // should not be needed but as a precaution + 
mainerInactiveEntries := List.push(newEntry, mainerInactiveEntries); + + return #Ok(newEntry); + }; + case (_) { + // This should not happen (as game state would be making a wrong call) + D.print("mAIner: stopMainerFromCollapsing - Called by game state but not in state Collapsing but " # debug_show(mainerStatus)); + return #Err(#Unauthorized); + }; + }; + }; + // Statistics // TODO - Implementation: set based on cycles flow data calculated in GameState stable var TOTAL_MAINER_CYCLES_BURNT : Nat = 100 * Constants.CYCLES_BILLION; // Initial value represents costs for creating this canister From 255b00df69a9b4c45317a26959645a6ce0800482 Mon Sep 17 00:00:00 2001 From: patnorris Date: Tue, 11 Nov 2025 15:17:31 +0100 Subject: [PATCH 09/14] Add TODO --- src/mAIner/src/Main.mo | 1 + 1 file changed, 1 insertion(+) diff --git a/src/mAIner/src/Main.mo b/src/mAIner/src/Main.mo index 98eee4f..f3ca44c 100644 --- a/src/mAIner/src/Main.mo +++ b/src/mAIner/src/Main.mo @@ -426,6 +426,7 @@ actor class MainerAgentCtrlbCanister() = this { }; mainerCollapsingEntries := List.push(lastCollapsingEntry, mainerCollapsingEntries); previousMainerCollapsingEntries := mainerCollapsingEntries; + // TODO: call Game State with this update }; }; }; From 7f253392887765f0edc510fe9c8d8faa7a62423c Mon Sep 17 00:00:00 2001 From: patnorris Date: Tue, 11 Nov 2025 16:55:58 +0100 Subject: [PATCH 10/14] Incorporate PR feedback --- src/mAIner/src/Main.mo | 32 ++++++++++++++++++++++++++++++-- 1 file changed, 30 insertions(+), 2 deletions(-) diff --git a/src/mAIner/src/Main.mo b/src/mAIner/src/Main.mo index f3ca44c..ff9abc1 100644 --- a/src/mAIner/src/Main.mo +++ b/src/mAIner/src/Main.mo @@ -313,6 +313,34 @@ actor class MainerAgentCtrlbCanister() = this { return true; }; + private func getMainerInactivityStateGracePeriod() : Nat64 { + switch (MAINER_AGENT_CANISTER_TYPE) { + case (#ShareAgent) { + return 7 * 24 * 60 * 60 * 1_000_000_000; // 7 days in nanoseconds + }; + case (#Own) { + return 3 * 24 * 60 * 60 * 
1_000_000_000; // 3 days in nanoseconds + }; + case (_) { + return 0; + }; + }; + }; + + private func getMainerCollapsingStateGracePeriod() : Nat64 { + switch (MAINER_AGENT_CANISTER_TYPE) { + case (#ShareAgent) { + return 14 * 24 * 60 * 60 * 1_000_000_000; // 14 days in nanoseconds + }; + case (#Own) { + return 4 * 24 * 60 * 60 * 1_000_000_000; // 4 days in nanoseconds + }; + case (_) { + return 0; + }; + }; + }; + // Internal functions to check if the canister has enough cycles private func sufficientCyclesToProcessChallenge(challenge : Types.Challenge) : Bool { // The ShareService canister does not Queue or Submit @@ -376,7 +404,7 @@ actor class MainerAgentCtrlbCanister() = this { switch (List.last(mainerInactiveEntries)) { case (null) {}; // Continue case (?earliestInactiveEntry) { - let mainerInactivityStateGracePeriod : Nat64 = 7 * 24 * 60 * 60 * 1_000_000_000; // 7 days in nanoseconds + let mainerInactivityStateGracePeriod : Nat64 = getMainerInactivityStateGracePeriod(); if (timestampNow > earliestInactiveEntry.timestamp + mainerInactivityStateGracePeriod) { // Grace period is over mainerStatus := #Collapsing; @@ -413,7 +441,7 @@ actor class MainerAgentCtrlbCanister() = this { switch (List.last(mainerCollapsingEntries)) { case (null) {}; // Continue case (?earliestCollapsingEntry) { - let mainerCollapsingStateGracePeriod : Nat64 = 14 * 24 * 60 * 60 * 1_000_000_000; // 14 days in nanoseconds + let mainerCollapsingStateGracePeriod : Nat64 = getMainerCollapsingStateGracePeriod(); if (timestampNow > earliestCollapsingEntry.timestamp + mainerCollapsingStateGracePeriod) { // Grace period is over mainerStatus := #Blackholed; From 87540021f37b0f9752300f5823c0fb9f555e60fe Mon Sep 17 00:00:00 2001 From: patnorris Date: Wed, 12 Nov 2025 15:23:27 +0100 Subject: [PATCH 11/14] Add function for user to stop an existing mAIner from collapsing --- src/GameState/src/Main.mo | 147 ++++++++++++++++++++++++++++++++++++++ src/common/Types.mo | 2 + 2 files changed, 149 
insertions(+) diff --git a/src/GameState/src/Main.mo b/src/GameState/src/Main.mo index 63329a0..360e29f 100644 --- a/src/GameState/src/Main.mo +++ b/src/GameState/src/Main.mo @@ -4542,6 +4542,19 @@ actor class GameStateCanister() = this { return #Ok(CYCLES_BALANCE_THRESHOLD_FUNNAI_TOPUPS); }; + stable var PRICE_FUNNAI_FOR_STOPPING_MAINER_FROM_COLLAPSING : Nat64 = 100; // In FUNNAI + public shared (msg) func setFunnaiForStoppingMainerFromCollapsingAdmin(funnaiAmount : Nat64) : async Types.StatusCodeRecordResult { + if (not Principal.isController(msg.caller)) { + return #Err(#Unauthorized); + }; + PRICE_FUNNAI_FOR_STOPPING_MAINER_FROM_COLLAPSING := funnaiAmount; + return #Ok({ status_code = 200 }); + }; + + public query func getPriceFunnaiStoppingMainerFromCollapsingAdmin() : async Types.PriceResult { + return #Ok({ price = PRICE_FUNNAI_FOR_STOPPING_MAINER_FROM_COLLAPSING }); + }; + // Decide on usage of incoming funds (e.g. for mAIner creation or top ups) private func handleIncomingFunds(transactionEntry : Types.RedeemedTransactionBlock) : async Types.HandleIncomingFundsResult { D.print("GameState: handleIncomingFunds - transactionEntry: "# debug_show(transactionEntry)); @@ -5085,6 +5098,15 @@ actor class GameStateCanister() = this { D.print("GameState: verifyIncomingFunnaiPayment - #MainerTopUp "); // continue as there is no fixed price }; + case (#MainerStoppedFromCollapsing(_)) { + D.print("GameState: verifyIncomingFunnaiPayment - #MainerStoppedFromCollapsing "); + // Check correct price was paid + D.print("GameState: verifyIncomingFunnaiPayment - #MainerStoppedFromCollapsing PRICE_OWN_MAINER: "# debug_show(PRICE_FOR_OWN_MAINER_ICP)); + let E8S_PER_FUNNAI_WITH_BUFFER : Nat64 = 90_000_000; // 10^8 e8s per FUNNAI + if (Nat64.fromNat(burnOperation.amount) < PRICE_FUNNAI_FOR_STOPPING_MAINER_FROM_COLLAPSING * E8S_PER_FUNNAI_WITH_BUFFER) { + return #Err(#Other("Transaction didn't pay full price")); + }; + }; case (_) { return #Err(#Other("Unsupported")); } }; 
D.print("GameState: verifyIncomingFunnaiPayment - verified: "); @@ -7147,6 +7169,131 @@ actor class GameStateCanister() = this { }; }; + // Function for user to stop an existing mAIner from collapsing (and thus becoming blackholed) by burning FUNNAI + public shared (msg) func stopUserMainerAgentFromCollapsingWithFunnai(mainerInput : Types.MainerAgentTopUpInput) : async Types.MainerAgentCanisterResult { + if (Principal.isAnonymous(msg.caller)) { + return #Err(#Unauthorized); + }; + if (PAUSE_PROTOCOL and not Principal.isController(msg.caller)) { + return #Err(#Other("Protocol is currently paused")); + }; + + // Ensure this transaction block hasn't been redeemed yet (no double spending) + let transactionToVerify = mainerInput.paymentTransactionBlockId; + switch (checkExistingFunnaiTransactionBlock(transactionToVerify)) { + case (false) { + // new transaction, continue + }; + case (true) { + // already redeem transaction + return #Err(#Other("Already redeemed this transaction block")); // no double spending + }; + }; + + // Sanity checks on provided mAIner info + let mainerInfo : Types.OfficialMainerAgentCanister = mainerInput.mainerAgent; + if (not Principal.equal(mainerInfo.ownedBy, msg.caller)) { + // Only the mAIner owner may call this + return #Err(#Unauthorized); + }; + if (mainerInfo.address == "") { + // The mAIner Controller canister address is needed + return #Err(#InvalidId); + }; + switch (mainerInfo.canisterType) { + case (#MainerAgent(_)) { + // continue + }; + case (_) { return #Err(#Other("Unsupported")); } + }; + + // Verify existing mAIner entry + switch (getUserMainerAgents(msg.caller)) { + case (null) { + return #Err(#Unauthorized); + }; + case (?userMainerEntries) { + switch (List.find(userMainerEntries, func(mainerEntry: Types.OfficialMainerAgentCanister) : Bool { mainerEntry.address == mainerInfo.address } )) { + case (null) { + return #Err(#InvalidId); + }; + case (?userMainerEntry) { + // Sanity checks on userMainerEntry (i.e. 
address provided is correct and matches entry info) + switch (userMainerEntry.canisterType) { + case (#MainerAgent(_)) { + // continue + }; + case (_) { return #Err(#Other("Unsupported")); } + }; + + // Verify user's FUNNAI payment for this via the TransactionBlockId + var verifiedPayment : Bool = false; + var amountPaid : Nat = 0; + let redeemedFor : Types.RedeemedForOptions = #MainerStoppedFromCollapsing(userMainerEntry.address); + let creationTimestamp : Nat64 = Nat64.fromNat(Int.abs(Time.now())); + let transactionEntryToVerify : Types.RedeemedTransactionBlock = { + paymentTransactionBlockId : Nat64 = mainerInput.paymentTransactionBlockId; + creationTimestamp : Nat64 = creationTimestamp; + redeemedBy : Principal = msg.caller; + redeemedFor : Types.RedeemedForOptions = redeemedFor; + amount : Nat = amountPaid; // to be updated + }; + let verificationResponse = await verifyIncomingFunnaiPayment(transactionEntryToVerify); + D.print("GameState: stopUserMainerAgentFromCollapsingWithFunnai - verificationResponse: "# debug_show(verificationResponse)); + switch (verificationResponse) { + case (#Ok(verificationResult)) { + verifiedPayment := verificationResult.verified; + amountPaid := verificationResult.amountPaid; + }; + case (_) { + return #Err(#Other("Payment verification failed")); + }; + }; + if (not verifiedPayment) { + return #Err(#Other("Payment couldn't be verified")); + }; + + let newTransactionEntry : Types.RedeemedTransactionBlock = { + paymentTransactionBlockId : Nat64 = mainerInput.paymentTransactionBlockId; + creationTimestamp : Nat64 = creationTimestamp; + redeemedBy : Principal = msg.caller; + redeemedFor : Types.RedeemedForOptions = redeemedFor; + amount : Nat = amountPaid; + }; + try { + let Mainer_Actor : Types.MainerAgentCtrlbCanister = actor (userMainerEntry.address); + D.print("GameState: stopUserMainerAgentFromCollapsingWithFunnai - calling stopMainerFromCollapsing for mAIner: " # debug_show(userMainerEntry.address)); + let 
stopMainerFromCollapsingResponse = await Mainer_Actor.stopMainerFromCollapsing(); + D.print("GameState: stopUserMainerAgentFromCollapsingWithFunnai - stopMainerFromCollapsingResponse: " # debug_show(stopMainerFromCollapsingResponse)); + switch (stopMainerFromCollapsingResponse) { + case (#Err(error)) { + D.print("GameState: stopUserMainerAgentFromCollapsingWithFunnai - stopMainerFromCollapsingResponse FailedOperation: " # debug_show(error)); + return #Err(#FailedOperation); + }; + case (#Ok(mainerStatusEntry)) { + D.print("GameState: stopUserMainerAgentFromCollapsingWithFunnai - mainerStatusEntry: " # debug_show(mainerStatusEntry)); + // Track redeemed FUNNAI transaction blocks to ensure no double spending + switch (putRedeemedFunnaiTransactionBlock(newTransactionEntry)) { + case (false) { + // TODO - Error Handling: likely retry + }; + case (true) { + // continue + }; + }; + return #Ok(userMainerEntry); + }; + }; + } catch (e) { + D.print("GameState: stopUserMainerAgentFromCollapsingWithFunnai - Failed to stop mAIner from collapsing: " # debug_show(mainerInput) # Error.message(e)); + return #Err(#Other("GameState: stopUserMainerAgentFromCollapsingWithFunnai - Failed to stop mAIner from collapsing: " # debug_show(mainerInput) # Error.message(e))); + }; + }; + }; + }; + }; + }; + // Function for user to get their mAIner agent canisters public shared query (msg) func getMainerAgentCanistersForUser() : async Types.MainerAgentCanistersResult { if (Principal.isAnonymous(msg.caller)) { diff --git a/src/common/Types.mo b/src/common/Types.mo index 23cbb49..0f549f2 100644 --- a/src/common/Types.mo +++ b/src/common/Types.mo @@ -406,6 +406,8 @@ module Types { public type RedeemedForOptions = { #MainerCreation : MainerAgentCanisterType; #MainerTopUp : CanisterAddress; + #MainerStoppedFromCollapsing : CanisterAddress; + #Other : Text; }; public type HandleIncomingFundsRecord = { From 158e7cdf05e1f9b0a8497a3bafc2dc5e366fcbd6 Mon Sep 17 00:00:00 2001 From: patnorris Date: Wed, 12 
Nov 2025 15:49:48 +0100 Subject: [PATCH 12/14] Add data structures and functions for blackholed user mAIners --- src/GameState/src/Main.mo | 160 ++++++++++++++++++++++++++++++++++++++ 1 file changed, 160 insertions(+) diff --git a/src/GameState/src/Main.mo b/src/GameState/src/Main.mo index 360e29f..fff755d 100644 --- a/src/GameState/src/Main.mo +++ b/src/GameState/src/Main.mo @@ -2096,6 +2096,90 @@ actor class GameStateCanister() = this { return sharedServiceCanistersStorage.vals().next(); }; + // Blackholed mAIner Registry: mAIner agent canisters (owned by users) which collapsed and cannot be reanimated anymore (thus forever inactive) + stable var blackholedMainerAgentCanistersStorageStable : [(Text, Types.OfficialMainerAgentCanister)] = []; + var blackholedMainerAgentCanistersStorage : HashMap.HashMap = HashMap.HashMap(0, Text.equal, Text.hash); + stable var blackholedUserToMainerAgentsStorageStable : [(Principal, List.List)] = []; + var blackholedUserToMainerAgentsStorage : HashMap.HashMap> = HashMap.HashMap(0, Principal.equal, Principal.hash); + + private func addBlackholedMainerAgentCanister(canisterEntryToAdd : Types.OfficialMainerAgentCanister) : Types.MainerAgentCanisterResult { + switch (canisterEntryToAdd.canisterType) { + case (#MainerAgent(_)) { + // continue + }; + case (_) { + D.print("GameState: addBlackholedMainerAgentCanister - Unsupported canisterEntryToAdd.canisterType: " # debug_show(canisterEntryToAdd.canisterType) ); + return #Err(#Other("Unsupported")); + } + }; + let _ = putBlackholedUserMainerAgent(canisterEntryToAdd); + blackholedMainerAgentCanistersStorage.put(canisterEntryToAdd.address, canisterEntryToAdd); + D.print("GameState: addBlackholedMainerAgentCanister added canisterEntry: " # debug_show(canisterEntryToAdd) ); + return #Ok(canisterEntryToAdd); + }; + + private func putBlackholedUserMainerAgent(canisterEntry : Types.OfficialMainerAgentCanister) : Bool { + switch (getBlackholedUserMainerAgents(canisterEntry.ownedBy)) { + case 
(null) { + // first entry + let userCanistersList : List.List = List.make(canisterEntry); + blackholedUserToMainerAgentsStorage.put(canisterEntry.ownedBy, userCanistersList); + return true; + }; + case (?userCanistersList) { + //existing list, add entry to it + // Deduplicate (based on creationTimestamp) + let filteredUserCanistersList : List.List = List.filter(userCanistersList, func(listEntry: Types.OfficialMainerAgentCanister) : Bool { listEntry.creationTimestamp != canisterEntry.creationTimestamp }); + let updatedUserCanistersList : List.List = List.push(canisterEntry, filteredUserCanistersList); + blackholedUserToMainerAgentsStorage.put(canisterEntry.ownedBy, updatedUserCanistersList); + return true; + }; + }; + }; + + private func getBlackholedUserMainerAgents(userId : Principal) : ?List.List { + switch (blackholedUserToMainerAgentsStorage.get(userId)) { + case (null) { return null; }; + case (?userCanistersList) { return ?userCanistersList; }; + }; + }; + + private func getBlackholedMainerAgents() : [Types.OfficialMainerAgentCanister] { + var mainerAgents : List.List = List.nil(); + for (userMainerAgentsList in blackholedUserToMainerAgentsStorage.vals()) { + mainerAgents := List.append(userMainerAgentsList, mainerAgents); + }; + return List.toArray(mainerAgents); + }; + + private func getNumberBlackholedMainerAgents(mainerType : Types.MainerAgentCanisterType) : Nat { + switch (mainerType) { + case (#Own) { + let iter = blackholedMainerAgentCanistersStorage.vals(); + let mappedIter = Iter.filter(iter, func (mainerEntry : Types.OfficialMainerAgentCanister) : Bool { + switch (mainerEntry.mainerConfig.mainerAgentCanisterType) { + case (#Own) { return true; }; + case (#ShareAgent) { return false; }; + case (_) { return false; } + }; + }); + return Iter.size(mappedIter); + }; + case (#ShareAgent) { + let iter = blackholedMainerAgentCanistersStorage.vals(); + let mappedIter = Iter.filter(iter, func (mainerEntry : Types.OfficialMainerAgentCanister) : Bool { + switch 
(mainerEntry.mainerConfig.mainerAgentCanisterType) { + case (#Own) { return false; }; + case (#ShareAgent) { return true; }; + case (_) { return false; } + }; + }); + return Iter.size(mappedIter); + }; + case (_) { return 0; } + }; + }; + // mAIner Registry: Official mAIner agent canisters (owned by users) stable var mainerAgentCanistersStorageStable : [(Text, Types.OfficialMainerAgentCanister)] = []; var mainerAgentCanistersStorage : HashMap.HashMap = HashMap.HashMap(0, Text.equal, Text.hash); @@ -7384,6 +7468,76 @@ actor class GameStateCanister() = this { }; }; + // Blackholed mAIners + // Function for user to get their mAIner agent canisters + public shared query (msg) func getBlackholedMainerAgentCanistersForUser() : async Types.MainerAgentCanistersResult { + if (Principal.isAnonymous(msg.caller)) { + return #Err(#Unauthorized); + }; + + switch (getBlackholedUserMainerAgents(msg.caller)) { + case (null) { return #Err(#Other("No canisters for this caller")); }; + case (?userCanistersList) { + return #Ok(List.toArray(userCanistersList)); + }; + }; + }; + + public shared query (msg) func getBlackholedMainerAgentCanistersAdmin() : async Types.MainerAgentCanistersResult { + if (Principal.isAnonymous(msg.caller)) { + return #Err(#Unauthorized); + }; + if (not Principal.isController(msg.caller)) { + return #Err(#Unauthorized); + }; + + return #Ok(getBlackholedMainerAgents()); + }; + + public shared query (msg) func getBlackholedMainerAgentCanistersForUserAdmin(user : Text) : async Types.MainerAgentCanistersResult { + if (Principal.isAnonymous(msg.caller)) { + return #Err(#Unauthorized); + }; + if (not Principal.isController(msg.caller)) { + return #Err(#Unauthorized); + }; + + switch (getBlackholedUserMainerAgents(Principal.fromText(user))) { + case (null) { return #Err(#Other("No canisters for this user")); }; + case (?userCanistersList) { + return #Ok(List.toArray(userCanistersList)); + }; + }; + }; + + public shared query (msg) func 
getNumBlackholedMainerAgentCanistersForUserAdmin(user : Text) : async Types.NatResult { + if (Principal.isAnonymous(msg.caller)) { + return #Err(#Unauthorized); + }; + if (not Principal.isController(msg.caller)) { + return #Err(#Unauthorized); + }; + + switch (getBlackholedUserMainerAgents(Principal.fromText(user))) { + case (null) { return #Err(#Other("No canisters for this user")); }; + case (?userCanistersList) { + return #Ok(List.size(userCanistersList)); + }; + }; + }; + + public shared query (msg) func getNumberBlackholedMainerAgentsAdmin(checkInput : Types.CheckMainerLimit) : async Types.NatResult { + if (Principal.isAnonymous(msg.caller)) { + return #Err(#Unauthorized); + }; + if (not Principal.isController(msg.caller)) { + return #Err(#Unauthorized); + }; + + let result = getNumberBlackholedMainerAgents(checkInput.mainerType); + return #Ok(result); + }; + // Function for mAIner agent canister to retrieve a random open challenge public shared (msg) func getRandomOpenChallenge() : async Types.ChallengeResult { if (Principal.isAnonymous(msg.caller)) { @@ -8335,6 +8489,8 @@ actor class GameStateCanister() = this { sharedServiceCanistersStorageStable := Iter.toArray(sharedServiceCanistersStorage.entries()); redeemedTransactionBlocksStorageStable := Iter.toArray(redeemedTransactionBlocksStorage.entries()); redeemedFunnaiTransactionBlocksStorageStable := Iter.toArray(redeemedFunnaiTransactionBlocksStorage.entries()); + blackholedMainerAgentCanistersStorageStable := Iter.toArray(blackholedMainerAgentCanistersStorage.entries()); + blackholedUserToMainerAgentsStorageStable := Iter.toArray(blackholedUserToMainerAgentsStorage.entries()); }; system func postupgrade() { @@ -8370,5 +8526,9 @@ actor class GameStateCanister() = this { redeemedTransactionBlocksStorageStable := []; redeemedFunnaiTransactionBlocksStorage := HashMap.fromIter(Iter.fromArray(redeemedFunnaiTransactionBlocksStorageStable), redeemedFunnaiTransactionBlocksStorageStable.size(), Nat.equal, Hash.hash); 
redeemedFunnaiTransactionBlocksStorageStable := []; + blackholedMainerAgentCanistersStorage := HashMap.fromIter(Iter.fromArray(blackholedMainerAgentCanistersStorageStable), blackholedMainerAgentCanistersStorageStable.size(), Text.equal, Text.hash); + blackholedMainerAgentCanistersStorageStable := []; + blackholedUserToMainerAgentsStorage := HashMap.fromIter(Iter.fromArray(blackholedUserToMainerAgentsStorageStable), blackholedUserToMainerAgentsStorageStable.size(), Principal.equal, Principal.hash); + blackholedUserToMainerAgentsStorageStable := []; }; }; From 7bda5d5c7e5dcffeeab10ba50822a1481df5662d Mon Sep 17 00:00:00 2001 From: patnorris Date: Wed, 12 Nov 2025 16:42:01 +0100 Subject: [PATCH 13/14] Add function for mAIner to notify it is blackholed --- src/GameState/src/Main.mo | 25 +++++++++++++++++++++++++ src/common/Types.mo | 3 ++- 2 files changed, 27 insertions(+), 1 deletion(-) diff --git a/src/GameState/src/Main.mo b/src/GameState/src/Main.mo index fff755d..069f94c 100644 --- a/src/GameState/src/Main.mo +++ b/src/GameState/src/Main.mo @@ -7378,6 +7378,31 @@ actor class GameStateCanister() = this { }; }; + // Function for mAIner agent canister to notify game state that it collapsed and is now blackholed + public shared (msg) func notifyMainerAgentCanisterIsBlackholed() : async Types.AuthRecordResult { + if (Principal.isAnonymous(msg.caller)) { + return #Err(#Unauthorized); + }; + // Only official mAIner agent canisters may call this (and thus blackhole itself) + switch (getMainerAgentCanister(Principal.toText(msg.caller))) { + case (null) { return #Err(#Unauthorized); }; + case (?mainerAgentEntry) { + // Add mAIner as blackholed + let blackholedResult : Types.MainerAgentCanisterResult = addBlackholedMainerAgentCanister(mainerAgentEntry); + switch (blackholedResult) { + case (#Ok(blackholedMainerEntry)) { + // Remove mAIner entry from active agents + let result1 = removeMainerAgentCanister(Principal.toText(msg.caller)); + let result2 = 
removeUserMainerAgent(mainerAgentEntry); + let authRecord = { auth = "The mAIner notified that it's blackholed" }; + return #Ok(authRecord); + }; + case (_) { return #Err(#FailedOperation); }; + }; + }; + }; + }; + // Function for user to get their mAIner agent canisters public shared query (msg) func getMainerAgentCanistersForUser() : async Types.MainerAgentCanistersResult { if (Principal.isAnonymous(msg.caller)) { diff --git a/src/common/Types.mo b/src/common/Types.mo index 0f549f2..98c27ba 100644 --- a/src/common/Types.mo +++ b/src/common/Types.mo @@ -1262,7 +1262,8 @@ module Types { getJudgePromptInfo : (Text) -> async Types.JudgePromptInfoResult; getMainerCyclesUsedPerResponse : () -> async NatResult; getCyclesBurnRate : (Types.CyclesBurnRateDefault) -> async Types.CyclesBurnRateResult; - addCycles: () -> async AddCyclesResult; + addCycles : () -> async AddCyclesResult; + notifyMainerAgentCanisterIsBlackholed : () -> async AuthRecordResult; }; public type MainerCreator_Actor = actor { From 5b0aecbdd67cf63195ee5cbe71cab188dc488279 Mon Sep 17 00:00:00 2001 From: patnorris Date: Wed, 12 Nov 2025 16:59:56 +0100 Subject: [PATCH 14/14] Add functionality to notify game state that the mAIner is blackholed --- src/mAIner/src/Main.mo | 13 +++++++++---- 1 file changed, 9 insertions(+), 4 deletions(-) diff --git a/src/mAIner/src/Main.mo b/src/mAIner/src/Main.mo index ff9abc1..8d97faa 100644 --- a/src/mAIner/src/Main.mo +++ b/src/mAIner/src/Main.mo @@ -342,7 +342,7 @@ actor class MainerAgentCtrlbCanister() = this { }; // Internal functions to check if the canister has enough cycles - private func sufficientCyclesToProcessChallenge(challenge : Types.Challenge) : Bool { + private func sufficientCyclesToProcessChallenge(challenge : Types.Challenge) : async Bool { // The ShareService canister does not Queue or Submit if (MAINER_AGENT_CANISTER_TYPE == #ShareService) { return true; }; @@ -454,7 +454,10 @@ actor class MainerAgentCtrlbCanister() = this { }; mainerCollapsingEntries :=
List.push(lastCollapsingEntry, mainerCollapsingEntries); previousMainerCollapsingEntries := mainerCollapsingEntries; - // TODO: call Game State with this update + // Call Game State with this update to notify that this mAIner is now blackholed + let gameStateCanisterActor = actor (GAME_STATE_CANISTER_ID) : Types.GameStateCanister_Actor; + D.print("mAIner (" # debug_show(MAINER_AGENT_CANISTER_TYPE) # "): calling notifyMainerAgentCanisterIsBlackholed of gameStateCanisterActor = " # Principal.toText(Principal.fromActor(gameStateCanisterActor))); + ignore gameStateCanisterActor.notifyMainerAgentCanisterIsBlackholed(); }; }; }; @@ -1896,7 +1899,8 @@ actor class MainerAgentCtrlbCanister() = this { case (#Ok(challenge : Types.Challenge)) { D.print("mAIner (" # debug_show(MAINER_AGENT_CANISTER_TYPE) # "): pullNextChallenge - challenge = " # debug_show (challenge)); - if (not sufficientCyclesToProcessChallenge(challenge)) { + let sufficientCyclesResult : Bool = await sufficientCyclesToProcessChallenge(challenge); + if (not sufficientCyclesResult) { D.print("mAIner (" # debug_show(MAINER_AGENT_CANISTER_TYPE) # "): pullNextChallenge - PAUSING RESPONSE GENERATION DUE TO LOW CYCLE BALANCE"); return; }; @@ -2019,7 +2023,8 @@ actor class MainerAgentCtrlbCanister() = this { D.print("mAIner (" # debug_show(MAINER_AGENT_CANISTER_TYPE) # "): processNextChallenge - challengeQueueInput" # debug_show (challengeQueueInput)); // Check if the canister has enough cycles for this particular Challenge - if (not sufficientCyclesToProcessChallenge(challengeQueueInput)) { + let sufficientCyclesResult : Bool = await sufficientCyclesToProcessChallenge(challengeQueueInput); + if (not sufficientCyclesResult) { // Note: do not set pause flag D.print("mAIner (" # debug_show(MAINER_AGENT_CANISTER_TYPE) # "): processNextChallenge - Not enough cycles to process challenge. Pushing it back on the queue to try later."); // Push the challenge back to the queue to try again later