Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
Show all changes
23 commits
Select commit Hold shift + click to select a range
0190196
feat: add cost model infrastructure for Value builtins
Unisay Sep 24, 2025
befe59d
feat: implement comprehensive Value benchmarking framework
Unisay Sep 24, 2025
f04a755
feat: add statistical models and cost parameters for Value builtins
Unisay Sep 24, 2025
92b338a
feat: replace placeholder costing with parameter-driven implementation
Unisay Sep 24, 2025
c2ea365
feat: add comprehensive benchmark data for Value operations
Unisay Sep 24, 2025
90f88d4
feat: add Logarithmic wrapper for modeling logarithmic complexity
Unisay Oct 2, 2025
ea24922
refactor: simplify Value benchmarks with Cardano-compliant constraints
Unisay Oct 2, 2025
3833e4a
refactor: simplify Value builtin cost models with improved documentation
Unisay Oct 8, 2025
819e734
feat: update benchmark data for Value builtins
Unisay Oct 9, 2025
67da997
refactor: refine cost model parameters for Value builtins
Unisay Oct 9, 2025
15df58f
refactor: use GHC.Num.Integer.integerLog2 in Logarithmic instance
Unisay Oct 9, 2025
c5266e9
refactor: use consistent size measure wrappers for Value builtins
Unisay Oct 9, 2025
b434ea9
feat: update benchmark data and cost model parameters for Value builtins
Unisay Oct 9, 2025
109f444
docs: add changelog entry for Value builtin cost model updates
Unisay Oct 9, 2025
c846557
feat: regenerate cost models after rebase to account for validation o…
Unisay Oct 9, 2025
2c404e1
feat: update Value builtin cost models with remote CI benchmark data
Unisay Oct 10, 2025
49f24a0
feat: update constant costs for builtin value operations
Unisay Oct 13, 2025
bff4bf2
refactor: standardize cost model JSON formatting to 4-space indentation
Unisay Oct 13, 2025
071c8e4
chore: space
Unisay Oct 13, 2025
1de3622
refactor: simplify memoryUsageInteger by removing MagicHash
Unisay Oct 13, 2025
66e7f56
refactor: consolidate Value size measurement types
Unisay Oct 14, 2025
b52f4f0
test: improve error diagnostics in evaluation context test utilities
Unisay Oct 14, 2025
a8326df
feat: update parameter names and test expectations for Value builtins
Unisay Oct 14, 2025
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
Original file line number Diff line number Diff line change
@@ -1,2 +1,2 @@
({cpu: 100000112100
| mem: 100000000800})
({cpu: 338744
| mem: 801})
Original file line number Diff line number Diff line change
@@ -1,2 +1,2 @@
({cpu: 100000112100
| mem: 100000000800})
({cpu: 338744
| mem: 801})
Original file line number Diff line number Diff line change
@@ -1,2 +1,2 @@
({cpu: 100000080100
| mem: 100000000600})
({cpu: 269422
| mem: 601})
Original file line number Diff line number Diff line change
@@ -1,2 +1,2 @@
({cpu: 100000080100
| mem: 100000000600})
({cpu: 81100
| mem: 601})
Original file line number Diff line number Diff line change
@@ -1,2 +1,2 @@
({cpu: 100000080100
| mem: 100000000600})
({cpu: 269422
| mem: 601})
Original file line number Diff line number Diff line change
@@ -1,2 +1,2 @@
({cpu: 100000080100
| mem: 100000000600})
({cpu: 269422
| mem: 601})
Original file line number Diff line number Diff line change
@@ -1,2 +1,2 @@
({cpu: 100000080100
| mem: 100000000600})
({cpu: 269422
| mem: 601})
Original file line number Diff line number Diff line change
@@ -1,2 +1,2 @@
({cpu: 100000080100
| mem: 100000000600})
({cpu: 175261
| mem: 601})
Original file line number Diff line number Diff line change
@@ -0,0 +1,40 @@
<!--
A new scriv changelog fragment.
Uncomment the section that is right (remove the HTML comment wrapper).
For top level release notes, leave all the headers commented out.
-->

<!--
### Removed
- A bullet item for the Removed category.
-->
<!--
### Added
- A bullet item for the Added category.
-->
### Changed

- Updated benchmark data and cost model parameters for Value-related builtins (lookupCoin, valueContains, valueData, unValueData) based on fresh benchmark measurements.
<!--
### Deprecated
- A bullet item for the Deprecated category.
-->
<!--
### Fixed
- A bullet item for the Fixed category.
-->
<!--
### Security
- A bullet item for the Security category.
-->
186 changes: 186 additions & 0 deletions plutus-core/cost-model/budgeting-bench/Benchmarks/Values.hs
Original file line number Diff line number Diff line change
@@ -0,0 +1,186 @@
{-# LANGUAGE BlockArguments #-}
{-# LANGUAGE ImportQualifiedPost #-}
{-# LANGUAGE NumericUnderscores #-}
{-# LANGUAGE TupleSections #-}

module Benchmarks.Values (makeBenchmarks) where

import Prelude

import Common
import Control.Monad (replicateM)
import Criterion.Main (Benchmark)
import Data.ByteString (ByteString)
import Data.Int (Int64)
import PlutusCore (DefaultFun (LookupCoin, UnValueData, ValueContains, ValueData))
import PlutusCore.Evaluation.Machine.ExMemoryUsage (ValueLogOuterOrMaxInner (..),
ValueTotalSize (..))
import PlutusCore.Value (K, Value)
import PlutusCore.Value qualified as Value
import System.Random.Stateful (StatefulGen, StdGen, runStateGen_, uniformByteStringM, uniformRM)

----------------------------------------------------------------------------------------------------
-- Benchmarks --------------------------------------------------------------------------------------

-- | All Criterion benchmarks for the Value builtins:
-- 'LookupCoin', 'ValueContains', 'ValueData' and 'UnValueData'.
makeBenchmarks :: StdGen -> [Benchmark]
makeBenchmarks gen =
  map ($ gen)
    [ lookupCoinBenchmark
    , valueContainsBenchmark
    , valueDataBenchmark
    , unValueDataBenchmark
    ]

----------------------------------------------------------------------------------------------------
-- LookupCoin --------------------------------------------------------------------------------------

-- | Benchmark 'LookupCoin' (policy id, token name, value) elementwise.
-- The 'Value' argument is wrapped so its reported size is the outer map
-- size / max inner map size on a logarithmic scale.
lookupCoinBenchmark :: StdGen -> Benchmark
lookupCoinBenchmark gen =
  createThreeTermBuiltinBenchElementwiseWithWrappers
    sizeWrappers
    LookupCoin -- monomorphic builtin, hence no type arguments below
    []
    (lookupCoinArgs gen)
  where
    -- ByteString keys are measured as-is; only the Value gets a wrapper.
    sizeWrappers = (id, id, ValueLogOuterOrMaxInner)

-- | Argument triples (key1, key2, value) for the 'LookupCoin' benchmark.
--
-- NOTE(review): the search keys are drawn at random, so lookups will almost
-- never hit an existing entry — confirm that a missing key is indeed the
-- worst case, or mix in known-present keys.
lookupCoinArgs :: StdGen -> [(ByteString, ByteString, Value)]
lookupCoinArgs gen = runStateGen_ gen \g -> do
  -- Pair every common test value with random search keys.
  common <- traverse (withSearchKeys g . pure) (generateTestValues gen)

  -- Fixed grid of (number of policies, tokens per policy) shapes
  -- specific to lookupCoin.
  sized <-
    traverse
      (\(numPolicies, tokensPerPolicy) ->
        withSearchKeys g (generateConstrainedValue numPolicies tokensPerPolicy g))
      [(100, 10), (500, 20), (1_000, 50), (2_000, 100)]

  -- Extra fully-random values for parameter spread.
  spread <- replicateM 100 (withSearchKeys g (generateValue g))

  pure (common <> sized <> spread)

-- | Attach two random search keys to a generated 'Value'. The keys are
-- independent of the value's contents, so they may or may not occur in it.
withSearchKeys :: (StatefulGen g m) => g -> m Value -> m (ByteString, ByteString, Value)
withSearchKeys g mkValue = do
  v <- mkValue
  firstKey <- generateKeyBS g
  secondKey <- generateKeyBS g
  pure (firstKey, secondKey, v)

----------------------------------------------------------------------------------------------------
-- ValueContains -----------------------------------------------------------------------------------

-- | Benchmark 'ValueContains' elementwise. The container value is measured
-- by outer/max-inner size on a log scale, the contained value by total size.
valueContainsBenchmark :: StdGen -> Benchmark
valueContainsBenchmark gen =
  createTwoTermBuiltinBenchElementwiseWithWrappers
    (ValueLogOuterOrMaxInner, ValueTotalSize)
    ValueContains -- monomorphic builtin, hence no type arguments below
    []
    (valueContainsArgs gen)

-- | Argument pairs (container, contained) for the 'ValueContains' benchmark.
-- The contained value is built from a prefix of the container's entries,
-- which guarantees contained ⊆ container by construction.
valueContainsArgs :: StdGen -> [(Value, Value)]
valueContainsArgs gen = runStateGen_ gen \g -> replicateM 100 do
  container <- generateValue g
  -- Pick how many of the container's entries the contained value keeps.
  keepCount <- uniformRM (0, Value.totalSize container) g
  let contained =
        Value.fromList
          [ (policyId, [(tokenName, quantity)])
          | (policyId, tokenName, quantity) <-
              take keepCount (Value.toFlatList container)
          ]
  pure (container, contained)

----------------------------------------------------------------------------------------------------
-- ValueData ---------------------------------------------------------------------------------------

-- | Benchmark 'ValueData' (Value-to-Data conversion) over the common test values.
valueDataBenchmark :: StdGen -> Benchmark
valueDataBenchmark = createOneTermBuiltinBench ValueData [] . generateTestValues

----------------------------------------------------------------------------------------------------
-- UnValueData -------------------------------------------------------------------------------------

-- | Benchmark 'UnValueData' (Data-to-Value conversion) by first converting
-- the common test values to Data via 'Value.valueData'.
unValueDataBenchmark :: StdGen -> Benchmark
unValueDataBenchmark gen =
  createOneTermBuiltinBench UnValueData [] (map Value.valueData (generateTestValues gen))

----------------------------------------------------------------------------------------------------
-- Value Generators --------------------------------------------------------------------------------

-- | Common test values shared by several benchmarks: the empty value as an
-- edge case followed by 100 random values for parameter spread.
generateTestValues :: StdGen -> [Value]
generateTestValues gen = runStateGen_ gen \g -> do
  randomValues <- replicateM 100 (generateValue g)
  pure (Value.empty : randomValues)

-- | Generate a 'Value' whose entry count is drawn uniformly between 1 and
-- the maximum number of maximal-size entries fitting a 30,000-byte budget.
generateValue :: (StatefulGen g m) => g -> m Value
generateValue g = do
  entries <- uniformRM (1, maxEntryCount) g
  generateValueMaxEntries entries g
  where
    -- Total byte budget for one generated Value.
    byteBudget :: Int
    byteBudget = 30_000

    -- Worst-case bytes per entry: a policy key plus a token key (both at
    -- 'Value.maxKeyLen') plus an Int64 quantity (8 bytes).
    bytesPerEntry :: Int
    bytesPerEntry = Value.maxKeyLen + Value.maxKeyLen + 8

    -- How many such entries fit in the budget.
    maxEntryCount :: Int
    maxEntryCount = byteBudget `div` bytesPerEntry

-- | Generate a 'Value' with at most the given number of entries, splitting
-- them uniformly between "many policies, few tokens each" and
-- "few policies, many tokens each".
generateValueMaxEntries :: (StatefulGen g m) => Int -> g -> m Value
generateValueMaxEntries maxEntries g = do
  numPolicies <- uniformRM (1, maxEntries) g
  -- NOTE(review): for positive maxEntries this guard is unreachable, since
  -- uniformRM (1, maxEntries) yields at least 1; kept for the degenerate
  -- maxEntries <= 0 case where uniformRM may flip the bounds.
  let tokensPerPolicy
        | numPolicies > 0 = maxEntries `div` numPolicies
        | otherwise = 0
  generateConstrainedValue numPolicies tokensPerPolicy g

-- | Generate a 'Value' with the given shape: every policy carries the same
-- set of token names, and every quantity is the maximal Int64.
--
-- NOTE(review): key uniqueness is not enforced — the generated keys are
-- random 'Value.maxKeyLen'-byte strings, so collisions (which would shrink
-- the resulting Value below numPolicies x tokensPerPolicy entries) are
-- possible in principle; confirm this is acceptable for the size measure.
generateConstrainedValue
  :: (StatefulGen g m)
  => Int -- Number of policies
  -> Int -- Number of tokens per policy
  -> g
  -> m Value
generateConstrainedValue numPolicies tokensPerPolicy g = do
  policies <- replicateM numPolicies (generateKey g)
  tokens <- replicateM tokensPerPolicy (generateKey g)
  let maxQuantity :: Integer
      maxQuantity = fromIntegral (maxBound :: Int64)

      entries :: [(K, [(K, Integer)])]
      entries = [(policy, [(token, maxQuantity) | token <- tokens]) | policy <- policies]

  pure (Value.fromList entries)

----------------------------------------------------------------------------------------------------
-- Other Generators --------------------------------------------------------------------------------

-- | Generate a random key of exactly 'Value.maxKeyLen' bytes (Cardano
-- compliance). 'Value.k' cannot fail on input of that length, so the
-- error branch is unreachable in practice.
generateKey :: (StatefulGen g m) => g -> m K
generateKey g = do
  keyBytes <- uniformByteStringM Value.maxKeyLen g
  maybe (error "Internal error: maxKeyLen key should always be valid") pure (Value.k keyBytes)

-- | Generate a random 'Value.maxKeyLen'-byte 'ByteString', used as the raw
-- lookup-key arguments of 'LookupCoin'.
generateKeyBS :: (StatefulGen g m) => g -> m ByteString
generateKeyBS g = uniformByteStringM Value.maxKeyLen g
Copy link
Member

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

If the keys are completely random, then lookupCoin will probably never hit an existing entry, right?

Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

lookupCoin will probably never hit an existing entry,

Maybe that's what we want? Do we know if finding out that something's not in the map is the worst case? Naively you might think that the time taken to discover that some key is not in the map is always greater or equal to the time taken to find a key that is in the map.

Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

I don't agree. I think we should actively include both the case when the map contains the key and when it doesn't. Otherwise we're not really measuring this case, and that's the whole point of benchmarking, right? Otherwise we would just use the, analytically discovered, worst-time complexity of the algorithm and pick a function from that category for its cost, right?

Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Also, as I mentioned above, you won't have a good idea of the actual size of the Value if you don't enforce uniqueness of the keys.

2 changes: 2 additions & 0 deletions plutus-core/cost-model/budgeting-bench/Main.hs
Original file line number Diff line number Diff line change
Expand Up @@ -17,6 +17,7 @@ import Benchmarks.Pairs qualified
import Benchmarks.Strings qualified
import Benchmarks.Tracing qualified
import Benchmarks.Unit qualified
import Benchmarks.Values qualified

import Criterion.Main
import Criterion.Types as C
Expand Down Expand Up @@ -60,6 +61,7 @@ main = do
<> Benchmarks.Strings.makeBenchmarks gen
<> Benchmarks.Tracing.makeBenchmarks gen
<> Benchmarks.Unit.makeBenchmarks gen
<> Benchmarks.Values.makeBenchmarks gen

{- Run the nop benchmarks with a large time limit (30 seconds) in an attempt to
get accurate results. -}
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -176,5 +176,10 @@ builtinMemoryModels = BuiltinCostModelBase
, paramLengthOfArray = Id $ ModelOneArgumentConstantCost 10
, paramListToArray = Id $ ModelOneArgumentLinearInX $ OneVariableLinearFunction 7 1
, paramIndexArray = Id $ ModelTwoArgumentsConstantCost 32
-- Builtin values
, paramLookupCoin = Id $ ModelThreeArgumentsConstantCost 10
, paramValueContains = Id $ ModelTwoArgumentsConstantCost 32
, paramValueData = Id $ ModelOneArgumentConstantCost 32
, paramUnValueData = Id $ ModelOneArgumentConstantCost 32
}
where identityFunction = OneVariableLinearFunction 0 1
10 changes: 10 additions & 0 deletions plutus-core/cost-model/create-cost-model/CreateBuiltinCostModel.hs
Original file line number Diff line number Diff line change
Expand Up @@ -131,6 +131,11 @@ builtinCostModelNames = BuiltinCostModelBase
, paramLengthOfArray = "lengthOfArrayModel"
, paramListToArray = "listToArrayModel"
, paramIndexArray = "indexArrayModel"
-- Builtin values
, paramLookupCoin = "lookupCoinModel"
, paramValueContains = "valueContainsModel"
, paramValueData = "valueDataModel"
, paramUnValueData = "unValueDataModel"
}


Expand Down Expand Up @@ -279,6 +284,11 @@ createBuiltinCostModel bmfile rfile = do
paramLengthOfArray <- getParams readCF1 paramLengthOfArray
paramListToArray <- getParams readCF1 paramListToArray
paramIndexArray <- getParams readCF2 paramIndexArray
-- Builtin values
paramLookupCoin <- getParams readCF3 paramLookupCoin
paramValueContains <- getParams readCF2 paramValueContains
paramValueData <- getParams readCF1 paramValueData
paramUnValueData <- getParams readCF1 paramUnValueData

pure $ BuiltinCostModelBase {..}

Expand Down
Loading