@@ -331,22 +331,33 @@ CompletableFuture<List<V>> invokeLoader(List<K> keys, List<Object> keyContexts,
         CompletableFuture<List<Try<V>>> cacheCallCF = getFromValueCache(keys);
         return cacheCallCF.thenCompose(cachedValues -> {
 
-            assertState(keys.size() == cachedValues.size(), () -> "The size of the cached values MUST be the same size as the key list");
-
             // the following is NOT a Map because keys in data loader can repeat (by design)
             // and hence "a","b","c","b" is a valid set of keys
             List<Try<V>> valuesInKeyOrder = new ArrayList<>();
             List<Integer> missedKeyIndexes = new ArrayList<>();
             List<K> missedKeys = new ArrayList<>();
             List<Object> missedKeyContexts = new ArrayList<>();
-            for (int i = 0; i < keys.size(); i++) {
-                Try<V> cacheGet = cachedValues.get(i);
-                valuesInKeyOrder.add(cacheGet);
-                if (cacheGet.isFailure()) {
+
+            // if they return a ValueCachingNotSupported exception then we insert this special marker value, and it
+            // means it's a total miss, we need to get all these keys via the batch loader
+            if (cachedValues == NOT_SUPPORTED_LIST) {
+                for (int i = 0; i < keys.size(); i++) {
+                    valuesInKeyOrder.add(ALWAYS_FAILED);
                     missedKeyIndexes.add(i);
                     missedKeys.add(keys.get(i));
                     missedKeyContexts.add(keyContexts.get(i));
                 }
+            } else {
+                assertState(keys.size() == cachedValues.size(), () -> "The size of the cached values MUST be the same size as the key list");
+                for (int i = 0; i < keys.size(); i++) {
+                    Try<V> cacheGet = cachedValues.get(i);
+                    valuesInKeyOrder.add(cacheGet);
+                    if (cacheGet.isFailure()) {
+                        missedKeyIndexes.add(i);
+                        missedKeys.add(keys.get(i));
+                        missedKeyContexts.add(keyContexts.get(i));
+                    }
+                }
             }
             if (missedKeys.isEmpty()) {
                 //
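
The parallel lists above exist because keys may legitimately repeat ("a","b","c","b"), so a Map keyed by K could not carry the per-position bookkeeping. A minimal, self-contained sketch of that index-based assembly pattern, using Optional as a stand-in for the library's Try type; all names here are illustrative and not part of the patch:

import java.util.ArrayList;
import java.util.List;
import java.util.Optional;

class KeyOrderAssemblySketch {
    public static void main(String[] args) {
        List<String> keys = List.of("a", "b", "c", "b");    // duplicate keys are legal by design
        List<Optional<String>> cached = List.of(
                Optional.of("A"), Optional.empty(), Optional.empty(), Optional.empty());

        // values are kept in key order; misses are remembered by index so they can be patched in later
        List<Optional<String>> valuesInKeyOrder = new ArrayList<>(cached);
        List<Integer> missedKeyIndexes = new ArrayList<>();
        List<String> missedKeys = new ArrayList<>();
        for (int i = 0; i < keys.size(); i++) {
            if (cached.get(i).isEmpty()) {                   // treat an empty Optional as a cache miss
                missedKeyIndexes.add(i);
                missedKeys.add(keys.get(i));
            }
        }

        // pretend the batch loader resolved missedKeys, in the same order they were collected
        List<String> batchLoaded = List.of("B", "C", "B");
        for (int j = 0; j < missedKeyIndexes.size(); j++) {
            valuesInKeyOrder.set(missedKeyIndexes.get(j), Optional.of(batchLoaded.get(j)));
        }

        System.out.println(valuesInKeyOrder);                // [Optional[A], Optional[B], Optional[C], Optional[B]]
    }
}
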
@@ -442,9 +453,16 @@ int dispatchDepth() {
         }
     }
 
+    private final List<Try<V>> NOT_SUPPORTED_LIST = emptyList();
+    private final CompletableFuture<List<Try<V>>> NOT_SUPPORTED = CompletableFuture.completedFuture(NOT_SUPPORTED_LIST);
+    private final Try<V> ALWAYS_FAILED = Try.alwaysFailed();
+
     private CompletableFuture<List<Try<V>>> getFromValueCache(List<K> keys) {
         try {
             return nonNull(valueCache.getValues(keys), () -> "Your ValueCache.getValues function MUST return a non null CompletableFuture");
+        } catch (ValueCache.ValueCachingNotSupported ignored) {
+            // use of a final field prevents CF object allocation for this special purpose
+            return NOT_SUPPORTED;
+        } catch (RuntimeException e) {
             return CompletableFutureKit.failedFuture(e);
         }
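
NOT_SUPPORTED is a single pre-completed future handed back on every ValueCachingNotSupported, and the list it wraps is later recognised by reference identity (cachedValues == NOT_SUPPORTED_LIST) rather than by content. A small sketch of that identity-sentinel idea, with hypothetical names outside the patch:

import java.util.Collections;
import java.util.List;
import java.util.concurrent.CompletableFuture;

class SentinelFutureSketch {
    // shared sentinel: only ever compared with ==, never inspected for its contents
    private static final List<String> NOT_SUPPORTED_LIST = Collections.emptyList();
    private static final CompletableFuture<List<String>> NOT_SUPPORTED =
            CompletableFuture.completedFuture(NOT_SUPPORTED_LIST);

    // stand-in for getFromValueCache: reuses the same completed future instead of
    // allocating a new CompletableFuture on every call when caching is unsupported
    static CompletableFuture<List<String>> lookup(boolean cachingSupported) {
        return cachingSupported
                ? CompletableFuture.completedFuture(List.of("cached value"))
                : NOT_SUPPORTED;
    }

    public static void main(String[] args) {
        List<String> result = lookup(false).join();
        if (result == NOT_SUPPORTED_LIST) {   // identity check, mirroring cachedValues == NOT_SUPPORTED_LIST
            System.out.println("caching not supported - treat every key as a batch-loader miss");
        }
    }
}

In the patch these are instance fields rather than statics because they depend on the V type parameter, but the allocation-avoidance idea is the same.
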
@@ -456,16 +474,18 @@ private CompletableFuture<List<V>> setToValueCache(List<V> assembledValues, List
             if (completeValueAfterCacheSet) {
                 return nonNull(valueCache
                         .setValues(missedKeys, missedValues), () -> "Your ValueCache.setValues function MUST return a non null CompletableFuture")
-                        // we dont trust the set cache to give us the values back - we have them - lets use them
-                        // if the cache set fails - then they wont be in cache and maybe next time they will
+                        // we don't trust the set cache to give us the values back - we have them - lets use them
+                        // if the cache set fails - then they won't be in cache and maybe next time they will
                         .handle((ignored, setExIgnored) -> assembledValues);
             } else {
                 // no one is waiting for the set to happen here so if its truly async
-                // it will happen eventually but no result will be dependant on it
+                // it will happen eventually but no result will be dependent on it
                 valueCache.setValues(missedKeys, missedValues);
             }
+        } catch (ValueCache.ValueCachingNotSupported ignored) {
+            // ok no set caching is fine if they say so
         } catch (RuntimeException ignored) {
-            // if we cant set values back into the cache - so be it - this must be a faulty
+            // if we can't set values back into the cache - so be it - this must be a faulty
             // ValueCache implementation
         }
         return CompletableFuture.completedFuture(assembledValues);
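
The behaviour preserved in this hunk: when completeValueAfterCacheSet is true the returned future only completes once the cache write has been attempted, and .handle((ignored, setExIgnored) -> assembledValues) deliberately discards any cache-set failure because the assembled values are already in hand. A small sketch of that fallback semantics (failingCacheSet is an illustrative stand-in, not a library call):

import java.util.List;
import java.util.concurrent.CompletableFuture;

class CacheSetFallbackSketch {
    // stand-in for a ValueCache.setValues call that fails asynchronously
    static CompletableFuture<List<String>> failingCacheSet() {
        CompletableFuture<List<String>> cf = new CompletableFuture<>();
        cf.completeExceptionally(new RuntimeException("cache is down"));
        return cf;
    }

    public static void main(String[] args) {
        List<String> assembledValues = List.of("a", "b", "c");

        // handle() runs whether the cache write succeeded or failed; either way the
        // values we already assembled are returned, so a broken cache never breaks the load
        List<String> result = failingCacheSet()
                .handle((ignored, setExIgnored) -> assembledValues)
                .join();

        System.out.println(result);   // prints [a, b, c]
    }
}

The new ValueCachingNotSupported catch falls through to the same completedFuture(assembledValues) return, so a cache that opts out of value caching behaves like one whose writes silently fail.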