From b8194f53d71d2e4d874ee6936cf313eedca987b2 Mon Sep 17 00:00:00 2001 From: "copilot-swe-agent[bot]" <198982749+Copilot@users.noreply.github.com> Date: Tue, 17 Mar 2026 07:58:59 +0000 Subject: [PATCH 01/22] Initial plan From bc3c983ad3f7e6df976e9cf55bd7ea45f0d23b89 Mon Sep 17 00:00:00 2001 From: "copilot-swe-agent[bot]" <198982749+Copilot@users.noreply.github.com> Date: Tue, 17 Mar 2026 08:28:20 +0000 Subject: [PATCH 02/22] feat: add next-page prefetch cache for paginated SELECT queries Co-authored-by: rrobetti <7221783+rrobetti@users.noreply.github.com> --- .../grpc/server/ServerConfiguration.java | 49 ++- .../grpc/server/StatementServiceImpl.java | 101 +++++ .../grpc/server/paging/CachedPage.java | 58 +++ .../server/paging/NextPagePrefetchCache.java | 390 ++++++++++++++++++ .../grpc/server/paging/PageInfo.java | 49 +++ .../server/paging/PaginationDetector.java | 201 +++++++++ .../paging/NextPagePrefetchCacheTest.java | 241 +++++++++++ .../server/paging/PaginationDetectorTest.java | 277 +++++++++++++ 8 files changed, 1365 insertions(+), 1 deletion(-) create mode 100644 ojp-server/src/main/java/org/openjproxy/grpc/server/paging/CachedPage.java create mode 100644 ojp-server/src/main/java/org/openjproxy/grpc/server/paging/NextPagePrefetchCache.java create mode 100644 ojp-server/src/main/java/org/openjproxy/grpc/server/paging/PageInfo.java create mode 100644 ojp-server/src/main/java/org/openjproxy/grpc/server/paging/PaginationDetector.java create mode 100644 ojp-server/src/test/java/org/openjproxy/grpc/server/paging/NextPagePrefetchCacheTest.java create mode 100644 ojp-server/src/test/java/org/openjproxy/grpc/server/paging/PaginationDetectorTest.java diff --git a/ojp-server/src/main/java/org/openjproxy/grpc/server/ServerConfiguration.java b/ojp-server/src/main/java/org/openjproxy/grpc/server/ServerConfiguration.java index c2893d67f..7fca87637 100644 --- a/ojp-server/src/main/java/org/openjproxy/grpc/server/ServerConfiguration.java +++ 
b/ojp-server/src/main/java/org/openjproxy/grpc/server/ServerConfiguration.java @@ -68,6 +68,12 @@ public class ServerConfiguration { private static final String TELEMETRY_GRPC_METRICS_ENABLED_KEY = "ojp.telemetry.grpc.metrics.enabled"; private static final String TELEMETRY_POOL_METRICS_ENABLED_KEY = "ojp.telemetry.pool.metrics.enabled"; + // Next-page prefetch cache configuration keys + private static final String NEXT_PAGE_CACHE_ENABLED_KEY = "ojp.server.nextPageCache.enabled"; + private static final String NEXT_PAGE_CACHE_TTL_SECONDS_KEY = "ojp.server.nextPageCache.ttlSeconds"; + private static final String NEXT_PAGE_CACHE_MAX_ENTRIES_KEY = "ojp.server.nextPageCache.maxEntries"; + private static final String NEXT_PAGE_CACHE_PREFETCH_WAIT_TIMEOUT_MS_KEY = "ojp.server.nextPageCache.prefetchWaitTimeoutMs"; + // TLS configuration keys private static final String TLS_ENABLED_KEY = "ojp.server.tls.enabled"; private static final String TLS_KEYSTORE_PATH_KEY = "ojp.server.tls.keystore.path"; @@ -135,6 +141,12 @@ public class ServerConfiguration { public static final boolean DEFAULT_TELEMETRY_GRPC_METRICS_ENABLED = true; // Enabled by default when OpenTelemetry is enabled public static final boolean DEFAULT_TELEMETRY_POOL_METRICS_ENABLED = true; // Enabled by default when OpenTelemetry is enabled + // Next-page prefetch cache default values + public static final boolean DEFAULT_NEXT_PAGE_CACHE_ENABLED = false; // Disabled by default, opt-in + public static final long DEFAULT_NEXT_PAGE_CACHE_TTL_SECONDS = 300; // 5 minutes + public static final int DEFAULT_NEXT_PAGE_CACHE_MAX_ENTRIES = 100; + public static final long DEFAULT_NEXT_PAGE_CACHE_PREFETCH_WAIT_TIMEOUT_MS = 5000; // 5 seconds + // TLS default values public static final boolean DEFAULT_TLS_ENABLED = false; // Disabled by default for backwards compatibility public static final boolean DEFAULT_TLS_CLIENT_AUTH_REQUIRED = false; // mTLS disabled by default @@ -211,6 +223,12 @@ public class ServerConfiguration { 
private final boolean tlsClientAuthRequired; + // Next-page prefetch cache configuration + private final boolean nextPageCacheEnabled; + private final long nextPageCacheTtlSeconds; + private final int nextPageCacheMaxEntries; + private final long nextPageCachePrefetchWaitTimeoutMs; + public ServerConfiguration() { this.serverPort = getIntProperty(SERVER_PORT_KEY, DEFAULT_SERVER_PORT); this.prometheusPort = getIntProperty(PROMETHEUS_PORT_KEY, DEFAULT_PROMETHEUS_PORT); @@ -274,6 +292,12 @@ public ServerConfiguration() { this.telemetryGrpcMetricsEnabled = getBooleanProperty(TELEMETRY_GRPC_METRICS_ENABLED_KEY, DEFAULT_TELEMETRY_GRPC_METRICS_ENABLED); this.telemetryPoolMetricsEnabled = getBooleanProperty(TELEMETRY_POOL_METRICS_ENABLED_KEY, DEFAULT_TELEMETRY_POOL_METRICS_ENABLED); + // Next-page prefetch cache configuration + this.nextPageCacheEnabled = getBooleanProperty(NEXT_PAGE_CACHE_ENABLED_KEY, DEFAULT_NEXT_PAGE_CACHE_ENABLED); + this.nextPageCacheTtlSeconds = getLongProperty(NEXT_PAGE_CACHE_TTL_SECONDS_KEY, DEFAULT_NEXT_PAGE_CACHE_TTL_SECONDS); + this.nextPageCacheMaxEntries = getIntProperty(NEXT_PAGE_CACHE_MAX_ENTRIES_KEY, DEFAULT_NEXT_PAGE_CACHE_MAX_ENTRIES); + this.nextPageCachePrefetchWaitTimeoutMs = getLongProperty(NEXT_PAGE_CACHE_PREFETCH_WAIT_TIMEOUT_MS_KEY, DEFAULT_NEXT_PAGE_CACHE_PREFETCH_WAIT_TIMEOUT_MS); + logConfigurationSummary(); } @@ -416,6 +440,13 @@ private void logConfigurationSummary() { logger.info(" Tracing Service Name: {}", tracingServiceName); logger.info(" Tracing Sample Rate: {}", tracingSampleRate); } + logger.info("Next-Page Prefetch Cache Configuration:"); + logger.info(" Next-Page Cache Enabled: {}", nextPageCacheEnabled); + if (nextPageCacheEnabled) { + logger.info(" Next-Page Cache TTL: {} seconds", nextPageCacheTtlSeconds); + logger.info(" Next-Page Cache Max Entries: {}", nextPageCacheMaxEntries); + logger.info(" Next-Page Cache Prefetch Wait Timeout: {} ms", nextPageCachePrefetchWaitTimeoutMs); + } } /** @@ -641,5 +672,21 @@ 
public boolean isTelemetryGrpcMetricsEnabled() { public boolean isTelemetryPoolMetricsEnabled() { return telemetryPoolMetricsEnabled; } - + + public boolean isNextPageCacheEnabled() { + return nextPageCacheEnabled; + } + + public long getNextPageCacheTtlSeconds() { + return nextPageCacheTtlSeconds; + } + + public int getNextPageCacheMaxEntries() { + return nextPageCacheMaxEntries; + } + + public long getNextPageCachePrefetchWaitTimeoutMs() { + return nextPageCachePrefetchWaitTimeoutMs; + } + } \ No newline at end of file diff --git a/ojp-server/src/main/java/org/openjproxy/grpc/server/StatementServiceImpl.java b/ojp-server/src/main/java/org/openjproxy/grpc/server/StatementServiceImpl.java index 1aacb6a1b..ffb42dfdb 100644 --- a/ojp-server/src/main/java/org/openjproxy/grpc/server/StatementServiceImpl.java +++ b/ojp-server/src/main/java/org/openjproxy/grpc/server/StatementServiceImpl.java @@ -21,9 +21,12 @@ import lombok.extern.slf4j.Slf4j; import org.apache.commons.collections4.CollectionUtils; import org.apache.commons.lang3.StringUtils; +import org.openjproxy.constants.CommonConstants; import org.openjproxy.grpc.ProtoConverter; +import org.openjproxy.grpc.dto.OpQueryResult; import org.openjproxy.grpc.dto.Parameter; import org.openjproxy.grpc.server.action.resource.CallResourceAction; +import org.openjproxy.grpc.server.action.session.ResultSetHelper; import org.openjproxy.grpc.server.action.session.TerminateSessionAction; import org.openjproxy.grpc.server.action.transaction.CommitTransactionAction; import org.openjproxy.grpc.server.action.transaction.RollbackTransactionAction; @@ -35,6 +38,11 @@ import org.openjproxy.grpc.server.action.xa.XaRecoverAction; import org.openjproxy.grpc.server.action.xa.XaRollbackAction; import org.openjproxy.grpc.server.action.xa.XaStartAction; +import org.openjproxy.grpc.server.paging.CachedPage; +import org.openjproxy.grpc.server.paging.NextPagePrefetchCache; +import org.openjproxy.grpc.server.paging.PageInfo; +import 
org.openjproxy.grpc.server.paging.PaginationDetector; +import org.openjproxy.grpc.server.resultset.ResultSetWrapper; import org.openjproxy.grpc.server.statement.StatementFactory; import org.openjproxy.xa.pool.XATransactionRegistry; import org.openjproxy.xa.pool.spi.XAConnectionPoolProvider; @@ -47,6 +55,7 @@ import java.sql.SQLDataException; import java.sql.SQLException; import java.sql.Statement; +import java.util.ArrayList; import java.util.List; import java.util.Map; import java.util.Optional; @@ -72,6 +81,9 @@ public class StatementServiceImpl extends StatementServiceGrpc.StatementServiceI // SQL Enhancer Engine for query optimization private final org.openjproxy.grpc.server.sql.SqlEnhancerEngine sqlEnhancerEngine; + // Next-page prefetch cache for paginated queries (disabled by default) + private final NextPagePrefetchCache nextPagePrefetchCache; + // Multinode XA coordinator for distributing transaction limits private static final MultinodeXaCoordinator xaCoordinator = new MultinodeXaCoordinator(); @@ -90,6 +102,12 @@ public StatementServiceImpl(SessionManager sessionManager, CircuitBreakerRegistr // Server configuration for creating segregation managers this.sqlEnhancerEngine = new org.openjproxy.grpc.server.sql.SqlEnhancerEngine( serverConfiguration.isSqlEnhancerEnabled()); + // Next-page prefetch cache (disabled by default) + this.nextPagePrefetchCache = new NextPagePrefetchCache( + serverConfiguration.isNextPageCacheEnabled(), + serverConfiguration.getNextPageCacheMaxEntries(), + serverConfiguration.getNextPageCacheTtlSeconds(), + serverConfiguration.getNextPageCachePrefetchWaitTimeoutMs()); initializeXAPoolProvider(); // Create SQL statement metrics from the registered OpenTelemetry instance (if available) @@ -287,18 +305,101 @@ private void executeQueryInternal(StatementRequest request, StreamObserver params = ProtoConverter.fromProtoList(request.getParametersList()); + + // ---- Next-page prefetch cache ---- + if (nextPagePrefetchCache.isEnabled()) { + 
Optional cached = nextPagePrefetchCache.getIfReady(sql); + if (cached.isPresent()) { + CachedPage page = cached.get(); + // Start prefetch for the page after this one before returning the cached result + startNextPagePrefetch(sql, params, dto.getSession().getConnHash()); + streamCachedPage(page, dto.getSession(), responseObserver); + return; + } + } + // ---- End next-page prefetch cache check ---- + if (CollectionUtils.isNotEmpty(params)) { PreparedStatement ps = StatementFactory.createPreparedStatement(sessionManager, dto, sql, params, request); String resultSetUUID = this.sessionManager.registerResultSet(dto.getSession(), ps.executeQuery()); + // Start prefetch for the next page while the current page is being streamed + startNextPagePrefetch(sql, params, dto.getSession().getConnHash()); handleResultSet(actionContext, dto.getSession(), resultSetUUID, responseObserver); } else { Statement stmt = StatementFactory.createStatement(sessionManager, dto.getConnection(), request); String resultSetUUID = this.sessionManager.registerResultSet(dto.getSession(), stmt.executeQuery(sql)); + // Start prefetch for the next page while the current page is being streamed + startNextPagePrefetch(sql, params, dto.getSession().getConnHash()); handleResultSet(actionContext, dto.getSession(), resultSetUUID, responseObserver); } } + /** + * Starts an asynchronous prefetch of the next page for the given SQL, if the feature + * is enabled and the SQL contains a recognised pagination clause. 
+ * + * @param sql the current paginated SQL + * @param params the query parameters (used as-is for the next-page query) + * @param connHash the connection hash used to look up the DataSource + */ + private void startNextPagePrefetch(String sql, List params, String connHash) { + if (!nextPagePrefetchCache.isEnabled()) { + return; + } + Optional pageInfo = PaginationDetector.detect(sql); + if (pageInfo.isEmpty()) { + return; + } + String nextPageSql = PaginationDetector.buildNextPageSql(sql, pageInfo.get()); + if (nextPageSql == null) { + return; + } + DataSource dataSource = datasourceMap.get(connHash); + if (dataSource == null) { + log.debug("No DataSource found for prefetch, connHash={}", connHash); + return; + } + nextPagePrefetchCache.prefetchAsync(dataSource, nextPageSql, params); + } + + /** + * Streams the rows held in a {@link CachedPage} directly to the gRPC response observer, + * using the same chunking strategy as {@link ResultSetHelper#handleResultSet}. + * + * @param page the cached page to stream + * @param session the current session info (embedded in each response message) + * @param responseObserver the gRPC observer to stream results into + */ + private static void streamCachedPage(CachedPage page, SessionInfo session, + StreamObserver responseObserver) { + OpQueryResult.OpQueryResultBuilder queryResultBuilder = OpQueryResult.builder(); + queryResultBuilder.labels(page.getColumnLabels()); + + List batch = new ArrayList<>(); + int row = 0; + boolean justSent = false; + + for (Object[] rowValues : page.getRows()) { + justSent = false; + row++; + batch.add(rowValues); + if (row % CommonConstants.ROWS_PER_RESULT_SET_DATA_BLOCK == 0) { + justSent = true; + responseObserver.onNext(ResultSetWrapper.wrapResults(session, batch, + queryResultBuilder, null, "")); + queryResultBuilder = OpQueryResult.builder(); + batch = new ArrayList<>(); + } + } + + if (!justSent) { + responseObserver.onNext(ResultSetWrapper.wrapResults(session, batch, + queryResultBuilder, 
null, "")); + } + responseObserver.onCompleted(); + } + @Override public void fetchNextRows(ResultSetFetchRequest request, StreamObserver responseObserver) { org.openjproxy.grpc.server.action.transaction.FetchNextRowsAction.getInstance() diff --git a/ojp-server/src/main/java/org/openjproxy/grpc/server/paging/CachedPage.java b/ojp-server/src/main/java/org/openjproxy/grpc/server/paging/CachedPage.java new file mode 100644 index 000000000..f672972b6 --- /dev/null +++ b/ojp-server/src/main/java/org/openjproxy/grpc/server/paging/CachedPage.java @@ -0,0 +1,58 @@ +package org.openjproxy.grpc.server.paging; + +import java.util.Collections; +import java.util.List; + +/** + * Holds a single cached page of query results. + * + *

Instances are immutable once created. The {@link #isExpired(long)} method + * can be used to check whether the entry has exceeded its time-to-live.

+ */ +public class CachedPage { + + private final List columnLabels; + private final List rows; + private final long createdAtMs; + + /** + * @param columnLabels ordered list of column names from the result set metadata + * @param rows result rows; each element is an array of column values + */ + public CachedPage(List columnLabels, List rows) { + this.columnLabels = Collections.unmodifiableList(columnLabels); + this.rows = Collections.unmodifiableList(rows); + this.createdAtMs = System.currentTimeMillis(); + } + + /** + * Returns the ordered list of column labels for this result set. + */ + public List getColumnLabels() { + return columnLabels; + } + + /** + * Returns the cached rows. Each element is an array of column values + * in the same order as {@link #getColumnLabels()}. + */ + public List getRows() { + return rows; + } + + /** + * Returns the epoch milliseconds at which this entry was created. + */ + public long getCreatedAtMs() { + return createdAtMs; + } + + /** + * Returns {@code true} if the entry is older than {@code ttlMs} milliseconds. 
+ * + * @param ttlMs time-to-live in milliseconds + */ + public boolean isExpired(long ttlMs) { + return System.currentTimeMillis() - createdAtMs > ttlMs; + } +} diff --git a/ojp-server/src/main/java/org/openjproxy/grpc/server/paging/NextPagePrefetchCache.java b/ojp-server/src/main/java/org/openjproxy/grpc/server/paging/NextPagePrefetchCache.java new file mode 100644 index 000000000..c0b2ca4a6 --- /dev/null +++ b/ojp-server/src/main/java/org/openjproxy/grpc/server/paging/NextPagePrefetchCache.java @@ -0,0 +1,390 @@ +package org.openjproxy.grpc.server.paging; + +import lombok.extern.slf4j.Slf4j; +import org.openjproxy.grpc.dto.Parameter; +import org.openjproxy.grpc.dto.ParameterType; + +import javax.sql.DataSource; +import java.math.BigDecimal; +import java.sql.Connection; +import java.sql.Date; +import java.sql.PreparedStatement; +import java.sql.ResultSet; +import java.sql.ResultSetMetaData; +import java.sql.SQLException; +import java.sql.Statement; +import java.sql.Time; +import java.sql.Timestamp; +import java.sql.Types; +import java.util.ArrayList; +import java.util.List; +import java.util.Optional; +import java.util.concurrent.CompletableFuture; +import java.util.concurrent.ConcurrentHashMap; +import java.util.concurrent.TimeUnit; + +/** + * Cache for pre-fetched next pages of paginated SELECT queries. + * + *

Behaviour

+ *
    + *
  1. When a paginated query is executed, the server fires a virtual thread that + * executes the next page SQL against the database and stores the result + * in this cache, keyed by the (trimmed) next-page SQL string.
  2. + *
  3. When the client subsequently requests the next page, the server first checks + * this cache. If a matching entry is found the result is served from memory, + * and another prefetch is started for the page after that.
  4. + *
  5. If the client requests a page that is still being fetched, the server waits + * up to {@code prefetchWaitTimeoutMs} for the operation to complete before + * falling back to a regular database query.
  6. + *
+ * + *

Limitations (first-pass implementation)

+ *
    + *
  • CLOB / NCLOB columns are not cached – the prefetch skips storing the page.
  • + *
  • Parameters of type BLOB or CLOB (LOB references) are not supported in + * prefetch queries and will cause the prefetch to be skipped.
  • + *
  • This feature is disabled by default and must be enabled + * via {@code ojp.server.nextPageCache.enabled=true}.
  • + *
+ * + *

Thread safety

+ * All public methods are thread-safe. The internal cache uses a + * {@link ConcurrentHashMap} and prefetch threads are Java 21 virtual threads. + */ +@Slf4j +public class NextPagePrefetchCache { + + private final boolean enabled; + private final int maxEntries; + private final long ttlMs; + private final long prefetchWaitTimeoutMs; + + /** + * Maps the (trimmed) next-page SQL to the asynchronous result of the prefetch. + * A {@code null} value inside the future signals that caching was skipped + * (e.g., CLOB columns detected). + */ + private final ConcurrentHashMap> cache + = new ConcurrentHashMap<>(); + + /** + * Creates a new cache instance. + * + * @param enabled whether the feature is enabled + * @param maxEntries maximum number of entries to keep (oldest removed first) + * @param ttlSeconds time-to-live for each entry in seconds + * @param prefetchWaitTimeoutMs max time (ms) to wait for an in-progress prefetch + * before falling back to a live DB query + */ + public NextPagePrefetchCache(boolean enabled, int maxEntries, + long ttlSeconds, long prefetchWaitTimeoutMs) { + this.enabled = enabled; + this.maxEntries = maxEntries; + this.ttlMs = ttlSeconds * 1000L; + this.prefetchWaitTimeoutMs = prefetchWaitTimeoutMs; + } + + /** + * Returns {@code true} when this cache is enabled. + */ + public boolean isEnabled() { + return enabled; + } + + // ----------------------------------------------------------------- + // Cache read + // ----------------------------------------------------------------- + + /** + * Retrieves the cached page for the given SQL, waiting up to + * {@code prefetchWaitTimeoutMs} when the prefetch is still in progress. + * + *

Returns an empty Optional when:

+ *
    + *
  • no entry exists for {@code sql}
  • + *
  • the entry is expired
  • + *
  • the prefetch failed, returned a null result (e.g., CLOB detected), or timed out
  • + *
+ * + *

The entry is removed from the cache after a successful retrieval (single-use + * semantics) so that concurrent requests for the same page can each independently + * obtain the result and start the next prefetch.

+ * + * @param sql the exact paginated SQL sent by the client + * @return an Optional containing the cached page, or empty if unavailable + */ + public Optional getIfReady(String sql) { + String key = normalizeKey(sql); + CompletableFuture future = cache.get(key); + if (future == null) { + return Optional.empty(); + } + + try { + CachedPage page = future.get(prefetchWaitTimeoutMs, TimeUnit.MILLISECONDS); + // Remove after use (single-use semantics; if another thread also grabs + // the same entry concurrently, it gets a copy of the same data). + cache.remove(key, future); + + if (page == null) { + log.debug("Prefetch for '{}' returned no-cache result (e.g. CLOB columns)", abbreviate(sql)); + return Optional.empty(); + } + if (page.isExpired(ttlMs)) { + log.debug("Cached page for '{}' has expired", abbreviate(sql)); + return Optional.empty(); + } + log.debug("Cache HIT for '{}' ({} rows)", abbreviate(sql), page.getRows().size()); + return Optional.of(page); + + } catch (java.util.concurrent.TimeoutException e) { + log.debug("Prefetch for '{}' did not complete within {}ms – falling back to live query", + abbreviate(sql), prefetchWaitTimeoutMs); + return Optional.empty(); + } catch (InterruptedException e) { + Thread.currentThread().interrupt(); + log.warn("Interrupted while waiting for prefetch of '{}'", abbreviate(sql)); + return Optional.empty(); + } catch (Exception e) { + log.warn("Prefetch for '{}' failed: {}", abbreviate(sql), e.getMessage()); + cache.remove(key, future); + return Optional.empty(); + } + } + + // ----------------------------------------------------------------- + // Cache write / prefetch trigger + // ----------------------------------------------------------------- + + /** + * Starts an asynchronous prefetch of {@code nextPageSql} on a virtual thread. + * + *

The method returns immediately. If an entry for {@code nextPageSql} + * already exists (either in-progress or completed), no new prefetch is started. + * Entries are evicted lazily when the cache exceeds {@code maxEntries}.

+ * + *

BLOB/CLOB parameters are not supported; if any parameter has type + * {@code BLOB} or {@code CLOB} the prefetch is silently skipped.

+ * + * @param dataSource the DataSource from which to obtain a dedicated prefetch connection + * @param nextPageSql the SQL for the next page (produced by {@link PaginationDetector#buildNextPageSql}) + * @param params the query parameters (may be null or empty for non-prepared queries) + */ + public void prefetchAsync(DataSource dataSource, String nextPageSql, List params) { + if (!enabled || dataSource == null || nextPageSql == null) { + return; + } + + // Skip if any parameter is a LOB reference (session-scoped, can't be used in prefetch) + if (params != null && params.stream().anyMatch(NextPagePrefetchCache::isLobParameter)) { + log.debug("Skipping prefetch – query contains LOB parameters"); + return; + } + + String key = normalizeKey(nextPageSql); + + // Don't prefetch if already in-progress or completed + if (cache.containsKey(key)) { + log.debug("Prefetch already in progress/completed for '{}'", abbreviate(nextPageSql)); + return; + } + + // Evict stale entries before inserting to respect max-size + if (cache.size() >= maxEntries) { + evictExpiredOrCompleted(); + } + + CompletableFuture future = new CompletableFuture<>(); + // putIfAbsent avoids a race where two callers try to start the same prefetch + if (cache.putIfAbsent(key, future) != null) { + // Another thread won the race + return; + } + + log.debug("Starting prefetch for '{}'", abbreviate(nextPageSql)); + + List paramsCopy = params == null ? List.of() : List.copyOf(params); + + Thread.ofVirtual().name("ojp-next-page-prefetch").start(() -> { + try (Connection conn = dataSource.getConnection()) { + CachedPage page = executeAndReadAllRows(conn, nextPageSql, paramsCopy); + future.complete(page); // null signals "skip cache" + log.debug("Prefetch completed for '{}' ({} rows cached)", + abbreviate(nextPageSql), + page != null ? 
page.getRows().size() : 0); + } catch (Exception e) { + log.warn("Prefetch failed for '{}': {}", abbreviate(nextPageSql), e.getMessage()); + future.completeExceptionally(e); + cache.remove(key, future); + } + }); + } + + // ----------------------------------------------------------------- + // Internal helpers + // ----------------------------------------------------------------- + + /** + * Executes {@code sql} using the given connection and materialises all result + * rows into a {@link CachedPage}. + * + *

Returns {@code null} when caching should be skipped (CLOB columns detected).

+ */ + private static CachedPage executeAndReadAllRows(Connection conn, String sql, + List params) throws SQLException { + ResultSet rs; + if (params.isEmpty()) { + Statement stmt = conn.createStatement(); + rs = stmt.executeQuery(sql); + } else { + PreparedStatement ps = conn.prepareStatement(sql); + setNonLobParameters(ps, params); + rs = ps.executeQuery(); + } + + return readAllRows(rs); + } + + /** + * Materialises all rows from {@code rs}. Returns {@code null} when the + * result set contains CLOB/NCLOB columns (caching is not supported for those). + */ + private static CachedPage readAllRows(ResultSet rs) throws SQLException { + ResultSetMetaData meta = rs.getMetaData(); + int colCount = meta.getColumnCount(); + + // Collect column labels + List labels = new ArrayList<>(colCount); + for (int i = 1; i <= colCount; i++) { + labels.add(meta.getColumnName(i)); + } + + // Skip caching if CLOB / NCLOB columns are present + for (int i = 1; i <= colCount; i++) { + int sqlType = meta.getColumnType(i); + if (sqlType == Types.CLOB || sqlType == Types.NCLOB) { + log.debug("Skipping cache – CLOB/NCLOB column detected at index {}", i); + return null; + } + } + + // Read all rows eagerly; convert binary types to byte arrays + List rows = new ArrayList<>(); + while (rs.next()) { + Object[] row = new Object[colCount]; + for (int i = 0; i < colCount; i++) { + row[i] = readColumnValue(rs, i + 1, meta.getColumnType(i + 1)); + } + rows.add(row); + } + + return new CachedPage(labels, rows); + } + + /** + * Reads a single column value, eagerly materialising BLOB / binary data as + * {@code byte[]} so that it remains valid after the connection is closed. 
+ */ + private static Object readColumnValue(ResultSet rs, int col, int sqlType) throws SQLException { + switch (sqlType) { + case Types.BLOB: + case Types.LONGVARBINARY: { + java.sql.Blob blob = rs.getBlob(col); + if (blob == null) { + return null; + } + try { + return blob.getBinaryStream().readAllBytes(); + } catch (java.io.IOException e) { + throw new SQLException("Failed to read BLOB data", e); + } + } + case Types.VARBINARY: + case Types.BINARY: + return rs.getBytes(col); + case Types.DATE: + return rs.getDate(col); + case Types.TIMESTAMP: + return rs.getTimestamp(col); + default: + return rs.getObject(col); + } + } + + /** + * Sets non-LOB parameters on a PreparedStatement using the parameter list. + * Only handles basic JDBC types (INT, LONG, STRING, DOUBLE, etc.). + * LOB parameters are rejected before this method is called. + */ + private static void setNonLobParameters(PreparedStatement ps, + List params) throws SQLException { + for (Parameter param : params) { + int idx = param.getIndex(); + if (param.getValues().isEmpty()) { + ps.setNull(idx, java.sql.Types.NULL); + continue; + } + Object value = param.getValues().get(0); + ParameterType type = param.getType(); + + switch (type) { + case INT -> ps.setInt(idx, (int) value); + case SHORT -> ps.setShort(idx, ((Number) value).shortValue()); + case LONG -> ps.setLong(idx, (long) value); + case DOUBLE -> ps.setDouble(idx, (double) value); + case FLOAT -> ps.setFloat(idx, (float) value); + case BOOLEAN -> ps.setBoolean(idx, (boolean) value); + case STRING -> ps.setString(idx, (String) value); + case BIG_DECIMAL -> ps.setBigDecimal(idx, (BigDecimal) value); + case DATE -> ps.setDate(idx, (Date) value); + case TIME -> ps.setTime(idx, (Time) value); + case TIMESTAMP -> ps.setTimestamp(idx, (Timestamp) value); + case BYTES -> ps.setBytes(idx, (byte[]) value); + case NULL -> ps.setNull(idx, (int) value); + default -> ps.setObject(idx, value); + } + } + } + + /** Returns true for parameter types that reference 
session-scoped LOB objects. */ + private static boolean isLobParameter(Parameter param) { + ParameterType type = param.getType(); + return type == ParameterType.BLOB || type == ParameterType.CLOB; + } + + /** + * Normalises a SQL string for use as a cache key: + * strips leading/trailing whitespace and folds to lower-case so that + * minor formatting differences do not result in cache misses. + */ + private static String normalizeKey(String sql) { + return sql.trim().toLowerCase(java.util.Locale.ROOT); + } + + /** Returns a safe short preview of an SQL string for log messages. */ + private static String abbreviate(String sql) { + if (sql == null) { + return ""; + } + return sql.length() <= 80 ? sql : sql.substring(0, 77) + "..."; + } + + /** + * Removes cache entries that are either expired or whose future has completed + * exceptionally. Called before inserting a new entry to bound cache size. + */ + private void evictExpiredOrCompleted() { + cache.entrySet().removeIf(entry -> { + CompletableFuture f = entry.getValue(); + if (!f.isDone()) { + return false; // still in progress – keep it + } + if (f.isCompletedExceptionally()) { + return true; // failed – evict + } + CachedPage page = f.getNow(null); + return page == null || page.isExpired(ttlMs); + }); + } +} diff --git a/ojp-server/src/main/java/org/openjproxy/grpc/server/paging/PageInfo.java b/ojp-server/src/main/java/org/openjproxy/grpc/server/paging/PageInfo.java new file mode 100644 index 000000000..65680a384 --- /dev/null +++ b/ojp-server/src/main/java/org/openjproxy/grpc/server/paging/PageInfo.java @@ -0,0 +1,49 @@ +package org.openjproxy.grpc.server.paging; + +/** + * Holds pagination information extracted from a SQL query. + * Supports multi-dialect pagination (LIMIT/OFFSET, FETCH NEXT, ROWNUM, etc.) 
+ */ +public class PageInfo { + + private final long currentOffset; + private final long pageSize; + + public PageInfo(long currentOffset, long pageSize) { + this.currentOffset = currentOffset; + this.pageSize = pageSize; + } + + /** + * Returns the OFFSET value for the current page. + */ + public long getCurrentOffset() { + return currentOffset; + } + + /** + * Returns the number of rows per page (LIMIT / FETCH size). + */ + public long getPageSize() { + return pageSize; + } + + /** + * Returns the OFFSET value for the next page. + */ + public long getNextPageOffset() { + return currentOffset + pageSize; + } + + /** + * Returns true if this is the first page (offset == 0). + */ + public boolean isFirstPage() { + return currentOffset == 0; + } + + @Override + public String toString() { + return "PageInfo{currentOffset=" + currentOffset + ", pageSize=" + pageSize + "}"; + } +} diff --git a/ojp-server/src/main/java/org/openjproxy/grpc/server/paging/PaginationDetector.java b/ojp-server/src/main/java/org/openjproxy/grpc/server/paging/PaginationDetector.java new file mode 100644 index 000000000..3e77f320d --- /dev/null +++ b/ojp-server/src/main/java/org/openjproxy/grpc/server/paging/PaginationDetector.java @@ -0,0 +1,201 @@ +package org.openjproxy.grpc.server.paging; + +import java.util.Optional; +import java.util.regex.Matcher; +import java.util.regex.Pattern; + +/** + * Detects pagination syntax in SQL queries across multiple database dialects and + * provides utilities to generate the next-page SQL. + * + *

Supported dialects:

+ *
    + *
  • PostgreSQL / MySQL / SQLite: {@code LIMIT n OFFSET m} or {@code LIMIT n}
  • + *
  • MySQL shorthand: {@code LIMIT m, n} (OFFSET m, page-size n)
  • + *
  • SQL Server / Oracle 12c+ / DB2: + * {@code OFFSET m ROWS FETCH NEXT n ROWS ONLY}
  • + *
  • DB2 / Oracle first-page: {@code FETCH FIRST n ROWS ONLY}
  • + *
+ */ +public class PaginationDetector { + + // ----------------------------------------------------------------- + // Compiled patterns (immutable, thread-safe) + // ----------------------------------------------------------------- + + /** + * Pattern 1 – LIMIT n OFFSET m (PostgreSQL, MySQL ≥5.7, SQLite) + * Groups: (1)=limit, (2)=offset + */ + private static final Pattern LIMIT_OFFSET = Pattern.compile( + "(?i)\\bLIMIT\\s+(\\d+)\\s+OFFSET\\s+(\\d+)\\b" + ); + + /** + * Pattern 2 – OFFSET m ROWS? FETCH NEXT|FIRST n ROWS? ONLY (SQL Server, Oracle 12c+, DB2) + * Groups: (1)=offset, (2)=fetch-size + */ + private static final Pattern OFFSET_FETCH = Pattern.compile( + "(?i)\\bOFFSET\\s+(\\d+)\\s+ROWS?\\s+FETCH\\s+(?:NEXT|FIRST)\\s+(\\d+)\\s+ROWS?\\s+ONLY\\b" + ); + + /** + * Pattern 3 – LIMIT m, n (MySQL shorthand: first arg = OFFSET, second arg = page-size) + * Groups: (1)=offset, (2)=limit + */ + private static final Pattern LIMIT_COMMA = Pattern.compile( + "(?i)\\bLIMIT\\s+(\\d+)\\s*,\\s*(\\d+)\\b" + ); + + /** + * Pattern 4 – FETCH NEXT|FIRST n ROWS? ONLY, without preceding OFFSET (first page) + * Groups: (1)=fetch-size + */ + private static final Pattern FETCH_ONLY = Pattern.compile( + "(?i)\\bFETCH\\s+(?:NEXT|FIRST)\\s+(\\d+)\\s+ROWS?\\s+ONLY\\b" + ); + + /** + * Pattern 5 – standalone LIMIT n with no OFFSET anywhere (first page) + * Groups: (1)=limit + */ + private static final Pattern LIMIT_ONLY = Pattern.compile( + "(?i)\\bLIMIT\\s+(\\d+)\\b" + ); + + /** Used to detect any OFFSET keyword in the query (guards Pattern 5 usage). 
*/ + private static final Pattern HAS_OFFSET = Pattern.compile( + "(?i)\\bOFFSET\\b" + ); + + // Private constructor – static utility class + private PaginationDetector() { + } + + // ----------------------------------------------------------------- + // Public API + // ----------------------------------------------------------------- + + /** + * Detects whether {@code sql} contains a pagination clause and returns the + * corresponding {@link PageInfo}. Returns an empty Optional when no + * pagination is detected or when the SQL is null/blank. + * + *

+ * <p>Patterns are evaluated in priority order; the first match wins.</p>

+ * + * @param sql the SQL string to inspect + * @return an Optional containing page information, or empty if not paginated + */ + public static Optional detect(String sql) { + if (sql == null || sql.isBlank()) { + return Optional.empty(); + } + + // Pattern 1: LIMIT n OFFSET m + Matcher m1 = LIMIT_OFFSET.matcher(sql); + if (m1.find()) { + long limit = Long.parseLong(m1.group(1)); + long offset = Long.parseLong(m1.group(2)); + return Optional.of(new PageInfo(offset, limit)); + } + + // Pattern 2: OFFSET m ROWS FETCH NEXT/FIRST n ROWS ONLY + Matcher m2 = OFFSET_FETCH.matcher(sql); + if (m2.find()) { + long offset = Long.parseLong(m2.group(1)); + long fetchSize = Long.parseLong(m2.group(2)); + return Optional.of(new PageInfo(offset, fetchSize)); + } + + // Pattern 3: LIMIT m, n (MySQL shorthand) + Matcher m3 = LIMIT_COMMA.matcher(sql); + if (m3.find()) { + long offset = Long.parseLong(m3.group(1)); + long limit = Long.parseLong(m3.group(2)); + return Optional.of(new PageInfo(offset, limit)); + } + + // Pattern 4: FETCH FIRST/NEXT n ROWS ONLY (first page, offset = 0) + // Only match when there is no OFFSET clause in the same query + if (!HAS_OFFSET.matcher(sql).find()) { + Matcher m4 = FETCH_ONLY.matcher(sql); + if (m4.find()) { + long fetchSize = Long.parseLong(m4.group(1)); + return Optional.of(new PageInfo(0, fetchSize)); + } + } + + // Pattern 5: standalone LIMIT n (first page, offset = 0) + // Only match when there is no OFFSET clause in the same query + if (!HAS_OFFSET.matcher(sql).find()) { + Matcher m5 = LIMIT_ONLY.matcher(sql); + if (m5.find()) { + long limit = Long.parseLong(m5.group(1)); + return Optional.of(new PageInfo(0, limit)); + } + } + + return Optional.empty(); + } + + /** + * Builds the SQL for the next page by incrementing the OFFSET + * (or inserting one when absent) in the given SQL string. + * + *

+ * <p>The method applies the same pattern-priority order as {@link #detect}.
+ * Returns {@code null} when the next-page SQL cannot be determined.</p>

+ * + * @param sql the original paginated SQL + * @param pageInfo the page information returned by {@link #detect} + * @return the next-page SQL, or {@code null} if transformation is not possible + */ + public static String buildNextPageSql(String sql, PageInfo pageInfo) { + if (sql == null || pageInfo == null) { + return null; + } + + long nextOffset = pageInfo.getNextPageOffset(); + + // Pattern 1: replace OFFSET value in LIMIT n OFFSET m + Matcher m1 = LIMIT_OFFSET.matcher(sql); + if (m1.find()) { + // group(2) is the offset value; replace only that token + return sql.substring(0, m1.start(2)) + nextOffset + sql.substring(m1.end(2)); + } + + // Pattern 2: replace OFFSET value in OFFSET m ROWS FETCH ... ONLY + Matcher m2 = OFFSET_FETCH.matcher(sql); + if (m2.find()) { + // group(1) is the offset value + return sql.substring(0, m2.start(1)) + nextOffset + sql.substring(m2.end(1)); + } + + // Pattern 3: replace offset in LIMIT m, n + Matcher m3 = LIMIT_COMMA.matcher(sql); + if (m3.find()) { + // group(1) is the offset value (first number in LIMIT m, n) + return sql.substring(0, m3.start(1)) + nextOffset + sql.substring(m3.end(1)); + } + + // Pattern 4: FETCH FIRST/NEXT n ROWS ONLY without OFFSET → insert OFFSET before FETCH + if (!HAS_OFFSET.matcher(sql).find()) { + Matcher m4 = FETCH_ONLY.matcher(sql); + if (m4.find()) { + int fetchStart = m4.start(); + return sql.substring(0, fetchStart) + + "OFFSET " + nextOffset + " ROWS " + + sql.substring(fetchStart); + } + } + + // Pattern 5: standalone LIMIT n → append OFFSET n + if (!HAS_OFFSET.matcher(sql).find()) { + Matcher m5 = LIMIT_ONLY.matcher(sql); + if (m5.find()) { + return sql + " OFFSET " + nextOffset; + } + } + + return null; + } +} diff --git a/ojp-server/src/test/java/org/openjproxy/grpc/server/paging/NextPagePrefetchCacheTest.java b/ojp-server/src/test/java/org/openjproxy/grpc/server/paging/NextPagePrefetchCacheTest.java new file mode 100644 index 000000000..cc5e28d9e --- /dev/null +++ 
b/ojp-server/src/test/java/org/openjproxy/grpc/server/paging/NextPagePrefetchCacheTest.java @@ -0,0 +1,241 @@ +package org.openjproxy.grpc.server.paging; + +import org.junit.jupiter.api.Test; + +import javax.sql.DataSource; +import java.sql.Connection; +import java.sql.ResultSet; +import java.sql.ResultSetMetaData; +import java.sql.Statement; +import java.sql.Types; +import java.util.List; +import java.util.Optional; + +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertFalse; +import static org.junit.jupiter.api.Assertions.assertTrue; +import static org.mockito.ArgumentMatchers.anyString; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; + +/** + * Unit tests for {@link NextPagePrefetchCache}. + */ +class NextPagePrefetchCacheTest { + + // ---------------------------------------------------------------- + // Helpers + // ---------------------------------------------------------------- + + private static NextPagePrefetchCache enabledCache() { + return new NextPagePrefetchCache(true, 100, 60, 5000); + } + + private static NextPagePrefetchCache disabledCache() { + return new NextPagePrefetchCache(false, 100, 60, 5000); + } + + /** + * Creates a mock DataSource that returns a ResultSet with one row + * containing a single integer column named "id" with value 42. 
+ */ + private static DataSource mockDataSource(int rowCount) throws Exception { + ResultSetMetaData meta = mock(ResultSetMetaData.class); + when(meta.getColumnCount()).thenReturn(1); + when(meta.getColumnName(1)).thenReturn("id"); + when(meta.getColumnType(1)).thenReturn(Types.INTEGER); + + ResultSet rs = mock(ResultSet.class); + when(rs.getMetaData()).thenReturn(meta); + + // Simulate 'rowCount' rows + if (rowCount == 0) { + when(rs.next()).thenReturn(false); + } else { + Boolean[] nexts = new Boolean[rowCount + 1]; + for (int i = 0; i < rowCount; i++) nexts[i] = true; + nexts[rowCount] = false; + Boolean first = nexts[0]; + Boolean[] rest = new Boolean[rowCount]; + System.arraycopy(nexts, 1, rest, 0, rowCount); + when(rs.next()).thenReturn(first, rest); + } + when(rs.getObject(1)).thenReturn(42); + + Statement stmt = mock(Statement.class); + when(stmt.executeQuery(anyString())).thenReturn(rs); + + Connection conn = mock(Connection.class); + when(conn.createStatement()).thenReturn(stmt); + + DataSource ds = mock(DataSource.class); + when(ds.getConnection()).thenReturn(conn); + + return ds; + } + + // ---------------------------------------------------------------- + // isEnabled() + // ---------------------------------------------------------------- + + @Test + void isEnabled_returnsTrueWhenConstructedEnabled() { + assertTrue(enabledCache().isEnabled()); + } + + @Test + void isEnabled_returnsFalseWhenConstructedDisabled() { + assertFalse(disabledCache().isEnabled()); + } + + // ---------------------------------------------------------------- + // getIfReady() – no entry + // ---------------------------------------------------------------- + + @Test + void getIfReady_returnsEmpty_whenNothingCached() { + NextPagePrefetchCache cache = enabledCache(); + Optional result = cache.getIfReady("SELECT * FROM t LIMIT 10 OFFSET 10"); + assertFalse(result.isPresent(), "Expected empty when nothing is cached"); + } + + // 
---------------------------------------------------------------- + // prefetchAsync() – disabled cache + // ---------------------------------------------------------------- + + @Test + void prefetchAsync_doesNothing_whenDisabled() throws Exception { + NextPagePrefetchCache cache = disabledCache(); + DataSource ds = mockDataSource(1); + + cache.prefetchAsync(ds, "SELECT * FROM t LIMIT 10 OFFSET 10", List.of()); + + // Cache should still be empty + assertFalse(cache.getIfReady("SELECT * FROM t LIMIT 10 OFFSET 10").isPresent()); + } + + @Test + void prefetchAsync_doesNothing_whenDataSourceIsNull() { + NextPagePrefetchCache cache = enabledCache(); + cache.prefetchAsync(null, "SELECT * FROM t LIMIT 10 OFFSET 10", List.of()); + assertFalse(cache.getIfReady("SELECT * FROM t LIMIT 10 OFFSET 10").isPresent()); + } + + @Test + void prefetchAsync_doesNothing_whenSqlIsNull() throws Exception { + NextPagePrefetchCache cache = enabledCache(); + DataSource ds = mockDataSource(1); + cache.prefetchAsync(ds, null, List.of()); + // Nothing to assert – just must not throw + } + + // ---------------------------------------------------------------- + // prefetchAsync() + getIfReady() – happy path + // ---------------------------------------------------------------- + + @Test + void prefetchAndGet_returnsRows_forSimpleQuery() throws Exception { + NextPagePrefetchCache cache = enabledCache(); + DataSource ds = mockDataSource(3); + + String sql = "SELECT id FROM t LIMIT 10 OFFSET 10"; + cache.prefetchAsync(ds, sql, List.of()); + + // Wait for the prefetch (virtual thread) to complete + Optional result = cache.getIfReady(sql); + + assertTrue(result.isPresent(), "Expected cached page"); + CachedPage page = result.get(); + assertEquals(List.of("id"), page.getColumnLabels()); + assertEquals(3, page.getRows().size()); + } + + @Test + void prefetchAndGet_cacheKeyIsCaseAndWhitespaceInsensitive() throws Exception { + NextPagePrefetchCache cache = enabledCache(); + DataSource ds = 
mockDataSource(1); + + // Prefetch with one form of the SQL + cache.prefetchAsync(ds, "SELECT id FROM t LIMIT 10 OFFSET 10", List.of()); + + // Retrieve with slightly different casing/whitespace (should normalise to same key) + Optional result = cache.getIfReady(" SELECT ID FROM T LIMIT 10 OFFSET 10 "); + + assertTrue(result.isPresent(), "Keys should normalise to the same entry"); + } + + // ---------------------------------------------------------------- + // Single-use semantics + // ---------------------------------------------------------------- + + @Test + void getIfReady_returnsSingleUse_secondCallEmpty() throws Exception { + NextPagePrefetchCache cache = enabledCache(); + DataSource ds = mockDataSource(2); + + String sql = "SELECT id FROM t LIMIT 10 OFFSET 10"; + cache.prefetchAsync(ds, sql, List.of()); + + Optional first = cache.getIfReady(sql); + assertTrue(first.isPresent(), "First retrieval should succeed"); + + // Second retrieval should return empty (entry was removed after first use) + Optional second = cache.getIfReady(sql); + assertFalse(second.isPresent(), "Second retrieval should be empty (single-use)"); + } + + // ---------------------------------------------------------------- + // Expiry + // ---------------------------------------------------------------- + + @Test + void getIfReady_returnsEmpty_whenEntryExpired() throws Exception { + // TTL = 0 seconds → immediately expired + NextPagePrefetchCache cache = new NextPagePrefetchCache(true, 100, 0, 5000); + DataSource ds = mockDataSource(1); + + String sql = "SELECT id FROM t LIMIT 10 OFFSET 10"; + cache.prefetchAsync(ds, sql, List.of()); + + // Wait a bit to ensure the prefetch completes and the entry is expired + Thread.sleep(50); + + Optional result = cache.getIfReady(sql); + assertFalse(result.isPresent(), "Entry should be expired with TTL=0"); + } + + // ---------------------------------------------------------------- + // No-duplicate prefetch + // 
---------------------------------------------------------------- + + @Test + void prefetchAsync_doesNotStartDuplicate_whenKeyAlreadyPresent() throws Exception { + NextPagePrefetchCache cache = enabledCache(); + DataSource ds = mockDataSource(1); + + String sql = "SELECT id FROM t LIMIT 10 OFFSET 10"; + cache.prefetchAsync(ds, sql, List.of()); // first start + cache.prefetchAsync(ds, sql, List.of()); // duplicate – should be ignored + + // Retrieve to confirm the entry exists (one execution) + Optional result = cache.getIfReady(sql); + assertTrue(result.isPresent()); + } + + // ---------------------------------------------------------------- + // CachedPage + // ---------------------------------------------------------------- + + @Test + void cachedPage_isNotExpired_whenJustCreated() { + CachedPage page = new CachedPage(List.of("col"), List.of()); + assertFalse(page.isExpired(60_000), "Freshly created page should not be expired"); + } + + @Test + void cachedPage_isExpired_withZeroTtl() throws Exception { + CachedPage page = new CachedPage(List.of("col"), List.of()); + Thread.sleep(10); // small delay so currentTime > createdAt + assertTrue(page.isExpired(0), "Page should be expired with TTL=0"); + } +} diff --git a/ojp-server/src/test/java/org/openjproxy/grpc/server/paging/PaginationDetectorTest.java b/ojp-server/src/test/java/org/openjproxy/grpc/server/paging/PaginationDetectorTest.java new file mode 100644 index 000000000..d992e48b7 --- /dev/null +++ b/ojp-server/src/test/java/org/openjproxy/grpc/server/paging/PaginationDetectorTest.java @@ -0,0 +1,277 @@ +package org.openjproxy.grpc.server.paging; + +import org.junit.jupiter.api.Test; +import org.junit.jupiter.params.ParameterizedTest; +import org.junit.jupiter.params.provider.CsvSource; + +import java.util.Optional; + +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertFalse; +import static org.junit.jupiter.api.Assertions.assertNull; +import static 
org.junit.jupiter.api.Assertions.assertTrue; + +/** + * Tests for {@link PaginationDetector}. + */ +class PaginationDetectorTest { + + // ---------------------------------------------------------------- + // detect() – positive cases + // ---------------------------------------------------------------- + + @Test + void detectLimitOffset_returnsCorrectPageInfo() { + String sql = "SELECT id, name FROM users ORDER BY id LIMIT 10 OFFSET 20"; + Optional result = PaginationDetector.detect(sql); + + assertTrue(result.isPresent(), "Expected pagination to be detected"); + assertEquals(20, result.get().getCurrentOffset()); + assertEquals(10, result.get().getPageSize()); + assertEquals(30, result.get().getNextPageOffset()); + } + + @Test + void detectLimitOffset_firstPage() { + String sql = "SELECT * FROM orders LIMIT 25 OFFSET 0"; + Optional result = PaginationDetector.detect(sql); + + assertTrue(result.isPresent()); + assertEquals(0, result.get().getCurrentOffset()); + assertEquals(25, result.get().getPageSize()); + assertTrue(result.get().isFirstPage()); + } + + @Test + void detectOffsetFetch_sqlServer() { + String sql = "SELECT id, name FROM users ORDER BY id OFFSET 30 ROWS FETCH NEXT 10 ROWS ONLY"; + Optional result = PaginationDetector.detect(sql); + + assertTrue(result.isPresent()); + assertEquals(30, result.get().getCurrentOffset()); + assertEquals(10, result.get().getPageSize()); + } + + @Test + void detectOffsetFetch_fetchFirst() { + String sql = "SELECT * FROM items OFFSET 0 ROWS FETCH FIRST 50 ROWS ONLY"; + Optional result = PaginationDetector.detect(sql); + + assertTrue(result.isPresent()); + assertEquals(0, result.get().getCurrentOffset()); + assertEquals(50, result.get().getPageSize()); + } + + @Test + void detectLimitComma_mysqlShorthand() { + // MySQL: LIMIT offset, pageSize (first arg = rows to skip, second = rows to return) + String sql = "SELECT * FROM products LIMIT 20, 10"; + Optional result = PaginationDetector.detect(sql); + + 
assertTrue(result.isPresent()); + assertEquals(20, result.get().getCurrentOffset()); + assertEquals(10, result.get().getPageSize()); + } + + @Test + void detectFetchOnly_noOffset_firstPage() { + String sql = "SELECT TOP_N.* FROM (SELECT * FROM t) TOP_N FETCH FIRST 10 ROWS ONLY"; + Optional result = PaginationDetector.detect(sql); + + assertTrue(result.isPresent()); + assertEquals(0, result.get().getCurrentOffset()); + assertEquals(10, result.get().getPageSize()); + assertTrue(result.get().isFirstPage()); + } + + @Test + void detectFetchNextOnly_noOffset_firstPage() { + String sql = "SELECT * FROM t FETCH NEXT 5 ROWS ONLY"; + Optional result = PaginationDetector.detect(sql); + + assertTrue(result.isPresent()); + assertEquals(0, result.get().getCurrentOffset()); + assertEquals(5, result.get().getPageSize()); + } + + @Test + void detectLimitOnly_noOffset_firstPage() { + String sql = "SELECT * FROM users WHERE active = 1 LIMIT 15"; + Optional result = PaginationDetector.detect(sql); + + assertTrue(result.isPresent()); + assertEquals(0, result.get().getCurrentOffset()); + assertEquals(15, result.get().getPageSize()); + } + + @Test + void detectLimitOffset_caseInsensitive() { + String sql = "select id from foo limit 5 offset 10"; + Optional result = PaginationDetector.detect(sql); + + assertTrue(result.isPresent()); + assertEquals(10, result.get().getCurrentOffset()); + assertEquals(5, result.get().getPageSize()); + } + + // ---------------------------------------------------------------- + // detect() – negative cases + // ---------------------------------------------------------------- + + @Test + void detect_returnsEmpty_forNonPaginatedQuery() { + String sql = "SELECT id, name FROM users WHERE id = 1"; + Optional result = PaginationDetector.detect(sql); + + assertFalse(result.isPresent(), "Expected no pagination"); + } + + @Test + void detect_returnsEmpty_forNullSql() { + assertFalse(PaginationDetector.detect(null).isPresent()); + } + + @Test + void 
detect_returnsEmpty_forBlankSql() { + assertFalse(PaginationDetector.detect(" ").isPresent()); + } + + @Test + void detect_limitOnly_notMatchedWhenOffsetPresent() { + // LIMIT n with an OFFSET keyword somewhere else – should not match Pattern 5 + String sql = "SELECT * FROM t WHERE col > 0 LIMIT 10 OFFSET 5"; + Optional result = PaginationDetector.detect(sql); + + // Pattern 1 (LIMIT n OFFSET m) should match instead + assertTrue(result.isPresent()); + assertEquals(5, result.get().getCurrentOffset()); + assertEquals(10, result.get().getPageSize()); + } + + // ---------------------------------------------------------------- + // buildNextPageSql() – LIMIT / OFFSET + // ---------------------------------------------------------------- + + @Test + void buildNextPage_limitOffset_incrementsOffset() { + String sql = "SELECT id FROM users ORDER BY id LIMIT 10 OFFSET 0"; + PageInfo pageInfo = PaginationDetector.detect(sql).orElseThrow(); + + String nextPage = PaginationDetector.buildNextPageSql(sql, pageInfo); + + assertEquals("SELECT id FROM users ORDER BY id LIMIT 10 OFFSET 10", nextPage); + } + + @Test + void buildNextPage_limitOffset_secondPage_givesThirdPageSql() { + String sql = "SELECT id FROM users ORDER BY id LIMIT 10 OFFSET 10"; + PageInfo pageInfo = PaginationDetector.detect(sql).orElseThrow(); + + String nextPage = PaginationDetector.buildNextPageSql(sql, pageInfo); + + assertEquals("SELECT id FROM users ORDER BY id LIMIT 10 OFFSET 20", nextPage); + } + + // ---------------------------------------------------------------- + // buildNextPageSql() – OFFSET FETCH (SQL Server / Oracle) + // ---------------------------------------------------------------- + + @Test + void buildNextPage_offsetFetch_incrementsOffset() { + String sql = "SELECT id FROM t ORDER BY id OFFSET 0 ROWS FETCH NEXT 20 ROWS ONLY"; + PageInfo pageInfo = PaginationDetector.detect(sql).orElseThrow(); + + String nextPage = PaginationDetector.buildNextPageSql(sql, pageInfo); + + assertEquals("SELECT id 
FROM t ORDER BY id OFFSET 20 ROWS FETCH NEXT 20 ROWS ONLY", nextPage); + } + + @Test + void buildNextPage_offsetFetch_secondPage() { + String sql = "SELECT id FROM t ORDER BY id OFFSET 20 ROWS FETCH NEXT 20 ROWS ONLY"; + PageInfo pageInfo = PaginationDetector.detect(sql).orElseThrow(); + + String nextPage = PaginationDetector.buildNextPageSql(sql, pageInfo); + + assertEquals("SELECT id FROM t ORDER BY id OFFSET 40 ROWS FETCH NEXT 20 ROWS ONLY", nextPage); + } + + // ---------------------------------------------------------------- + // buildNextPageSql() – MySQL LIMIT m, n + // ---------------------------------------------------------------- + + @Test + void buildNextPage_limitComma_incrementsOffset() { + // MySQL LIMIT 0, 10: offset=0, pageSize=10 → next: offset=10 + String sql = "SELECT * FROM products LIMIT 0, 10"; + PageInfo pageInfo = PaginationDetector.detect(sql).orElseThrow(); + + String nextPage = PaginationDetector.buildNextPageSql(sql, pageInfo); + + assertEquals("SELECT * FROM products LIMIT 10, 10", nextPage); + } + + // ---------------------------------------------------------------- + // buildNextPageSql() – FETCH ONLY (first-page, no OFFSET) + // ---------------------------------------------------------------- + + @Test + void buildNextPage_fetchOnly_insertsOffset() { + String sql = "SELECT * FROM t FETCH FIRST 10 ROWS ONLY"; + PageInfo pageInfo = PaginationDetector.detect(sql).orElseThrow(); + + String nextPage = PaginationDetector.buildNextPageSql(sql, pageInfo); + + assertEquals("SELECT * FROM t OFFSET 10 ROWS FETCH FIRST 10 ROWS ONLY", nextPage); + } + + // ---------------------------------------------------------------- + // buildNextPageSql() – standalone LIMIT (first-page, no OFFSET) + // ---------------------------------------------------------------- + + @Test + void buildNextPage_limitOnly_appendsOffset() { + String sql = "SELECT * FROM users LIMIT 5"; + PageInfo pageInfo = PaginationDetector.detect(sql).orElseThrow(); + + String nextPage = 
PaginationDetector.buildNextPageSql(sql, pageInfo); + + assertEquals("SELECT * FROM users LIMIT 5 OFFSET 5", nextPage); + } + + // ---------------------------------------------------------------- + // buildNextPageSql() – edge cases + // ---------------------------------------------------------------- + + @Test + void buildNextPage_returnsNull_forNullSql() { + assertNull(PaginationDetector.buildNextPageSql(null, new PageInfo(0, 10))); + } + + @Test + void buildNextPage_returnsNull_forNullPageInfo() { + assertNull(PaginationDetector.buildNextPageSql("SELECT 1", null)); + } + + // ---------------------------------------------------------------- + // Parameterised – detect then build round-trip + // ---------------------------------------------------------------- + + @ParameterizedTest(name = "[{index}] {0}") + @CsvSource({ + "'SELECT a FROM t LIMIT 10 OFFSET 0', 10, 0, 10", + "'SELECT a FROM t LIMIT 10 OFFSET 10', 10, 10, 20", + // MySQL LIMIT m,n: first arg = offset, second arg = page-size + "'SELECT a FROM t LIMIT 5, 20', 20, 5, 25", + "'SELECT a FROM t OFFSET 0 ROWS FETCH NEXT 10 ROWS ONLY', 10, 0, 10", + "'SELECT a FROM t OFFSET 10 ROWS FETCH FIRST 10 ROWS ONLY', 10, 10, 20" + }) + void detectAndNextOffset(String sql, long pageSize, long currentOffset, long expectedNextOffset) { + Optional pageInfo = PaginationDetector.detect(sql); + + assertTrue(pageInfo.isPresent(), "Expected pagination in: " + sql); + assertEquals(pageSize, pageInfo.get().getPageSize(), "Page size mismatch"); + assertEquals(currentOffset, pageInfo.get().getCurrentOffset(), "Current offset mismatch"); + assertEquals(expectedNextOffset, pageInfo.get().getNextPageOffset(), "Next offset mismatch"); + } +} From eb9ac07840bc829ccb932b2c0db4172bcfe8c217 Mon Sep 17 00:00:00 2001 From: "copilot-swe-agent[bot]" <198982749+Copilot@users.noreply.github.com> Date: Tue, 17 Mar 2026 08:36:00 +0000 Subject: [PATCH 03/22] refactor: improve thread naming and reduce duplication in executeQueryInternal 
Co-authored-by: rrobetti <7221783+rrobetti@users.noreply.github.com> --- .../grpc/server/StatementServiceImpl.java | 15 +- .../server/paging/NextPagePrefetchCache.java | 13 +- .../NextPageCacheConfigurationTest.java | 151 ++++++++++++++++++ 3 files changed, 168 insertions(+), 11 deletions(-) create mode 100644 ojp-server/src/test/java/org/openjproxy/grpc/server/NextPageCacheConfigurationTest.java diff --git a/ojp-server/src/main/java/org/openjproxy/grpc/server/StatementServiceImpl.java b/ojp-server/src/main/java/org/openjproxy/grpc/server/StatementServiceImpl.java index ffb42dfdb..c99dafec9 100644 --- a/ojp-server/src/main/java/org/openjproxy/grpc/server/StatementServiceImpl.java +++ b/ojp-server/src/main/java/org/openjproxy/grpc/server/StatementServiceImpl.java @@ -319,20 +319,17 @@ private void executeQueryInternal(StatementRequest request, StreamObserver paramsCopy = params == null ? List.of() : List.copyOf(params); + // Include a safe SQL snippet in the thread name for easier thread-dump analysis + String threadName = "ojp-next-page-prefetch[" + abbreviate(nextPageSql, 40) + "]"; - Thread.ofVirtual().name("ojp-next-page-prefetch").start(() -> { + Thread.ofVirtual().name(threadName).start(() -> { try (Connection conn = dataSource.getConnection()) { CachedPage page = executeAndReadAllRows(conn, nextPageSql, paramsCopy); future.complete(page); // null signals "skip cache" @@ -364,10 +366,17 @@ private static String normalizeKey(String sql) { /** Returns a safe short preview of an SQL string for log messages. */ private static String abbreviate(String sql) { + return abbreviate(sql, 80); + } + + /** Returns a safe short preview of an SQL string, truncated to {@code maxLen} characters. */ + private static String abbreviate(String sql, int maxLen) { if (sql == null) { return ""; } - return sql.length() <= 80 ? 
sql : sql.substring(0, 77) + "..."; + // Remove newlines/tabs for single-line thread names + String singleLine = sql.replaceAll("[\\r\\n\\t]+", " ").trim(); + return singleLine.length() <= maxLen ? singleLine : singleLine.substring(0, maxLen - 3) + "..."; } /** diff --git a/ojp-server/src/test/java/org/openjproxy/grpc/server/NextPageCacheConfigurationTest.java b/ojp-server/src/test/java/org/openjproxy/grpc/server/NextPageCacheConfigurationTest.java new file mode 100644 index 000000000..bf0fbc12e --- /dev/null +++ b/ojp-server/src/test/java/org/openjproxy/grpc/server/NextPageCacheConfigurationTest.java @@ -0,0 +1,151 @@ +package org.openjproxy.grpc.server; + +import org.junit.jupiter.api.AfterEach; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Test; + +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertFalse; +import static org.junit.jupiter.api.Assertions.assertTrue; + +/** + * Tests for next-page prefetch cache configuration properties in {@link ServerConfiguration}. 
+ */ +class NextPageCacheConfigurationTest { + + private static final String ENABLED_KEY = "ojp.server.nextPageCache.enabled"; + private static final String TTL_KEY = "ojp.server.nextPageCache.ttlSeconds"; + private static final String MAX_ENTRIES_KEY = "ojp.server.nextPageCache.maxEntries"; + private static final String WAIT_TIMEOUT_MS_KEY = "ojp.server.nextPageCache.prefetchWaitTimeoutMs"; + + @BeforeEach + void clearProperties() { + System.clearProperty(ENABLED_KEY); + System.clearProperty(TTL_KEY); + System.clearProperty(MAX_ENTRIES_KEY); + System.clearProperty(WAIT_TIMEOUT_MS_KEY); + } + + @AfterEach + void cleanupProperties() { + System.clearProperty(ENABLED_KEY); + System.clearProperty(TTL_KEY); + System.clearProperty(MAX_ENTRIES_KEY); + System.clearProperty(WAIT_TIMEOUT_MS_KEY); + } + + // ---------------------------------------------------------------- + // Defaults + // ---------------------------------------------------------------- + + @Test + void defaultConfiguration_nextPageCacheIsDisabled() { + ServerConfiguration config = new ServerConfiguration(); + + assertFalse(config.isNextPageCacheEnabled(), + "Next-page cache must be disabled by default"); + } + + @Test + void defaultConfiguration_hasExpectedDefaultValues() { + ServerConfiguration config = new ServerConfiguration(); + + assertEquals(ServerConfiguration.DEFAULT_NEXT_PAGE_CACHE_TTL_SECONDS, + config.getNextPageCacheTtlSeconds(), "Default TTL mismatch"); + assertEquals(ServerConfiguration.DEFAULT_NEXT_PAGE_CACHE_MAX_ENTRIES, + config.getNextPageCacheMaxEntries(), "Default max-entries mismatch"); + assertEquals(ServerConfiguration.DEFAULT_NEXT_PAGE_CACHE_PREFETCH_WAIT_TIMEOUT_MS, + config.getNextPageCachePrefetchWaitTimeoutMs(), "Default prefetch-wait-timeout mismatch"); + } + + // ---------------------------------------------------------------- + // Enable via system property + // ---------------------------------------------------------------- + + @Test + void 
systemProperty_enabled_overridesDefault() { + System.setProperty(ENABLED_KEY, "true"); + + ServerConfiguration config = new ServerConfiguration(); + + assertTrue(config.isNextPageCacheEnabled()); + } + + @Test + void systemProperty_disabled_overridesDefault() { + System.setProperty(ENABLED_KEY, "false"); + + ServerConfiguration config = new ServerConfiguration(); + + assertFalse(config.isNextPageCacheEnabled()); + } + + // ---------------------------------------------------------------- + // Custom TTL + // ---------------------------------------------------------------- + + @Test + void systemProperty_ttlSeconds_isRespected() { + System.setProperty(TTL_KEY, "120"); + + ServerConfiguration config = new ServerConfiguration(); + + assertEquals(120L, config.getNextPageCacheTtlSeconds()); + } + + @Test + void systemProperty_invalidTtl_fallsBackToDefault() { + System.setProperty(TTL_KEY, "not-a-number"); + + ServerConfiguration config = new ServerConfiguration(); + + assertEquals(ServerConfiguration.DEFAULT_NEXT_PAGE_CACHE_TTL_SECONDS, + config.getNextPageCacheTtlSeconds()); + } + + // ---------------------------------------------------------------- + // Custom max entries + // ---------------------------------------------------------------- + + @Test + void systemProperty_maxEntries_isRespected() { + System.setProperty(MAX_ENTRIES_KEY, "250"); + + ServerConfiguration config = new ServerConfiguration(); + + assertEquals(250, config.getNextPageCacheMaxEntries()); + } + + @Test + void systemProperty_invalidMaxEntries_fallsBackToDefault() { + System.setProperty(MAX_ENTRIES_KEY, "invalid"); + + ServerConfiguration config = new ServerConfiguration(); + + assertEquals(ServerConfiguration.DEFAULT_NEXT_PAGE_CACHE_MAX_ENTRIES, + config.getNextPageCacheMaxEntries()); + } + + // ---------------------------------------------------------------- + // Custom prefetch wait timeout + // ---------------------------------------------------------------- + + @Test + void 
systemProperty_prefetchWaitTimeoutMs_isRespected() { + System.setProperty(WAIT_TIMEOUT_MS_KEY, "10000"); + + ServerConfiguration config = new ServerConfiguration(); + + assertEquals(10000L, config.getNextPageCachePrefetchWaitTimeoutMs()); + } + + @Test + void systemProperty_invalidPrefetchWaitTimeout_fallsBackToDefault() { + System.setProperty(WAIT_TIMEOUT_MS_KEY, "bad-value"); + + ServerConfiguration config = new ServerConfiguration(); + + assertEquals(ServerConfiguration.DEFAULT_NEXT_PAGE_CACHE_PREFETCH_WAIT_TIMEOUT_MS, + config.getNextPageCachePrefetchWaitTimeoutMs()); + } +} From 563e3f8b7eded31d1feb2c201e8b81f31abb9c5a Mon Sep 17 00:00:00 2001 From: "copilot-swe-agent[bot]" <198982749+Copilot@users.noreply.github.com> Date: Tue, 17 Mar 2026 12:28:03 +0000 Subject: [PATCH 04/22] feat: materialise CLOB/NCLOB return columns in prefetch cache as String Co-authored-by: rrobetti <7221783+rrobetti@users.noreply.github.com> --- .../server/paging/NextPagePrefetchCache.java | 73 ++++++---- .../paging/NextPagePrefetchCacheTest.java | 129 ++++++++++++++++++ 2 files changed, 176 insertions(+), 26 deletions(-) diff --git a/ojp-server/src/main/java/org/openjproxy/grpc/server/paging/NextPagePrefetchCache.java b/ojp-server/src/main/java/org/openjproxy/grpc/server/paging/NextPagePrefetchCache.java index 139e92f38..0694eb1a9 100644 --- a/ojp-server/src/main/java/org/openjproxy/grpc/server/paging/NextPagePrefetchCache.java +++ b/ojp-server/src/main/java/org/openjproxy/grpc/server/paging/NextPagePrefetchCache.java @@ -39,14 +39,16 @@ * falling back to a regular database query. * * - *

Limitations (first-pass implementation)

+ *

Materialised LOB data

+ * All column types are cached: *
    - *
  • CLOB / NCLOB columns are not cached – the prefetch skips storing the page.
  • - *
  • Parameters of type BLOB or CLOB (LOB references) are not supported in - * prefetch queries and will cause the prefetch to be skipped.
  • - *
  • This feature is disabled by default and must be enabled - * via {@code ojp.server.nextPageCache.enabled=true}.
  • + *
  • BLOB / LONGVARBINARY / VARBINARY / BINARY → materialized as {@code byte[]}
  • + *
  • CLOB / NCLOB / LONGVARCHAR / LONGNVARCHAR → materialized as {@code String}
  • + *
  • All other types → stored using {@code ResultSet.getObject()}
  • *
+ * Queries that use LOB session references as input parameters (i.e., parameters + * of type BLOB or CLOB that reference a session-scoped LOB object) are still skipped + * because those references cannot be transferred to a separate prefetch connection. * *

Thread safety

* All public methods are thread-safe. The internal cache uses a @@ -62,8 +64,6 @@ public class NextPagePrefetchCache { /** * Maps the (trimmed) next-page SQL to the asynchronous result of the prefetch. - * A {@code null} value inside the future signals that caching was skipped - * (e.g., CLOB columns detected). */ private final ConcurrentHashMap> cache = new ConcurrentHashMap<>(); @@ -104,7 +104,7 @@ public boolean isEnabled() { *
    *
  • no entry exists for {@code sql}
  • *
  • the entry is expired
  • - *
  • the prefetch failed, returned a null result (e.g., CLOB detected), or timed out
  • + *
  • the prefetch failed or timed out
  • *
* *

The entry is removed from the cache after a successful retrieval (single-use @@ -128,7 +128,7 @@ public Optional getIfReady(String sql) { cache.remove(key, future); if (page == null) { - log.debug("Prefetch for '{}' returned no-cache result (e.g. CLOB columns)", abbreviate(sql)); + log.debug("Prefetch for '{}' returned no-cache result", abbreviate(sql)); return Optional.empty(); } if (page.isExpired(ttlMs)) { @@ -230,8 +230,6 @@ public void prefetchAsync(DataSource dataSource, String nextPageSql, ListReturns {@code null} when caching should be skipped (CLOB columns detected).

*/ private static CachedPage executeAndReadAllRows(Connection conn, String sql, List params) throws SQLException { @@ -249,8 +247,8 @@ private static CachedPage executeAndReadAllRows(Connection conn, String sql, } /** - * Materialises all rows from {@code rs}. Returns {@code null} when the - * result set contains CLOB/NCLOB columns (caching is not supported for those). + * Materialises all rows from {@code rs}, eagerly reading all column values + * (including LOB types) into in-memory representations. */ private static CachedPage readAllRows(ResultSet rs) throws SQLException { ResultSetMetaData meta = rs.getMetaData(); @@ -262,16 +260,7 @@ private static CachedPage readAllRows(ResultSet rs) throws SQLException { labels.add(meta.getColumnName(i)); } - // Skip caching if CLOB / NCLOB columns are present - for (int i = 1; i <= colCount; i++) { - int sqlType = meta.getColumnType(i); - if (sqlType == Types.CLOB || sqlType == Types.NCLOB) { - log.debug("Skipping cache – CLOB/NCLOB column detected at index {}", i); - return null; - } - } - - // Read all rows eagerly; convert binary types to byte arrays + // Read all rows eagerly; materialise binary and character LOBs List rows = new ArrayList<>(); while (rs.next()) { Object[] row = new Object[colCount]; @@ -285,8 +274,14 @@ private static CachedPage readAllRows(ResultSet rs) throws SQLException { } /** - * Reads a single column value, eagerly materialising BLOB / binary data as - * {@code byte[]} so that it remains valid after the connection is closed. + * Reads a single column value, eagerly materialising LOB data so that + * it remains valid after the connection is closed: + *
    + *
  • BLOB / LONGVARBINARY → {@code byte[]}
  • + *
  • VARBINARY / BINARY → {@code byte[]}
  • + *
  • CLOB / NCLOB / LONGVARCHAR / LONGNVARCHAR → {@code String}
  • + *
  • All other types → returned as-is via {@code ResultSet.getObject()}
  • + *
*/ private static Object readColumnValue(ResultSet rs, int col, int sqlType) throws SQLException { switch (sqlType) { @@ -305,6 +300,32 @@ private static Object readColumnValue(ResultSet rs, int col, int sqlType) throws case Types.VARBINARY: case Types.BINARY: return rs.getBytes(col); + case Types.CLOB: + case Types.LONGVARCHAR: { + try (java.io.Reader reader = rs.getCharacterStream(col)) { + if (reader == null) { + return null; + } + java.io.StringWriter sw = new java.io.StringWriter(); + reader.transferTo(sw); + return sw.toString(); + } catch (java.io.IOException e) { + throw new SQLException("Failed to read CLOB data", e); + } + } + case Types.NCLOB: + case Types.LONGNVARCHAR: { + try (java.io.Reader reader = rs.getNCharacterStream(col)) { + if (reader == null) { + return null; + } + java.io.StringWriter sw = new java.io.StringWriter(); + reader.transferTo(sw); + return sw.toString(); + } catch (java.io.IOException e) { + throw new SQLException("Failed to read NCLOB data", e); + } + } case Types.DATE: return rs.getDate(col); case Types.TIMESTAMP: diff --git a/ojp-server/src/test/java/org/openjproxy/grpc/server/paging/NextPagePrefetchCacheTest.java b/ojp-server/src/test/java/org/openjproxy/grpc/server/paging/NextPagePrefetchCacheTest.java index cc5e28d9e..372ff45f3 100644 --- a/ojp-server/src/test/java/org/openjproxy/grpc/server/paging/NextPagePrefetchCacheTest.java +++ b/ojp-server/src/test/java/org/openjproxy/grpc/server/paging/NextPagePrefetchCacheTest.java @@ -13,6 +13,7 @@ import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertFalse; +import static org.junit.jupiter.api.Assertions.assertNull; import static org.junit.jupiter.api.Assertions.assertTrue; import static org.mockito.ArgumentMatchers.anyString; import static org.mockito.Mockito.mock; @@ -238,4 +239,132 @@ void cachedPage_isExpired_withZeroTtl() throws Exception { Thread.sleep(10); // small delay so currentTime > createdAt 
assertTrue(page.isExpired(0), "Page should be expired with TTL=0"); } + + // ---------------------------------------------------------------- + // CLOB / NCLOB return columns + // ---------------------------------------------------------------- + + /** + * Creates a mock DataSource whose ResultSet returns one row with one CLOB column. + * The CLOB content is materialised as the {@code String} returned by + * {@code getCharacterStream()}. + */ + private static DataSource mockDataSourceWithClob(String clobContent) throws Exception { + java.io.Reader reader = new java.io.StringReader(clobContent); + + ResultSetMetaData meta = mock(ResultSetMetaData.class); + when(meta.getColumnCount()).thenReturn(1); + when(meta.getColumnName(1)).thenReturn("description"); + when(meta.getColumnType(1)).thenReturn(Types.CLOB); + + ResultSet rs = mock(ResultSet.class); + when(rs.getMetaData()).thenReturn(meta); + when(rs.next()).thenReturn(true, false); + when(rs.getCharacterStream(1)).thenReturn(reader); + + Statement stmt = mock(Statement.class); + when(stmt.executeQuery(anyString())).thenReturn(rs); + + Connection conn = mock(Connection.class); + when(conn.createStatement()).thenReturn(stmt); + + DataSource ds = mock(DataSource.class); + when(ds.getConnection()).thenReturn(conn); + return ds; + } + + /** + * Creates a mock DataSource whose ResultSet returns one row with one NCLOB column. 
+ */ + private static DataSource mockDataSourceWithNClob(String nclobContent) throws Exception { + java.io.Reader reader = new java.io.StringReader(nclobContent); + + ResultSetMetaData meta = mock(ResultSetMetaData.class); + when(meta.getColumnCount()).thenReturn(1); + when(meta.getColumnName(1)).thenReturn("content"); + when(meta.getColumnType(1)).thenReturn(Types.NCLOB); + + ResultSet rs = mock(ResultSet.class); + when(rs.getMetaData()).thenReturn(meta); + when(rs.next()).thenReturn(true, false); + when(rs.getNCharacterStream(1)).thenReturn(reader); + + Statement stmt = mock(Statement.class); + when(stmt.executeQuery(anyString())).thenReturn(rs); + + Connection conn = mock(Connection.class); + when(conn.createStatement()).thenReturn(stmt); + + DataSource ds = mock(DataSource.class); + when(ds.getConnection()).thenReturn(conn); + return ds; + } + + @Test + void prefetchAndGet_cachesClobColumns_asString() throws Exception { + NextPagePrefetchCache cache = enabledCache(); + String clobContent = "This is a large text value stored as CLOB"; + DataSource ds = mockDataSourceWithClob(clobContent); + + String sql = "SELECT description FROM articles LIMIT 10 OFFSET 10"; + cache.prefetchAsync(ds, sql, List.of()); + + Optional result = cache.getIfReady(sql); + + assertTrue(result.isPresent(), "CLOB column query should be cached"); + CachedPage page = result.get(); + assertEquals(1, page.getRows().size()); + assertEquals(clobContent, page.getRows().get(0)[0], + "CLOB content should be materialised as String"); + } + + @Test + void prefetchAndGet_cachesNclobColumns_asString() throws Exception { + NextPagePrefetchCache cache = enabledCache(); + String nclobContent = "Unicode text: こんにちは"; + DataSource ds = mockDataSourceWithNClob(nclobContent); + + String sql = "SELECT content FROM docs LIMIT 10 OFFSET 10"; + cache.prefetchAsync(ds, sql, List.of()); + + Optional result = cache.getIfReady(sql); + + assertTrue(result.isPresent(), "NCLOB column query should be cached"); + 
CachedPage page = result.get(); + assertEquals(1, page.getRows().size()); + assertEquals(nclobContent, page.getRows().get(0)[0], + "NCLOB content should be materialised as String"); + } + + @Test + void prefetchAndGet_handlesNullClobValue() throws Exception { + ResultSetMetaData meta = mock(ResultSetMetaData.class); + when(meta.getColumnCount()).thenReturn(1); + when(meta.getColumnName(1)).thenReturn("description"); + when(meta.getColumnType(1)).thenReturn(Types.CLOB); + + ResultSet rs = mock(ResultSet.class); + when(rs.getMetaData()).thenReturn(meta); + when(rs.next()).thenReturn(true, false); + when(rs.getCharacterStream(1)).thenReturn(null); // NULL CLOB + + Statement stmt = mock(Statement.class); + when(stmt.executeQuery(anyString())).thenReturn(rs); + + Connection conn = mock(Connection.class); + when(conn.createStatement()).thenReturn(stmt); + + DataSource ds = mock(DataSource.class); + when(ds.getConnection()).thenReturn(conn); + + NextPagePrefetchCache cache = enabledCache(); + String sql = "SELECT description FROM t LIMIT 10 OFFSET 10"; + cache.prefetchAsync(ds, sql, List.of()); + + Optional result = cache.getIfReady(sql); + + assertTrue(result.isPresent(), "Null CLOB should be cached as null value"); + assertFalse(result.get().getRows().isEmpty()); + assertNull(result.get().getRows().get(0)[0], "Null CLOB column should be null in cache"); + } } From 77195b9a34400b4e78384d1e53a0158f051cbb03 Mon Sep 17 00:00:00 2001 From: "copilot-swe-agent[bot]" <198982749+Copilot@users.noreply.github.com> Date: Tue, 17 Mar 2026 12:57:46 +0000 Subject: [PATCH 05/22] fix: datasource-isolated cache keys + background cleanup job for next-page prefetch cache Co-authored-by: rrobetti <7221783+rrobetti@users.noreply.github.com> --- .../grpc/server/ServerConfiguration.java | 11 +- .../grpc/server/StatementServiceImpl.java | 10 +- .../server/paging/NextPagePrefetchCache.java | 113 ++++++++++++--- .../NextPageCacheConfigurationTest.java | 46 +++++- 
.../paging/NextPagePrefetchCacheTest.java | 134 ++++++++++++++---- 5 files changed, 257 insertions(+), 57 deletions(-) diff --git a/ojp-server/src/main/java/org/openjproxy/grpc/server/ServerConfiguration.java b/ojp-server/src/main/java/org/openjproxy/grpc/server/ServerConfiguration.java index 7fca87637..fde71ccfe 100644 --- a/ojp-server/src/main/java/org/openjproxy/grpc/server/ServerConfiguration.java +++ b/ojp-server/src/main/java/org/openjproxy/grpc/server/ServerConfiguration.java @@ -73,6 +73,7 @@ public class ServerConfiguration { private static final String NEXT_PAGE_CACHE_TTL_SECONDS_KEY = "ojp.server.nextPageCache.ttlSeconds"; private static final String NEXT_PAGE_CACHE_MAX_ENTRIES_KEY = "ojp.server.nextPageCache.maxEntries"; private static final String NEXT_PAGE_CACHE_PREFETCH_WAIT_TIMEOUT_MS_KEY = "ojp.server.nextPageCache.prefetchWaitTimeoutMs"; + private static final String NEXT_PAGE_CACHE_CLEANUP_INTERVAL_SECONDS_KEY = "ojp.server.nextPageCache.cleanupIntervalSeconds"; // TLS configuration keys private static final String TLS_ENABLED_KEY = "ojp.server.tls.enabled"; @@ -143,9 +144,10 @@ public class ServerConfiguration { // Next-page prefetch cache default values public static final boolean DEFAULT_NEXT_PAGE_CACHE_ENABLED = false; // Disabled by default, opt-in - public static final long DEFAULT_NEXT_PAGE_CACHE_TTL_SECONDS = 300; // 5 minutes + public static final long DEFAULT_NEXT_PAGE_CACHE_TTL_SECONDS = 60; // 1 minute public static final int DEFAULT_NEXT_PAGE_CACHE_MAX_ENTRIES = 100; public static final long DEFAULT_NEXT_PAGE_CACHE_PREFETCH_WAIT_TIMEOUT_MS = 5000; // 5 seconds + public static final long DEFAULT_NEXT_PAGE_CACHE_CLEANUP_INTERVAL_SECONDS = 60; // 1 minute // TLS default values public static final boolean DEFAULT_TLS_ENABLED = false; // Disabled by default for backwards compatibility @@ -228,6 +230,7 @@ public class ServerConfiguration { private final long nextPageCacheTtlSeconds; private final int nextPageCacheMaxEntries; private final 
long nextPageCachePrefetchWaitTimeoutMs; + private final long nextPageCacheCleanupIntervalSeconds; public ServerConfiguration() { this.serverPort = getIntProperty(SERVER_PORT_KEY, DEFAULT_SERVER_PORT); @@ -297,6 +300,7 @@ public ServerConfiguration() { this.nextPageCacheTtlSeconds = getLongProperty(NEXT_PAGE_CACHE_TTL_SECONDS_KEY, DEFAULT_NEXT_PAGE_CACHE_TTL_SECONDS); this.nextPageCacheMaxEntries = getIntProperty(NEXT_PAGE_CACHE_MAX_ENTRIES_KEY, DEFAULT_NEXT_PAGE_CACHE_MAX_ENTRIES); this.nextPageCachePrefetchWaitTimeoutMs = getLongProperty(NEXT_PAGE_CACHE_PREFETCH_WAIT_TIMEOUT_MS_KEY, DEFAULT_NEXT_PAGE_CACHE_PREFETCH_WAIT_TIMEOUT_MS); + this.nextPageCacheCleanupIntervalSeconds = getLongProperty(NEXT_PAGE_CACHE_CLEANUP_INTERVAL_SECONDS_KEY, DEFAULT_NEXT_PAGE_CACHE_CLEANUP_INTERVAL_SECONDS); logConfigurationSummary(); } @@ -446,6 +450,7 @@ private void logConfigurationSummary() { logger.info(" Next-Page Cache TTL: {} seconds", nextPageCacheTtlSeconds); logger.info(" Next-Page Cache Max Entries: {}", nextPageCacheMaxEntries); logger.info(" Next-Page Cache Prefetch Wait Timeout: {} ms", nextPageCachePrefetchWaitTimeoutMs); + logger.info(" Next-Page Cache Cleanup Interval: {} seconds", nextPageCacheCleanupIntervalSeconds); } } @@ -689,4 +694,8 @@ public long getNextPageCachePrefetchWaitTimeoutMs() { return nextPageCachePrefetchWaitTimeoutMs; } + public long getNextPageCacheCleanupIntervalSeconds() { + return nextPageCacheCleanupIntervalSeconds; + } + } \ No newline at end of file diff --git a/ojp-server/src/main/java/org/openjproxy/grpc/server/StatementServiceImpl.java b/ojp-server/src/main/java/org/openjproxy/grpc/server/StatementServiceImpl.java index c99dafec9..a5b387fdc 100644 --- a/ojp-server/src/main/java/org/openjproxy/grpc/server/StatementServiceImpl.java +++ b/ojp-server/src/main/java/org/openjproxy/grpc/server/StatementServiceImpl.java @@ -107,7 +107,8 @@ public StatementServiceImpl(SessionManager sessionManager, CircuitBreakerRegistr 
serverConfiguration.isNextPageCacheEnabled(), serverConfiguration.getNextPageCacheMaxEntries(), serverConfiguration.getNextPageCacheTtlSeconds(), - serverConfiguration.getNextPageCachePrefetchWaitTimeoutMs()); + serverConfiguration.getNextPageCachePrefetchWaitTimeoutMs(), + serverConfiguration.getNextPageCacheCleanupIntervalSeconds()); initializeXAPoolProvider(); // Create SQL statement metrics from the registered OpenTelemetry instance (if available) @@ -308,11 +309,12 @@ private void executeQueryInternal(StatementRequest request, StreamObserver cached = nextPagePrefetchCache.getIfReady(sql); + String connHash = dto.getSession().getConnHash(); + Optional cached = nextPagePrefetchCache.getIfReady(connHash, sql); if (cached.isPresent()) { CachedPage page = cached.get(); // Start prefetch for the page after this one before returning the cached result - startNextPagePrefetch(sql, params, dto.getSession().getConnHash()); + startNextPagePrefetch(sql, params, connHash); streamCachedPage(page, dto.getSession(), responseObserver); return; } @@ -357,7 +359,7 @@ private void startNextPagePrefetch(String sql, List params, String co log.debug("No DataSource found for prefetch, connHash={}", connHash); return; } - nextPagePrefetchCache.prefetchAsync(dataSource, nextPageSql, params); + nextPagePrefetchCache.prefetchAsync(dataSource, connHash, nextPageSql, params); } /** diff --git a/ojp-server/src/main/java/org/openjproxy/grpc/server/paging/NextPagePrefetchCache.java b/ojp-server/src/main/java/org/openjproxy/grpc/server/paging/NextPagePrefetchCache.java index 0694eb1a9..2cf4c8e11 100644 --- a/ojp-server/src/main/java/org/openjproxy/grpc/server/paging/NextPagePrefetchCache.java +++ b/ojp-server/src/main/java/org/openjproxy/grpc/server/paging/NextPagePrefetchCache.java @@ -21,6 +21,8 @@ import java.util.Optional; import java.util.concurrent.CompletableFuture; import java.util.concurrent.ConcurrentHashMap; +import java.util.concurrent.Executors; +import 
java.util.concurrent.ScheduledExecutorService; import java.util.concurrent.TimeUnit; /** @@ -30,7 +32,8 @@ *
    *
  1. When a paginated query is executed, the server fires a virtual thread that * executes the next page SQL against the database and stores the result - * in this cache, keyed by the (trimmed) next-page SQL string.
  2. + * in this cache, keyed by the datasource identifier and (trimmed) next-page SQL + * string. *
  3. When the client subsequently requests the next page, the server first checks * this cache. If a matching entry is found the result is served from memory, * and another prefetch is started for the page after that.
  4. @@ -39,6 +42,11 @@ * falling back to a regular database query. *
* + *

Datasource isolation

+ * Each cache entry is scoped to a specific datasource by including the + * {@code datasourceId} in the cache key. Two datasources executing the same SQL + * query will never share a prefetched page. + * *

Materialised LOB data

* All column types are cached: *
    @@ -50,12 +58,17 @@ * of type BLOB or CLOB that reference a session-scoped LOB object) are still skipped * because those references cannot be transferred to a separate prefetch connection. * + *

    Background cleanup

    + * When enabled, a daemon background thread runs every {@code cleanupIntervalSeconds} + * to evict expired or failed entries. Entries expire after {@code ttlSeconds} regardless + * of whether they were ever consumed. Call {@link #shutdown()} to stop the scheduler. + * *

    Thread safety

    * All public methods are thread-safe. The internal cache uses a * {@link ConcurrentHashMap} and prefetch threads are Java 21 virtual threads. */ @Slf4j -public class NextPagePrefetchCache { +public class NextPagePrefetchCache implements AutoCloseable { private final boolean enabled; private final int maxEntries; @@ -63,26 +76,48 @@ public class NextPagePrefetchCache { private final long prefetchWaitTimeoutMs; /** - * Maps the (trimmed) next-page SQL to the asynchronous result of the prefetch. + * Maps {@code "|"} to the asynchronous result of the prefetch. + * Including the datasource ID in the key ensures that two different datasources executing + * the same SQL do not share cache entries. */ private final ConcurrentHashMap> cache = new ConcurrentHashMap<>(); + /** Background scheduler for periodic eviction of expired/abandoned entries. */ + private final ScheduledExecutorService cleanupScheduler; + /** * Creates a new cache instance. * - * @param enabled whether the feature is enabled - * @param maxEntries maximum number of entries to keep (oldest removed first) - * @param ttlSeconds time-to-live for each entry in seconds - * @param prefetchWaitTimeoutMs max time (ms) to wait for an in-progress prefetch - * before falling back to a live DB query + * @param enabled whether the feature is enabled + * @param maxEntries maximum number of entries to keep (oldest removed first) + * @param ttlSeconds time-to-live for each entry in seconds + * @param prefetchWaitTimeoutMs max time (ms) to wait for an in-progress prefetch + * before falling back to a live DB query + * @param cleanupIntervalSeconds interval (seconds) between background eviction sweeps; + * {@code 0} disables the background job */ public NextPagePrefetchCache(boolean enabled, int maxEntries, - long ttlSeconds, long prefetchWaitTimeoutMs) { + long ttlSeconds, long prefetchWaitTimeoutMs, + long cleanupIntervalSeconds) { this.enabled = enabled; this.maxEntries = maxEntries; this.ttlMs = ttlSeconds * 1000L; 
this.prefetchWaitTimeoutMs = prefetchWaitTimeoutMs; + + if (enabled && cleanupIntervalSeconds > 0) { + this.cleanupScheduler = Executors.newSingleThreadScheduledExecutor(r -> { + Thread t = new Thread(r, "ojp-prefetch-cache-cleanup"); + t.setDaemon(true); + return t; + }); + cleanupScheduler.scheduleAtFixedRate( + this::evictExpiredOrCompleted, + cleanupIntervalSeconds, cleanupIntervalSeconds, TimeUnit.SECONDS); + log.debug("Prefetch cache cleanup scheduled every {}s", cleanupIntervalSeconds); + } else { + this.cleanupScheduler = null; + } } /** @@ -92,17 +127,42 @@ public boolean isEnabled() { return enabled; } + /** + * Returns the current number of entries in the cache (in-progress + completed). + * Primarily intended for monitoring and testing. + */ + public int cacheSize() { + return cache.size(); + } + + /** + * Shuts down the background cleanup scheduler, if one was started. + * Safe to call multiple times. + */ + public void shutdown() { + if (cleanupScheduler != null && !cleanupScheduler.isShutdown()) { + cleanupScheduler.shutdown(); + log.debug("Prefetch cache cleanup scheduler shut down"); + } + } + + /** Implements {@link AutoCloseable} by delegating to {@link #shutdown()}. */ + @Override + public void close() { + shutdown(); + } + // ----------------------------------------------------------------- // Cache read // ----------------------------------------------------------------- /** - * Retrieves the cached page for the given SQL, waiting up to + * Retrieves the cached page for the given datasource + SQL pair, waiting up to * {@code prefetchWaitTimeoutMs} when the prefetch is still in progress. * *

    Returns an empty Optional when:

    *
      - *
    • no entry exists for {@code sql}
    • + *
    • no entry exists for {@code datasourceId} + {@code sql}
    • *
    • the entry is expired
    • *
    • the prefetch failed or timed out
    • *
    @@ -111,11 +171,14 @@ public boolean isEnabled() { * semantics) so that concurrent requests for the same page can each independently * obtain the result and start the next prefetch.

    * - * @param sql the exact paginated SQL sent by the client + * @param datasourceId the unique identifier of the datasource (e.g. connection hash); + * used to isolate entries from different datasources that may + * share the same SQL text + * @param sql the exact paginated SQL sent by the client * @return an Optional containing the cached page, or empty if unavailable */ - public Optional getIfReady(String sql) { - String key = normalizeKey(sql); + public Optional getIfReady(String datasourceId, String sql) { + String key = normalizeKey(datasourceId, sql); CompletableFuture future = cache.get(key); if (future == null) { return Optional.empty(); @@ -160,18 +223,22 @@ public Optional getIfReady(String sql) { /** * Starts an asynchronous prefetch of {@code nextPageSql} on a virtual thread. * - *

    The method returns immediately. If an entry for {@code nextPageSql} - * already exists (either in-progress or completed), no new prefetch is started. - * Entries are evicted lazily when the cache exceeds {@code maxEntries}.

    + *

    The method returns immediately. If an entry for {@code datasourceId} + + * {@code nextPageSql} already exists (either in-progress or completed), no new + * prefetch is started. Entries are evicted lazily when the cache exceeds + * {@code maxEntries}.

    * *

    BLOB/CLOB parameters are not supported; if any parameter has type * {@code BLOB} or {@code CLOB} the prefetch is silently skipped.

    * * @param dataSource the DataSource from which to obtain a dedicated prefetch connection + * @param datasourceId the unique identifier of the datasource (e.g. connection hash); + * used to scope the cache entry so two datasources do not share pages * @param nextPageSql the SQL for the next page (produced by {@link PaginationDetector#buildNextPageSql}) * @param params the query parameters (may be null or empty for non-prepared queries) */ - public void prefetchAsync(DataSource dataSource, String nextPageSql, List params) { + public void prefetchAsync(DataSource dataSource, String datasourceId, + String nextPageSql, List params) { if (!enabled || dataSource == null || nextPageSql == null) { return; } @@ -182,7 +249,7 @@ public void prefetchAsync(DataSource dataSource, String nextPageSql, List result = cache.getIfReady("SELECT * FROM t LIMIT 10 OFFSET 10"); + Optional result = cache.getIfReady("ds1", "SELECT * FROM t LIMIT 10 OFFSET 10"); assertFalse(result.isPresent(), "Expected empty when nothing is cached"); } @@ -109,24 +109,24 @@ void prefetchAsync_doesNothing_whenDisabled() throws Exception { NextPagePrefetchCache cache = disabledCache(); DataSource ds = mockDataSource(1); - cache.prefetchAsync(ds, "SELECT * FROM t LIMIT 10 OFFSET 10", List.of()); + cache.prefetchAsync(ds, "ds1", "SELECT * FROM t LIMIT 10 OFFSET 10", List.of()); // Cache should still be empty - assertFalse(cache.getIfReady("SELECT * FROM t LIMIT 10 OFFSET 10").isPresent()); + assertFalse(cache.getIfReady("ds1", "SELECT * FROM t LIMIT 10 OFFSET 10").isPresent()); } @Test void prefetchAsync_doesNothing_whenDataSourceIsNull() { NextPagePrefetchCache cache = enabledCache(); - cache.prefetchAsync(null, "SELECT * FROM t LIMIT 10 OFFSET 10", List.of()); - assertFalse(cache.getIfReady("SELECT * FROM t LIMIT 10 OFFSET 10").isPresent()); + cache.prefetchAsync(null, "ds1", "SELECT * FROM t LIMIT 10 OFFSET 10", List.of()); + assertFalse(cache.getIfReady("ds1", "SELECT * FROM t LIMIT 10 OFFSET 
10").isPresent()); } @Test void prefetchAsync_doesNothing_whenSqlIsNull() throws Exception { NextPagePrefetchCache cache = enabledCache(); DataSource ds = mockDataSource(1); - cache.prefetchAsync(ds, null, List.of()); + cache.prefetchAsync(ds, "ds1", null, List.of()); // Nothing to assert – just must not throw } @@ -140,10 +140,10 @@ void prefetchAndGet_returnsRows_forSimpleQuery() throws Exception { DataSource ds = mockDataSource(3); String sql = "SELECT id FROM t LIMIT 10 OFFSET 10"; - cache.prefetchAsync(ds, sql, List.of()); + cache.prefetchAsync(ds, "ds1", sql, List.of()); // Wait for the prefetch (virtual thread) to complete - Optional result = cache.getIfReady(sql); + Optional result = cache.getIfReady("ds1", sql); assertTrue(result.isPresent(), "Expected cached page"); CachedPage page = result.get(); @@ -157,10 +157,10 @@ void prefetchAndGet_cacheKeyIsCaseAndWhitespaceInsensitive() throws Exception { DataSource ds = mockDataSource(1); // Prefetch with one form of the SQL - cache.prefetchAsync(ds, "SELECT id FROM t LIMIT 10 OFFSET 10", List.of()); + cache.prefetchAsync(ds, "ds1", "SELECT id FROM t LIMIT 10 OFFSET 10", List.of()); // Retrieve with slightly different casing/whitespace (should normalise to same key) - Optional result = cache.getIfReady(" SELECT ID FROM T LIMIT 10 OFFSET 10 "); + Optional result = cache.getIfReady("ds1", " SELECT ID FROM T LIMIT 10 OFFSET 10 "); assertTrue(result.isPresent(), "Keys should normalise to the same entry"); } @@ -175,13 +175,13 @@ void getIfReady_returnsSingleUse_secondCallEmpty() throws Exception { DataSource ds = mockDataSource(2); String sql = "SELECT id FROM t LIMIT 10 OFFSET 10"; - cache.prefetchAsync(ds, sql, List.of()); + cache.prefetchAsync(ds, "ds1", sql, List.of()); - Optional first = cache.getIfReady(sql); + Optional first = cache.getIfReady("ds1", sql); assertTrue(first.isPresent(), "First retrieval should succeed"); // Second retrieval should return empty (entry was removed after first use) - Optional 
second = cache.getIfReady(sql); + Optional second = cache.getIfReady("ds1", sql); assertFalse(second.isPresent(), "Second retrieval should be empty (single-use)"); } @@ -192,16 +192,16 @@ void getIfReady_returnsSingleUse_secondCallEmpty() throws Exception { @Test void getIfReady_returnsEmpty_whenEntryExpired() throws Exception { // TTL = 0 seconds → immediately expired - NextPagePrefetchCache cache = new NextPagePrefetchCache(true, 100, 0, 5000); + NextPagePrefetchCache cache = new NextPagePrefetchCache(true, 100, 0, 5000, 0); DataSource ds = mockDataSource(1); String sql = "SELECT id FROM t LIMIT 10 OFFSET 10"; - cache.prefetchAsync(ds, sql, List.of()); + cache.prefetchAsync(ds, "ds1", sql, List.of()); // Wait a bit to ensure the prefetch completes and the entry is expired Thread.sleep(50); - Optional result = cache.getIfReady(sql); + Optional result = cache.getIfReady("ds1", sql); assertFalse(result.isPresent(), "Entry should be expired with TTL=0"); } @@ -215,11 +215,11 @@ void prefetchAsync_doesNotStartDuplicate_whenKeyAlreadyPresent() throws Exceptio DataSource ds = mockDataSource(1); String sql = "SELECT id FROM t LIMIT 10 OFFSET 10"; - cache.prefetchAsync(ds, sql, List.of()); // first start - cache.prefetchAsync(ds, sql, List.of()); // duplicate – should be ignored + cache.prefetchAsync(ds, "ds1", sql, List.of()); // first start + cache.prefetchAsync(ds, "ds1", sql, List.of()); // duplicate – should be ignored // Retrieve to confirm the entry exists (one execution) - Optional result = cache.getIfReady(sql); + Optional result = cache.getIfReady("ds1", sql); assertTrue(result.isPresent()); } @@ -307,9 +307,9 @@ void prefetchAndGet_cachesClobColumns_asString() throws Exception { DataSource ds = mockDataSourceWithClob(clobContent); String sql = "SELECT description FROM articles LIMIT 10 OFFSET 10"; - cache.prefetchAsync(ds, sql, List.of()); + cache.prefetchAsync(ds, "ds1", sql, List.of()); - Optional result = cache.getIfReady(sql); + Optional result = 
cache.getIfReady("ds1", sql); assertTrue(result.isPresent(), "CLOB column query should be cached"); CachedPage page = result.get(); @@ -325,9 +325,9 @@ void prefetchAndGet_cachesNclobColumns_asString() throws Exception { DataSource ds = mockDataSourceWithNClob(nclobContent); String sql = "SELECT content FROM docs LIMIT 10 OFFSET 10"; - cache.prefetchAsync(ds, sql, List.of()); + cache.prefetchAsync(ds, "ds1", sql, List.of()); - Optional result = cache.getIfReady(sql); + Optional result = cache.getIfReady("ds1", sql); assertTrue(result.isPresent(), "NCLOB column query should be cached"); CachedPage page = result.get(); @@ -359,12 +359,92 @@ void prefetchAndGet_handlesNullClobValue() throws Exception { NextPagePrefetchCache cache = enabledCache(); String sql = "SELECT description FROM t LIMIT 10 OFFSET 10"; - cache.prefetchAsync(ds, sql, List.of()); + cache.prefetchAsync(ds, "ds1", sql, List.of()); - Optional result = cache.getIfReady(sql); + Optional result = cache.getIfReady("ds1", sql); assertTrue(result.isPresent(), "Null CLOB should be cached as null value"); assertFalse(result.get().getRows().isEmpty()); assertNull(result.get().getRows().get(0)[0], "Null CLOB column should be null in cache"); } + + // ---------------------------------------------------------------- + // Datasource isolation + // ---------------------------------------------------------------- + + @Test + void prefetchAndGet_isolatesByDatasourceId() throws Exception { + NextPagePrefetchCache cache = enabledCache(); + DataSource ds1 = mockDataSource(2); + DataSource ds2 = mockDataSource(5); + + String sql = "SELECT id FROM t LIMIT 10 OFFSET 10"; + + // Prefetch same SQL for two different datasources + cache.prefetchAsync(ds1, "conn-hash-A", sql, List.of()); + cache.prefetchAsync(ds2, "conn-hash-B", sql, List.of()); + + // Each datasource gets its own cache entry + Optional resultA = cache.getIfReady("conn-hash-A", sql); + Optional resultB = cache.getIfReady("conn-hash-B", sql); + + 
assertTrue(resultA.isPresent(), "Datasource A should have its own cache entry"); + assertTrue(resultB.isPresent(), "Datasource B should have its own cache entry"); + assertEquals(2, resultA.get().getRows().size(), "DS-A should have 2 rows"); + assertEquals(5, resultB.get().getRows().size(), "DS-B should have 5 rows"); + } + + @Test + void getIfReady_withDifferentDatasourceId_missesCache() throws Exception { + NextPagePrefetchCache cache = enabledCache(); + DataSource ds = mockDataSource(1); + + String sql = "SELECT id FROM t LIMIT 10 OFFSET 10"; + cache.prefetchAsync(ds, "conn-hash-A", sql, List.of()); + + // Asking for the same SQL under a different datasource ID should miss + Optional result = cache.getIfReady("conn-hash-B", sql); + assertFalse(result.isPresent(), + "Cache miss expected: different datasourceId should not match"); + } + + // ---------------------------------------------------------------- + // Background cleanup scheduler + // ---------------------------------------------------------------- + + @Test + void shutdown_doesNotThrow_whenSchedulerNotStarted() { + // cleanupIntervalSeconds=0 → no scheduler started + NextPagePrefetchCache cache = new NextPagePrefetchCache(true, 100, 60, 5000, 0); + cache.shutdown(); // must not throw + } + + @Test + void shutdown_isIdempotent() { + NextPagePrefetchCache cache = new NextPagePrefetchCache(true, 100, 60, 5000, 30); + cache.shutdown(); + cache.shutdown(); // second call must not throw + } + + @Test + void backgroundCleanup_evictsExpiredEntries() throws Exception { + // TTL = 0 → all entries expire immediately + // cleanupInterval = 1 second → scheduler will run + NextPagePrefetchCache cache = new NextPagePrefetchCache(true, 100, 0, 5000, 1); + DataSource ds = mockDataSource(2); + + String sql = "SELECT id FROM t LIMIT 10 OFFSET 10"; + cache.prefetchAsync(ds, "ds1", sql, List.of()); + + // Wait (with polling) for the background cleanup to reduce the cache size to 0 + long deadline = System.currentTimeMillis() 
+ 5_000; + while (cache.cacheSize() > 0 && System.currentTimeMillis() < deadline) { + Thread.sleep(50); + } + + assertEquals(0, cache.cacheSize(), + "Background cleanup should have evicted the expired entry"); + + cache.shutdown(); + } } From 268cdaab2fd28fa80cf1632ba64ad9ddf4ffa3df Mon Sep 17 00:00:00 2001 From: "copilot-swe-agent[bot]" <198982749+Copilot@users.noreply.github.com> Date: Tue, 17 Mar 2026 18:10:02 +0000 Subject: [PATCH 06/22] fix: single shared static cleanup executor guarantees one background thread across all cache instances Co-authored-by: rrobetti <7221783+rrobetti@users.noreply.github.com> --- .../server/paging/NextPagePrefetchCache.java | 66 ++++++++++++------- .../paging/NextPagePrefetchCacheTest.java | 2 +- 2 files changed, 45 insertions(+), 23 deletions(-) diff --git a/ojp-server/src/main/java/org/openjproxy/grpc/server/paging/NextPagePrefetchCache.java b/ojp-server/src/main/java/org/openjproxy/grpc/server/paging/NextPagePrefetchCache.java index 2cf4c8e11..dd50ba8fb 100644 --- a/ojp-server/src/main/java/org/openjproxy/grpc/server/paging/NextPagePrefetchCache.java +++ b/ojp-server/src/main/java/org/openjproxy/grpc/server/paging/NextPagePrefetchCache.java @@ -23,7 +23,9 @@ import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.Executors; import java.util.concurrent.ScheduledExecutorService; +import java.util.concurrent.ScheduledFuture; import java.util.concurrent.TimeUnit; +import java.util.concurrent.atomic.AtomicReference; /** * Cache for pre-fetched next pages of paginated SELECT queries. @@ -59,9 +61,12 @@ * because those references cannot be transferred to a separate prefetch connection. * *

    Background cleanup

    - * When enabled, a daemon background thread runs every {@code cleanupIntervalSeconds} - * to evict expired or failed entries. Entries expire after {@code ttlSeconds} regardless - * of whether they were ever consumed. Call {@link #shutdown()} to stop the scheduler. + * All cache instances share a single application-wide daemon thread + * ({@link #CLEANUP_EXECUTOR}) that is created once for the lifetime of the JVM. + * When enabled, each instance registers its own periodic eviction task on that shared + * executor; {@link #shutdown()} cancels the task for that instance without affecting + * the shared thread or any other instance's tasks. Entries expire after + * {@code ttlSeconds} regardless of whether they were ever consumed. * *

    Thread safety

    * All public methods are thread-safe. The internal cache uses a @@ -70,21 +75,38 @@ @Slf4j public class NextPagePrefetchCache implements AutoCloseable { + /** + * Application-wide single-threaded executor shared by ALL enabled cache instances. + * Using a {@code static final} field guarantees exactly ONE background cleanup thread + * per JVM regardless of how many {@code NextPagePrefetchCache} instances are created. + * The executor is a daemon so it never prevents JVM shutdown. + */ + private static final ScheduledExecutorService CLEANUP_EXECUTOR = + Executors.newSingleThreadScheduledExecutor(r -> { + Thread t = new Thread(r, "ojp-prefetch-cache-cleanup"); + t.setDaemon(true); + return t; + }); + private final boolean enabled; private final int maxEntries; private final long ttlMs; private final long prefetchWaitTimeoutMs; /** - * Maps {@code "|"} to the asynchronous result of the prefetch. + * Maps {@code "\u0001"} to the asynchronous result of the prefetch. * Including the datasource ID in the key ensures that two different datasources executing * the same SQL do not share cache entries. */ private final ConcurrentHashMap> cache = new ConcurrentHashMap<>(); - /** Background scheduler for periodic eviction of expired/abandoned entries. */ - private final ScheduledExecutorService cleanupScheduler; + /** + * Handle to this instance's eviction task on {@link #CLEANUP_EXECUTOR}. + * {@code null} reference when the cleanup job is disabled ({@code cleanupIntervalSeconds == 0}). + * Cancelled atomically by {@link #shutdown()} to avoid concurrent double-cancel races. + */ + private final AtomicReference> cleanupTask = new AtomicReference<>(); /** * Creates a new cache instance. 
@@ -95,7 +117,7 @@ public class NextPagePrefetchCache implements AutoCloseable { * @param prefetchWaitTimeoutMs max time (ms) to wait for an in-progress prefetch * before falling back to a live DB query * @param cleanupIntervalSeconds interval (seconds) between background eviction sweeps; - * {@code 0} disables the background job + * {@code 0} disables the background job for this instance */ public NextPagePrefetchCache(boolean enabled, int maxEntries, long ttlSeconds, long prefetchWaitTimeoutMs, @@ -106,17 +128,14 @@ public NextPagePrefetchCache(boolean enabled, int maxEntries, this.prefetchWaitTimeoutMs = prefetchWaitTimeoutMs; if (enabled && cleanupIntervalSeconds > 0) { - this.cleanupScheduler = Executors.newSingleThreadScheduledExecutor(r -> { - Thread t = new Thread(r, "ojp-prefetch-cache-cleanup"); - t.setDaemon(true); - return t; - }); - cleanupScheduler.scheduleAtFixedRate( + // Register this instance's eviction task on the single shared executor. + // The executor has exactly one thread, so all tasks run sequentially on + // that same thread — never more than one cleanup thread in the JVM. + cleanupTask.set(CLEANUP_EXECUTOR.scheduleAtFixedRate( this::evictExpiredOrCompleted, - cleanupIntervalSeconds, cleanupIntervalSeconds, TimeUnit.SECONDS); - log.debug("Prefetch cache cleanup scheduled every {}s", cleanupIntervalSeconds); - } else { - this.cleanupScheduler = null; + cleanupIntervalSeconds, cleanupIntervalSeconds, TimeUnit.SECONDS)); + log.debug("Prefetch cache cleanup registered every {}s on shared executor", + cleanupIntervalSeconds); } } @@ -136,13 +155,16 @@ public int cacheSize() { } /** - * Shuts down the background cleanup scheduler, if one was started. - * Safe to call multiple times. + * Cancels this instance's periodic cleanup task on the shared executor. + * The shared executor itself is left running so that other cache instances + * (if any) are not affected. 
Safe to call multiple times; uses an atomic + * swap to prevent concurrent double-cancel races. */ public void shutdown() { - if (cleanupScheduler != null && !cleanupScheduler.isShutdown()) { - cleanupScheduler.shutdown(); - log.debug("Prefetch cache cleanup scheduler shut down"); + ScheduledFuture task = cleanupTask.getAndSet(null); + if (task != null) { + task.cancel(false); + log.debug("Prefetch cache cleanup task cancelled"); } } diff --git a/ojp-server/src/test/java/org/openjproxy/grpc/server/paging/NextPagePrefetchCacheTest.java b/ojp-server/src/test/java/org/openjproxy/grpc/server/paging/NextPagePrefetchCacheTest.java index 2b89ec3c3..e18723458 100644 --- a/ojp-server/src/test/java/org/openjproxy/grpc/server/paging/NextPagePrefetchCacheTest.java +++ b/ojp-server/src/test/java/org/openjproxy/grpc/server/paging/NextPagePrefetchCacheTest.java @@ -414,7 +414,7 @@ void getIfReady_withDifferentDatasourceId_missesCache() throws Exception { @Test void shutdown_doesNotThrow_whenSchedulerNotStarted() { - // cleanupIntervalSeconds=0 → no scheduler started + // cleanupIntervalSeconds=0 → no cleanup task registered NextPagePrefetchCache cache = new NextPagePrefetchCache(true, 100, 60, 5000, 0); cache.shutdown(); // must not throw } From e953c3032a0d33ba6263af5ef10459538a0b493b Mon Sep 17 00:00:00 2001 From: "copilot-swe-agent[bot]" <198982749+Copilot@users.noreply.github.com> Date: Tue, 17 Mar 2026 18:28:17 +0000 Subject: [PATCH 07/22] feat(test): Postgres pagination cache integration test with BYTEA LOB assertion Co-authored-by: rrobetti <7221783+rrobetti@users.noreply.github.com> --- .github/workflows/main.yml | 14 +- ...ostgresPaginationCacheIntegrationTest.java | 315 ++++++++++++++++++ ...h_cache_connections_with_record_counts.csv | 5 + 3 files changed, 333 insertions(+), 1 deletion(-) create mode 100644 ojp-jdbc-driver/src/test/java/openjproxy/jdbc/PostgresPaginationCacheIntegrationTest.java create mode 100644 
ojp-jdbc-driver/src/test/resources/postgres_prefetch_cache_connections_with_record_counts.csv diff --git a/.github/workflows/main.yml b/.github/workflows/main.yml index ee407ea44..5726ab3f0 100644 --- a/.github/workflows/main.yml +++ b/.github/workflows/main.yml @@ -220,6 +220,15 @@ jobs: -e JAVA_TOOL_OPTIONS="-Dojp.server.port=10593 -Dojp.prometheus.port=9163 -Dojp.server.slowQuerySegregation.enabled=true -Dojp.sql.enhancer.enabled=true -Dojp.sql.enhancer.mode=OPTIMIZE -Dojp.sql.enhancer.dialect=POSTGRESQL" \ rrobetti/ojp:0.4.1-SNAPSHOT + # Start third OJP server WITH next-page prefetch cache enabled + # Pagination-cache integration tests run against this server (port 10594) + - name: Start OJP Server container (prefetch cache on port 10594) + run: | + docker run -d --name ojp-server-prefetch-cache \ + --network host \ + -e JAVA_TOOL_OPTIONS="-Dojp.server.port=10594 -Dojp.prometheus.port=9164 -Dojp.server.slowQuerySegregation.enabled=true -Dojp.server.nextPageCache.enabled=true -Dojp.server.nextPageCache.ttlSeconds=60 -Dojp.server.nextPageCache.prefetchWaitTimeoutMs=5000" \ + rrobetti/ojp:0.4.1-SNAPSHOT + - name: Wait for ojp-server to start run: sleep 10 @@ -235,7 +244,7 @@ jobs: # Run PostgreSQL-specific tests with -DenablePostgresTests flag - name: Test (ojp-jdbc-driver) with PostgreSQL enabled - run: mvn test -pl ojp-jdbc-driver -Dgpg.skip=true -DenablePostgresTests=true + run: mvn test -pl ojp-jdbc-driver -Dgpg.skip=true -DenablePostgresTests=true -DenablePostgresPrefetchCacheTests=true # =================================================================== # SQL Enhancer Integration Test @@ -267,6 +276,9 @@ jobs: echo "" echo "=== OJP Server (with SQL enhancer) log ===" docker logs ojp-server-enhancer 2>&1 || echo "ojp-server-enhancer container not found" + echo "" + echo "=== OJP Server (with prefetch cache) log ===" + docker logs ojp-server-prefetch-cache 2>&1 || echo "ojp-server-prefetch-cache container not found" # 
=========================================================================== # JOB 3: MySQL Integration Tests diff --git a/ojp-jdbc-driver/src/test/java/openjproxy/jdbc/PostgresPaginationCacheIntegrationTest.java b/ojp-jdbc-driver/src/test/java/openjproxy/jdbc/PostgresPaginationCacheIntegrationTest.java new file mode 100644 index 000000000..e4bd69178 --- /dev/null +++ b/ojp-jdbc-driver/src/test/java/openjproxy/jdbc/PostgresPaginationCacheIntegrationTest.java @@ -0,0 +1,315 @@ +package openjproxy.jdbc; + +import org.junit.jupiter.api.BeforeAll; +import org.junit.jupiter.params.ParameterizedTest; +import org.junit.jupiter.params.provider.CsvFileSource; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.sql.Connection; +import java.sql.DriverManager; +import java.sql.PreparedStatement; +import java.sql.ResultSet; +import java.sql.SQLException; +import java.sql.Statement; +import java.util.ArrayList; +import java.util.List; + +import static org.junit.jupiter.api.Assertions.assertArrayEquals; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertNotNull; +import static org.junit.jupiter.api.Assertions.fail; +import static org.junit.jupiter.api.Assumptions.assumeFalse; + +/** + * Integration test for the next-page prefetch cache feature with a PostgreSQL backend. + * + *

    The test is parameterized over several record counts (99, 100, 101, 567, 1000) to exercise + * boundary conditions around the 100-record page size. For each count the test: + *

      + *
    1. Creates a dedicated table with multiple column types, including a {@code BYTEA} LOB column.
    2. + *
    3. Inserts the requested number of rows with fully deterministic, per-row values.
    4. + *
    5. Paginates through all rows using {@code LIMIT 100 OFFSET …} against an OJP server instance + * that has {@code ojp.server.nextPageCache.enabled=true} (port 10594).
    6. + *
    7. Asserts every column value, including a byte-exact comparison of the + * {@code BYTEA} column.
    8. + *
    9. Drops the table on completion.
    10. + *
    + * + *

    This test is disabled by default and is activated by passing + * {@code -DenablePostgresPrefetchCacheTests=true} to the Maven Surefire plugin in CI. + * The target OJP server must already be running on port 10594 with the prefetch cache enabled. + */ +class PostgresPaginationCacheIntegrationTest { + + private static final Logger logger = LoggerFactory.getLogger(PostgresPaginationCacheIntegrationTest.class); + + /** Number of rows per page used throughout these tests. */ + private static final int PAGE_SIZE = 100; + + private static boolean isTestEnabled; + + @BeforeAll + static void checkTestConfiguration() { + isTestEnabled = Boolean.parseBoolean( + System.getProperty("enablePostgresPrefetchCacheTests", "false")); + } + + // ------------------------------------------------------------------------- + // Parameterized test – one run per row in the CSV + // ------------------------------------------------------------------------- + + /** + * Core pagination test. + * + *

    The CSV provides five combinations of record count × connection details so that the + * same test method covers: a partial last page (99), exactly one full page (100), + * one full page plus one row (101), a non-round number (567), and a 10-page set (1000). + * + * @param recordCount total rows to insert and paginate over + * @param driverClass fully-qualified OJP driver class (loaded as a side-effect) + * @param url JDBC URL pointing at the prefetch-cache OJP server (port 10594) + * @param user database user + * @param pwd database password + */ + @ParameterizedTest + @CsvFileSource(resources = "/postgres_prefetch_cache_connections_with_record_counts.csv") + void testPaginationWithPrefetchCache(int recordCount, String driverClass, + String url, String user, String pwd) + throws SQLException, ClassNotFoundException { + + assumeFalse(!isTestEnabled, + "Postgres prefetch-cache tests are disabled " + + "(pass -DenablePostgresPrefetchCacheTests=true to enable)"); + + Class.forName(driverClass); + logger.info("Prefetch-cache pagination test: recordCount={}, url={}", recordCount, url); + + // Table name is unique per record-count so parallel executions don't collide + String tableName = "ojp_pfx_pg_" + recordCount; + + try (Connection conn = DriverManager.getConnection(url, user, pwd)) { + + // ------------------------------------------------------------------ + // 1. Setup: fresh table + batch insert + // ------------------------------------------------------------------ + createTable(conn, tableName); + insertRows(conn, tableName, recordCount); + + // ------------------------------------------------------------------ + // 2. 
Paginate and assert every value on every row + // ------------------------------------------------------------------ + int totalRetrieved = 0; + for (int offset = 0; offset < recordCount; offset += PAGE_SIZE) { + int expectedOnPage = Math.min(PAGE_SIZE, recordCount - offset); + totalRetrieved += assertPage(conn, tableName, offset, expectedOnPage); + } + + assertEquals(recordCount, totalRetrieved, + "Total rows retrieved across all pages must equal recordCount"); + + // ------------------------------------------------------------------ + // 3. Cleanup + // ------------------------------------------------------------------ + dropTable(conn, tableName); + } + } + + // ------------------------------------------------------------------------- + // Helpers + // ------------------------------------------------------------------------- + + /** + * Drops (if exists) and re-creates the test table. + * + *

    Schema: + *

    +     *   id         INT      PRIMARY KEY      – 1-based row identifier
    +     *   name       VARCHAR  NOT NULL         – "record_{id}"
    +     *   val_int    INT      NOT NULL         – id × 10
    +     *   val_bigint BIGINT   NOT NULL         – id × 1,000,000
    +     *   val_bool   BOOLEAN  NOT NULL         – true when id is even
    +     *   val_text   TEXT     NOT NULL         – "text_value_for_row_{id}"
    +     *   val_bytea  BYTEA    NOT NULL         – four deterministic bytes derived from id
    +     * 
    + */ + private static void createTable(Connection conn, String tableName) throws SQLException { + try (Statement stmt = conn.createStatement()) { + stmt.execute("DROP TABLE IF EXISTS " + tableName); + stmt.execute( + "CREATE TABLE " + tableName + " (" + + " id INT PRIMARY KEY," + + " name VARCHAR(100) NOT NULL," + + " val_int INT NOT NULL," + + " val_bigint BIGINT NOT NULL," + + " val_bool BOOLEAN NOT NULL," + + " val_text TEXT NOT NULL," + + " val_bytea BYTEA NOT NULL" + + ")"); + } + logger.debug("Created table {}", tableName); + } + + /** + * Inserts {@code recordCount} rows using a {@link PreparedStatement} batch for efficiency. + */ + private static void insertRows(Connection conn, String tableName, int recordCount) + throws SQLException { + String sql = "INSERT INTO " + tableName + + " (id, name, val_int, val_bigint, val_bool, val_text, val_bytea)" + + " VALUES (?, ?, ?, ?, ?, ?, ?)"; + + try (PreparedStatement ps = conn.prepareStatement(sql)) { + for (int i = 1; i <= recordCount; i++) { + ps.setInt(1, i); + ps.setString(2, "record_" + i); + ps.setInt(3, i * 10); + ps.setLong(4, i * 1_000_000L); + ps.setBoolean(5, i % 2 == 0); + ps.setString(6, "text_value_for_row_" + i); + ps.setBytes(7, expectedBytea(i)); + ps.addBatch(); + + // Flush in chunks to avoid oversized batches + if (i % 500 == 0) { + ps.executeBatch(); + } + } + ps.executeBatch(); + } + logger.debug("Inserted {} rows into {}", recordCount, tableName); + } + + /** + * Queries one page ({@code LIMIT PAGE_SIZE OFFSET offset}), asserts every column value + * for every row on the page, and returns the number of rows actually returned. 
+ */ + private static int assertPage(Connection conn, String tableName, + int offset, int expectedRowsOnPage) + throws SQLException { + + String sql = "SELECT id, name, val_int, val_bigint, val_bool, val_text, val_bytea" + + " FROM " + tableName + + " ORDER BY id" + + " LIMIT " + PAGE_SIZE + " OFFSET " + offset; + + int rowsOnPage = 0; + try (PreparedStatement ps = conn.prepareStatement(sql); + ResultSet rs = ps.executeQuery()) { + + while (rs.next()) { + int expectedId = offset + rowsOnPage + 1; + int id = rs.getInt("id"); + + assertEquals(expectedId, id, + "id mismatch at offset=" + offset + " row=" + rowsOnPage); + assertEquals("record_" + id, rs.getString("name"), + "name mismatch for id=" + id); + assertEquals(id * 10, rs.getInt("val_int"), + "val_int mismatch for id=" + id); + assertEquals(id * 1_000_000L, rs.getLong("val_bigint"), + "val_bigint mismatch for id=" + id); + assertEquals(id % 2 == 0, rs.getBoolean("val_bool"), + "val_bool mismatch for id=" + id); + assertEquals("text_value_for_row_" + id, rs.getString("val_text"), + "val_text mismatch for id=" + id); + + // BYTEA: the prefetch cache materialises BINARY/VARBINARY as byte[]. + // PostgreSQL JDBC may also represent BYTEA as its hex escape string + // (e.g. "\\x01020304") when retrieved via getObject(); both forms are + // accepted here and compared byte-for-byte. + assertBytea(expectedBytea(id), rs.getObject("val_bytea"), + "val_bytea for id=" + id); + + rowsOnPage++; + } + } + + assertEquals(expectedRowsOnPage, rowsOnPage, + "Page at offset=" + offset + " expected " + expectedRowsOnPage + " rows"); + return rowsOnPage; + } + + /** Drops the test table, ignoring errors (e.g., table does not exist). 
*/ + private static void dropTable(Connection conn, String tableName) { + try (Statement stmt = conn.createStatement()) { + stmt.execute("DROP TABLE IF EXISTS " + tableName); + logger.debug("Dropped table {}", tableName); + } catch (SQLException e) { + logger.warn("Could not drop table {}: {}", tableName, e.getMessage()); + } + } + + // ------------------------------------------------------------------------- + // Data-generation helpers + // ------------------------------------------------------------------------- + + /** + * Returns four deterministic bytes for a given {@code rowId}: + *
      + *
    • byte 0: low 8 bits of rowId
    • + *
    • byte 1: high 8 bits of rowId (bits 8-15)
    • + *
    • byte 2: low 8 bits of (rowId × 3)
    • + *
    • byte 3: low 8 bits of (rowId × 7)
    • + *
    + * All four bytes are different for any rowId in [1, 1000], ensuring that the test + * cannot pass by coincidence on a partial or shuffled result set. + */ + private static byte[] expectedBytea(int rowId) { + return new byte[]{ + (byte) (rowId & 0xFF), + (byte) ((rowId >> 8) & 0xFF), + (byte) ((rowId * 3) & 0xFF), + (byte) ((rowId * 7) & 0xFF) + }; + } + + /** + * Asserts that {@code actual} (which may be a {@code byte[]} or the PostgreSQL hex-escape + * {@code String} {@code "\\xHH…"}) equals {@code expected} byte-for-byte. + * + * @param expected the expected byte array + * @param actual value returned by {@link ResultSet#getObject(String)} + * @param columnLabel column name used in failure messages + */ + private static void assertBytea(byte[] expected, Object actual, String columnLabel) { + assertNotNull(actual, columnLabel + " must not be null"); + + byte[] actualBytes; + if (actual instanceof byte[]) { + actualBytes = (byte[]) actual; + } else if (actual instanceof String) { + // PostgreSQL JDBC hex-escape format: \x followed by lowercase hex pairs + String s = (String) actual; + if (s.startsWith("\\x") || s.startsWith("\\X")) { + actualBytes = hexStringToBytes(s.substring(2)); + } else { + actualBytes = s.getBytes(java.nio.charset.StandardCharsets.UTF_8); + } + } else { + fail(columnLabel + " has unexpected type " + actual.getClass().getName()); + return; // unreachable – suppresses "actualBytes may be uninitialised" warning + } + + assertArrayEquals(expected, actualBytes, columnLabel + " bytes do not match"); + } + + /** + * Converts a lower-case hex string (e.g. {@code "0102030a"}) to a {@code byte[]}. 
+ * + * @param hex hex string with an even number of characters and no prefix + * @return decoded byte array + */ + private static byte[] hexStringToBytes(String hex) { + if (hex.isEmpty()) { + return new byte[0]; + } + int len = hex.length(); + byte[] data = new byte[len / 2]; + for (int i = 0; i < len; i += 2) { + data[i / 2] = (byte) ((Character.digit(hex.charAt(i), 16) << 4) + + Character.digit(hex.charAt(i + 1), 16)); + } + return data; + } +} diff --git a/ojp-jdbc-driver/src/test/resources/postgres_prefetch_cache_connections_with_record_counts.csv b/ojp-jdbc-driver/src/test/resources/postgres_prefetch_cache_connections_with_record_counts.csv new file mode 100644 index 000000000..bd7d1f891 --- /dev/null +++ b/ojp-jdbc-driver/src/test/resources/postgres_prefetch_cache_connections_with_record_counts.csv @@ -0,0 +1,5 @@ +99,org.openjproxy.jdbc.Driver,jdbc:ojp[localhost:10594]_postgresql://localhost:5432/defaultdb,testuser,testpassword +100,org.openjproxy.jdbc.Driver,jdbc:ojp[localhost:10594]_postgresql://localhost:5432/defaultdb,testuser,testpassword +101,org.openjproxy.jdbc.Driver,jdbc:ojp[localhost:10594]_postgresql://localhost:5432/defaultdb,testuser,testpassword +567,org.openjproxy.jdbc.Driver,jdbc:ojp[localhost:10594]_postgresql://localhost:5432/defaultdb,testuser,testpassword +1000,org.openjproxy.jdbc.Driver,jdbc:ojp[localhost:10594]_postgresql://localhost:5432/defaultdb,testuser,testpassword From 73a4d186b3c9719041b1a0dd7210ff6b3f0a446f Mon Sep 17 00:00:00 2001 From: "copilot-swe-agent[bot]" <198982749+Copilot@users.noreply.github.com> Date: Tue, 17 Mar 2026 18:39:03 +0000 Subject: [PATCH 08/22] fix(paging): use virtual thread for CLEANUP_EXECUTOR thread factory Co-authored-by: rrobetti <7221783+rrobetti@users.noreply.github.com> --- .../grpc/server/paging/NextPagePrefetchCache.java | 10 ++++------ 1 file changed, 4 insertions(+), 6 deletions(-) diff --git a/ojp-server/src/main/java/org/openjproxy/grpc/server/paging/NextPagePrefetchCache.java 
b/ojp-server/src/main/java/org/openjproxy/grpc/server/paging/NextPagePrefetchCache.java index dd50ba8fb..736b38fb9 100644 --- a/ojp-server/src/main/java/org/openjproxy/grpc/server/paging/NextPagePrefetchCache.java +++ b/ojp-server/src/main/java/org/openjproxy/grpc/server/paging/NextPagePrefetchCache.java @@ -79,14 +79,12 @@ public class NextPagePrefetchCache implements AutoCloseable { * Application-wide single-threaded executor shared by ALL enabled cache instances. * Using a {@code static final} field guarantees exactly ONE background cleanup thread * per JVM regardless of how many {@code NextPagePrefetchCache} instances are created. - * The executor is a daemon so it never prevents JVM shutdown. + * The executor runs on a virtual thread; virtual threads are always daemon threads, + * so they never prevent JVM shutdown. */ private static final ScheduledExecutorService CLEANUP_EXECUTOR = - Executors.newSingleThreadScheduledExecutor(r -> { - Thread t = new Thread(r, "ojp-prefetch-cache-cleanup"); - t.setDaemon(true); - return t; - }); + Executors.newSingleThreadScheduledExecutor(r -> + Thread.ofVirtual().name("ojp-prefetch-cache-cleanup").unstarted(r)); private final boolean enabled; private final int maxEntries; From a2a9726b6a665a3be3f8085e37d0da23ec8a69a9 Mon Sep 17 00:00:00 2001 From: "copilot-swe-agent[bot]" <198982749+Copilot@users.noreply.github.com> Date: Tue, 17 Mar 2026 19:01:03 +0000 Subject: [PATCH 09/22] feat(paging): per-datasource prefetchWaitTimeoutMs configuration Co-authored-by: rrobetti <7221783+rrobetti@users.noreply.github.com> --- .../grpc/server/ServerConfiguration.java | 29 +++++++ .../grpc/server/StatementServiceImpl.java | 13 ++++ .../server/paging/NextPagePrefetchCache.java | 35 ++++++++- .../server/utils/ConnectionHashGenerator.java | 10 ++- .../NextPageCacheConfigurationTest.java | 78 +++++++++++++++++++ .../paging/NextPagePrefetchCacheTest.java | 44 +++++++++++ 6 files changed, 205 insertions(+), 4 deletions(-) diff --git 
a/ojp-server/src/main/java/org/openjproxy/grpc/server/ServerConfiguration.java b/ojp-server/src/main/java/org/openjproxy/grpc/server/ServerConfiguration.java index fde71ccfe..01fdaf18d 100644 --- a/ojp-server/src/main/java/org/openjproxy/grpc/server/ServerConfiguration.java +++ b/ojp-server/src/main/java/org/openjproxy/grpc/server/ServerConfiguration.java @@ -694,6 +694,35 @@ public long getNextPageCachePrefetchWaitTimeoutMs() { return nextPageCachePrefetchWaitTimeoutMs; } + /** + * Returns the prefetch-wait timeout for a specific datasource. + * + *

    If a per-datasource override is configured via + * {@code ojp.server.nextPageCache.datasource..prefetchWaitTimeoutMs}, + * that value is returned. Otherwise the global + * {@code ojp.server.nextPageCache.prefetchWaitTimeoutMs} is used as the fallback.

    + * + * @param datasourceName the {@code ojp.datasource.name} value from the client connection + * properties; {@code null} or {@code "default"} always returns + * the global default + * @return the effective prefetch-wait timeout in milliseconds for the given datasource + */ + public long getNextPageCachePrefetchWaitTimeoutMs(String datasourceName) { + if (datasourceName != null && !datasourceName.isEmpty() && !"default".equals(datasourceName)) { + String perDatasourceKey = "ojp.server.nextPageCache.datasource." + datasourceName + + ".prefetchWaitTimeoutMs"; + String raw = getStringProperty(perDatasourceKey, null); + if (raw != null) { + try { + return Long.parseLong(raw); + } catch (NumberFormatException e) { + logger.warn("Invalid value for '{}': '{}', falling back to global default", perDatasourceKey, raw); + } + } + } + return nextPageCachePrefetchWaitTimeoutMs; + } + public long getNextPageCacheCleanupIntervalSeconds() { return nextPageCacheCleanupIntervalSeconds; } diff --git a/ojp-server/src/main/java/org/openjproxy/grpc/server/StatementServiceImpl.java b/ojp-server/src/main/java/org/openjproxy/grpc/server/StatementServiceImpl.java index a5b387fdc..7c3202848 100644 --- a/ojp-server/src/main/java/org/openjproxy/grpc/server/StatementServiceImpl.java +++ b/ojp-server/src/main/java/org/openjproxy/grpc/server/StatementServiceImpl.java @@ -83,6 +83,7 @@ public class StatementServiceImpl extends StatementServiceGrpc.StatementServiceI // Next-page prefetch cache for paginated queries (disabled by default) private final NextPagePrefetchCache nextPagePrefetchCache; + private final ServerConfiguration serverConfiguration; // Multinode XA coordinator for distributing transaction limits private static final MultinodeXaCoordinator xaCoordinator = new MultinodeXaCoordinator(); @@ -99,6 +100,7 @@ public StatementServiceImpl(SessionManager sessionManager, CircuitBreakerRegistr ServerConfiguration serverConfiguration) { this.sessionManager = sessionManager; 
this.circuitBreakerRegistry = circuitBreakerRegistry; + this.serverConfiguration = serverConfiguration; // Server configuration for creating segregation managers this.sqlEnhancerEngine = new org.openjproxy.grpc.server.sql.SqlEnhancerEngine( serverConfiguration.isSqlEnhancerEnabled()); @@ -213,6 +215,17 @@ private void initializeXAPoolProvider() { @Override public void connect(ConnectionDetails connectionDetails, StreamObserver responseObserver) { + // Register per-datasource prefetch wait timeout so that getIfReady() uses the + // correct timeout for this datasource rather than the global default. + if (nextPagePrefetchCache.isEnabled()) { + String connHash = org.openjproxy.grpc.server.utils.ConnectionHashGenerator + .hashConnectionDetails(connectionDetails); + String datasourceName = org.openjproxy.grpc.server.utils.ConnectionHashGenerator + .extractDataSourceName(connectionDetails); + long perDatasourceTimeout = serverConfiguration + .getNextPageCachePrefetchWaitTimeoutMs(datasourceName); + nextPagePrefetchCache.registerDatasourcePrefetchWaitTimeout(connHash, perDatasourceTimeout); + } org.openjproxy.grpc.server.action.connection.ConnectAction.getInstance() .execute(actionContext, connectionDetails, responseObserver); } diff --git a/ojp-server/src/main/java/org/openjproxy/grpc/server/paging/NextPagePrefetchCache.java b/ojp-server/src/main/java/org/openjproxy/grpc/server/paging/NextPagePrefetchCache.java index 736b38fb9..549d6e153 100644 --- a/ojp-server/src/main/java/org/openjproxy/grpc/server/paging/NextPagePrefetchCache.java +++ b/ojp-server/src/main/java/org/openjproxy/grpc/server/paging/NextPagePrefetchCache.java @@ -91,6 +91,15 @@ public class NextPagePrefetchCache implements AutoCloseable { private final long ttlMs; private final long prefetchWaitTimeoutMs; + /** + * Per-datasource prefetch-wait timeout overrides. + * Key: datasource connection hash (see {@code ConnectionHashGenerator}). + * Value: timeout in milliseconds. 
+ * When an entry is present it takes precedence over {@link #prefetchWaitTimeoutMs}. + */ + private final ConcurrentHashMap datasourcePrefetchWaitTimeoutMs + = new ConcurrentHashMap<>(); + /** * Maps {@code "\u0001"} to the asynchronous result of the prefetch. * Including the datasource ID in the key ensures that two different datasources executing @@ -152,6 +161,25 @@ public int cacheSize() { return cache.size(); } + /** + * Registers a per-datasource prefetch-wait timeout that overrides the global default + * for the specified datasource. + * + *

    Calling this method multiple times for the same {@code datasourceId} simply + * replaces the previously registered value. The registration is thread-safe.

    + * + * @param datasourceId the unique identifier of the datasource (connection hash) + * @param timeoutMs the maximum time in milliseconds to wait for an in-progress + * prefetch before falling back to a live DB query + */ + public void registerDatasourcePrefetchWaitTimeout(String datasourceId, long timeoutMs) { + if (datasourceId != null) { + datasourcePrefetchWaitTimeoutMs.put(datasourceId, timeoutMs); + log.debug("Registered per-datasource prefetchWaitTimeoutMs={} for datasourceId={}", + timeoutMs, datasourceId); + } + } + /** * Cancels this instance's periodic cleanup task on the shared executor. * The shared executor itself is left running so that other cache instances @@ -204,8 +232,11 @@ public Optional getIfReady(String datasourceId, String sql) { return Optional.empty(); } + long effectiveTimeoutMs = datasourcePrefetchWaitTimeoutMs.getOrDefault( + datasourceId, prefetchWaitTimeoutMs); + try { - CachedPage page = future.get(prefetchWaitTimeoutMs, TimeUnit.MILLISECONDS); + CachedPage page = future.get(effectiveTimeoutMs, TimeUnit.MILLISECONDS); // Remove after use (single-use semantics; if another thread also grabs // the same entry concurrently, it gets a copy of the same data). 
cache.remove(key, future); @@ -223,7 +254,7 @@ public Optional getIfReady(String datasourceId, String sql) { } catch (java.util.concurrent.TimeoutException e) { log.debug("Prefetch for '{}' did not complete within {}ms – falling back to live query", - abbreviate(sql), prefetchWaitTimeoutMs); + abbreviate(sql), effectiveTimeoutMs); return Optional.empty(); } catch (InterruptedException e) { Thread.currentThread().interrupt(); diff --git a/ojp-server/src/main/java/org/openjproxy/grpc/server/utils/ConnectionHashGenerator.java b/ojp-server/src/main/java/org/openjproxy/grpc/server/utils/ConnectionHashGenerator.java index b8a96f366..47594608b 100644 --- a/ojp-server/src/main/java/org/openjproxy/grpc/server/utils/ConnectionHashGenerator.java +++ b/ojp-server/src/main/java/org/openjproxy/grpc/server/utils/ConnectionHashGenerator.java @@ -44,9 +44,15 @@ public static String hashConnectionDetails(ConnectionDetails connectionDetails) /** * Extracts the dataSource name from connection details properties. - * Returns "default" if no dataSource name is specified. + * Returns {@code "default"} if no dataSource name is specified. + * + *

    The dataSource name corresponds to the {@code ojp.datasource.name} property + * set in the client connection properties.

    + * + * @param connectionDetails the connection details whose properties to inspect + * @return the datasource name, or {@code "default"} when none is set */ - private static String extractDataSourceName(ConnectionDetails connectionDetails) { + public static String extractDataSourceName(ConnectionDetails connectionDetails) { if (connectionDetails.getPropertiesList().isEmpty()) { return "default"; } diff --git a/ojp-server/src/test/java/org/openjproxy/grpc/server/NextPageCacheConfigurationTest.java b/ojp-server/src/test/java/org/openjproxy/grpc/server/NextPageCacheConfigurationTest.java index 4721aa3f3..7a72b75d9 100644 --- a/ojp-server/src/test/java/org/openjproxy/grpc/server/NextPageCacheConfigurationTest.java +++ b/ojp-server/src/test/java/org/openjproxy/grpc/server/NextPageCacheConfigurationTest.java @@ -186,4 +186,82 @@ void defaultCleanupInterval_is60Seconds() { void defaultTtlSeconds_is60Seconds() { assertEquals(60L, ServerConfiguration.DEFAULT_NEXT_PAGE_CACHE_TTL_SECONDS); } + + // ---------------------------------------------------------------- + // Per-datasource prefetch wait timeout + // ---------------------------------------------------------------- + + @Test + void perDatasource_prefetchWaitTimeoutMs_isRespected() { + System.setProperty("ojp.server.nextPageCache.datasource.my-db.prefetchWaitTimeoutMs", "1500"); + + ServerConfiguration config = new ServerConfiguration(); + + assertEquals(1500L, config.getNextPageCachePrefetchWaitTimeoutMs("my-db")); + + System.clearProperty("ojp.server.nextPageCache.datasource.my-db.prefetchWaitTimeoutMs"); + } + + @Test + void perDatasource_prefetchWaitTimeoutMs_fallsBackToGlobalDefault_whenNotSet() { + System.setProperty(WAIT_TIMEOUT_MS_KEY, "8000"); + + ServerConfiguration config = new ServerConfiguration(); + + // Datasource "unknown" has no per-datasource property set + assertEquals(8000L, config.getNextPageCachePrefetchWaitTimeoutMs("unknown-ds")); + + System.clearProperty(WAIT_TIMEOUT_MS_KEY); + } + + @Test 
+ void perDatasource_prefetchWaitTimeoutMs_fallsBackToGlobalDefault_forNullName() { + System.setProperty(WAIT_TIMEOUT_MS_KEY, "3000"); + + ServerConfiguration config = new ServerConfiguration(); + + assertEquals(3000L, config.getNextPageCachePrefetchWaitTimeoutMs(null)); + + System.clearProperty(WAIT_TIMEOUT_MS_KEY); + } + + @Test + void perDatasource_prefetchWaitTimeoutMs_fallsBackToGlobalDefault_forDefaultName() { + System.setProperty(WAIT_TIMEOUT_MS_KEY, "4000"); + + ServerConfiguration config = new ServerConfiguration(); + + assertEquals(4000L, config.getNextPageCachePrefetchWaitTimeoutMs("default")); + + System.clearProperty(WAIT_TIMEOUT_MS_KEY); + } + + @Test + void perDatasource_invalidPrefetchWaitTimeout_fallsBackToGlobalDefault() { + System.setProperty("ojp.server.nextPageCache.datasource.bad-ds.prefetchWaitTimeoutMs", "not-a-number"); + + ServerConfiguration config = new ServerConfiguration(); + + assertEquals(ServerConfiguration.DEFAULT_NEXT_PAGE_CACHE_PREFETCH_WAIT_TIMEOUT_MS, + config.getNextPageCachePrefetchWaitTimeoutMs("bad-ds")); + + System.clearProperty("ojp.server.nextPageCache.datasource.bad-ds.prefetchWaitTimeoutMs"); + } + + @Test + void perDatasource_multipleOverrides_areIndependent() { + System.setProperty("ojp.server.nextPageCache.datasource.ds-a.prefetchWaitTimeoutMs", "1000"); + System.setProperty("ojp.server.nextPageCache.datasource.ds-b.prefetchWaitTimeoutMs", "2000"); + System.setProperty(WAIT_TIMEOUT_MS_KEY, "9000"); + + ServerConfiguration config = new ServerConfiguration(); + + assertEquals(1000L, config.getNextPageCachePrefetchWaitTimeoutMs("ds-a")); + assertEquals(2000L, config.getNextPageCachePrefetchWaitTimeoutMs("ds-b")); + assertEquals(9000L, config.getNextPageCachePrefetchWaitTimeoutMs("ds-c")); // falls back to global + + System.clearProperty("ojp.server.nextPageCache.datasource.ds-a.prefetchWaitTimeoutMs"); + System.clearProperty("ojp.server.nextPageCache.datasource.ds-b.prefetchWaitTimeoutMs"); + 
System.clearProperty(WAIT_TIMEOUT_MS_KEY); + } } diff --git a/ojp-server/src/test/java/org/openjproxy/grpc/server/paging/NextPagePrefetchCacheTest.java b/ojp-server/src/test/java/org/openjproxy/grpc/server/paging/NextPagePrefetchCacheTest.java index e18723458..e0c5b22ce 100644 --- a/ojp-server/src/test/java/org/openjproxy/grpc/server/paging/NextPagePrefetchCacheTest.java +++ b/ojp-server/src/test/java/org/openjproxy/grpc/server/paging/NextPagePrefetchCacheTest.java @@ -447,4 +447,48 @@ void backgroundCleanup_evictsExpiredEntries() throws Exception { cache.shutdown(); } + + // ---------------------------------------------------------------- + // Per-datasource prefetch wait timeout + // ---------------------------------------------------------------- + + @Test + void registerDatasourcePrefetchWaitTimeout_ignoresNullId() { + NextPagePrefetchCache cache = enabledCache(); + // Null datasourceId should be silently ignored (no NullPointerException) + cache.registerDatasourcePrefetchWaitTimeout(null, 1000); + } + + @Test + void getIfReady_usesPerDatasourceTimeout_whenRegistered() throws Exception { + // enabled, maxEntries=100, ttlSeconds=60, globalTimeoutMs=1, cleanupInterval=0 (disabled) + NextPagePrefetchCache cache = new NextPagePrefetchCache(true, 100, 60, 1, 0); // global: 1ms + cache.registerDatasourcePrefetchWaitTimeout("ds-custom", 5_000); // per-ds: 5 s + + DataSource ds = mockDataSource(3); + String sql = "SELECT id FROM t LIMIT 10 OFFSET 0"; + cache.prefetchAsync(ds, "ds-custom", sql, List.of()); + + Optional result = cache.getIfReady("ds-custom", sql); + + assertTrue(result.isPresent(), "Cache hit expected with per-datasource timeout"); + assertEquals(3, result.get().getRows().size()); + } + + @Test + void registerDatasourcePrefetchWaitTimeout_replacesExistingValue() throws Exception { + // enabled, maxEntries=100, ttlSeconds=60, globalTimeoutMs=9999, cleanupInterval=0 (disabled) + NextPagePrefetchCache cache = new NextPagePrefetchCache(true, 100, 60, 9_999, 
0); + + cache.registerDatasourcePrefetchWaitTimeout("ds-x", 1_000); + cache.registerDatasourcePrefetchWaitTimeout("ds-x", 2_000); // replace + + // Exercise getIfReady to confirm the updated timeout is used without error + DataSource ds = mockDataSource(1); + String sql = "SELECT id FROM t LIMIT 5 OFFSET 0"; + cache.prefetchAsync(ds, "ds-x", sql, List.of()); + + Optional result = cache.getIfReady("ds-x", sql); + assertTrue(result.isPresent()); + } } From 1d3d878a29efbb00086c288084b1f7a772b1fbca Mon Sep 17 00:00:00 2001 From: "copilot-swe-agent[bot]" <198982749+Copilot@users.noreply.github.com> Date: Tue, 17 Mar 2026 19:19:29 +0000 Subject: [PATCH 10/22] fix(jdbc): skip remote close() when ResultSet has no server-side UUID (cache serve fix) Co-authored-by: rrobetti <7221783+rrobetti@users.noreply.github.com> --- .../src/main/java/org/openjproxy/jdbc/ResultSet.java | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/ojp-jdbc-driver/src/main/java/org/openjproxy/jdbc/ResultSet.java b/ojp-jdbc-driver/src/main/java/org/openjproxy/jdbc/ResultSet.java index f573e2b96..8836dd784 100644 --- a/ojp-jdbc-driver/src/main/java/org/openjproxy/jdbc/ResultSet.java +++ b/ojp-jdbc-driver/src/main/java/org/openjproxy/jdbc/ResultSet.java @@ -132,6 +132,12 @@ public void close() throws SQLException { this.blockIdx = null; this.itResults = null; this.currentDataBlock = null; + // When the result set was served directly from the prefetch cache there is no + // server-side ResultSet object registered, so skip the remote close call. + String uuid = this.getResultSetUUID(); + if (uuid == null || uuid.isBlank()) { + return; + } //If the parent statement is closed the result set is closed already, attempting to close it again would produce an error. 
if (this.statement == null || !this.statement.isClosed()) { super.close(); From 59ca6424f78cc42374b563c1a53858ae39610feb Mon Sep 17 00:00:00 2001 From: "copilot-swe-agent[bot]" <198982749+Copilot@users.noreply.github.com> Date: Tue, 17 Mar 2026 19:53:07 +0000 Subject: [PATCH 11/22] =?UTF-8?q?fix(paging):=20fix=20Sonar=20issues=20?= =?UTF-8?q?=E2=80=94=20resource=20leaks,=20dead=20code,=20unused=20imports?= =?UTF-8?q?,=20condition=20negation?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Co-authored-by: rrobetti <7221783+rrobetti@users.noreply.github.com> --- ...ostgresPaginationCacheIntegrationTest.java | 9 +++----- .../server/paging/NextPagePrefetchCache.java | 22 ++++++++++--------- 2 files changed, 15 insertions(+), 16 deletions(-) diff --git a/ojp-jdbc-driver/src/test/java/openjproxy/jdbc/PostgresPaginationCacheIntegrationTest.java b/ojp-jdbc-driver/src/test/java/openjproxy/jdbc/PostgresPaginationCacheIntegrationTest.java index e4bd69178..0e85469c6 100644 --- a/ojp-jdbc-driver/src/test/java/openjproxy/jdbc/PostgresPaginationCacheIntegrationTest.java +++ b/ojp-jdbc-driver/src/test/java/openjproxy/jdbc/PostgresPaginationCacheIntegrationTest.java @@ -12,14 +12,12 @@ import java.sql.ResultSet; import java.sql.SQLException; import java.sql.Statement; -import java.util.ArrayList; -import java.util.List; import static org.junit.jupiter.api.Assertions.assertArrayEquals; import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertNotNull; import static org.junit.jupiter.api.Assertions.fail; -import static org.junit.jupiter.api.Assumptions.assumeFalse; +import static org.junit.jupiter.api.Assumptions.assumeTrue; /** * Integration test for the next-page prefetch cache feature with a PostgreSQL backend. 
@@ -78,7 +76,7 @@ void testPaginationWithPrefetchCache(int recordCount, String driverClass, String url, String user, String pwd) throws SQLException, ClassNotFoundException { - assumeFalse(!isTestEnabled, + assumeTrue(isTestEnabled, "Postgres prefetch-cache tests are disabled " + "(pass -DenablePostgresPrefetchCacheTests=true to enable)"); @@ -287,8 +285,7 @@ private static void assertBytea(byte[] expected, Object actual, String columnLab actualBytes = s.getBytes(java.nio.charset.StandardCharsets.UTF_8); } } else { - fail(columnLabel + " has unexpected type " + actual.getClass().getName()); - return; // unreachable – suppresses "actualBytes may be uninitialised" warning + actualBytes = fail(columnLabel + " has unexpected type " + actual.getClass().getName()); } assertArrayEquals(expected, actualBytes, columnLabel + " bytes do not match"); diff --git a/ojp-server/src/main/java/org/openjproxy/grpc/server/paging/NextPagePrefetchCache.java b/ojp-server/src/main/java/org/openjproxy/grpc/server/paging/NextPagePrefetchCache.java index 549d6e153..a99904fa1 100644 --- a/ojp-server/src/main/java/org/openjproxy/grpc/server/paging/NextPagePrefetchCache.java +++ b/ojp-server/src/main/java/org/openjproxy/grpc/server/paging/NextPagePrefetchCache.java @@ -351,17 +351,19 @@ public void prefetchAsync(DataSource dataSource, String datasourceId, */ private static CachedPage executeAndReadAllRows(Connection conn, String sql, List params) throws SQLException { - ResultSet rs; if (params.isEmpty()) { - Statement stmt = conn.createStatement(); - rs = stmt.executeQuery(sql); + try (Statement stmt = conn.createStatement(); + ResultSet rs = stmt.executeQuery(sql)) { + return readAllRows(rs); + } } else { - PreparedStatement ps = conn.prepareStatement(sql); - setNonLobParameters(ps, params); - rs = ps.executeQuery(); + try (PreparedStatement ps = conn.prepareStatement(sql)) { + setNonLobParameters(ps, params); + try (ResultSet rs = ps.executeQuery()) { + return readAllRows(rs); + } + } } - - 
return readAllRows(rs); } /** @@ -409,8 +411,8 @@ private static Object readColumnValue(ResultSet rs, int col, int sqlType) throws if (blob == null) { return null; } - try { - return blob.getBinaryStream().readAllBytes(); + try (java.io.InputStream stream = blob.getBinaryStream()) { + return stream.readAllBytes(); } catch (java.io.IOException e) { throw new SQLException("Failed to read BLOB data", e); } From 8c71a396dafa2279f9fd962e98eea21d5e4fff41 Mon Sep 17 00:00:00 2001 From: "copilot-swe-agent[bot]" <198982749+Copilot@users.noreply.github.com> Date: Tue, 17 Mar 2026 20:18:36 +0000 Subject: [PATCH 12/22] =?UTF-8?q?fix(paging):=20fix=20remaining=20Sonar=20?= =?UTF-8?q?issues=20=E2=80=94=20duplicate=20conditions,=20regex=20compilat?= =?UTF-8?q?ion,=20boolean=20pattern,=20catch(Exception)?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Co-authored-by: rrobetti <7221783+rrobetti@users.noreply.github.com> --- .../grpc/server/StatementServiceImpl.java | 11 ++++------- .../server/paging/NextPagePrefetchCache.java | 6 +++++- .../grpc/server/paging/PaginationDetector.java | 18 ++++++++++++------ .../server/utils/ConnectionHashGenerator.java | 12 +++--------- 4 files changed, 24 insertions(+), 23 deletions(-) diff --git a/ojp-server/src/main/java/org/openjproxy/grpc/server/StatementServiceImpl.java b/ojp-server/src/main/java/org/openjproxy/grpc/server/StatementServiceImpl.java index 7c3202848..dd74cc3bd 100644 --- a/ojp-server/src/main/java/org/openjproxy/grpc/server/StatementServiceImpl.java +++ b/ojp-server/src/main/java/org/openjproxy/grpc/server/StatementServiceImpl.java @@ -389,15 +389,11 @@ private static void streamCachedPage(CachedPage page, SessionInfo session, queryResultBuilder.labels(page.getColumnLabels()); List batch = new ArrayList<>(); - int row = 0; - boolean justSent = false; + int totalRows = page.getRows().size(); for (Object[] rowValues : page.getRows()) { - justSent = false; - row++; batch.add(rowValues); - 
if (row % CommonConstants.ROWS_PER_RESULT_SET_DATA_BLOCK == 0) { - justSent = true; + if (batch.size() == CommonConstants.ROWS_PER_RESULT_SET_DATA_BLOCK) { responseObserver.onNext(ResultSetWrapper.wrapResults(session, batch, queryResultBuilder, null, "")); queryResultBuilder = OpQueryResult.builder(); @@ -405,7 +401,8 @@ private static void streamCachedPage(CachedPage page, SessionInfo session, } } - if (!justSent) { + // Send remaining rows, or an empty batch when there are no rows at all + if (!batch.isEmpty() || totalRows == 0) { responseObserver.onNext(ResultSetWrapper.wrapResults(session, batch, queryResultBuilder, null, "")); } diff --git a/ojp-server/src/main/java/org/openjproxy/grpc/server/paging/NextPagePrefetchCache.java b/ojp-server/src/main/java/org/openjproxy/grpc/server/paging/NextPagePrefetchCache.java index a99904fa1..849796b4f 100644 --- a/ojp-server/src/main/java/org/openjproxy/grpc/server/paging/NextPagePrefetchCache.java +++ b/ojp-server/src/main/java/org/openjproxy/grpc/server/paging/NextPagePrefetchCache.java @@ -26,6 +26,7 @@ import java.util.concurrent.ScheduledFuture; import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicReference; +import java.util.regex.Pattern; /** * Cache for pre-fetched next pages of paginated SELECT queries. @@ -86,6 +87,9 @@ public class NextPagePrefetchCache implements AutoCloseable { Executors.newSingleThreadScheduledExecutor(r -> Thread.ofVirtual().name("ojp-prefetch-cache-cleanup").unstarted(r)); + /** Pre-compiled pattern for stripping newlines and tabs in log abbreviations. 
*/ + private static final Pattern NEWLINE_PATTERN = Pattern.compile("[\\r\\n\\t]+"); + private final boolean enabled; private final int maxEntries; private final long ttlMs; @@ -520,7 +524,7 @@ private static String abbreviate(String sql, int maxLen) { return ""; } // Remove newlines/tabs for single-line thread names - String singleLine = sql.replaceAll("[\\r\\n\\t]+", " ").trim(); + String singleLine = NEWLINE_PATTERN.matcher(sql).replaceAll(" ").trim(); return singleLine.length() <= maxLen ? singleLine : singleLine.substring(0, maxLen - 3) + "..."; } diff --git a/ojp-server/src/main/java/org/openjproxy/grpc/server/paging/PaginationDetector.java b/ojp-server/src/main/java/org/openjproxy/grpc/server/paging/PaginationDetector.java index 3e77f320d..4ccf593bf 100644 --- a/ojp-server/src/main/java/org/openjproxy/grpc/server/paging/PaginationDetector.java +++ b/ojp-server/src/main/java/org/openjproxy/grpc/server/paging/PaginationDetector.java @@ -115,9 +115,12 @@ public static Optional detect(String sql) { return Optional.of(new PageInfo(offset, limit)); } + // Patterns 4 and 5 only apply when the query has no OFFSET clause at all. + // Evaluate once and reuse the result. 
+ boolean noOffset = !HAS_OFFSET.matcher(sql).find(); + // Pattern 4: FETCH FIRST/NEXT n ROWS ONLY (first page, offset = 0) - // Only match when there is no OFFSET clause in the same query - if (!HAS_OFFSET.matcher(sql).find()) { + if (noOffset) { Matcher m4 = FETCH_ONLY.matcher(sql); if (m4.find()) { long fetchSize = Long.parseLong(m4.group(1)); @@ -126,8 +129,7 @@ public static Optional detect(String sql) { } // Pattern 5: standalone LIMIT n (first page, offset = 0) - // Only match when there is no OFFSET clause in the same query - if (!HAS_OFFSET.matcher(sql).find()) { + if (noOffset) { Matcher m5 = LIMIT_ONLY.matcher(sql); if (m5.find()) { long limit = Long.parseLong(m5.group(1)); @@ -177,8 +179,12 @@ public static String buildNextPageSql(String sql, PageInfo pageInfo) { return sql.substring(0, m3.start(1)) + nextOffset + sql.substring(m3.end(1)); } + // Patterns 4 and 5 only apply when the query has no OFFSET clause at all. + // Evaluate once and reuse the result. + boolean noOffset = !HAS_OFFSET.matcher(sql).find(); + // Pattern 4: FETCH FIRST/NEXT n ROWS ONLY without OFFSET → insert OFFSET before FETCH - if (!HAS_OFFSET.matcher(sql).find()) { + if (noOffset) { Matcher m4 = FETCH_ONLY.matcher(sql); if (m4.find()) { int fetchStart = m4.start(); @@ -189,7 +195,7 @@ public static String buildNextPageSql(String sql, PageInfo pageInfo) { } // Pattern 5: standalone LIMIT n → append OFFSET n - if (!HAS_OFFSET.matcher(sql).find()) { + if (noOffset) { Matcher m5 = LIMIT_ONLY.matcher(sql); if (m5.find()) { return sql + " OFFSET " + nextOffset; diff --git a/ojp-server/src/main/java/org/openjproxy/grpc/server/utils/ConnectionHashGenerator.java b/ojp-server/src/main/java/org/openjproxy/grpc/server/utils/ConnectionHashGenerator.java index 47594608b..6e58913e5 100644 --- a/ojp-server/src/main/java/org/openjproxy/grpc/server/utils/ConnectionHashGenerator.java +++ b/ojp-server/src/main/java/org/openjproxy/grpc/server/utils/ConnectionHashGenerator.java @@ -56,14 +56,8 @@ 
public static String extractDataSourceName(ConnectionDetails connectionDetails) if (connectionDetails.getPropertiesList().isEmpty()) { return "default"; } - - try { - Map properties = ProtoConverter.propertiesFromProto(connectionDetails.getPropertiesList()); - Object dataSourceName = properties.get("ojp.datasource.name"); - return dataSourceName != null ? dataSourceName.toString() : "default"; - } catch (Exception e) { - // If we can't deserialize properties, fall back to default - return "default"; - } + Map properties = ProtoConverter.propertiesFromProto(connectionDetails.getPropertiesList()); + Object dataSourceName = properties.get("ojp.datasource.name"); + return dataSourceName != null ? dataSourceName.toString() : "default"; } } \ No newline at end of file From 0293f8f78e68dadb5dc8b8ee6e9042ba09b81a3c Mon Sep 17 00:00:00 2001 From: "copilot-swe-agent[bot]" <198982749+Copilot@users.noreply.github.com> Date: Tue, 17 Mar 2026 20:50:12 +0000 Subject: [PATCH 13/22] test: parameterize detection tests, add assertion, fix duplicate @AfterEach Co-authored-by: rrobetti <7221783+rrobetti@users.noreply.github.com> --- .../NextPageCacheConfigurationTest.java | 6 +- .../paging/NextPagePrefetchCacheTest.java | 2 +- .../server/paging/PaginationDetectorTest.java | 90 +++++-------------- 3 files changed, 24 insertions(+), 74 deletions(-) diff --git a/ojp-server/src/test/java/org/openjproxy/grpc/server/NextPageCacheConfigurationTest.java b/ojp-server/src/test/java/org/openjproxy/grpc/server/NextPageCacheConfigurationTest.java index 7a72b75d9..1ecc7d837 100644 --- a/ojp-server/src/test/java/org/openjproxy/grpc/server/NextPageCacheConfigurationTest.java +++ b/ojp-server/src/test/java/org/openjproxy/grpc/server/NextPageCacheConfigurationTest.java @@ -30,11 +30,7 @@ void clearProperties() { @AfterEach void cleanupProperties() { - System.clearProperty(ENABLED_KEY); - System.clearProperty(TTL_KEY); - System.clearProperty(MAX_ENTRIES_KEY); - System.clearProperty(WAIT_TIMEOUT_MS_KEY); 
- System.clearProperty(CLEANUP_INTERVAL_KEY); + clearProperties(); } // ---------------------------------------------------------------- diff --git a/ojp-server/src/test/java/org/openjproxy/grpc/server/paging/NextPagePrefetchCacheTest.java b/ojp-server/src/test/java/org/openjproxy/grpc/server/paging/NextPagePrefetchCacheTest.java index e0c5b22ce..3197f8a4e 100644 --- a/ojp-server/src/test/java/org/openjproxy/grpc/server/paging/NextPagePrefetchCacheTest.java +++ b/ojp-server/src/test/java/org/openjproxy/grpc/server/paging/NextPagePrefetchCacheTest.java @@ -127,7 +127,7 @@ void prefetchAsync_doesNothing_whenSqlIsNull() throws Exception { NextPagePrefetchCache cache = enabledCache(); DataSource ds = mockDataSource(1); cache.prefetchAsync(ds, "ds1", null, List.of()); - // Nothing to assert – just must not throw + assertEquals(0, cache.cacheSize(), "Cache should remain empty when SQL is null"); } // ---------------------------------------------------------------- diff --git a/ojp-server/src/test/java/org/openjproxy/grpc/server/paging/PaginationDetectorTest.java b/ojp-server/src/test/java/org/openjproxy/grpc/server/paging/PaginationDetectorTest.java index d992e48b7..49ba81b61 100644 --- a/ojp-server/src/test/java/org/openjproxy/grpc/server/paging/PaginationDetectorTest.java +++ b/ojp-server/src/test/java/org/openjproxy/grpc/server/paging/PaginationDetectorTest.java @@ -42,76 +42,30 @@ void detectLimitOffset_firstPage() { assertTrue(result.get().isFirstPage()); } - @Test - void detectOffsetFetch_sqlServer() { - String sql = "SELECT id, name FROM users ORDER BY id OFFSET 30 ROWS FETCH NEXT 10 ROWS ONLY"; - Optional result = PaginationDetector.detect(sql); - - assertTrue(result.isPresent()); - assertEquals(30, result.get().getCurrentOffset()); - assertEquals(10, result.get().getPageSize()); - } - - @Test - void detectOffsetFetch_fetchFirst() { - String sql = "SELECT * FROM items OFFSET 0 ROWS FETCH FIRST 50 ROWS ONLY"; - Optional result = PaginationDetector.detect(sql); - - 
assertTrue(result.isPresent()); - assertEquals(0, result.get().getCurrentOffset()); - assertEquals(50, result.get().getPageSize()); - } - - @Test - void detectLimitComma_mysqlShorthand() { - // MySQL: LIMIT offset, pageSize (first arg = rows to skip, second = rows to return) - String sql = "SELECT * FROM products LIMIT 20, 10"; - Optional result = PaginationDetector.detect(sql); - - assertTrue(result.isPresent()); - assertEquals(20, result.get().getCurrentOffset()); - assertEquals(10, result.get().getPageSize()); - } - - @Test - void detectFetchOnly_noOffset_firstPage() { - String sql = "SELECT TOP_N.* FROM (SELECT * FROM t) TOP_N FETCH FIRST 10 ROWS ONLY"; - Optional result = PaginationDetector.detect(sql); - - assertTrue(result.isPresent()); - assertEquals(0, result.get().getCurrentOffset()); - assertEquals(10, result.get().getPageSize()); - assertTrue(result.get().isFirstPage()); - } - - @Test - void detectFetchNextOnly_noOffset_firstPage() { - String sql = "SELECT * FROM t FETCH NEXT 5 ROWS ONLY"; - Optional result = PaginationDetector.detect(sql); - - assertTrue(result.isPresent()); - assertEquals(0, result.get().getCurrentOffset()); - assertEquals(5, result.get().getPageSize()); - } - - @Test - void detectLimitOnly_noOffset_firstPage() { - String sql = "SELECT * FROM users WHERE active = 1 LIMIT 15"; - Optional result = PaginationDetector.detect(sql); - - assertTrue(result.isPresent()); - assertEquals(0, result.get().getCurrentOffset()); - assertEquals(15, result.get().getPageSize()); - } - - @Test - void detectLimitOffset_caseInsensitive() { - String sql = "select id from foo limit 5 offset 10"; + @ParameterizedTest(name = "[{index}] {0}") + @CsvSource({ + // SQL Server / Oracle: OFFSET … ROWS FETCH NEXT … ROWS ONLY + "'SELECT id, name FROM users ORDER BY id OFFSET 30 ROWS FETCH NEXT 10 ROWS ONLY', 10, 30, 40", + // FETCH FIRST … ROWS ONLY with explicit OFFSET 0 + "'SELECT * FROM items OFFSET 0 ROWS FETCH FIRST 50 ROWS ONLY', 50, 0, 50", + // MySQL 
shorthand: LIMIT offset, pageSize + "'SELECT * FROM products LIMIT 20, 10', 10, 20, 30", + // FETCH FIRST … ROWS ONLY without any OFFSET (first page) + "'SELECT TOP_N.* FROM (SELECT * FROM t) TOP_N FETCH FIRST 10 ROWS ONLY', 10, 0, 10", + // FETCH NEXT … ROWS ONLY without any OFFSET (first page) + "'SELECT * FROM t FETCH NEXT 5 ROWS ONLY', 5, 0, 5", + // Standalone LIMIT without OFFSET (first page) + "'SELECT * FROM users WHERE active = 1 LIMIT 15', 15, 0, 15", + // Case-insensitive matching + "'select id from foo limit 5 offset 10', 5, 10, 15" + }) + void detect_recognisesPaginationPatterns(String sql, long pageSize, long currentOffset, long nextOffset) { Optional result = PaginationDetector.detect(sql); - assertTrue(result.isPresent()); - assertEquals(10, result.get().getCurrentOffset()); - assertEquals(5, result.get().getPageSize()); + assertTrue(result.isPresent(), "Expected pagination to be detected in: " + sql); + assertEquals(pageSize, result.get().getPageSize(), "Page size mismatch"); + assertEquals(currentOffset, result.get().getCurrentOffset(), "Current offset mismatch"); + assertEquals(nextOffset, result.get().getNextPageOffset(), "Next offset mismatch"); } // ---------------------------------------------------------------- From fab19cd758b256332c12991dd713fc723d77d449 Mon Sep 17 00:00:00 2001 From: "copilot-swe-agent[bot]" <198982749+Copilot@users.noreply.github.com> Date: Tue, 17 Mar 2026 21:52:37 +0000 Subject: [PATCH 14/22] test: address review comments on test classes (round 2) Co-authored-by: rrobetti <7221783+rrobetti@users.noreply.github.com> --- .../NextPageCacheConfigurationTest.java | 36 +++++-------------- .../paging/NextPagePrefetchCacheTest.java | 9 +++-- 2 files changed, 15 insertions(+), 30 deletions(-) diff --git a/ojp-server/src/test/java/org/openjproxy/grpc/server/NextPageCacheConfigurationTest.java b/ojp-server/src/test/java/org/openjproxy/grpc/server/NextPageCacheConfigurationTest.java index 1ecc7d837..240be1e06 100644 --- 
a/ojp-server/src/test/java/org/openjproxy/grpc/server/NextPageCacheConfigurationTest.java +++ b/ojp-server/src/test/java/org/openjproxy/grpc/server/NextPageCacheConfigurationTest.java @@ -3,6 +3,9 @@ import org.junit.jupiter.api.AfterEach; import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; +import org.junit.jupiter.params.ParameterizedTest; +import org.junit.jupiter.params.provider.NullSource; +import org.junit.jupiter.params.provider.ValueSource; import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertFalse; @@ -198,36 +201,15 @@ void perDatasource_prefetchWaitTimeoutMs_isRespected() { System.clearProperty("ojp.server.nextPageCache.datasource.my-db.prefetchWaitTimeoutMs"); } - @Test - void perDatasource_prefetchWaitTimeoutMs_fallsBackToGlobalDefault_whenNotSet() { - System.setProperty(WAIT_TIMEOUT_MS_KEY, "8000"); - - ServerConfiguration config = new ServerConfiguration(); - - // Datasource "unknown" has no per-datasource property set - assertEquals(8000L, config.getNextPageCachePrefetchWaitTimeoutMs("unknown-ds")); - - System.clearProperty(WAIT_TIMEOUT_MS_KEY); - } - - @Test - void perDatasource_prefetchWaitTimeoutMs_fallsBackToGlobalDefault_forNullName() { - System.setProperty(WAIT_TIMEOUT_MS_KEY, "3000"); - - ServerConfiguration config = new ServerConfiguration(); - - assertEquals(3000L, config.getNextPageCachePrefetchWaitTimeoutMs(null)); - - System.clearProperty(WAIT_TIMEOUT_MS_KEY); - } - - @Test - void perDatasource_prefetchWaitTimeoutMs_fallsBackToGlobalDefault_forDefaultName() { - System.setProperty(WAIT_TIMEOUT_MS_KEY, "4000"); + @ParameterizedTest + @NullSource + @ValueSource(strings = {"unknown-ds", "default"}) + void perDatasource_prefetchWaitTimeoutMs_fallsBackToGlobalDefault_whenNoPerDatasourcePropertySet(String datasourceName) { + System.setProperty(WAIT_TIMEOUT_MS_KEY, "5000"); ServerConfiguration config = new ServerConfiguration(); - assertEquals(4000L, 
config.getNextPageCachePrefetchWaitTimeoutMs("default")); + assertEquals(5000L, config.getNextPageCachePrefetchWaitTimeoutMs(datasourceName)); System.clearProperty(WAIT_TIMEOUT_MS_KEY); } diff --git a/ojp-server/src/test/java/org/openjproxy/grpc/server/paging/NextPagePrefetchCacheTest.java b/ojp-server/src/test/java/org/openjproxy/grpc/server/paging/NextPagePrefetchCacheTest.java index 3197f8a4e..dbf0ec666 100644 --- a/ojp-server/src/test/java/org/openjproxy/grpc/server/paging/NextPagePrefetchCacheTest.java +++ b/ojp-server/src/test/java/org/openjproxy/grpc/server/paging/NextPagePrefetchCacheTest.java @@ -199,7 +199,7 @@ void getIfReady_returnsEmpty_whenEntryExpired() throws Exception { cache.prefetchAsync(ds, "ds1", sql, List.of()); // Wait a bit to ensure the prefetch completes and the entry is expired - Thread.sleep(50); + Thread.sleep(50); //NOSONAR Optional result = cache.getIfReady("ds1", sql); assertFalse(result.isPresent(), "Entry should be expired with TTL=0"); @@ -236,7 +236,7 @@ void cachedPage_isNotExpired_whenJustCreated() { @Test void cachedPage_isExpired_withZeroTtl() throws Exception { CachedPage page = new CachedPage(List.of("col"), List.of()); - Thread.sleep(10); // small delay so currentTime > createdAt + Thread.sleep(10); //NOSONAR - small delay so currentTime > createdAt assertTrue(page.isExpired(0), "Page should be expired with TTL=0"); } @@ -417,6 +417,7 @@ void shutdown_doesNotThrow_whenSchedulerNotStarted() { // cleanupIntervalSeconds=0 → no cleanup task registered NextPagePrefetchCache cache = new NextPagePrefetchCache(true, 100, 60, 5000, 0); cache.shutdown(); // must not throw + assertEquals(0, cache.cacheSize(), "Cache should remain empty after shutdown"); } @Test @@ -424,6 +425,7 @@ void shutdown_isIdempotent() { NextPagePrefetchCache cache = new NextPagePrefetchCache(true, 100, 60, 5000, 30); cache.shutdown(); cache.shutdown(); // second call must not throw + assertEquals(0, cache.cacheSize(), "Cache should remain empty after double 
shutdown"); } @Test @@ -439,7 +441,7 @@ void backgroundCleanup_evictsExpiredEntries() throws Exception { // Wait (with polling) for the background cleanup to reduce the cache size to 0 long deadline = System.currentTimeMillis() + 5_000; while (cache.cacheSize() > 0 && System.currentTimeMillis() < deadline) { - Thread.sleep(50); + Thread.sleep(50); //NOSONAR } assertEquals(0, cache.cacheSize(), @@ -457,6 +459,7 @@ void registerDatasourcePrefetchWaitTimeout_ignoresNullId() { NextPagePrefetchCache cache = enabledCache(); // Null datasourceId should be silently ignored (no NullPointerException) cache.registerDatasourcePrefetchWaitTimeout(null, 1000); + assertEquals(0, cache.cacheSize(), "Cache should remain empty when datasourceId is null"); } @Test From 76f8de7b4bdc597d8844687068be1c096479b68f Mon Sep 17 00:00:00 2001 From: "copilot-swe-agent[bot]" <198982749+Copilot@users.noreply.github.com> Date: Tue, 17 Mar 2026 22:57:13 +0000 Subject: [PATCH 15/22] docs: add prefetch cache feature docs and refactor buildNextPageSql tests to single @ParameterizedTest Co-authored-by: rrobetti <7221783+rrobetti@users.noreply.github.com> --- documents/README.md | 8 + .../configuration/ojp-server-configuration.md | 56 +++++++ .../part2-chapter6-server-configuration.md | 104 +++++++++++- .../features/NEXT_PAGE_PREFETCH_CACHE.md | 152 ++++++++++++++++++ .../server/paging/PaginationDetectorTest.java | 105 +++--------- 5 files changed, 338 insertions(+), 87 deletions(-) create mode 100644 documents/features/NEXT_PAGE_PREFETCH_CACHE.md diff --git a/documents/README.md b/documents/README.md index d09d01014..67391cfba 100644 --- a/documents/README.md +++ b/documents/README.md @@ -56,6 +56,13 @@ Located in [connection-pool/](connection-pool/): Located in [analysis/](analysis/): - [Transaction Isolation Handling](analysis/TRANSACTION_ISOLATION_HANDLING.md) - Complete technical documentation on transaction isolation reset behavior +## Features + +Located in [features/](features/): +- [Next-Page 
Prefetch Cache](features/NEXT_PAGE_PREFETCH_CACHE.md) - Transparent background pre-fetching of the next query page to eliminate round-trip latency in paginated result sets +- [SQL Enhancer Engine Quickstart](features/SQL_ENHANCER_ENGINE_QUICKSTART.md) - SQL optimisation using Apache Calcite (experimental) +- [SQL Enhancer Configuration Examples](features/SQL_ENHANCER_CONFIGURATION_EXAMPLES.md) - Configuration examples for the SQL enhancer + ## Database Setup Guides Located in [environment-setup/](environment-setup/): @@ -158,6 +165,7 @@ documents/ ├── contributor-badges/ # Recognition program ├── designs/ # Design documents ├── environment-setup/ # Database setup guides +├── features/ # Feature guides and documentation ├── fixed-issues/ # Issue fix documentation ├── guides/ # Developer guides ├── images/ # Diagrams and images diff --git a/documents/configuration/ojp-server-configuration.md b/documents/configuration/ojp-server-configuration.md index 48cd1dd1d..444e7a944 100644 --- a/documents/configuration/ojp-server-configuration.md +++ b/documents/configuration/ojp-server-configuration.md @@ -160,6 +160,62 @@ For full integration examples including Docker Compose setups, see the **[Teleme | `ojp.server.slowQuerySegregation.slowSlotTimeout` | `OJP_SERVER_SLOWQUERYSEGREGATION_SLOWSLOTTIMEOUT` | long | 120000 | Timeout for acquiring slow operation slots (ms) | 0.2.0-beta | | `ojp.server.slowQuerySegregation.fastSlotTimeout` | `OJP_SERVER_SLOWQUERYSEGREGATION_FASTSLOTTIMEOUT` | long | 60000 | Timeout for acquiring fast operation slots (ms) | 0.2.0-beta | +### Next-Page Prefetch Cache Settings + +The prefetch cache transparently pre-executes the **next page query** in the background while the current page is being sent to the client. When the client requests the next page, the rows are served from memory instead of hitting the database again, significantly reducing perceived latency for paginated result sets. 
+ +The cache detects SQL pagination clauses automatically (`LIMIT/OFFSET`, `OFFSET … FETCH`, `FETCH FIRST … ROWS ONLY`, MySQL `LIMIT m, n`, and standalone `LIMIT n`). No client changes are needed — the feature is entirely transparent. + +| Property | Environment Variable | Type | Default | Description | Since | +|---|---|---|---|---|---| +| `ojp.server.nextPageCache.enabled` | `OJP_SERVER_NEXTPAGECACHE_ENABLED` | boolean | false | Enable/disable the next-page prefetch cache | 0.4.1 | +| `ojp.server.nextPageCache.ttlSeconds` | `OJP_SERVER_NEXTPAGECACHE_TTLSECONDS` | long | 60 | Maximum time (seconds) a cached page is kept before being discarded | 0.4.1 | +| `ojp.server.nextPageCache.maxEntries` | `OJP_SERVER_NEXTPAGECACHE_MAXENTRIES` | int | 100 | Maximum number of cache entries across all datasources | 0.4.1 | +| `ojp.server.nextPageCache.prefetchWaitTimeoutMs` | `OJP_SERVER_NEXTPAGECACHE_PREFETCHWAITTIMEOUTMS` | long | 5000 | Maximum time (ms) to wait for a prefetch to complete before falling back to a live query | 0.4.1 | +| `ojp.server.nextPageCache.cleanupIntervalSeconds` | `OJP_SERVER_NEXTPAGECACHE_CLEANUPINTERVALSECONDS` | long | 60 | Interval (seconds) at which the background cleanup thread evicts expired entries | 0.4.1 | +| `ojp.server.nextPageCache.datasource.<name>.prefetchWaitTimeoutMs` | *(no env-var equivalent)* | long | *(global default)* | Per-datasource override for `prefetchWaitTimeoutMs`; `<name>` matches `ojp.datasource.name` on the client | 0.4.1 | + +#### Next-Page Prefetch Cache Configuration Examples + +**Enable the cache with default settings:** +```bash +java -Duser.timezone=UTC \ + -Dojp.server.nextPageCache.enabled=true \ + -jar ojp-server.jar +``` + +**Enable with custom TTL and wait timeout:** +```bash +java -Duser.timezone=UTC \ + -Dojp.server.nextPageCache.enabled=true \ + -Dojp.server.nextPageCache.ttlSeconds=30 \ + -Dojp.server.nextPageCache.prefetchWaitTimeoutMs=2000 \ + -jar ojp-server.jar +``` + +**Per-datasource wait timeout override:**
+```bash +# Give the "analytics" datasource more time to prefetch large pages +java -Duser.timezone=UTC \ + -Dojp.server.nextPageCache.enabled=true \ + -Dojp.server.nextPageCache.prefetchWaitTimeoutMs=2000 \ + -D"ojp.server.nextPageCache.datasource.analytics.prefetchWaitTimeoutMs=10000" \ + -jar ojp-server.jar +``` + +**Via environment variables:** +```bash +export OJP_SERVER_NEXTPAGECACHE_ENABLED=true +export OJP_SERVER_NEXTPAGECACHE_TTLSECONDS=60 +export OJP_SERVER_NEXTPAGECACHE_PREFETCHWAITTIMEOUTMS=5000 +export OJP_SERVER_NEXTPAGECACHE_CLEANUPINTERVALSECONDS=60 +java -Duser.timezone=UTC -jar ojp-server.jar +``` + +> **ℹ️ Cache isolation**: Entries are keyed by `datasourceId + normalizedSQL`, so two datasources executing the same query never share cached data. + +> **ℹ️ Background cleanup**: A single shared virtual thread (`ojp-prefetch-cache-cleanup`) runs the eviction scan at the configured interval. No additional threads are created regardless of how many datasources are active. + ### SQL Enhancer and Schema Loader Settings > **⚠️ EXPERIMENTAL FEATURE - NOT RECOMMENDED FOR PRODUCTION** diff --git a/documents/ebook/part2-chapter6-server-configuration.md b/documents/ebook/part2-chapter6-server-configuration.md index d4d8be48d..5ea38514e 100644 --- a/documents/ebook/part2-chapter6-server-configuration.md +++ b/documents/ebook/part2-chapter6-server-configuration.md @@ -402,6 +402,106 @@ graph LR E --> C ``` +## 6.8 Next-Page Prefetch Cache + +For applications that page through query results — common in reporting, data exports, and list views — OJP can dramatically reduce latency by pre-executing the **next page query in the background** while the current page is being delivered to the client. When the client then requests the next page, the rows are served directly from memory instead of making a round-trip to the database. + +### How It Works + +OJP automatically detects SQL pagination clauses in the queries your application already writes. 
There are no client-side changes needed — the feature is fully transparent. Supported pagination patterns include: + +| SQL Pattern | Example | +|---|---| +| `LIMIT n OFFSET m` | `SELECT * FROM orders LIMIT 100 OFFSET 200` | +| `OFFSET m ROWS FETCH NEXT n ROWS ONLY` | SQL Server, Oracle | +| `FETCH FIRST n ROWS ONLY` | DB2, Oracle | +| `LIMIT m, n` | MySQL shorthand | +| Standalone `LIMIT n` | First-page query without OFFSET | + +```mermaid +flowchart TD + A([Client requests page N]) --> B{Cache hit?} + B -- Yes --> C[Serve rows from memory] + B -- No --> D[Execute page N SQL against DB] + D --> E[Stream rows to client] + E --> F{SQL is paginated?} + F -- No --> Z([Done]) + F -- Yes --> G[Rewrite SQL for page N+1] + G --> H[Start background virtual thread] + H --> I[(Execute page N+1 query against DB)] + I --> J[Materialise all rows in memory] + J --> K[Store in cache keyed by datasource + SQL] + C --> L[Remove entry from cache] + L --> F +``` + +The cache key combines the datasource identifier and the normalised SQL text, so two datasources running identical queries never see each other's cached data. + +### Configuration + +The prefetch cache is **disabled by default**. 
Enable it with a single property: + +```bash +java -Duser.timezone=UTC \ + -Dojp.server.nextPageCache.enabled=true \ + -jar ojp-server.jar +``` + +**All prefetch cache settings:** + +| Property | Default | Description | +|---|---|---| +| `ojp.server.nextPageCache.enabled` | `false` | Enable/disable the feature | +| `ojp.server.nextPageCache.ttlSeconds` | `60` | Maximum age (seconds) of a cached page before eviction | +| `ojp.server.nextPageCache.maxEntries` | `100` | Maximum number of in-memory cache entries | +| `ojp.server.nextPageCache.prefetchWaitTimeoutMs` | `5000` | Maximum time (ms) to wait for a prefetch to complete; falls back to a live query on timeout | +| `ojp.server.nextPageCache.cleanupIntervalSeconds` | `60` | Interval (seconds) between background eviction sweeps | +| `ojp.server.nextPageCache.datasource.<name>.prefetchWaitTimeoutMs` | *(global)* | Per-datasource override for the wait timeout (`<name>` matches `ojp.datasource.name` on the client) | + +### Per-Datasource Wait Timeout + +Different datasources may have different response-time characteristics. A fast OLTP datasource might need only 1 second, while a heavy analytics datasource might need 10 seconds: + +```bash +java -Duser.timezone=UTC \ + -Dojp.server.nextPageCache.enabled=true \ + -Dojp.server.nextPageCache.prefetchWaitTimeoutMs=2000 \ + -D"ojp.server.nextPageCache.datasource.analytics.prefetchWaitTimeoutMs=10000" \ + -jar ojp-server.jar +``` + +### Background Cleanup + +A single virtual thread named `ojp-prefetch-cache-cleanup` runs the eviction sweep on a fixed interval, removing entries that are either expired (older than `ttlSeconds`) or abandoned (prefetch still in-flight past the TTL). Only one cleanup thread ever exists per JVM, regardless of how many datasources are active.
+ +```mermaid +flowchart TD + BOOT([JVM starts]) --> EX[Create shared CLEANUP_EXECUTOR\none virtual thread for all instances] + INST([Cache instance created]) --> REG[Schedule evictExpiredOrCompleted\nevery cleanupIntervalSeconds] + REG --> TASK[ScheduledFuture stored per instance] + + subgraph TICK [Every cleanupIntervalSeconds] + T1[Iterate all entries] --> T2{Entry done or failed?} + T2 -- Yes + expired --> T3[Remove entry] + T2 -- No, still in-flight --> T4{Older than ttlSeconds?} + T4 -- Yes --> T5[Cancel prefetch future\nRemove entry] + T4 -- No --> T6[Keep entry] + end + EX --> TICK +``` + +### When to Enable It + +The prefetch cache delivers the most benefit when: + +- Your application pages through results **sequentially** (page 1, 2, 3, …) rather than jumping to arbitrary offsets. +- The database round-trip latency is noticeable (> 50 ms) for each page query. +- Pagination page sizes are consistent across requests for the same query. + +It has minimal impact (and adds slight overhead) when queries jump to random offsets, when all rows fit on a single page, or when the database is so fast that the prefetch rarely completes before the client requests the next page. + +**[IMAGE PROMPT: Create a timeline diagram showing two scenarios side-by-side. Left side: "Without Prefetch Cache" showing sequential client requests each waiting for a DB round-trip. Right side: "With Prefetch Cache" showing the next page being pre-fetched while the current page is delivered, with the second request served instantly from memory. Use a horizontal timeline axis labeled "Time" with colored blocks for DB calls and client waits. Style: Performance comparison diagram with green (fast) vs gray (waiting) blocks.]** + ## 6.9 Configuration Validation and Troubleshooting When things don't work as expected, configuration issues are often the culprit. OJP provides clear error messages when configuration values are invalid or inconsistent. 
The server validates configuration at startup and fails fast if critical settings are problematic. @@ -434,8 +534,8 @@ The server logs its active configuration at INFO level during startup. Review th OJP server configuration gives you precise control over server behavior, security, performance, and observability. The hierarchical configuration system with JVM properties and environment variables provides flexibility for different deployment scenarios. Default settings work well for most use cases, but understanding the available options lets you optimize for your specific workload. -Key configuration areas include core server settings for network and threading, security controls through IP whitelisting, logging levels for operational visibility, OpenTelemetry integration for observability, circuit breakers for resilience, and slow query segregation for performance under mixed workloads. Each area offers sensible defaults that you can refine based on monitoring data. +Key configuration areas include core server settings for network and threading, security controls through IP whitelisting, logging levels for operational visibility, OpenTelemetry integration for observability, circuit breakers for resilience, slow query segregation for performance under mixed workloads, and the next-page prefetch cache for transparently accelerating paginated queries. Each area offers sensible defaults that you can refine based on monitoring data. Start simple, monitor closely, and adjust based on observed behavior. Good configuration emerges from understanding your workload and using OJP's flexibility to match it, not from cargo-culting settings from other environments. -**[IMAGE PROMPT: Create a summary mind map with "OJP Server Configuration" at the center. Six main branches radiating outward: "Core Settings" (server icon), "Security" (lock icon), "Logging" (document icon), "Telemetry" (graph icon), "Circuit Breaker" (shield icon), and "Slow Query Segregation" (speedometer icon). 
Each branch has 2-3 sub-branches with key points. Use colors to group related concepts and make it visually hierarchical. Style: Modern mind map with icons and color coding.]** +**[IMAGE PROMPT: Create a summary mind map with "OJP Server Configuration" at the center. Seven main branches radiating outward: "Core Settings" (server icon), "Security" (lock icon), "Logging" (document icon), "Telemetry" (graph icon), "Circuit Breaker" (shield icon), "Slow Query Segregation" (speedometer icon), and "Prefetch Cache" (cache/memory icon). Each branch has 2-3 sub-branches with key points. Use colors to group related concepts and make it visually hierarchical. Style: Modern mind map with icons and color coding.]** diff --git a/documents/features/NEXT_PAGE_PREFETCH_CACHE.md b/documents/features/NEXT_PAGE_PREFETCH_CACHE.md new file mode 100644 index 000000000..92f3f8676 --- /dev/null +++ b/documents/features/NEXT_PAGE_PREFETCH_CACHE.md @@ -0,0 +1,152 @@ +# Next-Page Prefetch Cache + +The **Next-Page Prefetch Cache** transparently pre-executes the next page query in the background while the current page is being streamed to the client. When the client requests the next page, OJP serves it from memory instead of making a round-trip to the database, eliminating the latency of sequential pagination. 
+ +## How It Works + +### Request Flow + +```mermaid +flowchart TD + A([Client sends paginated query]) --> B{Cache enabled?} + B -- No --> LIVE[Execute query live against DB] + B -- Yes --> C{Entry in cache for this page?} + + C -- No: cache MISS --> LIVE + C -- Yes: entry exists --> D{Prefetch still in flight?} + + D -- Completed --> E{Entry expired?\n> ttlSeconds} + D -- In flight --> F[Wait up to prefetchWaitTimeoutMs] + F -- Completed in time --> E + F -- Timed out --> LIVE + + E -- Expired --> LIVE + E -- Fresh --> SERVE[Serve rows from memory\ncache HIT] + + LIVE --> RESP([Send rows to client]) + SERVE --> RESP + + LIVE --> G{Is the query paginated?} + G -- No --> DONE([Done]) + G -- Yes --> H[Rewrite SQL for next page] + H --> I[Start virtual thread ojp-next-page-prefetch] + I --> J[(DB: execute next-page SQL)] + J --> K[Materialise all rows in memory] + K --> L[Store CachedPage in cache map] + L --> DONE + + SERVE --> M[Remove entry from cache\nsingle-use semantics] + M --> G +``` + +### Background Cleanup + +```mermaid +flowchart TD + BOOT([JVM starts]) --> EX[Create single shared CLEANUP_EXECUTOR\nstatic final virtual thread] + INST([New cache instance created]) --> REG[Register periodic task on shared executor] + REG --> TASK[ScheduledFuture stored in AtomicReference per instance] + EX --> TICK + + subgraph TICK [Every cleanupIntervalSeconds] + direction TB + T1[Iterate all cache entries] --> T2{Entry completed or failed?} + T2 -- Yes + expired --> T3[Remove entry] + T2 -- No: still in-flight --> T4{Created > ttlSeconds ago?} + T4 -- Yes --> T5[Cancel future\nRemove entry] + T4 -- No --> T6[Keep entry] + end +``` + +Only **one** cleanup thread exists per JVM (`ojp-prefetch-cache-cleanup`), shared across all cache instances. It runs as a virtual thread. 
+ +## Pagination Pattern Detection + +OJP automatically detects the following SQL pagination patterns: + +| Pattern | Example | +|---|---| +| `LIMIT n OFFSET m` | `SELECT * FROM t LIMIT 100 OFFSET 200` | +| `OFFSET m ROWS FETCH NEXT n ROWS ONLY` | SQL Server, Oracle | +| `OFFSET m ROWS FETCH FIRST n ROWS ONLY` | DB2, Oracle | +| `FETCH FIRST n ROWS ONLY` (no offset) | First page | +| `FETCH NEXT n ROWS ONLY` (no offset) | First page | +| `LIMIT m, n` | MySQL shorthand | +| `LIMIT n` (no offset) | First page | + +## Cache Isolation + +Each cache entry is keyed by **datasource identifier + normalised SQL**. Two datasources running the same query never share cached data, preventing data leakage between tenants or connections. + +## Configuration Reference + +| Property | Default | Description | +|---|---|---| +| `ojp.server.nextPageCache.enabled` | `false` | Enable the feature (opt-in) | +| `ojp.server.nextPageCache.ttlSeconds` | `60` | Maximum age of a cached page before eviction | +| `ojp.server.nextPageCache.maxEntries` | `100` | Maximum cache entries across all datasources | +| `ojp.server.nextPageCache.prefetchWaitTimeoutMs` | `5000` | Maximum wait (ms) for an in-flight prefetch before falling back to a live query | +| `ojp.server.nextPageCache.cleanupIntervalSeconds` | `60` | Interval (seconds) between background eviction scans | +| `ojp.server.nextPageCache.datasource.<name>.prefetchWaitTimeoutMs` | *(global)* | Per-datasource override for `prefetchWaitTimeoutMs` | + +### Per-Datasource Timeout Override + +The `prefetchWaitTimeoutMs` can be overridden for each datasource independently. The datasource name matches the `ojp.datasource.name` client connection property: + +```properties +ojp.server.nextPageCache.datasource.analytics.prefetchWaitTimeoutMs=10000 +ojp.server.nextPageCache.datasource.oltp.prefetchWaitTimeoutMs=1000 +``` + +The global `prefetchWaitTimeoutMs` is used as the fallback when no per-datasource property is set.
+ +## Quick Start + +**Enable with defaults:** +```bash +java -Duser.timezone=UTC \ + -Dojp.server.nextPageCache.enabled=true \ + -jar ojp-server.jar +``` + +**Tuned for a reporting workload:** +```bash +java -Duser.timezone=UTC \ + -Dojp.server.nextPageCache.enabled=true \ + -Dojp.server.nextPageCache.ttlSeconds=120 \ + -Dojp.server.nextPageCache.maxEntries=200 \ + -Dojp.server.nextPageCache.prefetchWaitTimeoutMs=8000 \ + -jar ojp-server.jar +``` + +**Via environment variables:** +```bash +export OJP_SERVER_NEXTPAGECACHE_ENABLED=true +export OJP_SERVER_NEXTPAGECACHE_TTLSECONDS=60 +export OJP_SERVER_NEXTPAGECACHE_PREFETCHWAITTIMEOUTMS=5000 +export OJP_SERVER_NEXTPAGECACHE_CLEANUPINTERVALSECONDS=60 +java -Duser.timezone=UTC -jar ojp-server.jar +``` + +## Interaction with gRPC Row Streaming + +OJP already streams query results to the client in blocks of 100 rows per gRPC message (the intrinsic transport-layer pagination). The prefetch cache operates at a higher level and is completely independent: + +| Layer | What it does | +|---|---| +| **gRPC row streaming** | Slices any single query result into 100-row gRPC messages for efficient transport | +| **Prefetch cache (this feature)** | Pre-executes the *next SQL page query* in the background; the returned rows are then delivered via the same 100-row gRPC streaming | + +The two mechanisms complement each other — the cache eliminates database round-trips, while the gRPC streaming ensures large results are transferred efficiently. + +## When to Use It + +**Best fit:** +- Applications that page through results sequentially (page 1 → 2 → 3 …). +- Database round-trip latency is noticeable (> 50 ms per page). +- Page sizes are consistent across subsequent requests for the same query. + +**Minimal benefit:** +- Queries that jump to arbitrary offsets (random access pagination). +- All rows fit on a single page. +- The database responds faster than the client can consume pages. 
diff --git a/ojp-server/src/test/java/org/openjproxy/grpc/server/paging/PaginationDetectorTest.java b/ojp-server/src/test/java/org/openjproxy/grpc/server/paging/PaginationDetectorTest.java index 49ba81b61..cf5ed0c1b 100644 --- a/ojp-server/src/test/java/org/openjproxy/grpc/server/paging/PaginationDetectorTest.java +++ b/ojp-server/src/test/java/org/openjproxy/grpc/server/paging/PaginationDetectorTest.java @@ -103,94 +103,29 @@ void detect_limitOnly_notMatchedWhenOffsetPresent() { } // ---------------------------------------------------------------- - // buildNextPageSql() – LIMIT / OFFSET + // buildNextPageSql() – parameterised round-trip // ---------------------------------------------------------------- - @Test - void buildNextPage_limitOffset_incrementsOffset() { - String sql = "SELECT id FROM users ORDER BY id LIMIT 10 OFFSET 0"; - PageInfo pageInfo = PaginationDetector.detect(sql).orElseThrow(); - - String nextPage = PaginationDetector.buildNextPageSql(sql, pageInfo); - - assertEquals("SELECT id FROM users ORDER BY id LIMIT 10 OFFSET 10", nextPage); - } - - @Test - void buildNextPage_limitOffset_secondPage_givesThirdPageSql() { - String sql = "SELECT id FROM users ORDER BY id LIMIT 10 OFFSET 10"; - PageInfo pageInfo = PaginationDetector.detect(sql).orElseThrow(); - - String nextPage = PaginationDetector.buildNextPageSql(sql, pageInfo); - - assertEquals("SELECT id FROM users ORDER BY id LIMIT 10 OFFSET 20", nextPage); - } - - // ---------------------------------------------------------------- - // buildNextPageSql() – OFFSET FETCH (SQL Server / Oracle) - // ---------------------------------------------------------------- - - @Test - void buildNextPage_offsetFetch_incrementsOffset() { - String sql = "SELECT id FROM t ORDER BY id OFFSET 0 ROWS FETCH NEXT 20 ROWS ONLY"; - PageInfo pageInfo = PaginationDetector.detect(sql).orElseThrow(); - - String nextPage = PaginationDetector.buildNextPageSql(sql, pageInfo); - - assertEquals("SELECT id FROM t ORDER BY id OFFSET 
20 ROWS FETCH NEXT 20 ROWS ONLY", nextPage); - } - - @Test - void buildNextPage_offsetFetch_secondPage() { - String sql = "SELECT id FROM t ORDER BY id OFFSET 20 ROWS FETCH NEXT 20 ROWS ONLY"; - PageInfo pageInfo = PaginationDetector.detect(sql).orElseThrow(); - - String nextPage = PaginationDetector.buildNextPageSql(sql, pageInfo); - - assertEquals("SELECT id FROM t ORDER BY id OFFSET 40 ROWS FETCH NEXT 20 ROWS ONLY", nextPage); - } - - // ---------------------------------------------------------------- - // buildNextPageSql() – MySQL LIMIT m, n - // ---------------------------------------------------------------- - - @Test - void buildNextPage_limitComma_incrementsOffset() { - // MySQL LIMIT 0, 10: offset=0, pageSize=10 → next: offset=10 - String sql = "SELECT * FROM products LIMIT 0, 10"; - PageInfo pageInfo = PaginationDetector.detect(sql).orElseThrow(); - - String nextPage = PaginationDetector.buildNextPageSql(sql, pageInfo); - - assertEquals("SELECT * FROM products LIMIT 10, 10", nextPage); - } - - // ---------------------------------------------------------------- - // buildNextPageSql() – FETCH ONLY (first-page, no OFFSET) - // ---------------------------------------------------------------- - - @Test - void buildNextPage_fetchOnly_insertsOffset() { - String sql = "SELECT * FROM t FETCH FIRST 10 ROWS ONLY"; - PageInfo pageInfo = PaginationDetector.detect(sql).orElseThrow(); - - String nextPage = PaginationDetector.buildNextPageSql(sql, pageInfo); - - assertEquals("SELECT * FROM t OFFSET 10 ROWS FETCH FIRST 10 ROWS ONLY", nextPage); - } - - // ---------------------------------------------------------------- - // buildNextPageSql() – standalone LIMIT (first-page, no OFFSET) - // ---------------------------------------------------------------- - - @Test - void buildNextPage_limitOnly_appendsOffset() { - String sql = "SELECT * FROM users LIMIT 5"; + @ParameterizedTest(name = "[{index}] {0}") + @CsvSource({ + // LIMIT n OFFSET m – first page (offset 0 → 10) + 
"'SELECT id FROM users ORDER BY id LIMIT 10 OFFSET 0', 'SELECT id FROM users ORDER BY id LIMIT 10 OFFSET 10'", + // LIMIT n OFFSET m – second page (offset 10 → 20) + "'SELECT id FROM users ORDER BY id LIMIT 10 OFFSET 10', 'SELECT id FROM users ORDER BY id LIMIT 10 OFFSET 20'", + // OFFSET FETCH (SQL Server / Oracle) – first page (offset 0 → 20) + "'SELECT id FROM t ORDER BY id OFFSET 0 ROWS FETCH NEXT 20 ROWS ONLY', 'SELECT id FROM t ORDER BY id OFFSET 20 ROWS FETCH NEXT 20 ROWS ONLY'", + // OFFSET FETCH (SQL Server / Oracle) – second page (offset 20 → 40) + "'SELECT id FROM t ORDER BY id OFFSET 20 ROWS FETCH NEXT 20 ROWS ONLY', 'SELECT id FROM t ORDER BY id OFFSET 40 ROWS FETCH NEXT 20 ROWS ONLY'", + // MySQL LIMIT offset, pageSize – first page (offset 0 → 10) + "'SELECT * FROM products LIMIT 0, 10', 'SELECT * FROM products LIMIT 10, 10'", + // FETCH FIRST … ROWS ONLY without OFFSET – inserts OFFSET clause + "'SELECT * FROM t FETCH FIRST 10 ROWS ONLY', 'SELECT * FROM t OFFSET 10 ROWS FETCH FIRST 10 ROWS ONLY'", + // Standalone LIMIT without OFFSET – appends OFFSET clause + "'SELECT * FROM users LIMIT 5', 'SELECT * FROM users LIMIT 5 OFFSET 5'" + }) + void buildNextPageSql_producesCorrectNextPageQuery(String sql, String expected) { PageInfo pageInfo = PaginationDetector.detect(sql).orElseThrow(); - - String nextPage = PaginationDetector.buildNextPageSql(sql, pageInfo); - - assertEquals("SELECT * FROM users LIMIT 5 OFFSET 5", nextPage); + assertEquals(expected, PaginationDetector.buildNextPageSql(sql, pageInfo)); } // ---------------------------------------------------------------- From 27a02cd767c3b770ae624eb920061f6a44f1446a Mon Sep 17 00:00:00 2001 From: "copilot-swe-agent[bot]" <198982749+Copilot@users.noreply.github.com> Date: Wed, 18 Mar 2026 08:37:36 +0000 Subject: [PATCH 16/22] feat: add per-datasource cache enabled flag with tests and docs update Co-authored-by: rrobetti <7221783+rrobetti@users.noreply.github.com> --- 
.../configuration/ojp-server-configuration.md | 18 ++++- .../part2-chapter6-server-configuration.md | 25 ++++++- .../features/NEXT_PAGE_PREFETCH_CACHE.md | 22 +++++- .../grpc/server/ServerConfiguration.java | 24 ++++++ .../grpc/server/StatementServiceImpl.java | 24 +++--- .../server/paging/NextPagePrefetchCache.java | 49 +++++++++++- .../NextPageCacheConfigurationTest.java | 59 +++++++++++++++ .../paging/NextPagePrefetchCacheTest.java | 75 +++++++++++++++++++ 8 files changed, 278 insertions(+), 18 deletions(-) diff --git a/documents/configuration/ojp-server-configuration.md b/documents/configuration/ojp-server-configuration.md index 444e7a944..d32e3349f 100644 --- a/documents/configuration/ojp-server-configuration.md +++ b/documents/configuration/ojp-server-configuration.md @@ -168,11 +168,12 @@ The cache detects SQL pagination clauses automatically (`LIMIT/OFFSET`, `OFFSET | Property | Environment Variable | Type | Default | Description | Since | |---|---|---|---|---|---| -| `ojp.server.nextPageCache.enabled` | `OJP_SERVER_NEXTPAGECACHE_ENABLED` | boolean | false | Enable/disable the next-page prefetch cache | 0.4.1 | +| `ojp.server.nextPageCache.enabled` | `OJP_SERVER_NEXTPAGECACHE_ENABLED` | boolean | false | Enable/disable the next-page prefetch cache globally | 0.4.1 | | `ojp.server.nextPageCache.ttlSeconds` | `OJP_SERVER_NEXTPAGECACHE_TTLSECONDS` | long | 60 | Maximum time (seconds) a cached page is kept before being discarded | 0.4.1 | | `ojp.server.nextPageCache.maxEntries` | `OJP_SERVER_NEXTPAGECACHE_MAXENTRIES` | int | 100 | Maximum number of cache entries across all datasources | 0.4.1 | | `ojp.server.nextPageCache.prefetchWaitTimeoutMs` | `OJP_SERVER_NEXTPAGECACHE_PREFETCHWAITTIMEOUTMS` | long | 5000 | Maximum time (ms) to wait for a prefetch to complete before falling back to a live query | 0.4.1 | | `ojp.server.nextPageCache.cleanupIntervalSeconds` | `OJP_SERVER_NEXTPAGECACHE_CLEANUPINTERVALSECONDS` | long | 60 | Interval (seconds) at which the 
background cleanup thread evicts expired entries | 0.4.1 | +| `ojp.server.nextPageCache.datasource..enabled` | *(no env-var equivalent)* | boolean | *(global default)* | Per-datasource override for `enabled`; `` matches `ojp.datasource.name` on the client | 0.4.1 | | `ojp.server.nextPageCache.datasource..prefetchWaitTimeoutMs` | *(no env-var equivalent)* | long | *(global default)* | Per-datasource override for `prefetchWaitTimeoutMs`; `` matches `ojp.datasource.name` on the client | 0.4.1 | #### Next-Page Prefetch Cache Configuration Examples @@ -193,6 +194,21 @@ java -Duser.timezone=UTC \ -jar ojp-server.jar ``` +**Per-datasource cache control (mixed enable/disable):** +```bash +# Enable globally but disable for a specific datasource (e.g., one with non-sequential access) +java -Duser.timezone=UTC \ + -Dojp.server.nextPageCache.enabled=true \ + -D"ojp.server.nextPageCache.datasource.random-access.enabled=false" \ + -jar ojp-server.jar + +# Or disable globally but opt a single datasource in +java -Duser.timezone=UTC \ + -Dojp.server.nextPageCache.enabled=false \ + -D"ojp.server.nextPageCache.datasource.reporting.enabled=true" \ + -jar ojp-server.jar +``` + **Per-datasource wait timeout override:** ```bash # Give the "analytics" datasource more time to prefetch large pages diff --git a/documents/ebook/part2-chapter6-server-configuration.md b/documents/ebook/part2-chapter6-server-configuration.md index 5ea38514e..94fb13cdd 100644 --- a/documents/ebook/part2-chapter6-server-configuration.md +++ b/documents/ebook/part2-chapter6-server-configuration.md @@ -451,16 +451,35 @@ java -Duser.timezone=UTC \ | Property | Default | Description | |---|---|---| -| `ojp.server.nextPageCache.enabled` | `false` | Enable/disable the feature | +| `ojp.server.nextPageCache.enabled` | `false` | Enable/disable the feature globally | | `ojp.server.nextPageCache.ttlSeconds` | `60` | Maximum age (seconds) of a cached page before eviction | | `ojp.server.nextPageCache.maxEntries` | `100` | 
Maximum number of in-memory cache entries | | `ojp.server.nextPageCache.prefetchWaitTimeoutMs` | `5000` | Maximum time (ms) to wait for a prefetch to complete; falls back to a live query on timeout | | `ojp.server.nextPageCache.cleanupIntervalSeconds` | `60` | Interval (seconds) between background eviction sweeps | +| `ojp.server.nextPageCache.datasource..enabled` | *(global)* | Per-datasource override for `enabled` (`` matches `ojp.datasource.name` on the client) | | `ojp.server.nextPageCache.datasource..prefetchWaitTimeoutMs` | *(global)* | Per-datasource override for the wait timeout (`` matches `ojp.datasource.name` on the client) | -### Per-Datasource Wait Timeout +### Per-Datasource Cache Control -Different datasources may have different response-time characteristics. A fast OLTP datasource might need only 1 second, while a heavy analytics datasource might need 10 seconds: +Both `enabled` and `prefetchWaitTimeoutMs` can be configured independently for each datasource. The datasource name matches the `ojp.datasource.name` connection property used by the client application. 
+ +**Mixed enable/disable across datasources:** + +```bash +# Enable globally, but disable for a datasource with random-access patterns +java -Duser.timezone=UTC \ + -Dojp.server.nextPageCache.enabled=true \ + -D"ojp.server.nextPageCache.datasource.random-access.enabled=false" \ + -jar ojp-server.jar + +# Or disable globally, opting in only a single reporting datasource +java -Duser.timezone=UTC \ + -Dojp.server.nextPageCache.enabled=false \ + -D"ojp.server.nextPageCache.datasource.reporting.enabled=true" \ + -jar ojp-server.jar +``` + +**Per-datasource wait timeout (different DB response times):** ```bash java -Duser.timezone=UTC \ diff --git a/documents/features/NEXT_PAGE_PREFETCH_CACHE.md b/documents/features/NEXT_PAGE_PREFETCH_CACHE.md index 92f3f8676..06812729a 100644 --- a/documents/features/NEXT_PAGE_PREFETCH_CACHE.md +++ b/documents/features/NEXT_PAGE_PREFETCH_CACHE.md @@ -82,23 +82,37 @@ Each cache entry is keyed by **datasource identifier + normalised SQL**. Two dat | Property | Default | Description | |---|---|---| -| `ojp.server.nextPageCache.enabled` | `false` | Enable the feature (opt-in) | +| `ojp.server.nextPageCache.enabled` | `false` | Enable the feature globally (opt-in) | | `ojp.server.nextPageCache.ttlSeconds` | `60` | Maximum age of a cached page before eviction | | `ojp.server.nextPageCache.maxEntries` | `100` | Maximum cache entries across all datasources | | `ojp.server.nextPageCache.prefetchWaitTimeoutMs` | `5000` | Maximum wait (ms) for an in-flight prefetch before falling back to a live query | | `ojp.server.nextPageCache.cleanupIntervalSeconds` | `60` | Interval (seconds) between background eviction scans | +| `ojp.server.nextPageCache.datasource..enabled` | *(global)* | Per-datasource override for `enabled`; takes precedence over the global flag | | `ojp.server.nextPageCache.datasource..prefetchWaitTimeoutMs` | *(global)* | Per-datasource override for `prefetchWaitTimeoutMs` | -### Per-Datasource Timeout Override +### Per-Datasource 
Configuration -The `prefetchWaitTimeoutMs` can be overridden for each datasource independently. The datasource name matches the `ojp.datasource.name` client connection property: +Both `enabled` and `prefetchWaitTimeoutMs` can be overridden independently for each datasource. +The datasource name matches the `ojp.datasource.name` client connection property: ```properties +# Globally enable the cache, but disable it for the OLAP datasource +ojp.server.nextPageCache.enabled=true +ojp.server.nextPageCache.datasource.olap.enabled=false + +# Per-datasource timeout tuning ojp.server.nextPageCache.datasource.analytics.prefetchWaitTimeoutMs=10000 ojp.server.nextPageCache.datasource.oltp.prefetchWaitTimeoutMs=1000 ``` -The global `prefetchWaitTimeoutMs` is used as the fallback when no per-datasource property is set. +> **Note:** You can also use per-datasource `enabled` to opt individual datasources **in** when the +> global flag is `false`: +> ```properties +> ojp.server.nextPageCache.enabled=false +> ojp.server.nextPageCache.datasource.reporting.enabled=true +> ``` + +The global values are used as fallback when no per-datasource property is set for a given datasource. ## Quick Start diff --git a/ojp-server/src/main/java/org/openjproxy/grpc/server/ServerConfiguration.java b/ojp-server/src/main/java/org/openjproxy/grpc/server/ServerConfiguration.java index 01fdaf18d..2d1a77ca5 100644 --- a/ojp-server/src/main/java/org/openjproxy/grpc/server/ServerConfiguration.java +++ b/ojp-server/src/main/java/org/openjproxy/grpc/server/ServerConfiguration.java @@ -723,6 +723,30 @@ public long getNextPageCachePrefetchWaitTimeoutMs(String datasourceName) { return nextPageCachePrefetchWaitTimeoutMs; } + /** + * Returns whether the next-page prefetch cache is enabled for a specific datasource. + * + *

    <p>If a per-datasource override is configured via + * {@code ojp.server.nextPageCache.datasource.<name>.enabled}, + * that value is returned. Otherwise the global + * {@code ojp.server.nextPageCache.enabled} is used as the fallback.</p>

    + * + * @param datasourceName the {@code ojp.datasource.name} value from the client connection + * properties; {@code null} or {@code "default"} always returns + * the global default + * @return {@code true} if the prefetch cache is enabled for the given datasource + */ + public boolean isNextPageCacheEnabled(String datasourceName) { + if (datasourceName != null && !datasourceName.isEmpty() && !"default".equals(datasourceName)) { + String perDatasourceKey = "ojp.server.nextPageCache.datasource." + datasourceName + ".enabled"; + String raw = getStringProperty(perDatasourceKey, null); + if (raw != null) { + return Boolean.parseBoolean(raw); + } + } + return nextPageCacheEnabled; + } + public long getNextPageCacheCleanupIntervalSeconds() { return nextPageCacheCleanupIntervalSeconds; } diff --git a/ojp-server/src/main/java/org/openjproxy/grpc/server/StatementServiceImpl.java b/ojp-server/src/main/java/org/openjproxy/grpc/server/StatementServiceImpl.java index dd74cc3bd..6e074941e 100644 --- a/ojp-server/src/main/java/org/openjproxy/grpc/server/StatementServiceImpl.java +++ b/ojp-server/src/main/java/org/openjproxy/grpc/server/StatementServiceImpl.java @@ -215,8 +215,9 @@ private void initializeXAPoolProvider() { @Override public void connect(ConnectionDetails connectionDetails, StreamObserver responseObserver) { - // Register per-datasource prefetch wait timeout so that getIfReady() uses the - // correct timeout for this datasource rather than the global default. + // Register per-datasource prefetch wait timeout and enabled flag so that + // getIfReady() and prefetchAsync() use the correct settings for this datasource + // rather than the global defaults. 
if (nextPagePrefetchCache.isEnabled()) { String connHash = org.openjproxy.grpc.server.utils.ConnectionHashGenerator .hashConnectionDetails(connectionDetails); @@ -224,7 +225,10 @@ public void connect(ConnectionDetails connectionDetails, StreamObserver cached = nextPagePrefetchCache.getIfReady(connHash, sql); - if (cached.isPresent()) { - CachedPage page = cached.get(); - // Start prefetch for the page after this one before returning the cached result - startNextPagePrefetch(sql, params, connHash); - streamCachedPage(page, dto.getSession(), responseObserver); - return; + if (nextPagePrefetchCache.isEnabledForDatasource(connHash)) { + Optional cached = nextPagePrefetchCache.getIfReady(connHash, sql); + if (cached.isPresent()) { + CachedPage page = cached.get(); + // Start prefetch for the page after this one before returning the cached result + startNextPagePrefetch(sql, params, connHash); + streamCachedPage(page, dto.getSession(), responseObserver); + return; + } } } // ---- End next-page prefetch cache check ---- diff --git a/ojp-server/src/main/java/org/openjproxy/grpc/server/paging/NextPagePrefetchCache.java b/ojp-server/src/main/java/org/openjproxy/grpc/server/paging/NextPagePrefetchCache.java index 849796b4f..018851c97 100644 --- a/ojp-server/src/main/java/org/openjproxy/grpc/server/paging/NextPagePrefetchCache.java +++ b/ojp-server/src/main/java/org/openjproxy/grpc/server/paging/NextPagePrefetchCache.java @@ -104,6 +104,15 @@ public class NextPagePrefetchCache implements AutoCloseable { private final ConcurrentHashMap datasourcePrefetchWaitTimeoutMs = new ConcurrentHashMap<>(); + /** + * Per-datasource cache-enabled overrides. + * Key: datasource connection hash (see {@code ConnectionHashGenerator}). + * Value: {@code true} to enable, {@code false} to disable the cache for this datasource. + * When an entry is present it takes precedence over the global {@link #enabled} flag. 
+ */ + private final ConcurrentHashMap datasourceCacheEnabled + = new ConcurrentHashMap<>(); + /** * Maps {@code "\u0001"} to the asynchronous result of the prefetch. * Including the datasource ID in the key ensures that two different datasources executing @@ -184,6 +193,44 @@ public void registerDatasourcePrefetchWaitTimeout(String datasourceId, long time } } + /** + * Registers whether the prefetch cache is enabled for a specific datasource, + * overriding the global {@link #enabled} flag for that datasource. + * + *

    <p>Calling this method multiple times for the same {@code datasourceId} simply + * replaces the previously registered value. The registration is thread-safe.</p>

    + * + * @param datasourceId the unique identifier of the datasource (connection hash) + * @param cacheEnabled {@code true} to enable caching, {@code false} to disable it + * for this specific datasource + */ + public void registerDatasourceCacheEnabled(String datasourceId, boolean cacheEnabled) { + if (datasourceId != null) { + datasourceCacheEnabled.put(datasourceId, cacheEnabled); + log.debug("Registered per-datasource cacheEnabled={} for datasourceId={}", + cacheEnabled, datasourceId); + } + } + + /** + * Returns whether the cache is enabled for the given datasource. + * If a per-datasource override has been registered via + * {@link #registerDatasourceCacheEnabled}, that value takes precedence + * over the global {@link #enabled} flag. + * + * @param datasourceId the connection hash for the datasource; may be {@code null} + * @return {@code true} if caching should be used for this datasource + */ + public boolean isEnabledForDatasource(String datasourceId) { + if (datasourceId != null) { + Boolean override = datasourceCacheEnabled.get(datasourceId); + if (override != null) { + return override; + } + } + return enabled; + } + /** * Cancels this instance's periodic cleanup task on the shared executor. 
* The shared executor itself is left running so that other cache instances @@ -294,7 +341,7 @@ public Optional getIfReady(String datasourceId, String sql) { */ public void prefetchAsync(DataSource dataSource, String datasourceId, String nextPageSql, List params) { - if (!enabled || dataSource == null || nextPageSql == null) { + if (!isEnabledForDatasource(datasourceId) || dataSource == null || nextPageSql == null) { return; } diff --git a/ojp-server/src/test/java/org/openjproxy/grpc/server/NextPageCacheConfigurationTest.java b/ojp-server/src/test/java/org/openjproxy/grpc/server/NextPageCacheConfigurationTest.java index 240be1e06..e73e54a05 100644 --- a/ojp-server/src/test/java/org/openjproxy/grpc/server/NextPageCacheConfigurationTest.java +++ b/ojp-server/src/test/java/org/openjproxy/grpc/server/NextPageCacheConfigurationTest.java @@ -242,4 +242,63 @@ void perDatasource_multipleOverrides_areIndependent() { System.clearProperty("ojp.server.nextPageCache.datasource.ds-b.prefetchWaitTimeoutMs"); System.clearProperty(WAIT_TIMEOUT_MS_KEY); } + + // ---------------------------------------------------------------- + // Per-datasource cache enabled flag + // ---------------------------------------------------------------- + + @Test + void perDatasource_cacheEnabled_canBeDisabledIndividually() { + System.setProperty(ENABLED_KEY, "true"); + System.setProperty("ojp.server.nextPageCache.datasource.disabled-ds.enabled", "false"); + + ServerConfiguration config = new ServerConfiguration(); + + assertTrue(config.isNextPageCacheEnabled()); + assertFalse(config.isNextPageCacheEnabled("disabled-ds")); + assertTrue(config.isNextPageCacheEnabled("other-ds")); // falls back to global=true + + System.clearProperty("ojp.server.nextPageCache.datasource.disabled-ds.enabled"); + } + + @Test + void perDatasource_cacheEnabled_canBeEnabledWhenGloballyDisabled() { + System.setProperty(ENABLED_KEY, "false"); + System.setProperty("ojp.server.nextPageCache.datasource.special-ds.enabled", "true"); 
+ + ServerConfiguration config = new ServerConfiguration(); + + assertFalse(config.isNextPageCacheEnabled()); + assertTrue(config.isNextPageCacheEnabled("special-ds")); + assertFalse(config.isNextPageCacheEnabled("other-ds")); // falls back to global=false + + System.clearProperty("ojp.server.nextPageCache.datasource.special-ds.enabled"); + } + + @ParameterizedTest + @NullSource + @ValueSource(strings = {"unknown-ds", "default"}) + void perDatasource_cacheEnabled_fallsBackToGlobalDefault_whenNoPerDatasourcePropertySet(String datasourceName) { + System.setProperty(ENABLED_KEY, "true"); + + ServerConfiguration config = new ServerConfiguration(); + + assertTrue(config.isNextPageCacheEnabled(datasourceName)); + } + + @Test + void perDatasource_multipleCacheEnabledOverrides_areIndependent() { + System.setProperty(ENABLED_KEY, "true"); + System.setProperty("ojp.server.nextPageCache.datasource.ds-on.enabled", "true"); + System.setProperty("ojp.server.nextPageCache.datasource.ds-off.enabled", "false"); + + ServerConfiguration config = new ServerConfiguration(); + + assertTrue(config.isNextPageCacheEnabled("ds-on")); + assertFalse(config.isNextPageCacheEnabled("ds-off")); + assertTrue(config.isNextPageCacheEnabled("ds-fallback")); // falls back to global=true + + System.clearProperty("ojp.server.nextPageCache.datasource.ds-on.enabled"); + System.clearProperty("ojp.server.nextPageCache.datasource.ds-off.enabled"); + } } diff --git a/ojp-server/src/test/java/org/openjproxy/grpc/server/paging/NextPagePrefetchCacheTest.java b/ojp-server/src/test/java/org/openjproxy/grpc/server/paging/NextPagePrefetchCacheTest.java index dbf0ec666..72656246d 100644 --- a/ojp-server/src/test/java/org/openjproxy/grpc/server/paging/NextPagePrefetchCacheTest.java +++ b/ojp-server/src/test/java/org/openjproxy/grpc/server/paging/NextPagePrefetchCacheTest.java @@ -494,4 +494,79 @@ void registerDatasourcePrefetchWaitTimeout_replacesExistingValue() throws Except Optional result = cache.getIfReady("ds-x", 
sql); assertTrue(result.isPresent()); } + + // ---------------------------------------------------------------- + // Per-datasource cache enabled flag + // ---------------------------------------------------------------- + + @Test + void registerDatasourceCacheEnabled_ignoresNullId() { + NextPagePrefetchCache cache = enabledCache(); + // Null datasourceId should be silently ignored (no NullPointerException) + cache.registerDatasourceCacheEnabled(null, false); + assertEquals(0, cache.cacheSize(), "Cache should remain empty when datasourceId is null"); + } + + @Test + void isEnabledForDatasource_returnsTrueByDefault_whenGloballyEnabled() { + NextPagePrefetchCache cache = enabledCache(); + assertTrue(cache.isEnabledForDatasource("any-ds"), + "Should return true when no per-datasource override is registered"); + } + + @Test + void isEnabledForDatasource_returnsFalseByDefault_whenGloballyDisabled() { + NextPagePrefetchCache cache = disabledCache(); + assertFalse(cache.isEnabledForDatasource("any-ds"), + "Should return false when cache is globally disabled"); + } + + @Test + void isEnabledForDatasource_respectsPerDatasourceOverride_disabled() { + NextPagePrefetchCache cache = enabledCache(); // globally enabled + cache.registerDatasourceCacheEnabled("disabled-ds", false); + + assertFalse(cache.isEnabledForDatasource("disabled-ds"), + "Per-datasource false should override the global true"); + assertTrue(cache.isEnabledForDatasource("other-ds"), + "Other datasources not overridden should still use the global setting"); + } + + @Test + void isEnabledForDatasource_respectsPerDatasourceOverride_enabled() { + NextPagePrefetchCache cache = disabledCache(); // globally disabled + cache.registerDatasourceCacheEnabled("special-ds", true); + + assertTrue(cache.isEnabledForDatasource("special-ds"), + "Per-datasource true should override the global false"); + assertFalse(cache.isEnabledForDatasource("other-ds"), + "Other datasources not overridden should still use the global 
setting"); + } + + @Test + void prefetchAsync_isSkipped_whenPerDatasourceDisabled() throws Exception { + NextPagePrefetchCache cache = enabledCache(); // globally enabled + cache.registerDatasourceCacheEnabled("disabled-ds", false); + + DataSource ds = mockDataSource(5); + String sql = "SELECT id FROM t LIMIT 10 OFFSET 0"; + cache.prefetchAsync(ds, "disabled-ds", sql, List.of()); + + assertEquals(0, cache.cacheSize(), "Prefetch should be skipped for disabled datasource"); + } + + @Test + void prefetchAsync_isAllowed_whenPerDatasourceEnabled_andGloballyDisabled() throws Exception { + NextPagePrefetchCache cache = disabledCache(); // globally disabled + cache.registerDatasourceCacheEnabled("special-ds", true); + + DataSource ds = mockDataSource(3); + String sql = "SELECT id FROM t LIMIT 10 OFFSET 0"; + cache.prefetchAsync(ds, "special-ds", sql, List.of()); + + Thread.sleep(50); //NOSONAR + Optional result = cache.getIfReady("special-ds", sql); + assertTrue(result.isPresent(), "Per-datasource enabled override should allow prefetch"); + assertEquals(3, result.get().getRows().size()); + } } From 37a84bff5c8fea93dc6bf8727028d67ee33a1f8c Mon Sep 17 00:00:00 2001 From: "copilot-swe-agent[bot]" <198982749+Copilot@users.noreply.github.com> Date: Wed, 18 Mar 2026 09:03:18 +0000 Subject: [PATCH 17/22] refactor: per-datasource cache enabled is client-side property (ojp.nextPageCache.enabled) Co-authored-by: rrobetti <7221783+rrobetti@users.noreply.github.com> --- .../configuration/ojp-server-configuration.md | 27 ++++----- .../part2-chapter6-server-configuration.md | 21 ++----- .../features/NEXT_PAGE_PREFETCH_CACHE.md | 27 ++++----- .../openjproxy/constants/CommonConstants.java | 1 + .../grpc/server/ServerConfiguration.java | 24 -------- .../grpc/server/StatementServiceImpl.java | 16 ++++- .../NextPageCacheConfigurationTest.java | 58 ------------------- .../paging/NextPagePrefetchCacheTest.java | 47 +++++++++++++++ 8 files changed, 89 insertions(+), 132 deletions(-) diff --git 
a/documents/configuration/ojp-server-configuration.md b/documents/configuration/ojp-server-configuration.md index d32e3349f..fed2b3347 100644 --- a/documents/configuration/ojp-server-configuration.md +++ b/documents/configuration/ojp-server-configuration.md @@ -173,9 +173,17 @@ The cache detects SQL pagination clauses automatically (`LIMIT/OFFSET`, `OFFSET | `ojp.server.nextPageCache.maxEntries` | `OJP_SERVER_NEXTPAGECACHE_MAXENTRIES` | int | 100 | Maximum number of cache entries across all datasources | 0.4.1 | | `ojp.server.nextPageCache.prefetchWaitTimeoutMs` | `OJP_SERVER_NEXTPAGECACHE_PREFETCHWAITTIMEOUTMS` | long | 5000 | Maximum time (ms) to wait for a prefetch to complete before falling back to a live query | 0.4.1 | | `ojp.server.nextPageCache.cleanupIntervalSeconds` | `OJP_SERVER_NEXTPAGECACHE_CLEANUPINTERVALSECONDS` | long | 60 | Interval (seconds) at which the background cleanup thread evicts expired entries | 0.4.1 | -| `ojp.server.nextPageCache.datasource..enabled` | *(no env-var equivalent)* | boolean | *(global default)* | Per-datasource override for `enabled`; `` matches `ojp.datasource.name` on the client | 0.4.1 | | `ojp.server.nextPageCache.datasource..prefetchWaitTimeoutMs` | *(no env-var equivalent)* | long | *(global default)* | Per-datasource override for `prefetchWaitTimeoutMs`; `` matches `ojp.datasource.name` on the client | 0.4.1 | +> **Per-datasource `enabled` is a client-side setting.** +> Each datasource in the client application can independently opt in or out of the prefetch cache +> by setting `ojp.nextPageCache.enabled=false` in its `ojp.properties`: +> ```properties +> # ojp.properties — client application +> # Disable the prefetch cache for the "random-access" datasource +> random-access.ojp.nextPageCache.enabled=false +> ``` + #### Next-Page Prefetch Cache Configuration Examples **Enable the cache with default settings:** @@ -194,22 +202,7 @@ java -Duser.timezone=UTC \ -jar ojp-server.jar ``` -**Per-datasource cache control 
(mixed enable/disable):** -```bash -# Enable globally but disable for a specific datasource (e.g., one with non-sequential access) -java -Duser.timezone=UTC \ - -Dojp.server.nextPageCache.enabled=true \ - -D"ojp.server.nextPageCache.datasource.random-access.enabled=false" \ - -jar ojp-server.jar - -# Or disable globally but opt a single datasource in -java -Duser.timezone=UTC \ - -Dojp.server.nextPageCache.enabled=false \ - -D"ojp.server.nextPageCache.datasource.reporting.enabled=true" \ - -jar ojp-server.jar -``` - -**Per-datasource wait timeout override:** +**Per-datasource wait timeout override (server-side):** ```bash # Give the "analytics" datasource more time to prefetch large pages java -Duser.timezone=UTC \ diff --git a/documents/ebook/part2-chapter6-server-configuration.md b/documents/ebook/part2-chapter6-server-configuration.md index 94fb13cdd..868807d2c 100644 --- a/documents/ebook/part2-chapter6-server-configuration.md +++ b/documents/ebook/part2-chapter6-server-configuration.md @@ -456,27 +456,18 @@ java -Duser.timezone=UTC \ | `ojp.server.nextPageCache.maxEntries` | `100` | Maximum number of in-memory cache entries | | `ojp.server.nextPageCache.prefetchWaitTimeoutMs` | `5000` | Maximum time (ms) to wait for a prefetch to complete; falls back to a live query on timeout | | `ojp.server.nextPageCache.cleanupIntervalSeconds` | `60` | Interval (seconds) between background eviction sweeps | -| `ojp.server.nextPageCache.datasource..enabled` | *(global)* | Per-datasource override for `enabled` (`` matches `ojp.datasource.name` on the client) | | `ojp.server.nextPageCache.datasource..prefetchWaitTimeoutMs` | *(global)* | Per-datasource override for the wait timeout (`` matches `ojp.datasource.name` on the client) | ### Per-Datasource Cache Control -Both `enabled` and `prefetchWaitTimeoutMs` can be configured independently for each datasource. The datasource name matches the `ojp.datasource.name` connection property used by the client application. 
+The per-datasource `enabled` flag is a **client-side** connection property. Each datasource in the client application can independently opt in or out of the prefetch cache by setting `ojp.nextPageCache.enabled` in its `ojp.properties` file — no server restart needed: -**Mixed enable/disable across datasources:** +```properties +# ojp.properties — client application +# Default datasource: cache enabled (uses server global default) -```bash -# Enable globally, but disable for a datasource with random-access patterns -java -Duser.timezone=UTC \ - -Dojp.server.nextPageCache.enabled=true \ - -D"ojp.server.nextPageCache.datasource.random-access.enabled=false" \ - -jar ojp-server.jar - -# Or disable globally, opting in only a single reporting datasource -java -Duser.timezone=UTC \ - -Dojp.server.nextPageCache.enabled=false \ - -D"ojp.server.nextPageCache.datasource.reporting.enabled=true" \ - -jar ojp-server.jar +# "random-access" datasource: disable the prefetch cache +random-access.ojp.nextPageCache.enabled=false ``` **Per-datasource wait timeout (different DB response times):** diff --git a/documents/features/NEXT_PAGE_PREFETCH_CACHE.md b/documents/features/NEXT_PAGE_PREFETCH_CACHE.md index 06812729a..bcf03dab0 100644 --- a/documents/features/NEXT_PAGE_PREFETCH_CACHE.md +++ b/documents/features/NEXT_PAGE_PREFETCH_CACHE.md @@ -87,32 +87,29 @@ Each cache entry is keyed by **datasource identifier + normalised SQL**. 
Two dat | `ojp.server.nextPageCache.maxEntries` | `100` | Maximum cache entries across all datasources | | `ojp.server.nextPageCache.prefetchWaitTimeoutMs` | `5000` | Maximum wait (ms) for an in-flight prefetch before falling back to a live query | | `ojp.server.nextPageCache.cleanupIntervalSeconds` | `60` | Interval (seconds) between background eviction scans | -| `ojp.server.nextPageCache.datasource..enabled` | *(global)* | Per-datasource override for `enabled`; takes precedence over the global flag | | `ojp.server.nextPageCache.datasource..prefetchWaitTimeoutMs` | *(global)* | Per-datasource override for `prefetchWaitTimeoutMs` | ### Per-Datasource Configuration -Both `enabled` and `prefetchWaitTimeoutMs` can be overridden independently for each datasource. -The datasource name matches the `ojp.datasource.name` client connection property: +The per-datasource `enabled` flag is a **client-side** connection property. Each datasource in the +client application can independently opt in or out of the prefetch cache by setting +`ojp.nextPageCache.enabled` in its `ojp.properties` file: ```properties -# Globally enable the cache, but disable it for the OLAP datasource -ojp.server.nextPageCache.enabled=true -ojp.server.nextPageCache.datasource.olap.enabled=false +# ojp.properties — client application -# Per-datasource timeout tuning +# Default datasource: cache enabled (uses server global default) + +# "olap" datasource: disable the prefetch cache +olap.ojp.nextPageCache.enabled=false + +# Per-datasource timeout tuning (server-side) ojp.server.nextPageCache.datasource.analytics.prefetchWaitTimeoutMs=10000 ojp.server.nextPageCache.datasource.oltp.prefetchWaitTimeoutMs=1000 ``` -> **Note:** You can also use per-datasource `enabled` to opt individual datasources **in** when the -> global flag is `false`: -> ```properties -> ojp.server.nextPageCache.enabled=false -> ojp.server.nextPageCache.datasource.reporting.enabled=true -> ``` - -The global values are used as fallback 
when no per-datasource property is set for a given datasource. +The `prefetchWaitTimeoutMs` can be overridden on the server side per datasource name (which matches +the `ojp.datasource.name` connection property the client sends on connect). ## Quick Start diff --git a/ojp-grpc-commons/src/main/java/org/openjproxy/constants/CommonConstants.java b/ojp-grpc-commons/src/main/java/org/openjproxy/constants/CommonConstants.java index 1cf795f21..ef59a4e54 100644 --- a/ojp-grpc-commons/src/main/java/org/openjproxy/constants/CommonConstants.java +++ b/ojp-grpc-commons/src/main/java/org/openjproxy/constants/CommonConstants.java @@ -26,6 +26,7 @@ public class CommonConstants { // Configuration property keys public static final String DATASOURCE_NAME_PROPERTY = "ojp.datasource.name"; + public static final String NEXT_PAGE_CACHE_ENABLED_PROPERTY = "ojp.nextPageCache.enabled"; public static final String MAXIMUM_POOL_SIZE_PROPERTY = "ojp.connection.pool.maximumPoolSize"; public static final String MINIMUM_IDLE_PROPERTY = "ojp.connection.pool.minimumIdle"; public static final String IDLE_TIMEOUT_PROPERTY = "ojp.connection.pool.idleTimeout"; diff --git a/ojp-server/src/main/java/org/openjproxy/grpc/server/ServerConfiguration.java b/ojp-server/src/main/java/org/openjproxy/grpc/server/ServerConfiguration.java index 2d1a77ca5..01fdaf18d 100644 --- a/ojp-server/src/main/java/org/openjproxy/grpc/server/ServerConfiguration.java +++ b/ojp-server/src/main/java/org/openjproxy/grpc/server/ServerConfiguration.java @@ -723,30 +723,6 @@ public long getNextPageCachePrefetchWaitTimeoutMs(String datasourceName) { return nextPageCachePrefetchWaitTimeoutMs; } - /** - * Returns whether the next-page prefetch cache is enabled for a specific datasource. - * - *

    If a per-datasource override is configured via - * {@code ojp.server.nextPageCache.datasource..enabled}, - * that value is returned. Otherwise the global - * {@code ojp.server.nextPageCache.enabled} is used as the fallback.

    - * - * @param datasourceName the {@code ojp.datasource.name} value from the client connection - * properties; {@code null} or {@code "default"} always returns - * the global default - * @return {@code true} if the prefetch cache is enabled for the given datasource - */ - public boolean isNextPageCacheEnabled(String datasourceName) { - if (datasourceName != null && !datasourceName.isEmpty() && !"default".equals(datasourceName)) { - String perDatasourceKey = "ojp.server.nextPageCache.datasource." + datasourceName + ".enabled"; - String raw = getStringProperty(perDatasourceKey, null); - if (raw != null) { - return Boolean.parseBoolean(raw); - } - } - return nextPageCacheEnabled; - } - public long getNextPageCacheCleanupIntervalSeconds() { return nextPageCacheCleanupIntervalSeconds; } diff --git a/ojp-server/src/main/java/org/openjproxy/grpc/server/StatementServiceImpl.java b/ojp-server/src/main/java/org/openjproxy/grpc/server/StatementServiceImpl.java index 6e074941e..0b5d6295d 100644 --- a/ojp-server/src/main/java/org/openjproxy/grpc/server/StatementServiceImpl.java +++ b/ojp-server/src/main/java/org/openjproxy/grpc/server/StatementServiceImpl.java @@ -225,10 +225,20 @@ public void connect(ConnectionDetails connectionDetails, StreamObserver clientProps = + ProtoConverter.propertiesFromProto(connectionDetails.getPropertiesList()); + Object clientEnabledRaw = clientProps.get(CommonConstants.NEXT_PAGE_CACHE_ENABLED_PROPERTY); + if (clientEnabledRaw != null) { + boolean clientCacheEnabled = Boolean.parseBoolean(clientEnabledRaw.toString()); + nextPagePrefetchCache.registerDatasourceCacheEnabled(connHash, clientCacheEnabled); + } } org.openjproxy.grpc.server.action.connection.ConnectAction.getInstance() .execute(actionContext, connectionDetails, responseObserver); diff --git a/ojp-server/src/test/java/org/openjproxy/grpc/server/NextPageCacheConfigurationTest.java b/ojp-server/src/test/java/org/openjproxy/grpc/server/NextPageCacheConfigurationTest.java index 
e73e54a05..31d56afa1 100644 --- a/ojp-server/src/test/java/org/openjproxy/grpc/server/NextPageCacheConfigurationTest.java +++ b/ojp-server/src/test/java/org/openjproxy/grpc/server/NextPageCacheConfigurationTest.java @@ -243,62 +243,4 @@ void perDatasource_multipleOverrides_areIndependent() { System.clearProperty(WAIT_TIMEOUT_MS_KEY); } - // ---------------------------------------------------------------- - // Per-datasource cache enabled flag - // ---------------------------------------------------------------- - - @Test - void perDatasource_cacheEnabled_canBeDisabledIndividually() { - System.setProperty(ENABLED_KEY, "true"); - System.setProperty("ojp.server.nextPageCache.datasource.disabled-ds.enabled", "false"); - - ServerConfiguration config = new ServerConfiguration(); - - assertTrue(config.isNextPageCacheEnabled()); - assertFalse(config.isNextPageCacheEnabled("disabled-ds")); - assertTrue(config.isNextPageCacheEnabled("other-ds")); // falls back to global=true - - System.clearProperty("ojp.server.nextPageCache.datasource.disabled-ds.enabled"); - } - - @Test - void perDatasource_cacheEnabled_canBeEnabledWhenGloballyDisabled() { - System.setProperty(ENABLED_KEY, "false"); - System.setProperty("ojp.server.nextPageCache.datasource.special-ds.enabled", "true"); - - ServerConfiguration config = new ServerConfiguration(); - - assertFalse(config.isNextPageCacheEnabled()); - assertTrue(config.isNextPageCacheEnabled("special-ds")); - assertFalse(config.isNextPageCacheEnabled("other-ds")); // falls back to global=false - - System.clearProperty("ojp.server.nextPageCache.datasource.special-ds.enabled"); - } - - @ParameterizedTest - @NullSource - @ValueSource(strings = {"unknown-ds", "default"}) - void perDatasource_cacheEnabled_fallsBackToGlobalDefault_whenNoPerDatasourcePropertySet(String datasourceName) { - System.setProperty(ENABLED_KEY, "true"); - - ServerConfiguration config = new ServerConfiguration(); - - assertTrue(config.isNextPageCacheEnabled(datasourceName)); - 
} - - @Test - void perDatasource_multipleCacheEnabledOverrides_areIndependent() { - System.setProperty(ENABLED_KEY, "true"); - System.setProperty("ojp.server.nextPageCache.datasource.ds-on.enabled", "true"); - System.setProperty("ojp.server.nextPageCache.datasource.ds-off.enabled", "false"); - - ServerConfiguration config = new ServerConfiguration(); - - assertTrue(config.isNextPageCacheEnabled("ds-on")); - assertFalse(config.isNextPageCacheEnabled("ds-off")); - assertTrue(config.isNextPageCacheEnabled("ds-fallback")); // falls back to global=true - - System.clearProperty("ojp.server.nextPageCache.datasource.ds-on.enabled"); - System.clearProperty("ojp.server.nextPageCache.datasource.ds-off.enabled"); - } } diff --git a/ojp-server/src/test/java/org/openjproxy/grpc/server/paging/NextPagePrefetchCacheTest.java b/ojp-server/src/test/java/org/openjproxy/grpc/server/paging/NextPagePrefetchCacheTest.java index 72656246d..f98f6852b 100644 --- a/ojp-server/src/test/java/org/openjproxy/grpc/server/paging/NextPagePrefetchCacheTest.java +++ b/ojp-server/src/test/java/org/openjproxy/grpc/server/paging/NextPagePrefetchCacheTest.java @@ -569,4 +569,51 @@ void prefetchAsync_isAllowed_whenPerDatasourceEnabled_andGloballyDisabled() thro assertTrue(result.isPresent(), "Per-datasource enabled override should allow prefetch"); assertEquals(3, result.get().getRows().size()); } + + // ---------------------------------------------------------------- + // Client-side ojp.nextPageCache.enabled property (simulates connect() logic) + // ---------------------------------------------------------------- + + @Test + void clientProperty_disable_registersOverrideViaCache() { + // Simulate what StatementServiceImpl.connect() does when the client sends + // ojp.nextPageCache.enabled=false in its connection properties. 
+ NextPagePrefetchCache cache = enabledCache(); // globally enabled + + // Client sends the property; connect() reads it and registers with the cache + String clientEnabledValue = "false"; + cache.registerDatasourceCacheEnabled("conn-abc", Boolean.parseBoolean(clientEnabledValue)); + + assertFalse(cache.isEnabledForDatasource("conn-abc"), + "Client property ojp.nextPageCache.enabled=false should disable cache for that connection"); + assertTrue(cache.isEnabledForDatasource("conn-xyz"), + "Other connections without an override should still use the global setting"); + } + + @Test + void clientProperty_enable_overridesGlobalDisable() { + // Client sends ojp.nextPageCache.enabled=true while the server global is false. + NextPagePrefetchCache cache = disabledCache(); // globally disabled + + cache.registerDatasourceCacheEnabled("conn-reporting", Boolean.parseBoolean("true")); + + assertTrue(cache.isEnabledForDatasource("conn-reporting"), + "Client property ojp.nextPageCache.enabled=true should enable cache even when globally disabled"); + assertFalse(cache.isEnabledForDatasource("conn-other"), + "Connections without an override should still reflect the global disabled setting"); + } + + @Test + void clientProperty_absent_fallsBackToGlobal() { + // When the client does NOT send ojp.nextPageCache.enabled, connect() does not call + // registerDatasourceCacheEnabled, so isEnabledForDatasource falls back to the global flag. 
+ NextPagePrefetchCache cacheEnabled = enabledCache(); + NextPagePrefetchCache cacheDisabled = disabledCache(); + + // No registration performed (client property was absent) + assertTrue(cacheEnabled.isEnabledForDatasource("conn-no-prop"), + "Absent client property on globally-enabled cache should default to true"); + assertFalse(cacheDisabled.isEnabledForDatasource("conn-no-prop"), + "Absent client property on globally-disabled cache should default to false"); + } } From 065b101a3b81dcdf0a83bbccd2474a1b6801a601 Mon Sep 17 00:00:00 2001 From: "copilot-swe-agent[bot]" <198982749+Copilot@users.noreply.github.com> Date: Wed, 18 Mar 2026 21:29:13 +0000 Subject: [PATCH 18/22] feat: add prefetch cache integration tests for MySQL, MariaDB, CockroachDB, DB2, Oracle, SQL Server, and H2 Co-authored-by: rrobetti <7221783+rrobetti@users.noreply.github.com> --- .github/workflows/main.yml | 111 ++++++- ...roachDBPaginationCacheIntegrationTest.java | 260 +++++++++++++++++ .../Db2PaginationCacheIntegrationTest.java | 262 +++++++++++++++++ .../H2PaginationCacheIntegrationTest.java | 225 +++++++++++++++ ...MariaDBPaginationCacheIntegrationTest.java | 257 +++++++++++++++++ .../OraclePaginationCacheIntegrationTest.java | 259 +++++++++++++++++ ...LServerPaginationCacheIntegrationTest.java | 270 ++++++++++++++++++ ...ServerPrefetchCacheConnectionProvider.java | 74 +++++ ...h_cache_connections_with_record_counts.csv | 5 + ...h_cache_connections_with_record_counts.csv | 5 + ...h_cache_connections_with_record_counts.csv | 5 + ...h_cache_connections_with_record_counts.csv | 5 + ...h_cache_connections_with_record_counts.csv | 5 + ...h_cache_connections_with_record_counts.csv | 5 + 14 files changed, 1735 insertions(+), 13 deletions(-) create mode 100644 ojp-jdbc-driver/src/test/java/openjproxy/jdbc/CockroachDBPaginationCacheIntegrationTest.java create mode 100644 ojp-jdbc-driver/src/test/java/openjproxy/jdbc/Db2PaginationCacheIntegrationTest.java create mode 100644 
ojp-jdbc-driver/src/test/java/openjproxy/jdbc/H2PaginationCacheIntegrationTest.java create mode 100644 ojp-jdbc-driver/src/test/java/openjproxy/jdbc/MySQLMariaDBPaginationCacheIntegrationTest.java create mode 100644 ojp-jdbc-driver/src/test/java/openjproxy/jdbc/OraclePaginationCacheIntegrationTest.java create mode 100644 ojp-jdbc-driver/src/test/java/openjproxy/jdbc/SQLServerPaginationCacheIntegrationTest.java create mode 100644 ojp-jdbc-driver/src/test/java/openjproxy/jdbc/testutil/SQLServerPrefetchCacheConnectionProvider.java create mode 100644 ojp-jdbc-driver/src/test/resources/cockroachdb_prefetch_cache_connections_with_record_counts.csv create mode 100644 ojp-jdbc-driver/src/test/resources/db2_prefetch_cache_connections_with_record_counts.csv create mode 100644 ojp-jdbc-driver/src/test/resources/h2_prefetch_cache_connections_with_record_counts.csv create mode 100644 ojp-jdbc-driver/src/test/resources/mariadb_prefetch_cache_connections_with_record_counts.csv create mode 100644 ojp-jdbc-driver/src/test/resources/mysql_prefetch_cache_connections_with_record_counts.csv create mode 100644 ojp-jdbc-driver/src/test/resources/oracle_prefetch_cache_connections_with_record_counts.csv diff --git a/.github/workflows/main.yml b/.github/workflows/main.yml index 5726ab3f0..574333ae2 100644 --- a/.github/workflows/main.yml +++ b/.github/workflows/main.yml @@ -101,6 +101,14 @@ jobs: -e JAVA_TOOL_OPTIONS="-Dojp.server.slowQuerySegregation.enabled=true" \ rrobetti/ojp:0.4.1-SNAPSHOT + # Pagination-cache integration tests run against this server (port 10594) + - name: Start OJP Server container (prefetch cache on port 10594) + run: | + docker run -d --name ojp-server-prefetch-cache \ + --network host \ + -e JAVA_TOOL_OPTIONS="-Dojp.server.port=10594 -Dojp.prometheus.port=9164 -Dojp.server.slowQuerySegregation.enabled=true -Dojp.server.nextPageCache.enabled=true -Dojp.server.nextPageCache.ttlSeconds=60 -Dojp.server.nextPageCache.prefetchWaitTimeoutMs=5000" \ + 
rrobetti/ojp:0.4.1-SNAPSHOT + - name: Wait for ojp-server to start run: sleep 10 @@ -117,11 +125,15 @@ jobs: # This is the key test step - only H2 tests are enabled # All other database tests are disabled by default - name: Test (ojp-jdbc-driver) with H2 only - run: mvn test -pl ojp-jdbc-driver -Dgpg.skip=true -DenableH2Tests=true + run: mvn test -pl ojp-jdbc-driver -Dgpg.skip=true -DenableH2Tests=true -DenableH2PrefetchCacheTests=true - name: Show ojp-server.log if: always() # ensures it runs even if previous steps fail - run: docker logs ojp-server 2>&1 || echo "ojp-server container not found" + run: | + docker logs ojp-server 2>&1 || echo "ojp-server container not found" + echo "" + echo "=== OJP Server (with prefetch cache) log ===" + docker logs ojp-server-prefetch-cache 2>&1 || echo "ojp-server-prefetch-cache container not found" # =========================================================================== # JOB 2: PostgreSQL Integration Tests @@ -356,6 +368,14 @@ jobs: -e JAVA_TOOL_OPTIONS="-Dojp.server.slowQuerySegregation.enabled=true" \ rrobetti/ojp:0.4.1-SNAPSHOT + # Pagination-cache integration tests run against this server (port 10594) + - name: Start OJP Server container (prefetch cache on port 10594) + run: | + docker run -d --name ojp-server-prefetch-cache \ + --network host \ + -e JAVA_TOOL_OPTIONS="-Dojp.server.port=10594 -Dojp.prometheus.port=9164 -Dojp.server.slowQuerySegregation.enabled=true -Dojp.server.nextPageCache.enabled=true -Dojp.server.nextPageCache.ttlSeconds=60 -Dojp.server.nextPageCache.prefetchWaitTimeoutMs=5000" \ + rrobetti/ojp:0.4.1-SNAPSHOT + - name: Wait for ojp-server to start run: sleep 10 @@ -371,11 +391,15 @@ jobs: # Run MySQL-specific tests with -DenableMySQLTests flag - name: Test (ojp-jdbc-driver) with MySQL enabled - run: mvn test -pl ojp-jdbc-driver -Dgpg.skip=true -DenableMySQLTests=true + run: mvn test -pl ojp-jdbc-driver -Dgpg.skip=true -DenableMySQLTests=true -DenableMySQLPrefetchCacheTests=true - name: Show 
ojp-server.log if: always() - run: docker logs ojp-server 2>&1 || echo "ojp-server container not found" + run: | + docker logs ojp-server 2>&1 || echo "ojp-server container not found" + echo "" + echo "=== OJP Server (with prefetch cache) log ===" + docker logs ojp-server-prefetch-cache 2>&1 || echo "ojp-server-prefetch-cache container not found" # =========================================================================== # =========================================================================== @@ -452,6 +476,14 @@ jobs: -e JAVA_TOOL_OPTIONS="-Dojp.server.slowQuerySegregation.enabled=true" \ rrobetti/ojp:0.4.1-SNAPSHOT + # Pagination-cache integration tests run against this server (port 10594) + - name: Start OJP Server container (prefetch cache on port 10594) + run: | + docker run -d --name ojp-server-prefetch-cache \ + --network host \ + -e JAVA_TOOL_OPTIONS="-Dojp.server.port=10594 -Dojp.prometheus.port=9164 -Dojp.server.slowQuerySegregation.enabled=true -Dojp.server.nextPageCache.enabled=true -Dojp.server.nextPageCache.ttlSeconds=60 -Dojp.server.nextPageCache.prefetchWaitTimeoutMs=5000" \ + rrobetti/ojp:0.4.1-SNAPSHOT + - name: Wait for ojp-server to start run: sleep 10 @@ -467,11 +499,15 @@ jobs: # Run MariaDB-specific tests with -DenableMariaDBTests flag - name: Test (ojp-jdbc-driver) with MariaDB enabled - run: mvn test -pl ojp-jdbc-driver -Dgpg.skip=true -DenableMariaDBTests=true + run: mvn test -pl ojp-jdbc-driver -Dgpg.skip=true -DenableMariaDBTests=true -DenableMariaDBPrefetchCacheTests=true - name: Show ojp-server.log if: always() - run: docker logs ojp-server 2>&1 || echo "ojp-server container not found" + run: | + docker logs ojp-server 2>&1 || echo "ojp-server container not found" + echo "" + echo "=== OJP Server (with prefetch cache) log ===" + docker logs ojp-server-prefetch-cache 2>&1 || echo "ojp-server-prefetch-cache container not found" # =========================================================================== # JOB 5: CockroachDB 
Integration Tests @@ -539,6 +575,14 @@ jobs: -e JAVA_TOOL_OPTIONS="-Dojp.server.slowQuerySegregation.enabled=true" \ rrobetti/ojp:0.4.1-SNAPSHOT + # Pagination-cache integration tests run against this server (port 10594) + - name: Start OJP Server container (prefetch cache on port 10594) + run: | + docker run -d --name ojp-server-prefetch-cache \ + --network host \ + -e JAVA_TOOL_OPTIONS="-Dojp.server.port=10594 -Dojp.prometheus.port=9164 -Dojp.server.slowQuerySegregation.enabled=true -Dojp.server.nextPageCache.enabled=true -Dojp.server.nextPageCache.ttlSeconds=60 -Dojp.server.nextPageCache.prefetchWaitTimeoutMs=5000" \ + rrobetti/ojp:0.4.1-SNAPSHOT + - name: Wait for ojp-server to start run: sleep 10 @@ -554,11 +598,15 @@ jobs: # Run CockroachDB-specific tests with -DenableCockroachDBTests flag - name: Test (ojp-jdbc-driver) with CockroachDB enabled - run: mvn test -pl ojp-jdbc-driver -Dgpg.skip=true -DenableCockroachDBTests=true + run: mvn test -pl ojp-jdbc-driver -Dgpg.skip=true -DenableCockroachDBTests=true -DenableCockroachDBPrefetchCacheTests=true - name: Show ojp-server.log if: always() - run: docker logs ojp-server 2>&1 || echo "ojp-server container not found" + run: | + docker logs ojp-server 2>&1 || echo "ojp-server container not found" + echo "" + echo "=== OJP Server (with prefetch cache) log ===" + docker logs ojp-server-prefetch-cache 2>&1 || echo "ojp-server-prefetch-cache container not found" # =========================================================================== # JOB 6: DB2 Integration Tests @@ -707,6 +755,14 @@ jobs: -e JAVA_TOOL_OPTIONS="-Dojp.server.slowQuerySegregation.enabled=true" \ rrobetti/ojp:0.4.1-SNAPSHOT + # Pagination-cache integration tests run against this server (port 10594) + - name: Start OJP Server container (prefetch cache on port 10594) + run: | + docker run -d --name ojp-server-prefetch-cache \ + --network host \ + -e JAVA_TOOL_OPTIONS="-Dojp.server.port=10594 -Dojp.prometheus.port=9164 
-Dojp.server.slowQuerySegregation.enabled=true -Dojp.server.nextPageCache.enabled=true -Dojp.server.nextPageCache.ttlSeconds=60 -Dojp.server.nextPageCache.prefetchWaitTimeoutMs=5000" \ + rrobetti/ojp:0.4.1-SNAPSHOT + - name: Wait for ojp-server to start run: sleep 10 @@ -722,11 +778,15 @@ jobs: # Run DB2-specific tests with -DenableDb2Tests flag - name: Test (ojp-jdbc-driver) with DB2 enabled - run: mvn test -pl ojp-jdbc-driver -Dgpg.skip=true -DenableDb2Tests=true + run: mvn test -pl ojp-jdbc-driver -Dgpg.skip=true -DenableDb2Tests=true -DenableDb2PrefetchCacheTests=true - name: Show ojp-server.log if: always() - run: docker logs ojp-server 2>&1 || echo "ojp-server container not found" + run: | + docker logs ojp-server 2>&1 || echo "ojp-server container not found" + echo "" + echo "=== OJP Server (with prefetch cache) log ===" + docker logs ojp-server-prefetch-cache 2>&1 || echo "ojp-server-prefetch-cache container not found" # =========================================================================== # JOB 7: Multinode Integration Tests @@ -1703,6 +1763,14 @@ jobs: -e JAVA_TOOL_OPTIONS="-Dojp.server.slowQuerySegregation.enabled=true" \ rrobetti/ojp:0.4.1-SNAPSHOT + # Pagination-cache integration tests run against this server (port 10594) + - name: Start OJP Server container (prefetch cache on port 10594) + run: | + docker run -d --name ojp-server-prefetch-cache \ + --network host \ + -e JAVA_TOOL_OPTIONS="-Dojp.server.port=10594 -Dojp.prometheus.port=9164 -Dojp.server.slowQuerySegregation.enabled=true -Dojp.server.nextPageCache.enabled=true -Dojp.server.nextPageCache.ttlSeconds=60 -Dojp.server.nextPageCache.prefetchWaitTimeoutMs=5000" \ + rrobetti/ojp:0.4.1-SNAPSHOT + - name: Wait for ojp-server to start run: sleep 10 @@ -1718,11 +1786,15 @@ jobs: # Run Oracle-specific tests with -DenableOracleTests flag - name: Test (ojp-jdbc-driver) with Oracle enabled - run: mvn test -pl ojp-jdbc-driver -Dgpg.skip=true -DenableOracleTests=true + run: mvn test -pl 
ojp-jdbc-driver -Dgpg.skip=true -DenableOracleTests=true -DenableOraclePrefetchCacheTests=true - name: Show ojp-server.log if: always() # ensures it runs even if previous steps fail - run: docker logs ojp-server 2>&1 || echo "ojp-server container not found" + run: | + docker logs ojp-server 2>&1 || echo "ojp-server container not found" + echo "" + echo "=== OJP Server (with prefetch cache) log ===" + docker logs ojp-server-prefetch-cache 2>&1 || echo "ojp-server-prefetch-cache container not found" # =========================================================================== # JOB 10: SQL Server Integration Tests @@ -1806,6 +1878,14 @@ jobs: -e JAVA_TOOL_OPTIONS="-Dojp.server.slowQuerySegregation.enabled=true" \ rrobetti/ojp:0.4.1-SNAPSHOT + # Pagination-cache integration tests run against this server (port 10594) + - name: Start OJP Server container (prefetch cache on port 10594) + run: | + docker run -d --name ojp-server-prefetch-cache \ + --network host \ + -e JAVA_TOOL_OPTIONS="-Dojp.server.port=10594 -Dojp.prometheus.port=9164 -Dojp.server.slowQuerySegregation.enabled=true -Dojp.server.nextPageCache.enabled=true -Dojp.server.nextPageCache.ttlSeconds=60 -Dojp.server.nextPageCache.prefetchWaitTimeoutMs=5000" \ + rrobetti/ojp:0.4.1-SNAPSHOT + - name: Wait for ojp-server to start run: sleep 10 @@ -1826,11 +1906,16 @@ jobs: run: | mvn test -pl ojp-jdbc-driver -Dgpg.skip=true \ -DenableSqlServerTests=true \ + -DenableSqlServerPrefetchCacheTests=true \ -Dtest="SQLServer*" - name: Show ojp-server.log if: always() # ensures it runs even if previous steps fail - run: docker logs ojp-server 2>&1 || echo "ojp-server container not found" + run: | + docker logs ojp-server 2>&1 || echo "ojp-server container not found" + echo "" + echo "=== OJP Server (with prefetch cache) log ===" + docker logs ojp-server-prefetch-cache 2>&1 || echo "ojp-server-prefetch-cache container not found" # =========================================================================== # JOB 11: Notify 
Integration Repository diff --git a/ojp-jdbc-driver/src/test/java/openjproxy/jdbc/CockroachDBPaginationCacheIntegrationTest.java b/ojp-jdbc-driver/src/test/java/openjproxy/jdbc/CockroachDBPaginationCacheIntegrationTest.java new file mode 100644 index 000000000..2ef63d2bc --- /dev/null +++ b/ojp-jdbc-driver/src/test/java/openjproxy/jdbc/CockroachDBPaginationCacheIntegrationTest.java @@ -0,0 +1,260 @@ +package openjproxy.jdbc; + +import org.junit.jupiter.api.BeforeAll; +import org.junit.jupiter.params.ParameterizedTest; +import org.junit.jupiter.params.provider.CsvFileSource; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.sql.Connection; +import java.sql.DriverManager; +import java.sql.PreparedStatement; +import java.sql.ResultSet; +import java.sql.SQLException; +import java.sql.Statement; + +import static org.junit.jupiter.api.Assertions.assertArrayEquals; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertNotNull; +import static org.junit.jupiter.api.Assertions.fail; +import static org.junit.jupiter.api.Assumptions.assumeTrue; + +/** + * Integration test for the next-page prefetch cache feature with a CockroachDB backend. + * + *

+ * <p>CockroachDB is PostgreSQL-wire-compatible, so it uses the same {@code LIMIT n OFFSET m}
+ * pagination syntax and {@code BYTEA} binary type as the PostgreSQL test.
+ *
+ * <p>The test is parameterized over several record counts (99, 100, 101, 567, 1000) to exercise
+ * boundary conditions around the 100-record page size. For each count the test:
+ *

+ * <ol>
+ *   <li>Creates a dedicated table with multiple column types, including a {@code BYTEA} column.</li>
+ *   <li>Inserts the requested number of rows with fully deterministic, per-row values.</li>
+ *   <li>Paginates through all rows using {@code LIMIT 100 OFFSET …} against an OJP server instance
+ *       that has {@code ojp.server.nextPageCache.enabled=true} (port 10594).</li>
+ *   <li>Asserts every column value, including a byte-exact comparison of the
+ *       {@code BYTEA} column.</li>
+ *   <li>Drops the table on completion.</li>
+ * </ol>
    + * + *

    This test is disabled by default and is activated by passing + * {@code -DenableCockroachDBPrefetchCacheTests=true} to the Maven Surefire plugin in CI. + * The target OJP server must already be running on port 10594 with the prefetch cache enabled. + */ +class CockroachDBPaginationCacheIntegrationTest { + + private static final Logger logger = LoggerFactory.getLogger(CockroachDBPaginationCacheIntegrationTest.class); + + /** Number of rows per page used throughout these tests. */ + private static final int PAGE_SIZE = 100; + + private static boolean isTestEnabled; + + @BeforeAll + static void checkTestConfiguration() { + isTestEnabled = Boolean.parseBoolean( + System.getProperty("enableCockroachDBPrefetchCacheTests", "false")); + } + + // ------------------------------------------------------------------------- + // Parameterized test – one run per row in the CSV + // ------------------------------------------------------------------------- + + /** + * Core pagination test for CockroachDB. 
+ */ + @ParameterizedTest + @CsvFileSource(resources = "/cockroachdb_prefetch_cache_connections_with_record_counts.csv") + void testPaginationWithPrefetchCache(int recordCount, String driverClass, + String url, String user, String pwd) + throws SQLException, ClassNotFoundException { + + assumeTrue(isTestEnabled, + "CockroachDB prefetch-cache tests are disabled " + + "(pass -DenableCockroachDBPrefetchCacheTests=true to enable)"); + + Class.forName(driverClass); + logger.info("Prefetch-cache pagination test: recordCount={}, url={}", recordCount, url); + + String tableName = "ojp_pfx_crdb_" + recordCount; + + try (Connection conn = DriverManager.getConnection(url, user, pwd)) { + + createTable(conn, tableName); + insertRows(conn, tableName, recordCount); + + int totalRetrieved = 0; + for (int offset = 0; offset < recordCount; offset += PAGE_SIZE) { + int expectedOnPage = Math.min(PAGE_SIZE, recordCount - offset); + totalRetrieved += assertPage(conn, tableName, offset, expectedOnPage); + } + + assertEquals(recordCount, totalRetrieved, + "Total rows retrieved across all pages must equal recordCount"); + + dropTable(conn, tableName); + } + } + + // ------------------------------------------------------------------------- + // Helpers + // ------------------------------------------------------------------------- + + /** + * Drops (if exists) and re-creates the test table. + * + *

+     * <p>Schema:
+     * <pre>
+     *   id         INT      PRIMARY KEY      – 1-based row identifier
+     *   name       VARCHAR  NOT NULL         – "record_{id}"
+     *   val_int    INT      NOT NULL         – id × 10
+     *   val_bigint BIGINT   NOT NULL         – id × 1,000,000
+     *   val_bool   BOOLEAN  NOT NULL         – true when id is even
+     *   val_text   TEXT     NOT NULL         – "text_value_for_row_{id}"
+     *   val_bytea  BYTEA    NOT NULL         – four deterministic bytes derived from id
+     * </pre>
    + */ + private static void createTable(Connection conn, String tableName) throws SQLException { + try (Statement stmt = conn.createStatement()) { + stmt.execute("DROP TABLE IF EXISTS " + tableName); + stmt.execute( + "CREATE TABLE " + tableName + " (" + + " id INT PRIMARY KEY," + + " name VARCHAR(100) NOT NULL," + + " val_int INT NOT NULL," + + " val_bigint BIGINT NOT NULL," + + " val_bool BOOLEAN NOT NULL," + + " val_text TEXT NOT NULL," + + " val_bytea BYTEA NOT NULL" + + ")"); + } + logger.debug("Created table {}", tableName); + } + + private static void insertRows(Connection conn, String tableName, int recordCount) + throws SQLException { + String sql = "INSERT INTO " + tableName + + " (id, name, val_int, val_bigint, val_bool, val_text, val_bytea)" + + " VALUES (?, ?, ?, ?, ?, ?, ?)"; + + try (PreparedStatement ps = conn.prepareStatement(sql)) { + for (int i = 1; i <= recordCount; i++) { + ps.setInt(1, i); + ps.setString(2, "record_" + i); + ps.setInt(3, i * 10); + ps.setLong(4, i * 1_000_000L); + ps.setBoolean(5, i % 2 == 0); + ps.setString(6, "text_value_for_row_" + i); + ps.setBytes(7, expectedBytea(i)); + ps.addBatch(); + + if (i % 500 == 0) { + ps.executeBatch(); + } + } + ps.executeBatch(); + } + logger.debug("Inserted {} rows into {}", recordCount, tableName); + } + + private static int assertPage(Connection conn, String tableName, + int offset, int expectedRowsOnPage) + throws SQLException { + + String sql = "SELECT id, name, val_int, val_bigint, val_bool, val_text, val_bytea" + + " FROM " + tableName + + " ORDER BY id" + + " LIMIT " + PAGE_SIZE + " OFFSET " + offset; + + int rowsOnPage = 0; + try (PreparedStatement ps = conn.prepareStatement(sql); + ResultSet rs = ps.executeQuery()) { + + while (rs.next()) { + int expectedId = offset + rowsOnPage + 1; + int id = rs.getInt("id"); + + assertEquals(expectedId, id, + "id mismatch at offset=" + offset + " row=" + rowsOnPage); + assertEquals("record_" + id, rs.getString("name"), + "name mismatch for id=" 
+ id); + assertEquals(id * 10, rs.getInt("val_int"), + "val_int mismatch for id=" + id); + assertEquals(id * 1_000_000L, rs.getLong("val_bigint"), + "val_bigint mismatch for id=" + id); + assertEquals(id % 2 == 0, rs.getBoolean("val_bool"), + "val_bool mismatch for id=" + id); + assertEquals("text_value_for_row_" + id, rs.getString("val_text"), + "val_text mismatch for id=" + id); + + assertBytea(expectedBytea(id), rs.getObject("val_bytea"), + "val_bytea for id=" + id); + + rowsOnPage++; + } + } + + assertEquals(expectedRowsOnPage, rowsOnPage, + "Page at offset=" + offset + " expected " + expectedRowsOnPage + " rows"); + return rowsOnPage; + } + + private static void dropTable(Connection conn, String tableName) { + try (Statement stmt = conn.createStatement()) { + stmt.execute("DROP TABLE IF EXISTS " + tableName); + logger.debug("Dropped table {}", tableName); + } catch (SQLException e) { + logger.warn("Could not drop table {}: {}", tableName, e.getMessage()); + } + } + + // ------------------------------------------------------------------------- + // Data-generation helpers + // ------------------------------------------------------------------------- + + private static byte[] expectedBytea(int rowId) { + return new byte[]{ + (byte) (rowId & 0xFF), + (byte) ((rowId >> 8) & 0xFF), + (byte) ((rowId * 3) & 0xFF), + (byte) ((rowId * 7) & 0xFF) + }; + } + + /** + * Asserts that {@code actual} (which may be a {@code byte[]} or the hex-escape + * {@code String} {@code "\\xHH…"}) equals {@code expected} byte-for-byte. 
+ */ + private static void assertBytea(byte[] expected, Object actual, String columnLabel) { + assertNotNull(actual, columnLabel + " must not be null"); + + byte[] actualBytes; + if (actual instanceof byte[]) { + actualBytes = (byte[]) actual; + } else if (actual instanceof String) { + String s = (String) actual; + if (s.startsWith("\\x") || s.startsWith("\\X")) { + actualBytes = hexStringToBytes(s.substring(2)); + } else { + actualBytes = s.getBytes(java.nio.charset.StandardCharsets.UTF_8); + } + } else { + actualBytes = fail(columnLabel + " has unexpected type " + actual.getClass().getName()); + } + + assertArrayEquals(expected, actualBytes, columnLabel + " bytes do not match"); + } + + private static byte[] hexStringToBytes(String hex) { + if (hex.isEmpty()) { + return new byte[0]; + } + int len = hex.length(); + byte[] data = new byte[len / 2]; + for (int i = 0; i < len; i += 2) { + data[i / 2] = (byte) ((Character.digit(hex.charAt(i), 16) << 4) + + Character.digit(hex.charAt(i + 1), 16)); + } + return data; + } +} diff --git a/ojp-jdbc-driver/src/test/java/openjproxy/jdbc/Db2PaginationCacheIntegrationTest.java b/ojp-jdbc-driver/src/test/java/openjproxy/jdbc/Db2PaginationCacheIntegrationTest.java new file mode 100644 index 000000000..38c4f248d --- /dev/null +++ b/ojp-jdbc-driver/src/test/java/openjproxy/jdbc/Db2PaginationCacheIntegrationTest.java @@ -0,0 +1,262 @@ +package openjproxy.jdbc; + +import org.junit.jupiter.api.BeforeAll; +import org.junit.jupiter.params.ParameterizedTest; +import org.junit.jupiter.params.provider.CsvFileSource; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.sql.Connection; +import java.sql.DriverManager; +import java.sql.PreparedStatement; +import java.sql.ResultSet; +import java.sql.SQLException; +import java.sql.Statement; + +import static org.junit.jupiter.api.Assertions.assertArrayEquals; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static 
org.junit.jupiter.api.Assertions.assertNotNull; +import static org.junit.jupiter.api.Assumptions.assumeTrue; + +/** + * Integration test for the next-page prefetch cache feature with an IBM DB2 backend. + * + *

    DB2 uses the ANSI SQL {@code OFFSET m ROWS FETCH NEXT n ROWS ONLY} pagination syntax. + * + *

    The test is parameterized over several record counts (99, 100, 101, 567, 1000) to exercise + * boundary conditions around the 100-record page size. For each count the test: + *

 + * <ol>
 + *   <li>Creates a dedicated table under the {@code DB2INST1} schema with multiple column types,
 + *       including a {@code BLOB} column.</li>
 + *   <li>Inserts the requested number of rows with fully deterministic, per-row values.</li>
 + *   <li>Paginates through all rows using {@code OFFSET … ROWS FETCH NEXT 100 ROWS ONLY} against an
 + *       OJP server instance that has {@code ojp.server.nextPageCache.enabled=true} (port 10594).</li>
 + *   <li>Asserts every column value, including a byte-exact comparison of the
 + *       {@code BLOB} column.</li>
 + *   <li>Drops the table on completion.</li>
 + * </ol>
 + *
 + * <p>
    This test is disabled by default and is activated by passing + * {@code -DenableDb2PrefetchCacheTests=true} to the Maven Surefire plugin in CI. + * The target OJP server must already be running on port 10594 with the prefetch cache enabled. + */ +class Db2PaginationCacheIntegrationTest { + + private static final Logger logger = LoggerFactory.getLogger(Db2PaginationCacheIntegrationTest.class); + + /** Number of rows per page used throughout these tests. */ + private static final int PAGE_SIZE = 100; + + private static boolean isTestEnabled; + + @BeforeAll + static void checkTestConfiguration() { + isTestEnabled = Boolean.parseBoolean( + System.getProperty("enableDb2PrefetchCacheTests", "false")); + } + + // ------------------------------------------------------------------------- + // Parameterized test – one run per row in the CSV + // ------------------------------------------------------------------------- + + /** + * Core pagination test for DB2. + */ + @ParameterizedTest + @CsvFileSource(resources = "/db2_prefetch_cache_connections_with_record_counts.csv") + void testPaginationWithPrefetchCache(int recordCount, String driverClass, + String url, String user, String pwd) + throws SQLException, ClassNotFoundException { + + assumeTrue(isTestEnabled, + "DB2 prefetch-cache tests are disabled " + + "(pass -DenableDb2PrefetchCacheTests=true to enable)"); + + Class.forName(driverClass); + logger.info("Prefetch-cache pagination test: recordCount={}, url={}", recordCount, url); + + String tableName = "DB2INST1.ojp_pfx_db2_" + recordCount; + + try (Connection conn = DriverManager.getConnection(url, user, pwd)) { + + // DB2 requires explicit schema to avoid "object not found" errors + try (Statement schemaStmt = conn.createStatement()) { + schemaStmt.execute("SET SCHEMA DB2INST1"); + } + + createTable(conn, tableName); + insertRows(conn, tableName, recordCount); + + int totalRetrieved = 0; + for (int offset = 0; offset < recordCount; offset += PAGE_SIZE) { + int 
expectedOnPage = Math.min(PAGE_SIZE, recordCount - offset); + totalRetrieved += assertPage(conn, tableName, offset, expectedOnPage); + } + + assertEquals(recordCount, totalRetrieved, + "Total rows retrieved across all pages must equal recordCount"); + + dropTable(conn, tableName); + } + } + + // ------------------------------------------------------------------------- + // Helpers + // ------------------------------------------------------------------------- + + /** + * Drops (if exists) and re-creates the test table. + * + *

    Schema: + *

    +     *   id         INTEGER      NOT NULL PRIMARY KEY  – 1-based row identifier
    +     *   name       VARCHAR(100) NOT NULL              – "record_{id}"
    +     *   val_int    INTEGER      NOT NULL              – id × 10
    +     *   val_bigint BIGINT       NOT NULL              – id × 1,000,000
    +     *   val_bool   SMALLINT     NOT NULL              – 1 when id is even, else 0
    +     *   val_text   VARCHAR(255) NOT NULL              – "text_value_for_row_{id}"
    +     *   val_blob   BLOB(1K)     NOT NULL              – four deterministic bytes derived from id
    +     * 
    + * + *

    Note: DB2 does not have a native BOOLEAN SQL type in older versions; {@code SMALLINT} (0/1) + * is used as a portable substitute. TEXT is replaced with VARCHAR(255). + */ + private static void createTable(Connection conn, String tableName) throws SQLException { + // Drop if exists (DB2 uses different DROP syntax) + try (Statement stmt = conn.createStatement()) { + stmt.execute("DROP TABLE " + tableName); + } catch (SQLException e) { + // Table does not exist – ignore + } + try (Statement stmt = conn.createStatement()) { + stmt.execute( + "CREATE TABLE " + tableName + " (" + + " id INTEGER NOT NULL," + + " name VARCHAR(100) NOT NULL," + + " val_int INTEGER NOT NULL," + + " val_bigint BIGINT NOT NULL," + + " val_bool SMALLINT NOT NULL," + + " val_text VARCHAR(255) NOT NULL," + + " val_blob BLOB(1K) NOT NULL," + + " PRIMARY KEY (id)" + + ")"); + } + logger.debug("Created table {}", tableName); + } + + private static void insertRows(Connection conn, String tableName, int recordCount) + throws SQLException { + String sql = "INSERT INTO " + tableName + + " (id, name, val_int, val_bigint, val_bool, val_text, val_blob)" + + " VALUES (?, ?, ?, ?, ?, ?, ?)"; + + try (PreparedStatement ps = conn.prepareStatement(sql)) { + for (int i = 1; i <= recordCount; i++) { + ps.setInt(1, i); + ps.setString(2, "record_" + i); + ps.setInt(3, i * 10); + ps.setLong(4, i * 1_000_000L); + ps.setInt(5, i % 2 == 0 ? 
1 : 0); + ps.setString(6, "text_value_for_row_" + i); + ps.setBytes(7, expectedBlob(i)); + ps.addBatch(); + + if (i % 500 == 0) { + ps.executeBatch(); + } + } + ps.executeBatch(); + } + logger.debug("Inserted {} rows into {}", recordCount, tableName); + } + + private static int assertPage(Connection conn, String tableName, + int offset, int expectedRowsOnPage) + throws SQLException { + + // DB2 uses OFFSET m ROWS FETCH NEXT n ROWS ONLY + String sql = "SELECT id, name, val_int, val_bigint, val_bool, val_text, val_blob" + + " FROM " + tableName + + " ORDER BY id" + + " OFFSET " + offset + " ROWS FETCH NEXT " + PAGE_SIZE + " ROWS ONLY"; + + int rowsOnPage = 0; + try (PreparedStatement ps = conn.prepareStatement(sql); + ResultSet rs = ps.executeQuery()) { + + while (rs.next()) { + int expectedId = offset + rowsOnPage + 1; + int id = rs.getInt("id"); + + assertEquals(expectedId, id, + "id mismatch at offset=" + offset + " row=" + rowsOnPage); + assertEquals("record_" + id, rs.getString("name"), + "name mismatch for id=" + id); + assertEquals(id * 10, rs.getInt("val_int"), + "val_int mismatch for id=" + id); + assertEquals(id * 1_000_000L, rs.getLong("val_bigint"), + "val_bigint mismatch for id=" + id); + assertEquals(id % 2 == 0 ? 1 : 0, rs.getInt("val_bool"), + "val_bool mismatch for id=" + id); + assertEquals("text_value_for_row_" + id, rs.getString("val_text"), + "val_text mismatch for id=" + id); + + byte[] actualBlob = toBlobBytes(rs, "val_blob", id); + assertNotNull(actualBlob, "val_blob for id=" + id + " must not be null"); + assertArrayEquals(expectedBlob(id), actualBlob, + "val_blob bytes do not match for id=" + id); + + rowsOnPage++; + } + } + + assertEquals(expectedRowsOnPage, rowsOnPage, + "Page at offset=" + offset + " expected " + expectedRowsOnPage + " rows"); + return rowsOnPage; + } + + /** + * Reads a BLOB/binary column as a {@code byte[]}. + * + *

    The prefetch cache materialises BLOBs as {@code byte[]} ({@link java.sql.Types#BINARY} / + * {@code VARBINARY}) when serving from cache, whereas a live DB query may return a + * {@link java.sql.Blob} object. Both representations are handled here. + */ + private static byte[] toBlobBytes(ResultSet rs, String column, int id) throws SQLException { + Object obj = rs.getObject(column); + if (obj == null) { + return null; + } + if (obj instanceof byte[]) { + return (byte[]) obj; + } + if (obj instanceof java.sql.Blob) { + java.sql.Blob blob = (java.sql.Blob) obj; + return blob.getBytes(1, (int) blob.length()); + } + // Fallback: use getBytes + return rs.getBytes(column); + } + + private static void dropTable(Connection conn, String tableName) { + try (Statement stmt = conn.createStatement()) { + stmt.execute("DROP TABLE " + tableName); + logger.debug("Dropped table {}", tableName); + } catch (SQLException e) { + logger.warn("Could not drop table {}: {}", tableName, e.getMessage()); + } + } + + // ------------------------------------------------------------------------- + // Data-generation helpers + // ------------------------------------------------------------------------- + + private static byte[] expectedBlob(int rowId) { + return new byte[]{ + (byte) (rowId & 0xFF), + (byte) ((rowId >> 8) & 0xFF), + (byte) ((rowId * 3) & 0xFF), + (byte) ((rowId * 7) & 0xFF) + }; + } +} diff --git a/ojp-jdbc-driver/src/test/java/openjproxy/jdbc/H2PaginationCacheIntegrationTest.java b/ojp-jdbc-driver/src/test/java/openjproxy/jdbc/H2PaginationCacheIntegrationTest.java new file mode 100644 index 000000000..f8b6c79f2 --- /dev/null +++ b/ojp-jdbc-driver/src/test/java/openjproxy/jdbc/H2PaginationCacheIntegrationTest.java @@ -0,0 +1,225 @@ +package openjproxy.jdbc; + +import org.junit.jupiter.api.BeforeAll; +import org.junit.jupiter.params.ParameterizedTest; +import org.junit.jupiter.params.provider.CsvFileSource; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import 
java.sql.Connection; +import java.sql.DriverManager; +import java.sql.PreparedStatement; +import java.sql.ResultSet; +import java.sql.SQLException; +import java.sql.Statement; + +import static org.junit.jupiter.api.Assertions.assertArrayEquals; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertNotNull; +import static org.junit.jupiter.api.Assumptions.assumeTrue; + +/** + * Integration test for the next-page prefetch cache feature with an H2 backend. + * + *

    H2 supports the {@code LIMIT n OFFSET m} pagination syntax. + * + *

    The test is parameterized over several record counts (99, 100, 101, 567, 1000) to exercise + * boundary conditions around the 100-record page size. For each count the test: + *

 + * <ol>
 + *   <li>Creates a dedicated table with multiple column types, including a {@code VARBINARY} column.</li>
 + *   <li>Inserts the requested number of rows with fully deterministic, per-row values.</li>
 + *   <li>Paginates through all rows using {@code LIMIT 100 OFFSET …} against an OJP server instance
 + *       that has {@code ojp.server.nextPageCache.enabled=true} (port 10594).</li>
 + *   <li>Asserts every column value, including a byte-exact comparison of the
 + *       {@code VARBINARY} column.</li>
 + *   <li>Drops the table on completion.</li>
 + * </ol>
 + *
 + * <p>
    This test is disabled by default and is activated by passing + * {@code -DenableH2PrefetchCacheTests=true} to the Maven Surefire plugin in CI. + * The target OJP server must already be running on port 10594 with the prefetch cache enabled. + */ +class H2PaginationCacheIntegrationTest { + + private static final Logger logger = LoggerFactory.getLogger(H2PaginationCacheIntegrationTest.class); + + /** Number of rows per page used throughout these tests. */ + private static final int PAGE_SIZE = 100; + + private static boolean isTestEnabled; + + @BeforeAll + static void checkTestConfiguration() { + isTestEnabled = Boolean.parseBoolean( + System.getProperty("enableH2PrefetchCacheTests", "false")); + } + + // ------------------------------------------------------------------------- + // Parameterized test – one run per row in the CSV + // ------------------------------------------------------------------------- + + /** + * Core pagination test for H2. + */ + @ParameterizedTest + @CsvFileSource(resources = "/h2_prefetch_cache_connections_with_record_counts.csv") + void testPaginationWithPrefetchCache(int recordCount, String driverClass, + String url, String user, String pwd) + throws SQLException, ClassNotFoundException { + + assumeTrue(isTestEnabled, + "H2 prefetch-cache tests are disabled " + + "(pass -DenableH2PrefetchCacheTests=true to enable)"); + + Class.forName(driverClass); + logger.info("Prefetch-cache pagination test: recordCount={}, url={}", recordCount, url); + + String tableName = "OJP_PFX_H2_" + recordCount; + + try (Connection conn = DriverManager.getConnection(url, user, pwd)) { + + createTable(conn, tableName); + insertRows(conn, tableName, recordCount); + + int totalRetrieved = 0; + for (int offset = 0; offset < recordCount; offset += PAGE_SIZE) { + int expectedOnPage = Math.min(PAGE_SIZE, recordCount - offset); + totalRetrieved += assertPage(conn, tableName, offset, expectedOnPage); + } + + assertEquals(recordCount, totalRetrieved, + "Total rows 
retrieved across all pages must equal recordCount"); + + dropTable(conn, tableName); + } + } + + // ------------------------------------------------------------------------- + // Helpers + // ------------------------------------------------------------------------- + + /** + * Drops (if exists) and re-creates the test table. + * + *

    Schema: + *

    +     *   id         INT           PRIMARY KEY  – 1-based row identifier
    +     *   name       VARCHAR(100)  NOT NULL     – "record_{id}"
    +     *   val_int    INT           NOT NULL     – id × 10
    +     *   val_bigint BIGINT        NOT NULL     – id × 1,000,000
    +     *   val_bool   BOOLEAN       NOT NULL     – true when id is even
    +     *   val_text   VARCHAR(255)  NOT NULL     – "text_value_for_row_{id}"
    +     *   val_binary VARBINARY(32) NOT NULL     – four deterministic bytes derived from id
    +     * 
    + */ + private static void createTable(Connection conn, String tableName) throws SQLException { + try (Statement stmt = conn.createStatement()) { + stmt.execute("DROP TABLE IF EXISTS " + tableName); + stmt.execute( + "CREATE TABLE " + tableName + " (" + + " id INT PRIMARY KEY," + + " name VARCHAR(100) NOT NULL," + + " val_int INT NOT NULL," + + " val_bigint BIGINT NOT NULL," + + " val_bool BOOLEAN NOT NULL," + + " val_text VARCHAR(255) NOT NULL," + + " val_binary VARBINARY(32) NOT NULL" + + ")"); + } + logger.debug("Created table {}", tableName); + } + + private static void insertRows(Connection conn, String tableName, int recordCount) + throws SQLException { + String sql = "INSERT INTO " + tableName + + " (id, name, val_int, val_bigint, val_bool, val_text, val_binary)" + + " VALUES (?, ?, ?, ?, ?, ?, ?)"; + + try (PreparedStatement ps = conn.prepareStatement(sql)) { + for (int i = 1; i <= recordCount; i++) { + ps.setInt(1, i); + ps.setString(2, "record_" + i); + ps.setInt(3, i * 10); + ps.setLong(4, i * 1_000_000L); + ps.setBoolean(5, i % 2 == 0); + ps.setString(6, "text_value_for_row_" + i); + ps.setBytes(7, expectedBinary(i)); + ps.addBatch(); + + if (i % 500 == 0) { + ps.executeBatch(); + } + } + ps.executeBatch(); + } + logger.debug("Inserted {} rows into {}", recordCount, tableName); + } + + private static int assertPage(Connection conn, String tableName, + int offset, int expectedRowsOnPage) + throws SQLException { + + String sql = "SELECT id, name, val_int, val_bigint, val_bool, val_text, val_binary" + + " FROM " + tableName + + " ORDER BY id" + + " LIMIT " + PAGE_SIZE + " OFFSET " + offset; + + int rowsOnPage = 0; + try (PreparedStatement ps = conn.prepareStatement(sql); + ResultSet rs = ps.executeQuery()) { + + while (rs.next()) { + int expectedId = offset + rowsOnPage + 1; + int id = rs.getInt("id"); + + assertEquals(expectedId, id, + "id mismatch at offset=" + offset + " row=" + rowsOnPage); + assertEquals("record_" + id, rs.getString("name"), + 
"name mismatch for id=" + id); + assertEquals(id * 10, rs.getInt("val_int"), + "val_int mismatch for id=" + id); + assertEquals(id * 1_000_000L, rs.getLong("val_bigint"), + "val_bigint mismatch for id=" + id); + assertEquals(id % 2 == 0, rs.getBoolean("val_bool"), + "val_bool mismatch for id=" + id); + assertEquals("text_value_for_row_" + id, rs.getString("val_text"), + "val_text mismatch for id=" + id); + + Object binObj = rs.getObject("val_binary"); + assertNotNull(binObj, "val_binary for id=" + id + " must not be null"); + byte[] actualBytes = binObj instanceof byte[] ? (byte[]) binObj + : rs.getBytes("val_binary"); + assertArrayEquals(expectedBinary(id), actualBytes, + "val_binary bytes do not match for id=" + id); + + rowsOnPage++; + } + } + + assertEquals(expectedRowsOnPage, rowsOnPage, + "Page at offset=" + offset + " expected " + expectedRowsOnPage + " rows"); + return rowsOnPage; + } + + private static void dropTable(Connection conn, String tableName) { + try (Statement stmt = conn.createStatement()) { + stmt.execute("DROP TABLE IF EXISTS " + tableName); + logger.debug("Dropped table {}", tableName); + } catch (SQLException e) { + logger.warn("Could not drop table {}: {}", tableName, e.getMessage()); + } + } + + // ------------------------------------------------------------------------- + // Data-generation helpers + // ------------------------------------------------------------------------- + + private static byte[] expectedBinary(int rowId) { + return new byte[]{ + (byte) (rowId & 0xFF), + (byte) ((rowId >> 8) & 0xFF), + (byte) ((rowId * 3) & 0xFF), + (byte) ((rowId * 7) & 0xFF) + }; + } +} diff --git a/ojp-jdbc-driver/src/test/java/openjproxy/jdbc/MySQLMariaDBPaginationCacheIntegrationTest.java b/ojp-jdbc-driver/src/test/java/openjproxy/jdbc/MySQLMariaDBPaginationCacheIntegrationTest.java new file mode 100644 index 000000000..938eae010 --- /dev/null +++ b/ojp-jdbc-driver/src/test/java/openjproxy/jdbc/MySQLMariaDBPaginationCacheIntegrationTest.java @@ 
-0,0 +1,257 @@ +package openjproxy.jdbc; + +import org.junit.jupiter.api.BeforeAll; +import org.junit.jupiter.params.ParameterizedTest; +import org.junit.jupiter.params.provider.CsvFileSource; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.sql.Connection; +import java.sql.DriverManager; +import java.sql.PreparedStatement; +import java.sql.ResultSet; +import java.sql.SQLException; +import java.sql.Statement; + +import static org.junit.jupiter.api.Assertions.assertArrayEquals; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertNotNull; +import static org.junit.jupiter.api.Assumptions.assumeTrue; + +/** + * Integration test for the next-page prefetch cache feature with a MySQL backend. + * + *

    The test is parameterized over several record counts (99, 100, 101, 567, 1000) to exercise + * boundary conditions around the 100-record page size. For each count the test: + *

 + * <ol>
 + *   <li>Creates a dedicated table with multiple column types, including a {@code VARBINARY} LOB column.</li>
 + *   <li>Inserts the requested number of rows with fully deterministic, per-row values.</li>
 + *   <li>Paginates through all rows using {@code LIMIT 100 OFFSET …} against an OJP server instance
 + *       that has {@code ojp.server.nextPageCache.enabled=true} (port 10594).</li>
 + *   <li>Asserts every column value, including a byte-exact comparison of the
 + *       {@code VARBINARY} column.</li>
 + *   <li>Drops the table on completion.</li>
 + * </ol>
 + *
 + * <p>
    This test is disabled by default and is activated by passing + * {@code -DenableMySQLPrefetchCacheTests=true} to the Maven Surefire plugin in CI. + * The target OJP server must already be running on port 10594 with the prefetch cache enabled. + */ +class MySQLMariaDBPaginationCacheIntegrationTest { + + private static final Logger logger = LoggerFactory.getLogger(MySQLMariaDBPaginationCacheIntegrationTest.class); + + /** Number of rows per page used throughout these tests. */ + private static final int PAGE_SIZE = 100; + + private static boolean isMySQLTestEnabled; + private static boolean isMariaDBTestEnabled; + + @BeforeAll + static void checkTestConfiguration() { + isMySQLTestEnabled = Boolean.parseBoolean( + System.getProperty("enableMySQLPrefetchCacheTests", "false")); + isMariaDBTestEnabled = Boolean.parseBoolean( + System.getProperty("enableMariaDBPrefetchCacheTests", "false")); + } + + // ------------------------------------------------------------------------- + // Parameterized tests – one run per row in each CSV + // ------------------------------------------------------------------------- + + /** + * Core pagination test for MySQL. + */ + @ParameterizedTest + @CsvFileSource(resources = "/mysql_prefetch_cache_connections_with_record_counts.csv") + void testMySQLPaginationWithPrefetchCache(int recordCount, String driverClass, + String url, String user, String pwd) + throws SQLException, ClassNotFoundException { + + assumeTrue(isMySQLTestEnabled, + "MySQL prefetch-cache tests are disabled " + + "(pass -DenableMySQLPrefetchCacheTests=true to enable)"); + + runPaginationTest(recordCount, driverClass, url, user, pwd, "ojp_pfx_mysql_"); + } + + /** + * Core pagination test for MariaDB. 
+ */ + @ParameterizedTest + @CsvFileSource(resources = "/mariadb_prefetch_cache_connections_with_record_counts.csv") + void testMariaDBPaginationWithPrefetchCache(int recordCount, String driverClass, + String url, String user, String pwd) + throws SQLException, ClassNotFoundException { + + assumeTrue(isMariaDBTestEnabled, + "MariaDB prefetch-cache tests are disabled " + + "(pass -DenableMariaDBPrefetchCacheTests=true to enable)"); + + runPaginationTest(recordCount, driverClass, url, user, pwd, "ojp_pfx_maria_"); + } + + // ------------------------------------------------------------------------- + // Shared implementation + // ------------------------------------------------------------------------- + + private void runPaginationTest(int recordCount, String driverClass, + String url, String user, String pwd, + String tablePrefix) + throws SQLException, ClassNotFoundException { + + Class.forName(driverClass); + logger.info("Prefetch-cache pagination test: recordCount={}, url={}", recordCount, url); + + String tableName = tablePrefix + recordCount; + + try (Connection conn = DriverManager.getConnection(url, user, pwd)) { + + createTable(conn, tableName); + insertRows(conn, tableName, recordCount); + + int totalRetrieved = 0; + for (int offset = 0; offset < recordCount; offset += PAGE_SIZE) { + int expectedOnPage = Math.min(PAGE_SIZE, recordCount - offset); + totalRetrieved += assertPage(conn, tableName, offset, expectedOnPage); + } + + assertEquals(recordCount, totalRetrieved, + "Total rows retrieved across all pages must equal recordCount"); + + dropTable(conn, tableName); + } + } + + // ------------------------------------------------------------------------- + // Helpers + // ------------------------------------------------------------------------- + + /** + * Drops (if exists) and re-creates the test table. + * + *

    Schema: + *

    +     *   id         INT          PRIMARY KEY   – 1-based row identifier
    +     *   name       VARCHAR(100) NOT NULL      – "record_{id}"
    +     *   val_int    INT          NOT NULL      – id × 10
    +     *   val_bigint BIGINT       NOT NULL      – id × 1,000,000
    +     *   val_bool   TINYINT(1)   NOT NULL      – 1 when id is even, else 0
    +     *   val_text   TEXT         NOT NULL      – "text_value_for_row_{id}"
    +     *   val_binary VARBINARY(32) NOT NULL     – four deterministic bytes derived from id
    +     * 
    + */ + private static void createTable(Connection conn, String tableName) throws SQLException { + try (Statement stmt = conn.createStatement()) { + stmt.execute("DROP TABLE IF EXISTS " + tableName); + stmt.execute( + "CREATE TABLE " + tableName + " (" + + " id INT PRIMARY KEY," + + " name VARCHAR(100) NOT NULL," + + " val_int INT NOT NULL," + + " val_bigint BIGINT NOT NULL," + + " val_bool TINYINT(1) NOT NULL," + + " val_text TEXT NOT NULL," + + " val_binary VARBINARY(32) NOT NULL" + + ")"); + } + logger.debug("Created table {}", tableName); + } + + private static void insertRows(Connection conn, String tableName, int recordCount) + throws SQLException { + String sql = "INSERT INTO " + tableName + + " (id, name, val_int, val_bigint, val_bool, val_text, val_binary)" + + " VALUES (?, ?, ?, ?, ?, ?, ?)"; + + try (PreparedStatement ps = conn.prepareStatement(sql)) { + for (int i = 1; i <= recordCount; i++) { + ps.setInt(1, i); + ps.setString(2, "record_" + i); + ps.setInt(3, i * 10); + ps.setLong(4, i * 1_000_000L); + ps.setInt(5, i % 2 == 0 ? 
1 : 0); + ps.setString(6, "text_value_for_row_" + i); + ps.setBytes(7, expectedBinary(i)); + ps.addBatch(); + + if (i % 500 == 0) { + ps.executeBatch(); + } + } + ps.executeBatch(); + } + logger.debug("Inserted {} rows into {}", recordCount, tableName); + } + + private static int assertPage(Connection conn, String tableName, + int offset, int expectedRowsOnPage) + throws SQLException { + + String sql = "SELECT id, name, val_int, val_bigint, val_bool, val_text, val_binary" + + " FROM " + tableName + + " ORDER BY id" + + " LIMIT " + PAGE_SIZE + " OFFSET " + offset; + + int rowsOnPage = 0; + try (PreparedStatement ps = conn.prepareStatement(sql); + ResultSet rs = ps.executeQuery()) { + + while (rs.next()) { + int expectedId = offset + rowsOnPage + 1; + int id = rs.getInt("id"); + + assertEquals(expectedId, id, + "id mismatch at offset=" + offset + " row=" + rowsOnPage); + assertEquals("record_" + id, rs.getString("name"), + "name mismatch for id=" + id); + assertEquals(id * 10, rs.getInt("val_int"), + "val_int mismatch for id=" + id); + assertEquals(id * 1_000_000L, rs.getLong("val_bigint"), + "val_bigint mismatch for id=" + id); + assertEquals(id % 2 == 0 ? 1 : 0, rs.getInt("val_bool"), + "val_bool mismatch for id=" + id); + assertEquals("text_value_for_row_" + id, rs.getString("val_text"), + "val_text mismatch for id=" + id); + + Object binaryObj = rs.getObject("val_binary"); + assertNotNull(binaryObj, "val_binary for id=" + id + " must not be null"); + byte[] actualBytes = binaryObj instanceof byte[] ? 
(byte[]) binaryObj + : rs.getBytes("val_binary"); + assertArrayEquals(expectedBinary(id), actualBytes, + "val_binary bytes do not match for id=" + id); + + rowsOnPage++; + } + } + + assertEquals(expectedRowsOnPage, rowsOnPage, + "Page at offset=" + offset + " expected " + expectedRowsOnPage + " rows"); + return rowsOnPage; + } + + private static void dropTable(Connection conn, String tableName) { + try (Statement stmt = conn.createStatement()) { + stmt.execute("DROP TABLE IF EXISTS " + tableName); + logger.debug("Dropped table {}", tableName); + } catch (SQLException e) { + logger.warn("Could not drop table {}: {}", tableName, e.getMessage()); + } + } + + // ------------------------------------------------------------------------- + // Data-generation helpers + // ------------------------------------------------------------------------- + + /** + * Returns four deterministic bytes for a given {@code rowId}. + */ + private static byte[] expectedBinary(int rowId) { + return new byte[]{ + (byte) (rowId & 0xFF), + (byte) ((rowId >> 8) & 0xFF), + (byte) ((rowId * 3) & 0xFF), + (byte) ((rowId * 7) & 0xFF) + }; + } +} diff --git a/ojp-jdbc-driver/src/test/java/openjproxy/jdbc/OraclePaginationCacheIntegrationTest.java b/ojp-jdbc-driver/src/test/java/openjproxy/jdbc/OraclePaginationCacheIntegrationTest.java new file mode 100644 index 000000000..31ce5cf9e --- /dev/null +++ b/ojp-jdbc-driver/src/test/java/openjproxy/jdbc/OraclePaginationCacheIntegrationTest.java @@ -0,0 +1,259 @@ +package openjproxy.jdbc; + +import org.junit.jupiter.api.BeforeAll; +import org.junit.jupiter.params.ParameterizedTest; +import org.junit.jupiter.params.provider.CsvFileSource; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.sql.Connection; +import java.sql.DriverManager; +import java.sql.PreparedStatement; +import java.sql.ResultSet; +import java.sql.SQLException; +import java.sql.Statement; + +import static org.junit.jupiter.api.Assertions.assertArrayEquals; +import 
static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertNotNull; +import static org.junit.jupiter.api.Assumptions.assumeTrue; + +/** + * Integration test for the next-page prefetch cache feature with an Oracle backend. + * + *

    Oracle 12c+ supports the ANSI SQL {@code OFFSET m ROWS FETCH NEXT n ROWS ONLY} pagination + * syntax, which is recognised by the OJP {@code PaginationDetector}. + * + *

    The test is parameterized over several record counts (99, 100, 101, 567, 1000) to exercise + * boundary conditions around the 100-record page size. For each count the test: + *

+ * <ol>
+ *   <li>Creates a dedicated table with multiple column types, including a {@code BLOB} column.</li>
+ *   <li>Inserts the requested number of rows with fully deterministic, per-row values.</li>
+ *   <li>Paginates through all rows using {@code OFFSET … ROWS FETCH NEXT 100 ROWS ONLY} against an
+ *       OJP server instance that has {@code ojp.server.nextPageCache.enabled=true} (port 10594).</li>
+ *   <li>Asserts every column value, including a byte-exact comparison of the
+ *       {@code BLOB} column.</li>
+ *   <li>Drops the table on completion.</li>
+ * </ol>

    This test is disabled by default and is activated by passing + * {@code -DenableOraclePrefetchCacheTests=true} to the Maven Surefire plugin in CI. + * The target OJP server must already be running on port 10594 with the prefetch cache enabled. + * + *

    Oracle type notes: + *

+ * <ul>
+ *   <li>No native BOOLEAN SQL type (until Oracle 23c) → {@code NUMBER(1)} (0/1) is used.</li>
+ *   <li>No BIGINT → {@code NUMBER(19,0)}.</li>
+ *   <li>No TEXT → {@code VARCHAR2(255)}.</li>
+ *   <li>Binary data → {@code BLOB}.</li>
+ * </ul>
    + */ +class OraclePaginationCacheIntegrationTest { + + private static final Logger logger = LoggerFactory.getLogger(OraclePaginationCacheIntegrationTest.class); + + /** Number of rows per page used throughout these tests. */ + private static final int PAGE_SIZE = 100; + + private static boolean isTestEnabled; + + @BeforeAll + static void checkTestConfiguration() { + isTestEnabled = Boolean.parseBoolean( + System.getProperty("enableOraclePrefetchCacheTests", "false")); + } + + // ------------------------------------------------------------------------- + // Parameterized test – one run per row in the CSV + // ------------------------------------------------------------------------- + + /** + * Core pagination test for Oracle. + */ + @ParameterizedTest + @CsvFileSource(resources = "/oracle_prefetch_cache_connections_with_record_counts.csv") + void testPaginationWithPrefetchCache(int recordCount, String driverClass, + String url, String user, String pwd) + throws SQLException, ClassNotFoundException { + + assumeTrue(isTestEnabled, + "Oracle prefetch-cache tests are disabled " + + "(pass -DenableOraclePrefetchCacheTests=true to enable)"); + + Class.forName(driverClass); + logger.info("Prefetch-cache pagination test: recordCount={}, url={}", recordCount, url); + + String tableName = "ojp_pfx_ora_" + recordCount; + + try (Connection conn = DriverManager.getConnection(url, user, pwd)) { + + createTable(conn, tableName); + insertRows(conn, tableName, recordCount); + + int totalRetrieved = 0; + for (int offset = 0; offset < recordCount; offset += PAGE_SIZE) { + int expectedOnPage = Math.min(PAGE_SIZE, recordCount - offset); + totalRetrieved += assertPage(conn, tableName, offset, expectedOnPage); + } + + assertEquals(recordCount, totalRetrieved, + "Total rows retrieved across all pages must equal recordCount"); + + dropTable(conn, tableName); + } + } + + // ------------------------------------------------------------------------- + // Helpers + // 
------------------------------------------------------------------------- + + /** + * Drops (if exists) and re-creates the test table. + * + *

+ * <p>Schema:
+ * <pre>
+ *   id         NUMBER(10)   PRIMARY KEY   – 1-based row identifier
+ *   name       VARCHAR2(100) NOT NULL     – "record_{id}"
+ *   val_int    NUMBER(10)   NOT NULL      – id × 10
+ *   val_bigint NUMBER(19,0) NOT NULL      – id × 1,000,000
+ *   val_bool   NUMBER(1)    NOT NULL      – 1 when id is even, else 0
+ *   val_text   VARCHAR2(255) NOT NULL     – "text_value_for_row_{id}"
+ *   val_blob   BLOB         NOT NULL      – four deterministic bytes derived from id
+ * </pre>
    + */ + private static void createTable(Connection conn, String tableName) throws SQLException { + try (Statement stmt = conn.createStatement()) { + stmt.execute("DROP TABLE " + tableName); + } catch (SQLException e) { + // Table does not exist – ignore + } + try (Statement stmt = conn.createStatement()) { + stmt.execute( + "CREATE TABLE " + tableName + " (" + + " id NUMBER(10) NOT NULL," + + " name VARCHAR2(100) NOT NULL," + + " val_int NUMBER(10) NOT NULL," + + " val_bigint NUMBER(19,0) NOT NULL," + + " val_bool NUMBER(1) NOT NULL," + + " val_text VARCHAR2(255) NOT NULL," + + " val_blob BLOB NOT NULL," + + " CONSTRAINT pk_" + tableName + " PRIMARY KEY (id)" + + ")"); + } + logger.debug("Created table {}", tableName); + } + + private static void insertRows(Connection conn, String tableName, int recordCount) + throws SQLException { + String sql = "INSERT INTO " + tableName + + " (id, name, val_int, val_bigint, val_bool, val_text, val_blob)" + + " VALUES (?, ?, ?, ?, ?, ?, ?)"; + + try (PreparedStatement ps = conn.prepareStatement(sql)) { + for (int i = 1; i <= recordCount; i++) { + ps.setInt(1, i); + ps.setString(2, "record_" + i); + ps.setInt(3, i * 10); + ps.setLong(4, i * 1_000_000L); + ps.setInt(5, i % 2 == 0 ? 
1 : 0); + ps.setString(6, "text_value_for_row_" + i); + ps.setBytes(7, expectedBlob(i)); + ps.addBatch(); + + if (i % 500 == 0) { + ps.executeBatch(); + } + } + ps.executeBatch(); + } + logger.debug("Inserted {} rows into {}", recordCount, tableName); + } + + private static int assertPage(Connection conn, String tableName, + int offset, int expectedRowsOnPage) + throws SQLException { + + // Oracle 12c+ OFFSET/FETCH syntax + String sql = "SELECT id, name, val_int, val_bigint, val_bool, val_text, val_blob" + + " FROM " + tableName + + " ORDER BY id" + + " OFFSET " + offset + " ROWS FETCH NEXT " + PAGE_SIZE + " ROWS ONLY"; + + int rowsOnPage = 0; + try (PreparedStatement ps = conn.prepareStatement(sql); + ResultSet rs = ps.executeQuery()) { + + while (rs.next()) { + int expectedId = offset + rowsOnPage + 1; + int id = rs.getInt("id"); + + assertEquals(expectedId, id, + "id mismatch at offset=" + offset + " row=" + rowsOnPage); + assertEquals("record_" + id, rs.getString("name"), + "name mismatch for id=" + id); + assertEquals(id * 10, rs.getInt("val_int"), + "val_int mismatch for id=" + id); + assertEquals(id * 1_000_000L, rs.getLong("val_bigint"), + "val_bigint mismatch for id=" + id); + assertEquals(id % 2 == 0 ? 1 : 0, rs.getInt("val_bool"), + "val_bool mismatch for id=" + id); + assertEquals("text_value_for_row_" + id, rs.getString("val_text"), + "val_text mismatch for id=" + id); + + byte[] actualBlob = toBlobBytes(rs, "val_blob", id); + assertNotNull(actualBlob, "val_blob for id=" + id + " must not be null"); + assertArrayEquals(expectedBlob(id), actualBlob, + "val_blob bytes do not match for id=" + id); + + rowsOnPage++; + } + } + + assertEquals(expectedRowsOnPage, rowsOnPage, + "Page at offset=" + offset + " expected " + expectedRowsOnPage + " rows"); + return rowsOnPage; + } + + /** + * Reads a BLOB column as a {@code byte[]}. + * + *

    The prefetch cache materialises BLOBs as {@code byte[]} when serving from cache, + * whereas a live DB query returns a {@link java.sql.Blob} object. Both are handled here. + */ + private static byte[] toBlobBytes(ResultSet rs, String column, int id) throws SQLException { + Object obj = rs.getObject(column); + if (obj == null) { + return null; + } + if (obj instanceof byte[]) { + return (byte[]) obj; + } + if (obj instanceof java.sql.Blob) { + java.sql.Blob blob = (java.sql.Blob) obj; + return blob.getBytes(1, (int) blob.length()); + } + return rs.getBytes(column); + } + + private static void dropTable(Connection conn, String tableName) { + try (Statement stmt = conn.createStatement()) { + stmt.execute("DROP TABLE " + tableName); + logger.debug("Dropped table {}", tableName); + } catch (SQLException e) { + logger.warn("Could not drop table {}: {}", tableName, e.getMessage()); + } + } + + // ------------------------------------------------------------------------- + // Data-generation helpers + // ------------------------------------------------------------------------- + + private static byte[] expectedBlob(int rowId) { + return new byte[]{ + (byte) (rowId & 0xFF), + (byte) ((rowId >> 8) & 0xFF), + (byte) ((rowId * 3) & 0xFF), + (byte) ((rowId * 7) & 0xFF) + }; + } +} diff --git a/ojp-jdbc-driver/src/test/java/openjproxy/jdbc/SQLServerPaginationCacheIntegrationTest.java b/ojp-jdbc-driver/src/test/java/openjproxy/jdbc/SQLServerPaginationCacheIntegrationTest.java new file mode 100644 index 000000000..58667b5fd --- /dev/null +++ b/ojp-jdbc-driver/src/test/java/openjproxy/jdbc/SQLServerPaginationCacheIntegrationTest.java @@ -0,0 +1,270 @@ +package openjproxy.jdbc; + +import openjproxy.jdbc.testutil.SQLServerPrefetchCacheConnectionProvider; +import openjproxy.jdbc.testutil.SQLServerTestContainer; +import org.junit.jupiter.api.BeforeAll; +import org.junit.jupiter.api.condition.EnabledIf; +import org.junit.jupiter.params.ParameterizedTest; +import 
org.junit.jupiter.params.provider.ArgumentsSource; +import org.junit.jupiter.params.provider.ValueSource; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.sql.Connection; +import java.sql.DriverManager; +import java.sql.PreparedStatement; +import java.sql.ResultSet; +import java.sql.SQLException; +import java.sql.Statement; + +import static org.junit.jupiter.api.Assertions.assertArrayEquals; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertNotNull; +import static org.junit.jupiter.api.Assumptions.assumeTrue; + +/** + * Integration test for the next-page prefetch cache feature with a SQL Server backend. + * + *

    SQL Server uses the ANSI SQL {@code OFFSET m ROWS FETCH NEXT n ROWS ONLY} pagination syntax. + * The SQL Server container is managed by TestContainers; the test connects via an OJP prefetch-cache + * server on port 10594. + * + *

    The test is parameterized over several record counts (99, 100, 101, 567, 1000) to exercise + * boundary conditions around the 100-record page size. For each count the test: + *

+ * <ol>
+ *   <li>Creates a dedicated table with multiple column types, including a {@code VARBINARY} column.</li>
+ *   <li>Inserts the requested number of rows with fully deterministic, per-row values.</li>
+ *   <li>Paginates through all rows using {@code OFFSET … ROWS FETCH NEXT 100 ROWS ONLY} against an
+ *       OJP server instance that has {@code ojp.server.nextPageCache.enabled=true} (port 10594).</li>
+ *   <li>Asserts every column value, including a byte-exact comparison of the
+ *       {@code VARBINARY} column.</li>
+ *   <li>Drops the table on completion.</li>
+ * </ol>

    This test is disabled by default and is activated by passing + * {@code -DenableSqlServerPrefetchCacheTests=true} to the Maven Surefire plugin in CI. + * The target OJP server must already be running on port 10594 with the prefetch cache enabled. + * + *

    SQL Server type notes: + *

+ * <ul>
+ *   <li>No native BOOLEAN → {@code BIT} (0/1) is used.</li>
+ *   <li>Binary data → {@code VARBINARY(32)}.</li>
+ *   <li>Large text → {@code NVARCHAR(255)}.</li>
+ * </ul>
    + */ +@EnabledIf("openjproxy.jdbc.testutil.SQLServerTestContainer#isEnabled") +class SQLServerPaginationCacheIntegrationTest { + + private static final Logger logger = LoggerFactory.getLogger(SQLServerPaginationCacheIntegrationTest.class); + + /** Number of rows per page used throughout these tests. */ + private static final int PAGE_SIZE = 100; + + private static boolean isTestEnabled; + + @BeforeAll + static void checkTestConfiguration() { + isTestEnabled = Boolean.parseBoolean( + System.getProperty("enableSqlServerPrefetchCacheTests", "false")); + } + + // ------------------------------------------------------------------------- + // Parameterized test + // ------------------------------------------------------------------------- + + /** + * Core pagination test for SQL Server. + * + *

    The record count is provided via {@code @ValueSource} and the connection details via + * {@link SQLServerPrefetchCacheConnectionProvider}. JUnit 5 does not support mixing + * two argument sources in a single {@code @ParameterizedTest}, so the test obtains the + * connection from the shared TestContainer directly and iterates over record counts. + */ + @ParameterizedTest + @ValueSource(ints = {99, 100, 101, 567, 1000}) + void testPaginationWithPrefetchCache(int recordCount) throws SQLException { + + assumeTrue(isTestEnabled, + "SQL Server prefetch-cache tests are disabled " + + "(pass -DenableSqlServerPrefetchCacheTests=true to enable)"); + + // Build connection via the prefetch-cache connection provider + String[] connArgs = getConnectionArgs(); + String url = connArgs[0]; + String user = connArgs[1]; + String pwd = connArgs[2]; + + logger.info("Prefetch-cache pagination test: recordCount={}, url={}", recordCount, url); + + String tableName = "ojp_pfx_mssql_" + recordCount; + + try (Connection conn = DriverManager.getConnection(url, user, pwd)) { + + createTable(conn, tableName); + insertRows(conn, tableName, recordCount); + + int totalRetrieved = 0; + for (int offset = 0; offset < recordCount; offset += PAGE_SIZE) { + int expectedOnPage = Math.min(PAGE_SIZE, recordCount - offset); + totalRetrieved += assertPage(conn, tableName, offset, expectedOnPage); + } + + assertEquals(recordCount, totalRetrieved, + "Total rows retrieved across all pages must equal recordCount"); + + dropTable(conn, tableName); + } + } + + // ------------------------------------------------------------------------- + // Helpers + // ------------------------------------------------------------------------- + + /** Obtains [url, user, password] from the TestContainer via the prefetch-cache provider. 
*/ + private static String[] getConnectionArgs() { + SQLServerTestContainer.getInstance(); + String containerJdbcUrl = SQLServerTestContainer.getJdbcUrl(); + String username = SQLServerTestContainer.getUsername(); + String password = SQLServerTestContainer.getPassword(); + + String prefetchCachePort = System.getProperty("ojp.prefetch.cache.port", "10594"); + String ojpProxyHost = System.getProperty("ojp.proxy.host", "localhost"); + + // strip "jdbc:" prefix and wrap with OJP proxy + String urlWithoutPrefix = containerJdbcUrl.startsWith("jdbc:") + ? containerJdbcUrl.substring("jdbc:".length()) + : containerJdbcUrl; + if (!urlWithoutPrefix.toLowerCase().contains("databasename=")) { + urlWithoutPrefix = urlWithoutPrefix + ";databaseName=defaultdb"; + } + String ojpUrl = "jdbc:ojp[" + ojpProxyHost + ":" + prefetchCachePort + "]_" + urlWithoutPrefix; + + return new String[]{ojpUrl, username, password}; + } + + /** + * Drops (if exists) and re-creates the test table. + * + *

+ * <p>Schema:
+ * <pre>
+ *   id         INT          PRIMARY KEY   – 1-based row identifier
+ *   name       NVARCHAR(100) NOT NULL     – "record_{id}"
+ *   val_int    INT          NOT NULL      – id × 10
+ *   val_bigint BIGINT       NOT NULL      – id × 1,000,000
+ *   val_bool   BIT          NOT NULL      – 1 when id is even, else 0
+ *   val_text   NVARCHAR(255) NOT NULL     – "text_value_for_row_{id}"
+ *   val_binary VARBINARY(32) NOT NULL     – four deterministic bytes derived from id
+ * </pre>
    + */ + private static void createTable(Connection conn, String tableName) throws SQLException { + try (Statement stmt = conn.createStatement()) { + stmt.execute("IF OBJECT_ID('" + tableName + "', 'U') IS NOT NULL DROP TABLE " + tableName); + stmt.execute( + "CREATE TABLE " + tableName + " (" + + " id INT NOT NULL PRIMARY KEY," + + " name NVARCHAR(100) NOT NULL," + + " val_int INT NOT NULL," + + " val_bigint BIGINT NOT NULL," + + " val_bool BIT NOT NULL," + + " val_text NVARCHAR(255) NOT NULL," + + " val_binary VARBINARY(32) NOT NULL" + + ")"); + } + logger.debug("Created table {}", tableName); + } + + private static void insertRows(Connection conn, String tableName, int recordCount) + throws SQLException { + String sql = "INSERT INTO " + tableName + + " (id, name, val_int, val_bigint, val_bool, val_text, val_binary)" + + " VALUES (?, ?, ?, ?, ?, ?, ?)"; + + try (PreparedStatement ps = conn.prepareStatement(sql)) { + for (int i = 1; i <= recordCount; i++) { + ps.setInt(1, i); + ps.setString(2, "record_" + i); + ps.setInt(3, i * 10); + ps.setLong(4, i * 1_000_000L); + ps.setInt(5, i % 2 == 0 ? 
1 : 0); + ps.setString(6, "text_value_for_row_" + i); + ps.setBytes(7, expectedBinary(i)); + ps.addBatch(); + + if (i % 500 == 0) { + ps.executeBatch(); + } + } + ps.executeBatch(); + } + logger.debug("Inserted {} rows into {}", recordCount, tableName); + } + + private static int assertPage(Connection conn, String tableName, + int offset, int expectedRowsOnPage) + throws SQLException { + + // SQL Server: ORDER BY is required when using OFFSET/FETCH + String sql = "SELECT id, name, val_int, val_bigint, val_bool, val_text, val_binary" + + " FROM " + tableName + + " ORDER BY id" + + " OFFSET " + offset + " ROWS FETCH NEXT " + PAGE_SIZE + " ROWS ONLY"; + + int rowsOnPage = 0; + try (PreparedStatement ps = conn.prepareStatement(sql); + ResultSet rs = ps.executeQuery()) { + + while (rs.next()) { + int expectedId = offset + rowsOnPage + 1; + int id = rs.getInt("id"); + + assertEquals(expectedId, id, + "id mismatch at offset=" + offset + " row=" + rowsOnPage); + assertEquals("record_" + id, rs.getString("name"), + "name mismatch for id=" + id); + assertEquals(id * 10, rs.getInt("val_int"), + "val_int mismatch for id=" + id); + assertEquals(id * 1_000_000L, rs.getLong("val_bigint"), + "val_bigint mismatch for id=" + id); + assertEquals(id % 2 == 0 ? 1 : 0, rs.getInt("val_bool"), + "val_bool mismatch for id=" + id); + assertEquals("text_value_for_row_" + id, rs.getString("val_text"), + "val_text mismatch for id=" + id); + + Object binObj = rs.getObject("val_binary"); + assertNotNull(binObj, "val_binary for id=" + id + " must not be null"); + byte[] actualBytes = binObj instanceof byte[] ? 
(byte[]) binObj + : rs.getBytes("val_binary"); + assertArrayEquals(expectedBinary(id), actualBytes, + "val_binary bytes do not match for id=" + id); + + rowsOnPage++; + } + } + + assertEquals(expectedRowsOnPage, rowsOnPage, + "Page at offset=" + offset + " expected " + expectedRowsOnPage + " rows"); + return rowsOnPage; + } + + private static void dropTable(Connection conn, String tableName) { + try (Statement stmt = conn.createStatement()) { + stmt.execute("IF OBJECT_ID('" + tableName + "', 'U') IS NOT NULL DROP TABLE " + tableName); + logger.debug("Dropped table {}", tableName); + } catch (SQLException e) { + logger.warn("Could not drop table {}: {}", tableName, e.getMessage()); + } + } + + // ------------------------------------------------------------------------- + // Data-generation helpers + // ------------------------------------------------------------------------- + + private static byte[] expectedBinary(int rowId) { + return new byte[]{ + (byte) (rowId & 0xFF), + (byte) ((rowId >> 8) & 0xFF), + (byte) ((rowId * 3) & 0xFF), + (byte) ((rowId * 7) & 0xFF) + }; + } +} diff --git a/ojp-jdbc-driver/src/test/java/openjproxy/jdbc/testutil/SQLServerPrefetchCacheConnectionProvider.java b/ojp-jdbc-driver/src/test/java/openjproxy/jdbc/testutil/SQLServerPrefetchCacheConnectionProvider.java new file mode 100644 index 000000000..9ee45b248 --- /dev/null +++ b/ojp-jdbc-driver/src/test/java/openjproxy/jdbc/testutil/SQLServerPrefetchCacheConnectionProvider.java @@ -0,0 +1,74 @@ +package openjproxy.jdbc.testutil; + +import org.jetbrains.annotations.NotNull; +import org.junit.jupiter.api.extension.ExtensionContext; +import org.junit.jupiter.params.provider.Arguments; +import org.junit.jupiter.params.provider.ArgumentsProvider; + +import java.util.stream.Stream; + +/** + * Custom {@link ArgumentsProvider} for SQL Server prefetch-cache integration tests. + * + *

    Provides connection details pointing to the OJP prefetch-cache server on port 10594 + * (instead of the standard port 1059 used by {@link SQLServerConnectionProvider}). + * The actual SQL Server instance is still supplied by {@link SQLServerTestContainer}. + */ +public class SQLServerPrefetchCacheConnectionProvider implements ArgumentsProvider { + + private static final String JDBC_PREFIX = "jdbc:"; + + /** The OJP server with the prefetch cache enabled runs on this port in CI. */ + private static final String PREFETCH_CACHE_PORT = + System.getProperty("ojp.prefetch.cache.port", "10594"); + private static final String OJP_PROXY_HOST = + System.getProperty("ojp.proxy.host", "localhost"); + private static final String PREFETCH_CACHE_ADDRESS = OJP_PROXY_HOST + ":" + PREFETCH_CACHE_PORT; + + @Override + public Stream provideArguments(ExtensionContext context) { + if (!SQLServerTestContainer.isEnabled()) { + return Stream.empty(); + } + + ConnectionProps result = getConnectionProps(); + return Stream.of( + Arguments.of(result.driverClass, result.ojpUrl, result.username, result.password) + ); + } + + @NotNull + private static ConnectionProps getConnectionProps() { + SQLServerTestContainer.getInstance(); + + String containerJdbcUrl = SQLServerTestContainer.getJdbcUrl(); + String username = SQLServerTestContainer.getUsername(); + String password = SQLServerTestContainer.getPassword(); + + String driverClass = "org.openjproxy.jdbc.Driver"; + String urlWithoutPrefix = containerJdbcUrl.startsWith(JDBC_PREFIX) + ? 
containerJdbcUrl.substring(JDBC_PREFIX.length()) + : containerJdbcUrl; + + if (!urlWithoutPrefix.toLowerCase().contains("databasename=")) { + urlWithoutPrefix = urlWithoutPrefix + ";databaseName=defaultdb"; + } + + String ojpUrl = JDBC_PREFIX + "ojp[" + PREFETCH_CACHE_ADDRESS + "]_" + urlWithoutPrefix; + return new ConnectionProps(username, password, driverClass, ojpUrl); + } + + private static class ConnectionProps { + private final String username; + private final String password; + private final String driverClass; + private final String ojpUrl; + + ConnectionProps(String username, String password, String driverClass, String ojpUrl) { + this.username = username; + this.password = password; + this.driverClass = driverClass; + this.ojpUrl = ojpUrl; + } + } +} diff --git a/ojp-jdbc-driver/src/test/resources/cockroachdb_prefetch_cache_connections_with_record_counts.csv b/ojp-jdbc-driver/src/test/resources/cockroachdb_prefetch_cache_connections_with_record_counts.csv new file mode 100644 index 000000000..a5d2dd22b --- /dev/null +++ b/ojp-jdbc-driver/src/test/resources/cockroachdb_prefetch_cache_connections_with_record_counts.csv @@ -0,0 +1,5 @@ +99,org.openjproxy.jdbc.Driver,jdbc:ojp[localhost:10594]_postgresql://localhost:26257/defaultdb?sslmode=disable,root, +100,org.openjproxy.jdbc.Driver,jdbc:ojp[localhost:10594]_postgresql://localhost:26257/defaultdb?sslmode=disable,root, +101,org.openjproxy.jdbc.Driver,jdbc:ojp[localhost:10594]_postgresql://localhost:26257/defaultdb?sslmode=disable,root, +567,org.openjproxy.jdbc.Driver,jdbc:ojp[localhost:10594]_postgresql://localhost:26257/defaultdb?sslmode=disable,root, +1000,org.openjproxy.jdbc.Driver,jdbc:ojp[localhost:10594]_postgresql://localhost:26257/defaultdb?sslmode=disable,root, diff --git a/ojp-jdbc-driver/src/test/resources/db2_prefetch_cache_connections_with_record_counts.csv b/ojp-jdbc-driver/src/test/resources/db2_prefetch_cache_connections_with_record_counts.csv new file mode 100644 index 000000000..5c9d3c61c 
--- /dev/null +++ b/ojp-jdbc-driver/src/test/resources/db2_prefetch_cache_connections_with_record_counts.csv @@ -0,0 +1,5 @@ +99,org.openjproxy.jdbc.Driver,jdbc:ojp[localhost:10594]_db2://localhost:50000/testdb,db2inst1,testpass +100,org.openjproxy.jdbc.Driver,jdbc:ojp[localhost:10594]_db2://localhost:50000/testdb,db2inst1,testpass +101,org.openjproxy.jdbc.Driver,jdbc:ojp[localhost:10594]_db2://localhost:50000/testdb,db2inst1,testpass +567,org.openjproxy.jdbc.Driver,jdbc:ojp[localhost:10594]_db2://localhost:50000/testdb,db2inst1,testpass +1000,org.openjproxy.jdbc.Driver,jdbc:ojp[localhost:10594]_db2://localhost:50000/testdb,db2inst1,testpass diff --git a/ojp-jdbc-driver/src/test/resources/h2_prefetch_cache_connections_with_record_counts.csv b/ojp-jdbc-driver/src/test/resources/h2_prefetch_cache_connections_with_record_counts.csv new file mode 100644 index 000000000..04e81001b --- /dev/null +++ b/ojp-jdbc-driver/src/test/resources/h2_prefetch_cache_connections_with_record_counts.csv @@ -0,0 +1,5 @@ +99,org.openjproxy.jdbc.Driver,jdbc:ojp[localhost:10594]_h2:~/test,sa, +100,org.openjproxy.jdbc.Driver,jdbc:ojp[localhost:10594]_h2:~/test,sa, +101,org.openjproxy.jdbc.Driver,jdbc:ojp[localhost:10594]_h2:~/test,sa, +567,org.openjproxy.jdbc.Driver,jdbc:ojp[localhost:10594]_h2:~/test,sa, +1000,org.openjproxy.jdbc.Driver,jdbc:ojp[localhost:10594]_h2:~/test,sa, diff --git a/ojp-jdbc-driver/src/test/resources/mariadb_prefetch_cache_connections_with_record_counts.csv b/ojp-jdbc-driver/src/test/resources/mariadb_prefetch_cache_connections_with_record_counts.csv new file mode 100644 index 000000000..93d6fd48e --- /dev/null +++ b/ojp-jdbc-driver/src/test/resources/mariadb_prefetch_cache_connections_with_record_counts.csv @@ -0,0 +1,5 @@ +99,org.openjproxy.jdbc.Driver,jdbc:ojp[localhost:10594]_mariadb://localhost:3307/defaultdb,testuser,testpassword +100,org.openjproxy.jdbc.Driver,jdbc:ojp[localhost:10594]_mariadb://localhost:3307/defaultdb,testuser,testpassword 
+101,org.openjproxy.jdbc.Driver,jdbc:ojp[localhost:10594]_mariadb://localhost:3307/defaultdb,testuser,testpassword +567,org.openjproxy.jdbc.Driver,jdbc:ojp[localhost:10594]_mariadb://localhost:3307/defaultdb,testuser,testpassword +1000,org.openjproxy.jdbc.Driver,jdbc:ojp[localhost:10594]_mariadb://localhost:3307/defaultdb,testuser,testpassword diff --git a/ojp-jdbc-driver/src/test/resources/mysql_prefetch_cache_connections_with_record_counts.csv b/ojp-jdbc-driver/src/test/resources/mysql_prefetch_cache_connections_with_record_counts.csv new file mode 100644 index 000000000..18d75f2fa --- /dev/null +++ b/ojp-jdbc-driver/src/test/resources/mysql_prefetch_cache_connections_with_record_counts.csv @@ -0,0 +1,5 @@ +99,org.openjproxy.jdbc.Driver,jdbc:ojp[localhost:10594]_mysql://localhost:3306/defaultdb,testuser,testpassword +100,org.openjproxy.jdbc.Driver,jdbc:ojp[localhost:10594]_mysql://localhost:3306/defaultdb,testuser,testpassword +101,org.openjproxy.jdbc.Driver,jdbc:ojp[localhost:10594]_mysql://localhost:3306/defaultdb,testuser,testpassword +567,org.openjproxy.jdbc.Driver,jdbc:ojp[localhost:10594]_mysql://localhost:3306/defaultdb,testuser,testpassword +1000,org.openjproxy.jdbc.Driver,jdbc:ojp[localhost:10594]_mysql://localhost:3306/defaultdb,testuser,testpassword diff --git a/ojp-jdbc-driver/src/test/resources/oracle_prefetch_cache_connections_with_record_counts.csv b/ojp-jdbc-driver/src/test/resources/oracle_prefetch_cache_connections_with_record_counts.csv new file mode 100644 index 000000000..ffb4adc0c --- /dev/null +++ b/ojp-jdbc-driver/src/test/resources/oracle_prefetch_cache_connections_with_record_counts.csv @@ -0,0 +1,5 @@ +99,org.openjproxy.jdbc.Driver,jdbc:ojp[localhost:10594]_oracle:thin:@localhost:1521/XEPDB1,testuser,testpassword +100,org.openjproxy.jdbc.Driver,jdbc:ojp[localhost:10594]_oracle:thin:@localhost:1521/XEPDB1,testuser,testpassword 
+101,org.openjproxy.jdbc.Driver,jdbc:ojp[localhost:10594]_oracle:thin:@localhost:1521/XEPDB1,testuser,testpassword +567,org.openjproxy.jdbc.Driver,jdbc:ojp[localhost:10594]_oracle:thin:@localhost:1521/XEPDB1,testuser,testpassword +1000,org.openjproxy.jdbc.Driver,jdbc:ojp[localhost:10594]_oracle:thin:@localhost:1521/XEPDB1,testuser,testpassword From d7cf13c02658f8e61587eb6394a453b21c736446 Mon Sep 17 00:00:00 2001 From: "copilot-swe-agent[bot]" <198982749+Copilot@users.noreply.github.com> Date: Wed, 18 Mar 2026 22:37:11 +0000 Subject: [PATCH 19/22] fix(test): use getBoolean() for boolean-typed columns in pagination cache tests MariaDB TINYINT(1) and SQL Server BIT are both mapped to Java Boolean by their JDBC drivers. Calling rs.getInt() on these columns threw NumberFormatException("false"). Changed both tests to use getBoolean() with `id % 2 == 0`, consistent with PostgreSQL/H2/CockroachDB tests. Co-authored-by: rrobetti <7221783+rrobetti@users.noreply.github.com> --- .../jdbc/MySQLMariaDBPaginationCacheIntegrationTest.java | 4 ++-- .../jdbc/SQLServerPaginationCacheIntegrationTest.java | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/ojp-jdbc-driver/src/test/java/openjproxy/jdbc/MySQLMariaDBPaginationCacheIntegrationTest.java b/ojp-jdbc-driver/src/test/java/openjproxy/jdbc/MySQLMariaDBPaginationCacheIntegrationTest.java index 938eae010..95e2edbdc 100644 --- a/ojp-jdbc-driver/src/test/java/openjproxy/jdbc/MySQLMariaDBPaginationCacheIntegrationTest.java +++ b/ojp-jdbc-driver/src/test/java/openjproxy/jdbc/MySQLMariaDBPaginationCacheIntegrationTest.java @@ -136,7 +136,7 @@ private void runPaginationTest(int recordCount, String driverClass, * name VARCHAR(100) NOT NULL – "record_{id}" * val_int INT NOT NULL – id × 10 * val_bigint BIGINT NOT NULL – id × 1,000,000 - * val_bool TINYINT(1) NOT NULL – 1 when id is even, else 0 + * val_bool TINYINT(1) NOT NULL – true (1) when id is even, else false (0) * val_text TEXT NOT NULL – "text_value_for_row_{id}" 
* val_binary VARBINARY(32) NOT NULL – four deterministic bytes derived from id * @@ -209,7 +209,7 @@ private static int assertPage(Connection conn, String tableName, "val_int mismatch for id=" + id); assertEquals(id * 1_000_000L, rs.getLong("val_bigint"), "val_bigint mismatch for id=" + id); - assertEquals(id % 2 == 0 ? 1 : 0, rs.getInt("val_bool"), + assertEquals(id % 2 == 0, rs.getBoolean("val_bool"), "val_bool mismatch for id=" + id); assertEquals("text_value_for_row_" + id, rs.getString("val_text"), "val_text mismatch for id=" + id); diff --git a/ojp-jdbc-driver/src/test/java/openjproxy/jdbc/SQLServerPaginationCacheIntegrationTest.java b/ojp-jdbc-driver/src/test/java/openjproxy/jdbc/SQLServerPaginationCacheIntegrationTest.java index 58667b5fd..cd62a0002 100644 --- a/ojp-jdbc-driver/src/test/java/openjproxy/jdbc/SQLServerPaginationCacheIntegrationTest.java +++ b/ojp-jdbc-driver/src/test/java/openjproxy/jdbc/SQLServerPaginationCacheIntegrationTest.java @@ -151,7 +151,7 @@ private static String[] getConnectionArgs() { * name NVARCHAR(100) NOT NULL – "record_{id}" * val_int INT NOT NULL – id × 10 * val_bigint BIGINT NOT NULL – id × 1,000,000 - * val_bool BIT NOT NULL – 1 when id is even, else 0 + * val_bool BIT NOT NULL – true (1) when id is even, else false (0) * val_text NVARCHAR(255) NOT NULL – "text_value_for_row_{id}" * val_binary VARBINARY(32) NOT NULL – four deterministic bytes derived from id * @@ -225,7 +225,7 @@ private static int assertPage(Connection conn, String tableName, "val_int mismatch for id=" + id); assertEquals(id * 1_000_000L, rs.getLong("val_bigint"), "val_bigint mismatch for id=" + id); - assertEquals(id % 2 == 0 ? 
1 : 0, rs.getInt("val_bool"), + assertEquals(id % 2 == 0, rs.getBoolean("val_bool"), "val_bool mismatch for id=" + id); assertEquals("text_value_for_row_" + id, rs.getString("val_text"), "val_text mismatch for id=" + id); From 4ba935404b6377e2f2aae062d744ddadb6cb772b Mon Sep 17 00:00:00 2001 From: "copilot-swe-agent[bot]" <198982749+Copilot@users.noreply.github.com> Date: Thu, 19 Mar 2026 07:57:25 +0000 Subject: [PATCH 20/22] fix: forward ojp.nextPageCache.* client properties to server; update docs and tests for client-side config Co-authored-by: rrobetti <7221783+rrobetti@users.noreply.github.com> --- .../configuration/ojp-server-configuration.md | 7 ++++- .../part2-chapter6-server-configuration.md | 14 +++++++-- .../features/NEXT_PAGE_PREFETCH_CACHE.md | 31 +++++++++++++------ .../jdbc/DatasourcePropertiesLoader.java | 8 +++-- ...roachDBPaginationCacheIntegrationTest.java | 9 ++++-- .../Db2PaginationCacheIntegrationTest.java | 9 ++++-- .../H2PaginationCacheIntegrationTest.java | 9 ++++-- ...MariaDBPaginationCacheIntegrationTest.java | 9 ++++-- .../OraclePaginationCacheIntegrationTest.java | 9 ++++-- ...ostgresPaginationCacheIntegrationTest.java | 9 ++++-- ...LServerPaginationCacheIntegrationTest.java | 9 ++++-- .../src/test/resources/ojp.properties | 4 +++ 12 files changed, 97 insertions(+), 30 deletions(-) diff --git a/documents/configuration/ojp-server-configuration.md b/documents/configuration/ojp-server-configuration.md index fed2b3347..5017ae006 100644 --- a/documents/configuration/ojp-server-configuration.md +++ b/documents/configuration/ojp-server-configuration.md @@ -177,9 +177,14 @@ The cache detects SQL pagination clauses automatically (`LIMIT/OFFSET`, `OFFSET > **Per-datasource `enabled` is a client-side setting.** > Each datasource in the client application can independently opt in or out of the prefetch cache -> by setting `ojp.nextPageCache.enabled=false` in its `ojp.properties`: +> by setting `ojp.nextPageCache.enabled` in its `ojp.properties`. 
The value is sent to the server +> at connection time; when absent, the server's global flag applies as the fallback. > ```properties > # ojp.properties — client application +> +> # Default datasource: explicitly enable the cache +> ojp.nextPageCache.enabled=true +> > # Disable the prefetch cache for the "random-access" datasource > random-access.ojp.nextPageCache.enabled=false > ``` diff --git a/documents/ebook/part2-chapter6-server-configuration.md b/documents/ebook/part2-chapter6-server-configuration.md index 868807d2c..e98f5e8b0 100644 --- a/documents/ebook/part2-chapter6-server-configuration.md +++ b/documents/ebook/part2-chapter6-server-configuration.md @@ -447,7 +447,7 @@ java -Duser.timezone=UTC \ -jar ojp-server.jar ``` -**All prefetch cache settings:** +**Server-side settings (`ojp-server.properties` / JVM system properties):** | Property | Default | Description | |---|---|---| @@ -458,15 +458,23 @@ java -Duser.timezone=UTC \ | `ojp.server.nextPageCache.cleanupIntervalSeconds` | `60` | Interval (seconds) between background eviction sweeps | | `ojp.server.nextPageCache.datasource..prefetchWaitTimeoutMs` | *(global)* | Per-datasource override for the wait timeout (`` matches `ojp.datasource.name` on the client) | +**Client-side settings (`ojp.properties` in the client application):** + +| Property | Default | Description | +|---|---|---| +| `ojp.nextPageCache.enabled` | *(server global)* | Per-datasource opt-in/out; set to `false` to disable the cache for this datasource even when the server has it globally enabled | + ### Per-Datasource Cache Control The per-datasource `enabled` flag is a **client-side** connection property. 
Each datasource in the client application can independently opt in or out of the prefetch cache by setting `ojp.nextPageCache.enabled` in its `ojp.properties` file — no server restart needed: ```properties # ojp.properties — client application -# Default datasource: cache enabled (uses server global default) -# "random-access" datasource: disable the prefetch cache +# Default datasource: explicitly enable the cache +ojp.nextPageCache.enabled=true + +# "random-access" datasource: disable the prefetch cache for random-access workloads random-access.ojp.nextPageCache.enabled=false ``` diff --git a/documents/features/NEXT_PAGE_PREFETCH_CACHE.md b/documents/features/NEXT_PAGE_PREFETCH_CACHE.md index bcf03dab0..b926a9f70 100644 --- a/documents/features/NEXT_PAGE_PREFETCH_CACHE.md +++ b/documents/features/NEXT_PAGE_PREFETCH_CACHE.md @@ -80,6 +80,8 @@ Each cache entry is keyed by **datasource identifier + normalised SQL**. Two dat ## Configuration Reference +### Server-Side Settings (`ojp-server.properties` / JVM system properties) + | Property | Default | Description | |---|---|---| | `ojp.server.nextPageCache.enabled` | `false` | Enable the feature globally (opt-in) | @@ -87,30 +89,39 @@ Each cache entry is keyed by **datasource identifier + normalised SQL**. 
Two dat | `ojp.server.nextPageCache.maxEntries` | `100` | Maximum cache entries across all datasources | | `ojp.server.nextPageCache.prefetchWaitTimeoutMs` | `5000` | Maximum wait (ms) for an in-flight prefetch before falling back to a live query | | `ojp.server.nextPageCache.cleanupIntervalSeconds` | `60` | Interval (seconds) between background eviction scans | -| `ojp.server.nextPageCache.datasource..prefetchWaitTimeoutMs` | *(global)* | Per-datasource override for `prefetchWaitTimeoutMs` | +| `ojp.server.nextPageCache.datasource..prefetchWaitTimeoutMs` | *(global)* | Per-datasource override for `prefetchWaitTimeoutMs`; `` matches `ojp.datasource.name` sent by the client | + +### Client-Side Settings (`ojp.properties` in the client application) + +| Property | Default | Description | +|---|---|---| +| `ojp.nextPageCache.enabled` | *(server global)* | Per-datasource opt-in/out; when `false` the cache is disabled for this datasource even if the server has it globally enabled | + +The `enabled` flag is set in the client's `ojp.properties` file and is sent to the server at +connection time. When absent, the server's global `ojp.server.nextPageCache.enabled` value applies. ### Per-Datasource Configuration -The per-datasource `enabled` flag is a **client-side** connection property. 
Each datasource in the -client application can independently opt in or out of the prefetch cache by setting -`ojp.nextPageCache.enabled` in its `ojp.properties` file: +Each datasource in the client application can independently opt in or out of the prefetch cache: ```properties # ojp.properties — client application -# Default datasource: cache enabled (uses server global default) +# Default datasource: explicitly enable the cache +ojp.nextPageCache.enabled=true -# "olap" datasource: disable the prefetch cache +# "olap" datasource: disable the prefetch cache for random-access workloads olap.ojp.nextPageCache.enabled=false +``` + +The server-side `prefetchWaitTimeoutMs` can also be overridden per datasource (server configuration): -# Per-datasource timeout tuning (server-side) +```properties +# ojp-server.properties or JVM system properties ojp.server.nextPageCache.datasource.analytics.prefetchWaitTimeoutMs=10000 ojp.server.nextPageCache.datasource.oltp.prefetchWaitTimeoutMs=1000 ``` -The `prefetchWaitTimeoutMs` can be overridden on the server side per datasource name (which matches -the `ojp.datasource.name` connection property the client sends on connect). 
- ## Quick Start **Enable with defaults:** diff --git a/ojp-jdbc-driver/src/main/java/org/openjproxy/jdbc/DatasourcePropertiesLoader.java b/ojp-jdbc-driver/src/main/java/org/openjproxy/jdbc/DatasourcePropertiesLoader.java index 0bed6378e..e659ba04b 100644 --- a/ojp-jdbc-driver/src/main/java/org/openjproxy/jdbc/DatasourcePropertiesLoader.java +++ b/ojp-jdbc-driver/src/main/java/org/openjproxy/jdbc/DatasourcePropertiesLoader.java @@ -24,6 +24,7 @@ public class DatasourcePropertiesLoader { private static final String DEFAULT_DATASOURCE_NAME = "default"; private static final String OJP_POOL_PREFIX = "ojp.connection.pool."; private static final String OJP_XA_PREFIX = "ojp.xa."; + private static final String OJP_NEXT_PAGE_CACHE_PREFIX = "ojp.nextPageCache."; /** * Load ojp.properties and extract configuration for the datasource identified by @@ -105,11 +106,14 @@ private static void applyEnvProperties(Properties result, String prefixDot, bool } private static boolean hasPrefixedOjpKey(String key, String prefixDot) { - return key.startsWith(prefixDot + OJP_POOL_PREFIX) || key.startsWith(prefixDot + OJP_XA_PREFIX); + return key.startsWith(prefixDot + OJP_POOL_PREFIX) + || key.startsWith(prefixDot + OJP_XA_PREFIX) + || key.startsWith(prefixDot + OJP_NEXT_PAGE_CACHE_PREFIX); } private static boolean isUnprefixedOjpKey(String key) { - return key.startsWith(OJP_POOL_PREFIX) || key.startsWith(OJP_XA_PREFIX); + return key.startsWith(OJP_POOL_PREFIX) || key.startsWith(OJP_XA_PREFIX) + || key.startsWith(OJP_NEXT_PAGE_CACHE_PREFIX); } private static void copyUnprefixedOjpProperties(Properties target, Properties source) { diff --git a/ojp-jdbc-driver/src/test/java/openjproxy/jdbc/CockroachDBPaginationCacheIntegrationTest.java b/ojp-jdbc-driver/src/test/java/openjproxy/jdbc/CockroachDBPaginationCacheIntegrationTest.java index 2ef63d2bc..f0ee7aec0 100644 --- a/ojp-jdbc-driver/src/test/java/openjproxy/jdbc/CockroachDBPaginationCacheIntegrationTest.java +++ 
b/ojp-jdbc-driver/src/test/java/openjproxy/jdbc/CockroachDBPaginationCacheIntegrationTest.java @@ -31,7 +31,9 @@ *

  • Creates a dedicated table with multiple column types, including a {@code BYTEA} column.
  • *
  • Inserts the requested number of rows with fully deterministic, per-row values.
  • *
  • Paginates through all rows using {@code LIMIT 100 OFFSET …} against an OJP server instance - * that has {@code ojp.server.nextPageCache.enabled=true} (port 10594).
  • + * that has {@code ojp.server.nextPageCache.enabled=true} (port 10594). The client also sets + * {@code ojp.nextPageCache.enabled=true} in {@code ojp.properties}, which is the + * per-datasource opt-in sent to the server on connect. *
  • Asserts every column value, including a byte-exact comparison of the * {@code BYTEA} column.
  • *
  • Drops the table on completion.
  • @@ -39,7 +41,10 @@ * *

    This test is disabled by default and is activated by passing * {@code -DenableCockroachDBPrefetchCacheTests=true} to the Maven Surefire plugin in CI. - * The target OJP server must already be running on port 10594 with the prefetch cache enabled. + * The target OJP server must already be running on port 10594 with + * {@code ojp.server.nextPageCache.enabled=true}. The client-side per-datasource flag + * {@code ojp.nextPageCache.enabled=true} is set in {@code ojp.properties} and is sent to + * the server at connection time to explicitly opt this datasource into the cache. */ class CockroachDBPaginationCacheIntegrationTest { diff --git a/ojp-jdbc-driver/src/test/java/openjproxy/jdbc/Db2PaginationCacheIntegrationTest.java b/ojp-jdbc-driver/src/test/java/openjproxy/jdbc/Db2PaginationCacheIntegrationTest.java index 38c4f248d..e88472a52 100644 --- a/ojp-jdbc-driver/src/test/java/openjproxy/jdbc/Db2PaginationCacheIntegrationTest.java +++ b/ojp-jdbc-driver/src/test/java/openjproxy/jdbc/Db2PaginationCacheIntegrationTest.java @@ -30,7 +30,9 @@ * including a {@code BLOB} column. *

  • Inserts the requested number of rows with fully deterministic, per-row values.
  • *
  • Paginates through all rows using {@code OFFSET … ROWS FETCH NEXT 100 ROWS ONLY} against an - * OJP server instance that has {@code ojp.server.nextPageCache.enabled=true} (port 10594).
  • + * OJP server instance that has {@code ojp.server.nextPageCache.enabled=true} (port 10594). + * The client also sets {@code ojp.nextPageCache.enabled=true} in {@code ojp.properties}, + * which is the per-datasource opt-in sent to the server on connect. *
  • Asserts every column value, including a byte-exact comparison of the * {@code BLOB} column.
  • *
  • Drops the table on completion.
  • @@ -38,7 +40,10 @@ * *

    This test is disabled by default and is activated by passing * {@code -DenableDb2PrefetchCacheTests=true} to the Maven Surefire plugin in CI. - * The target OJP server must already be running on port 10594 with the prefetch cache enabled. + * The target OJP server must already be running on port 10594 with + * {@code ojp.server.nextPageCache.enabled=true}. The client-side per-datasource flag + * {@code ojp.nextPageCache.enabled=true} is set in {@code ojp.properties} and is sent to + * the server at connection time to explicitly opt this datasource into the cache. */ class Db2PaginationCacheIntegrationTest { diff --git a/ojp-jdbc-driver/src/test/java/openjproxy/jdbc/H2PaginationCacheIntegrationTest.java b/ojp-jdbc-driver/src/test/java/openjproxy/jdbc/H2PaginationCacheIntegrationTest.java index f8b6c79f2..4ea88b3b6 100644 --- a/ojp-jdbc-driver/src/test/java/openjproxy/jdbc/H2PaginationCacheIntegrationTest.java +++ b/ojp-jdbc-driver/src/test/java/openjproxy/jdbc/H2PaginationCacheIntegrationTest.java @@ -29,7 +29,9 @@ *

  • Creates a dedicated table with multiple column types, including a {@code VARBINARY} column.
  • *
  • Inserts the requested number of rows with fully deterministic, per-row values.
  • *
  • Paginates through all rows using {@code LIMIT 100 OFFSET …} against an OJP server instance - * that has {@code ojp.server.nextPageCache.enabled=true} (port 10594).
  • + * that has {@code ojp.server.nextPageCache.enabled=true} (port 10594). The client also sets + * {@code ojp.nextPageCache.enabled=true} in {@code ojp.properties}, which is the + * per-datasource opt-in sent to the server on connect. *
  • Asserts every column value, including a byte-exact comparison of the * {@code VARBINARY} column.
  • *
  • Drops the table on completion.
  • @@ -37,7 +39,10 @@ * *

    This test is disabled by default and is activated by passing * {@code -DenableH2PrefetchCacheTests=true} to the Maven Surefire plugin in CI. - * The target OJP server must already be running on port 10594 with the prefetch cache enabled. + * The target OJP server must already be running on port 10594 with + * {@code ojp.server.nextPageCache.enabled=true}. The client-side per-datasource flag + * {@code ojp.nextPageCache.enabled=true} is set in {@code ojp.properties} and is sent to + * the server at connection time to explicitly opt this datasource into the cache. */ class H2PaginationCacheIntegrationTest { diff --git a/ojp-jdbc-driver/src/test/java/openjproxy/jdbc/MySQLMariaDBPaginationCacheIntegrationTest.java b/ojp-jdbc-driver/src/test/java/openjproxy/jdbc/MySQLMariaDBPaginationCacheIntegrationTest.java index 95e2edbdc..5f8d9acb5 100644 --- a/ojp-jdbc-driver/src/test/java/openjproxy/jdbc/MySQLMariaDBPaginationCacheIntegrationTest.java +++ b/ojp-jdbc-driver/src/test/java/openjproxy/jdbc/MySQLMariaDBPaginationCacheIntegrationTest.java @@ -27,7 +27,9 @@ *

  • Creates a dedicated table with multiple column types, including a {@code VARBINARY} LOB column.
  • *
  • Inserts the requested number of rows with fully deterministic, per-row values.
  • *
  • Paginates through all rows using {@code LIMIT 100 OFFSET …} against an OJP server instance - * that has {@code ojp.server.nextPageCache.enabled=true} (port 10594).
  • + * that has {@code ojp.server.nextPageCache.enabled=true} (port 10594). The client also sets + * {@code ojp.nextPageCache.enabled=true} in {@code ojp.properties}, which is the + * per-datasource opt-in sent to the server on connect. *
  • Asserts every column value, including a byte-exact comparison of the * {@code VARBINARY} column.
  • *
  • Drops the table on completion.
  • @@ -35,7 +37,10 @@ * *

    This test is disabled by default and is activated by passing * {@code -DenableMySQLPrefetchCacheTests=true} to the Maven Surefire plugin in CI. - * The target OJP server must already be running on port 10594 with the prefetch cache enabled. + * The target OJP server must already be running on port 10594 with + * {@code ojp.server.nextPageCache.enabled=true}. The client-side per-datasource flag + * {@code ojp.nextPageCache.enabled=true} is set in {@code ojp.properties} and is sent to + * the server at connection time to explicitly opt this datasource into the cache. */ class MySQLMariaDBPaginationCacheIntegrationTest { diff --git a/ojp-jdbc-driver/src/test/java/openjproxy/jdbc/OraclePaginationCacheIntegrationTest.java b/ojp-jdbc-driver/src/test/java/openjproxy/jdbc/OraclePaginationCacheIntegrationTest.java index 31ce5cf9e..d4550380e 100644 --- a/ojp-jdbc-driver/src/test/java/openjproxy/jdbc/OraclePaginationCacheIntegrationTest.java +++ b/ojp-jdbc-driver/src/test/java/openjproxy/jdbc/OraclePaginationCacheIntegrationTest.java @@ -30,7 +30,9 @@ *

  • Creates a dedicated table with multiple column types, including a {@code BLOB} column.
  • *
  • Inserts the requested number of rows with fully deterministic, per-row values.
  • *
  • Paginates through all rows using {@code OFFSET … ROWS FETCH NEXT 100 ROWS ONLY} against an - * OJP server instance that has {@code ojp.server.nextPageCache.enabled=true} (port 10594).
  • + * OJP server instance that has {@code ojp.server.nextPageCache.enabled=true} (port 10594). + * The client also sets {@code ojp.nextPageCache.enabled=true} in {@code ojp.properties}, + * which is the per-datasource opt-in sent to the server on connect. *
  • Asserts every column value, including a byte-exact comparison of the * {@code BLOB} column.
  • *
  • Drops the table on completion.
  • @@ -38,7 +40,10 @@ * *

    This test is disabled by default and is activated by passing * {@code -DenableOraclePrefetchCacheTests=true} to the Maven Surefire plugin in CI. - * The target OJP server must already be running on port 10594 with the prefetch cache enabled. + * The target OJP server must already be running on port 10594 with + * {@code ojp.server.nextPageCache.enabled=true}. The client-side per-datasource flag + * {@code ojp.nextPageCache.enabled=true} is set in {@code ojp.properties} and is sent to + * the server at connection time to explicitly opt this datasource into the cache. * *

    Oracle type notes: *

      diff --git a/ojp-jdbc-driver/src/test/java/openjproxy/jdbc/PostgresPaginationCacheIntegrationTest.java b/ojp-jdbc-driver/src/test/java/openjproxy/jdbc/PostgresPaginationCacheIntegrationTest.java index 0e85469c6..9271bda5f 100644 --- a/ojp-jdbc-driver/src/test/java/openjproxy/jdbc/PostgresPaginationCacheIntegrationTest.java +++ b/ojp-jdbc-driver/src/test/java/openjproxy/jdbc/PostgresPaginationCacheIntegrationTest.java @@ -28,7 +28,9 @@ *
    • Creates a dedicated table with multiple column types, including a {@code BYTEA} LOB column.
    • *
    • Inserts the requested number of rows with fully deterministic, per-row values.
    • *
    • Paginates through all rows using {@code LIMIT 100 OFFSET …} against an OJP server instance - * that has {@code ojp.server.nextPageCache.enabled=true} (port 10594).
    • + * that has {@code ojp.server.nextPageCache.enabled=true} (port 10594). The client also sets + * {@code ojp.nextPageCache.enabled=true} in {@code ojp.properties}, which is the + * per-datasource opt-in sent to the server on connect. *
    • Asserts every column value, including a byte-exact comparison of the * {@code BYTEA} column.
    • *
    • Drops the table on completion.
    • @@ -36,7 +38,10 @@ * *

      This test is disabled by default and is activated by passing * {@code -DenablePostgresPrefetchCacheTests=true} to the Maven Surefire plugin in CI. - * The target OJP server must already be running on port 10594 with the prefetch cache enabled. + * The target OJP server must already be running on port 10594 with + * {@code ojp.server.nextPageCache.enabled=true}. The client-side per-datasource flag + * {@code ojp.nextPageCache.enabled=true} is set in {@code ojp.properties} and is sent to + * the server at connection time to explicitly opt this datasource into the cache. */ class PostgresPaginationCacheIntegrationTest { diff --git a/ojp-jdbc-driver/src/test/java/openjproxy/jdbc/SQLServerPaginationCacheIntegrationTest.java b/ojp-jdbc-driver/src/test/java/openjproxy/jdbc/SQLServerPaginationCacheIntegrationTest.java index cd62a0002..50a0bab87 100644 --- a/ojp-jdbc-driver/src/test/java/openjproxy/jdbc/SQLServerPaginationCacheIntegrationTest.java +++ b/ojp-jdbc-driver/src/test/java/openjproxy/jdbc/SQLServerPaginationCacheIntegrationTest.java @@ -35,7 +35,9 @@ *

    • Creates a dedicated table with multiple column types, including a {@code VARBINARY} column.
    • *
    • Inserts the requested number of rows with fully deterministic, per-row values.
    • *
    • Paginates through all rows using {@code OFFSET … ROWS FETCH NEXT 100 ROWS ONLY} against an - * OJP server instance that has {@code ojp.server.nextPageCache.enabled=true} (port 10594).
    • + * OJP server instance that has {@code ojp.server.nextPageCache.enabled=true} (port 10594). + * The client also sets {@code ojp.nextPageCache.enabled=true} in {@code ojp.properties}, + * which is the per-datasource opt-in sent to the server on connect. *
    • Asserts every column value, including a byte-exact comparison of the * {@code VARBINARY} column.
    • *
    • Drops the table on completion.
    • @@ -43,7 +45,10 @@ * *

      This test is disabled by default and is activated by passing * {@code -DenableSqlServerPrefetchCacheTests=true} to the Maven Surefire plugin in CI. - * The target OJP server must already be running on port 10594 with the prefetch cache enabled. + * The target OJP server must already be running on port 10594 with + * {@code ojp.server.nextPageCache.enabled=true}. The client-side per-datasource flag + * {@code ojp.nextPageCache.enabled=true} is set in {@code ojp.properties} and is sent to + * the server at connection time to explicitly opt this datasource into the cache. * *

      SQL Server type notes: *

        diff --git a/ojp-jdbc-driver/src/test/resources/ojp.properties b/ojp-jdbc-driver/src/test/resources/ojp.properties index 0330b4734..bacc62b5b 100644 --- a/ojp-jdbc-driver/src/test/resources/ojp.properties +++ b/ojp-jdbc-driver/src/test/resources/ojp.properties @@ -5,6 +5,10 @@ ojp.connection.pool.idleTimeout=2000 ojp.connection.pool.maxLifetime=1200000 ojp.connection.pool.connectionTimeout=20000 +# Enable the next-page prefetch cache for this client (client-side per-datasource setting). +# The server must also have ojp.server.nextPageCache.enabled=true for this to take effect. +ojp.nextPageCache.enabled=true + # Multinode-specific configuration for non-XA connections multinode.ojp.connection.pool.maximumPoolSize=22 multinode.ojp.connection.pool.minimumIdle=20 From 3a930568021152eef5504721758b45508e8cc4cf Mon Sep 17 00:00:00 2001 From: "copilot-swe-agent[bot]" <198982749+Copilot@users.noreply.github.com> Date: Thu, 19 Mar 2026 09:25:18 +0000 Subject: [PATCH 21/22] refactor(ci): merge prefetch-cache server into main OJP server; update CSV ports and test docs Co-authored-by: rrobetti <7221783+rrobetti@users.noreply.github.com> --- .github/workflows/main.yml | 98 ++----------------- ...roachDBPaginationCacheIntegrationTest.java | 4 +- .../Db2PaginationCacheIntegrationTest.java | 4 +- .../H2PaginationCacheIntegrationTest.java | 4 +- ...MariaDBPaginationCacheIntegrationTest.java | 4 +- .../OraclePaginationCacheIntegrationTest.java | 4 +- ...ostgresPaginationCacheIntegrationTest.java | 6 +- ...LServerPaginationCacheIntegrationTest.java | 8 +- ...ServerPrefetchCacheConnectionProvider.java | 8 +- ...h_cache_connections_with_record_counts.csv | 10 +- ...h_cache_connections_with_record_counts.csv | 10 +- ...h_cache_connections_with_record_counts.csv | 10 +- ...h_cache_connections_with_record_counts.csv | 10 +- ...h_cache_connections_with_record_counts.csv | 10 +- ...h_cache_connections_with_record_counts.csv | 10 +- ...h_cache_connections_with_record_counts.csv | 10 
+- 16 files changed, 64 insertions(+), 146 deletions(-) diff --git a/.github/workflows/main.yml b/.github/workflows/main.yml index 574333ae2..8243449ba 100644 --- a/.github/workflows/main.yml +++ b/.github/workflows/main.yml @@ -98,16 +98,9 @@ jobs: run: | docker run -d --name ojp-server \ --network host \ - -e JAVA_TOOL_OPTIONS="-Dojp.server.slowQuerySegregation.enabled=true" \ + -e JAVA_TOOL_OPTIONS="-Dojp.server.slowQuerySegregation.enabled=true -Dojp.server.nextPageCache.enabled=true -Dojp.server.nextPageCache.ttlSeconds=60 -Dojp.server.nextPageCache.prefetchWaitTimeoutMs=5000" \ rrobetti/ojp:0.4.1-SNAPSHOT - # Pagination-cache integration tests run against this server (port 10594) - - name: Start OJP Server container (prefetch cache on port 10594) - run: | - docker run -d --name ojp-server-prefetch-cache \ - --network host \ - -e JAVA_TOOL_OPTIONS="-Dojp.server.port=10594 -Dojp.prometheus.port=9164 -Dojp.server.slowQuerySegregation.enabled=true -Dojp.server.nextPageCache.enabled=true -Dojp.server.nextPageCache.ttlSeconds=60 -Dojp.server.nextPageCache.prefetchWaitTimeoutMs=5000" \ - rrobetti/ojp:0.4.1-SNAPSHOT - name: Wait for ojp-server to start run: sleep 10 @@ -131,9 +124,6 @@ jobs: if: always() # ensures it runs even if previous steps fail run: | docker logs ojp-server 2>&1 || echo "ojp-server container not found" - echo "" - echo "=== OJP Server (with prefetch cache) log ===" - docker logs ojp-server-prefetch-cache 2>&1 || echo "ojp-server-prefetch-cache container not found" # =========================================================================== # JOB 2: PostgreSQL Integration Tests @@ -217,7 +207,7 @@ jobs: run: | docker run -d --name ojp-server \ --network host \ - -e JAVA_TOOL_OPTIONS="-Dojp.server.slowQuerySegregation.enabled=true" \ + -e JAVA_TOOL_OPTIONS="-Dojp.server.slowQuerySegregation.enabled=true -Dojp.server.nextPageCache.enabled=true -Dojp.server.nextPageCache.ttlSeconds=60 -Dojp.server.nextPageCache.prefetchWaitTimeoutMs=5000" \ 
rrobetti/ojp:0.4.1-SNAPSHOT # Start second OJP server WITH SQL enhancer enabled in OPTIMIZE mode @@ -232,15 +222,6 @@ jobs: -e JAVA_TOOL_OPTIONS="-Dojp.server.port=10593 -Dojp.prometheus.port=9163 -Dojp.server.slowQuerySegregation.enabled=true -Dojp.sql.enhancer.enabled=true -Dojp.sql.enhancer.mode=OPTIMIZE -Dojp.sql.enhancer.dialect=POSTGRESQL" \ rrobetti/ojp:0.4.1-SNAPSHOT - # Start third OJP server WITH next-page prefetch cache enabled - # Pagination-cache integration tests run against this server (port 10594) - - name: Start OJP Server container (prefetch cache on port 10594) - run: | - docker run -d --name ojp-server-prefetch-cache \ - --network host \ - -e JAVA_TOOL_OPTIONS="-Dojp.server.port=10594 -Dojp.prometheus.port=9164 -Dojp.server.slowQuerySegregation.enabled=true -Dojp.server.nextPageCache.enabled=true -Dojp.server.nextPageCache.ttlSeconds=60 -Dojp.server.nextPageCache.prefetchWaitTimeoutMs=5000" \ - rrobetti/ojp:0.4.1-SNAPSHOT - - name: Wait for ojp-server to start run: sleep 10 @@ -288,9 +269,6 @@ jobs: echo "" echo "=== OJP Server (with SQL enhancer) log ===" docker logs ojp-server-enhancer 2>&1 || echo "ojp-server-enhancer container not found" - echo "" - echo "=== OJP Server (with prefetch cache) log ===" - docker logs ojp-server-prefetch-cache 2>&1 || echo "ojp-server-prefetch-cache container not found" # =========================================================================== # JOB 3: MySQL Integration Tests @@ -365,16 +343,9 @@ jobs: run: | docker run -d --name ojp-server \ --network host \ - -e JAVA_TOOL_OPTIONS="-Dojp.server.slowQuerySegregation.enabled=true" \ + -e JAVA_TOOL_OPTIONS="-Dojp.server.slowQuerySegregation.enabled=true -Dojp.server.nextPageCache.enabled=true -Dojp.server.nextPageCache.ttlSeconds=60 -Dojp.server.nextPageCache.prefetchWaitTimeoutMs=5000" \ rrobetti/ojp:0.4.1-SNAPSHOT - # Pagination-cache integration tests run against this server (port 10594) - - name: Start OJP Server container (prefetch cache on port 10594) - 
run: | - docker run -d --name ojp-server-prefetch-cache \ - --network host \ - -e JAVA_TOOL_OPTIONS="-Dojp.server.port=10594 -Dojp.prometheus.port=9164 -Dojp.server.slowQuerySegregation.enabled=true -Dojp.server.nextPageCache.enabled=true -Dojp.server.nextPageCache.ttlSeconds=60 -Dojp.server.nextPageCache.prefetchWaitTimeoutMs=5000" \ - rrobetti/ojp:0.4.1-SNAPSHOT - name: Wait for ojp-server to start run: sleep 10 @@ -397,9 +368,6 @@ jobs: if: always() run: | docker logs ojp-server 2>&1 || echo "ojp-server container not found" - echo "" - echo "=== OJP Server (with prefetch cache) log ===" - docker logs ojp-server-prefetch-cache 2>&1 || echo "ojp-server-prefetch-cache container not found" # =========================================================================== # =========================================================================== @@ -473,16 +441,9 @@ jobs: run: | docker run -d --name ojp-server \ --network host \ - -e JAVA_TOOL_OPTIONS="-Dojp.server.slowQuerySegregation.enabled=true" \ + -e JAVA_TOOL_OPTIONS="-Dojp.server.slowQuerySegregation.enabled=true -Dojp.server.nextPageCache.enabled=true -Dojp.server.nextPageCache.ttlSeconds=60 -Dojp.server.nextPageCache.prefetchWaitTimeoutMs=5000" \ rrobetti/ojp:0.4.1-SNAPSHOT - # Pagination-cache integration tests run against this server (port 10594) - - name: Start OJP Server container (prefetch cache on port 10594) - run: | - docker run -d --name ojp-server-prefetch-cache \ - --network host \ - -e JAVA_TOOL_OPTIONS="-Dojp.server.port=10594 -Dojp.prometheus.port=9164 -Dojp.server.slowQuerySegregation.enabled=true -Dojp.server.nextPageCache.enabled=true -Dojp.server.nextPageCache.ttlSeconds=60 -Dojp.server.nextPageCache.prefetchWaitTimeoutMs=5000" \ - rrobetti/ojp:0.4.1-SNAPSHOT - name: Wait for ojp-server to start run: sleep 10 @@ -505,9 +466,6 @@ jobs: if: always() run: | docker logs ojp-server 2>&1 || echo "ojp-server container not found" - echo "" - echo "=== OJP Server (with prefetch cache) log ===" - 
docker logs ojp-server-prefetch-cache 2>&1 || echo "ojp-server-prefetch-cache container not found" # =========================================================================== # JOB 5: CockroachDB Integration Tests @@ -572,16 +530,9 @@ jobs: run: | docker run -d --name ojp-server \ --network host \ - -e JAVA_TOOL_OPTIONS="-Dojp.server.slowQuerySegregation.enabled=true" \ + -e JAVA_TOOL_OPTIONS="-Dojp.server.slowQuerySegregation.enabled=true -Dojp.server.nextPageCache.enabled=true -Dojp.server.nextPageCache.ttlSeconds=60 -Dojp.server.nextPageCache.prefetchWaitTimeoutMs=5000" \ rrobetti/ojp:0.4.1-SNAPSHOT - # Pagination-cache integration tests run against this server (port 10594) - - name: Start OJP Server container (prefetch cache on port 10594) - run: | - docker run -d --name ojp-server-prefetch-cache \ - --network host \ - -e JAVA_TOOL_OPTIONS="-Dojp.server.port=10594 -Dojp.prometheus.port=9164 -Dojp.server.slowQuerySegregation.enabled=true -Dojp.server.nextPageCache.enabled=true -Dojp.server.nextPageCache.ttlSeconds=60 -Dojp.server.nextPageCache.prefetchWaitTimeoutMs=5000" \ - rrobetti/ojp:0.4.1-SNAPSHOT - name: Wait for ojp-server to start run: sleep 10 @@ -604,9 +555,6 @@ jobs: if: always() run: | docker logs ojp-server 2>&1 || echo "ojp-server container not found" - echo "" - echo "=== OJP Server (with prefetch cache) log ===" - docker logs ojp-server-prefetch-cache 2>&1 || echo "ojp-server-prefetch-cache container not found" # =========================================================================== # JOB 6: DB2 Integration Tests @@ -752,16 +700,9 @@ jobs: run: | docker run -d --name ojp-server \ --network host \ - -e JAVA_TOOL_OPTIONS="-Dojp.server.slowQuerySegregation.enabled=true" \ + -e JAVA_TOOL_OPTIONS="-Dojp.server.slowQuerySegregation.enabled=true -Dojp.server.nextPageCache.enabled=true -Dojp.server.nextPageCache.ttlSeconds=60 -Dojp.server.nextPageCache.prefetchWaitTimeoutMs=5000" \ rrobetti/ojp:0.4.1-SNAPSHOT - # Pagination-cache integration tests 
run against this server (port 10594) - - name: Start OJP Server container (prefetch cache on port 10594) - run: | - docker run -d --name ojp-server-prefetch-cache \ - --network host \ - -e JAVA_TOOL_OPTIONS="-Dojp.server.port=10594 -Dojp.prometheus.port=9164 -Dojp.server.slowQuerySegregation.enabled=true -Dojp.server.nextPageCache.enabled=true -Dojp.server.nextPageCache.ttlSeconds=60 -Dojp.server.nextPageCache.prefetchWaitTimeoutMs=5000" \ - rrobetti/ojp:0.4.1-SNAPSHOT - name: Wait for ojp-server to start run: sleep 10 @@ -784,9 +725,6 @@ jobs: if: always() run: | docker logs ojp-server 2>&1 || echo "ojp-server container not found" - echo "" - echo "=== OJP Server (with prefetch cache) log ===" - docker logs ojp-server-prefetch-cache 2>&1 || echo "ojp-server-prefetch-cache container not found" # =========================================================================== # JOB 7: Multinode Integration Tests @@ -1760,16 +1698,9 @@ jobs: run: | docker run -d --name ojp-server \ --network host \ - -e JAVA_TOOL_OPTIONS="-Dojp.server.slowQuerySegregation.enabled=true" \ + -e JAVA_TOOL_OPTIONS="-Dojp.server.slowQuerySegregation.enabled=true -Dojp.server.nextPageCache.enabled=true -Dojp.server.nextPageCache.ttlSeconds=60 -Dojp.server.nextPageCache.prefetchWaitTimeoutMs=5000" \ rrobetti/ojp:0.4.1-SNAPSHOT - # Pagination-cache integration tests run against this server (port 10594) - - name: Start OJP Server container (prefetch cache on port 10594) - run: | - docker run -d --name ojp-server-prefetch-cache \ - --network host \ - -e JAVA_TOOL_OPTIONS="-Dojp.server.port=10594 -Dojp.prometheus.port=9164 -Dojp.server.slowQuerySegregation.enabled=true -Dojp.server.nextPageCache.enabled=true -Dojp.server.nextPageCache.ttlSeconds=60 -Dojp.server.nextPageCache.prefetchWaitTimeoutMs=5000" \ - rrobetti/ojp:0.4.1-SNAPSHOT - name: Wait for ojp-server to start run: sleep 10 @@ -1792,9 +1723,6 @@ jobs: if: always() # ensures it runs even if previous steps fail run: | docker logs ojp-server 
2>&1 || echo "ojp-server container not found" - echo "" - echo "=== OJP Server (with prefetch cache) log ===" - docker logs ojp-server-prefetch-cache 2>&1 || echo "ojp-server-prefetch-cache container not found" # =========================================================================== # JOB 10: SQL Server Integration Tests @@ -1875,16 +1803,9 @@ jobs: run: | docker run -d --name ojp-server \ --network host \ - -e JAVA_TOOL_OPTIONS="-Dojp.server.slowQuerySegregation.enabled=true" \ + -e JAVA_TOOL_OPTIONS="-Dojp.server.slowQuerySegregation.enabled=true -Dojp.server.nextPageCache.enabled=true -Dojp.server.nextPageCache.ttlSeconds=60 -Dojp.server.nextPageCache.prefetchWaitTimeoutMs=5000" \ rrobetti/ojp:0.4.1-SNAPSHOT - # Pagination-cache integration tests run against this server (port 10594) - - name: Start OJP Server container (prefetch cache on port 10594) - run: | - docker run -d --name ojp-server-prefetch-cache \ - --network host \ - -e JAVA_TOOL_OPTIONS="-Dojp.server.port=10594 -Dojp.prometheus.port=9164 -Dojp.server.slowQuerySegregation.enabled=true -Dojp.server.nextPageCache.enabled=true -Dojp.server.nextPageCache.ttlSeconds=60 -Dojp.server.nextPageCache.prefetchWaitTimeoutMs=5000" \ - rrobetti/ojp:0.4.1-SNAPSHOT - name: Wait for ojp-server to start run: sleep 10 @@ -1913,9 +1834,6 @@ jobs: if: always() # ensures it runs even if previous steps fail run: | docker logs ojp-server 2>&1 || echo "ojp-server container not found" - echo "" - echo "=== OJP Server (with prefetch cache) log ===" - docker logs ojp-server-prefetch-cache 2>&1 || echo "ojp-server-prefetch-cache container not found" # =========================================================================== # JOB 11: Notify Integration Repository diff --git a/ojp-jdbc-driver/src/test/java/openjproxy/jdbc/CockroachDBPaginationCacheIntegrationTest.java b/ojp-jdbc-driver/src/test/java/openjproxy/jdbc/CockroachDBPaginationCacheIntegrationTest.java index f0ee7aec0..00442d192 100644 --- 
a/ojp-jdbc-driver/src/test/java/openjproxy/jdbc/CockroachDBPaginationCacheIntegrationTest.java +++ b/ojp-jdbc-driver/src/test/java/openjproxy/jdbc/CockroachDBPaginationCacheIntegrationTest.java @@ -31,7 +31,7 @@ *
      • Creates a dedicated table with multiple column types, including a {@code BYTEA} column.
      • *
      • Inserts the requested number of rows with fully deterministic, per-row values.
      • *
      • Paginates through all rows using {@code LIMIT 100 OFFSET …} against an OJP server instance - * that has {@code ojp.server.nextPageCache.enabled=true} (port 10594). The client also sets + * that has {@code ojp.server.nextPageCache.enabled=true} (default port 1059). The client also sets * {@code ojp.nextPageCache.enabled=true} in {@code ojp.properties}, which is the * per-datasource opt-in sent to the server on connect.
      • *
      • Asserts every column value, including a byte-exact comparison of the @@ -41,7 +41,7 @@ * *

        This test is disabled by default and is activated by passing * {@code -DenableCockroachDBPrefetchCacheTests=true} to the Maven Surefire plugin in CI. - * The target OJP server must already be running on port 10594 with + * The target OJP server must already be running on default port 1059 with * {@code ojp.server.nextPageCache.enabled=true}. The client-side per-datasource flag * {@code ojp.nextPageCache.enabled=true} is set in {@code ojp.properties} and is sent to * the server at connection time to explicitly opt this datasource into the cache. diff --git a/ojp-jdbc-driver/src/test/java/openjproxy/jdbc/Db2PaginationCacheIntegrationTest.java b/ojp-jdbc-driver/src/test/java/openjproxy/jdbc/Db2PaginationCacheIntegrationTest.java index e88472a52..6772df9b9 100644 --- a/ojp-jdbc-driver/src/test/java/openjproxy/jdbc/Db2PaginationCacheIntegrationTest.java +++ b/ojp-jdbc-driver/src/test/java/openjproxy/jdbc/Db2PaginationCacheIntegrationTest.java @@ -30,7 +30,7 @@ * including a {@code BLOB} column.

      • *
      • Inserts the requested number of rows with fully deterministic, per-row values.
      • *
      • Paginates through all rows using {@code OFFSET … ROWS FETCH NEXT 100 ROWS ONLY} against an - * OJP server instance that has {@code ojp.server.nextPageCache.enabled=true} (port 10594). + * OJP server instance that has {@code ojp.server.nextPageCache.enabled=true} (default port 1059). * The client also sets {@code ojp.nextPageCache.enabled=true} in {@code ojp.properties}, * which is the per-datasource opt-in sent to the server on connect.
      • *
      • Asserts every column value, including a byte-exact comparison of the @@ -40,7 +40,7 @@ * *

        This test is disabled by default and is activated by passing * {@code -DenableDb2PrefetchCacheTests=true} to the Maven Surefire plugin in CI. - * The target OJP server must already be running on port 10594 with + * The target OJP server must already be running on default port 1059 with * {@code ojp.server.nextPageCache.enabled=true}. The client-side per-datasource flag * {@code ojp.nextPageCache.enabled=true} is set in {@code ojp.properties} and is sent to * the server at connection time to explicitly opt this datasource into the cache. diff --git a/ojp-jdbc-driver/src/test/java/openjproxy/jdbc/H2PaginationCacheIntegrationTest.java b/ojp-jdbc-driver/src/test/java/openjproxy/jdbc/H2PaginationCacheIntegrationTest.java index 4ea88b3b6..5ad694b4b 100644 --- a/ojp-jdbc-driver/src/test/java/openjproxy/jdbc/H2PaginationCacheIntegrationTest.java +++ b/ojp-jdbc-driver/src/test/java/openjproxy/jdbc/H2PaginationCacheIntegrationTest.java @@ -29,7 +29,7 @@ *

      • Creates a dedicated table with multiple column types, including a {@code VARBINARY} column.
      • *
      • Inserts the requested number of rows with fully deterministic, per-row values.
      • *
      • Paginates through all rows using {@code LIMIT 100 OFFSET …} against an OJP server instance - * that has {@code ojp.server.nextPageCache.enabled=true} (port 10594). The client also sets + * that has {@code ojp.server.nextPageCache.enabled=true} (default port 1059). The client also sets * {@code ojp.nextPageCache.enabled=true} in {@code ojp.properties}, which is the * per-datasource opt-in sent to the server on connect.
      • *
      • Asserts every column value, including a byte-exact comparison of the @@ -39,7 +39,7 @@ * *

        This test is disabled by default and is activated by passing * {@code -DenableH2PrefetchCacheTests=true} to the Maven Surefire plugin in CI. - * The target OJP server must already be running on port 10594 with + * The target OJP server must already be running on default port 1059 with * {@code ojp.server.nextPageCache.enabled=true}. The client-side per-datasource flag * {@code ojp.nextPageCache.enabled=true} is set in {@code ojp.properties} and is sent to * the server at connection time to explicitly opt this datasource into the cache. diff --git a/ojp-jdbc-driver/src/test/java/openjproxy/jdbc/MySQLMariaDBPaginationCacheIntegrationTest.java b/ojp-jdbc-driver/src/test/java/openjproxy/jdbc/MySQLMariaDBPaginationCacheIntegrationTest.java index 5f8d9acb5..745cd39b6 100644 --- a/ojp-jdbc-driver/src/test/java/openjproxy/jdbc/MySQLMariaDBPaginationCacheIntegrationTest.java +++ b/ojp-jdbc-driver/src/test/java/openjproxy/jdbc/MySQLMariaDBPaginationCacheIntegrationTest.java @@ -27,7 +27,7 @@ *

      • Creates a dedicated table with multiple column types, including a {@code VARBINARY} LOB column.
      • *
      • Inserts the requested number of rows with fully deterministic, per-row values.
      • *
      • Paginates through all rows using {@code LIMIT 100 OFFSET …} against an OJP server instance - * that has {@code ojp.server.nextPageCache.enabled=true} (port 10594). The client also sets + * that has {@code ojp.server.nextPageCache.enabled=true} (default port 1059). The client also sets * {@code ojp.nextPageCache.enabled=true} in {@code ojp.properties}, which is the * per-datasource opt-in sent to the server on connect.
      • *
      • Asserts every column value, including a byte-exact comparison of the @@ -37,7 +37,7 @@ * *

        This test is disabled by default and is activated by passing * {@code -DenableMySQLPrefetchCacheTests=true} to the Maven Surefire plugin in CI. - * The target OJP server must already be running on port 10594 with + * The target OJP server must already be running on default port 1059 with * {@code ojp.server.nextPageCache.enabled=true}. The client-side per-datasource flag * {@code ojp.nextPageCache.enabled=true} is set in {@code ojp.properties} and is sent to * the server at connection time to explicitly opt this datasource into the cache. diff --git a/ojp-jdbc-driver/src/test/java/openjproxy/jdbc/OraclePaginationCacheIntegrationTest.java b/ojp-jdbc-driver/src/test/java/openjproxy/jdbc/OraclePaginationCacheIntegrationTest.java index d4550380e..3a989cad9 100644 --- a/ojp-jdbc-driver/src/test/java/openjproxy/jdbc/OraclePaginationCacheIntegrationTest.java +++ b/ojp-jdbc-driver/src/test/java/openjproxy/jdbc/OraclePaginationCacheIntegrationTest.java @@ -30,7 +30,7 @@ *

      • Creates a dedicated table with multiple column types, including a {@code BLOB} column.
      • *
      • Inserts the requested number of rows with fully deterministic, per-row values.
      • *
      • Paginates through all rows using {@code OFFSET … ROWS FETCH NEXT 100 ROWS ONLY} against an - * OJP server instance that has {@code ojp.server.nextPageCache.enabled=true} (port 10594). + * OJP server instance that has {@code ojp.server.nextPageCache.enabled=true} (default port 1059). * The client also sets {@code ojp.nextPageCache.enabled=true} in {@code ojp.properties}, * which is the per-datasource opt-in sent to the server on connect.
      • *
      • Asserts every column value, including a byte-exact comparison of the @@ -40,7 +40,7 @@ * *

        This test is disabled by default and is activated by passing * {@code -DenableOraclePrefetchCacheTests=true} to the Maven Surefire plugin in CI. - * The target OJP server must already be running on port 10594 with + * The target OJP server must already be running on default port 1059 with * {@code ojp.server.nextPageCache.enabled=true}. The client-side per-datasource flag * {@code ojp.nextPageCache.enabled=true} is set in {@code ojp.properties} and is sent to * the server at connection time to explicitly opt this datasource into the cache. diff --git a/ojp-jdbc-driver/src/test/java/openjproxy/jdbc/PostgresPaginationCacheIntegrationTest.java b/ojp-jdbc-driver/src/test/java/openjproxy/jdbc/PostgresPaginationCacheIntegrationTest.java index 9271bda5f..5cbec47d1 100644 --- a/ojp-jdbc-driver/src/test/java/openjproxy/jdbc/PostgresPaginationCacheIntegrationTest.java +++ b/ojp-jdbc-driver/src/test/java/openjproxy/jdbc/PostgresPaginationCacheIntegrationTest.java @@ -28,7 +28,7 @@ *

      • Creates a dedicated table with multiple column types, including a {@code BYTEA} LOB column.
      • *
      • Inserts the requested number of rows with fully deterministic, per-row values.
      • *
      • Paginates through all rows using {@code LIMIT 100 OFFSET …} against an OJP server instance - * that has {@code ojp.server.nextPageCache.enabled=true} (port 10594). The client also sets + * that has {@code ojp.server.nextPageCache.enabled=true} (default port 1059). The client also sets * {@code ojp.nextPageCache.enabled=true} in {@code ojp.properties}, which is the * per-datasource opt-in sent to the server on connect.
      • *
      • Asserts every column value, including a byte-exact comparison of the @@ -38,7 +38,7 @@ * *

        This test is disabled by default and is activated by passing * {@code -DenablePostgresPrefetchCacheTests=true} to the Maven Surefire plugin in CI. - * The target OJP server must already be running on port 10594 with + * The target OJP server must already be running on default port 1059 with * {@code ojp.server.nextPageCache.enabled=true}. The client-side per-datasource flag * {@code ojp.nextPageCache.enabled=true} is set in {@code ojp.properties} and is sent to * the server at connection time to explicitly opt this datasource into the cache. @@ -71,7 +71,7 @@ static void checkTestConfiguration() { * * @param recordCount total rows to insert and paginate over * @param driverClass fully-qualified OJP driver class (loaded as a side-effect) - * @param url JDBC URL pointing at the prefetch-cache OJP server (port 10594) + * @param url JDBC URL pointing at the prefetch-cache OJP server (default port 1059) * @param user database user * @param pwd database password */ diff --git a/ojp-jdbc-driver/src/test/java/openjproxy/jdbc/SQLServerPaginationCacheIntegrationTest.java b/ojp-jdbc-driver/src/test/java/openjproxy/jdbc/SQLServerPaginationCacheIntegrationTest.java index 50a0bab87..42c399b0e 100644 --- a/ojp-jdbc-driver/src/test/java/openjproxy/jdbc/SQLServerPaginationCacheIntegrationTest.java +++ b/ojp-jdbc-driver/src/test/java/openjproxy/jdbc/SQLServerPaginationCacheIntegrationTest.java @@ -27,7 +27,7 @@ * *

        SQL Server uses the ANSI SQL {@code OFFSET m ROWS FETCH NEXT n ROWS ONLY} pagination syntax. * The SQL Server container is managed by TestContainers; the test connects via an OJP prefetch-cache - * server on port 10594. + * server on default port 1059. * *

        The test is parameterized over several record counts (99, 100, 101, 567, 1000) to exercise * boundary conditions around the 100-record page size. For each count the test: @@ -35,7 +35,7 @@ *

      • Creates a dedicated table with multiple column types, including a {@code VARBINARY} column.
      • *
      • Inserts the requested number of rows with fully deterministic, per-row values.
      • *
      • Paginates through all rows using {@code OFFSET … ROWS FETCH NEXT 100 ROWS ONLY} against an - * OJP server instance that has {@code ojp.server.nextPageCache.enabled=true} (port 10594). + * OJP server instance that has {@code ojp.server.nextPageCache.enabled=true} (default port 1059). * The client also sets {@code ojp.nextPageCache.enabled=true} in {@code ojp.properties}, * which is the per-datasource opt-in sent to the server on connect.
      • *
      • Asserts every column value, including a byte-exact comparison of the @@ -45,7 +45,7 @@ * *

        This test is disabled by default and is activated by passing * {@code -DenableSqlServerPrefetchCacheTests=true} to the Maven Surefire plugin in CI. - * The target OJP server must already be running on port 10594 with + * The target OJP server must already be running on default port 1059 with * {@code ojp.server.nextPageCache.enabled=true}. The client-side per-datasource flag * {@code ojp.nextPageCache.enabled=true} is set in {@code ojp.properties} and is sent to * the server at connection time to explicitly opt this datasource into the cache. @@ -132,7 +132,7 @@ private static String[] getConnectionArgs() { String username = SQLServerTestContainer.getUsername(); String password = SQLServerTestContainer.getPassword(); - String prefetchCachePort = System.getProperty("ojp.prefetch.cache.port", "10594"); + String prefetchCachePort = System.getProperty("ojp.prefetch.cache.port", "1059"); String ojpProxyHost = System.getProperty("ojp.proxy.host", "localhost"); // strip "jdbc:" prefix and wrap with OJP proxy diff --git a/ojp-jdbc-driver/src/test/java/openjproxy/jdbc/testutil/SQLServerPrefetchCacheConnectionProvider.java b/ojp-jdbc-driver/src/test/java/openjproxy/jdbc/testutil/SQLServerPrefetchCacheConnectionProvider.java index 9ee45b248..18ad828b4 100644 --- a/ojp-jdbc-driver/src/test/java/openjproxy/jdbc/testutil/SQLServerPrefetchCacheConnectionProvider.java +++ b/ojp-jdbc-driver/src/test/java/openjproxy/jdbc/testutil/SQLServerPrefetchCacheConnectionProvider.java @@ -10,17 +10,17 @@ /** * Custom {@link ArgumentsProvider} for SQL Server prefetch-cache integration tests. * - *

        Provides connection details pointing to the OJP prefetch-cache server on port 10594 - * (instead of the standard port 1059 used by {@link SQLServerConnectionProvider}). + *

        Provides connection details pointing to the OJP server (default port 1059) with the + * next-page prefetch cache enabled via the client-side property {@code ojp.nextPageCache.enabled}. * The actual SQL Server instance is still supplied by {@link SQLServerTestContainer}. */ public class SQLServerPrefetchCacheConnectionProvider implements ArgumentsProvider { private static final String JDBC_PREFIX = "jdbc:"; - /** The OJP server with the prefetch cache enabled runs on this port in CI. */ + /** OJP server host:port used for prefetch-cache tests (defaults to standard port 1059). */ private static final String PREFETCH_CACHE_PORT = - System.getProperty("ojp.prefetch.cache.port", "10594"); + System.getProperty("ojp.prefetch.cache.port", "1059"); private static final String OJP_PROXY_HOST = System.getProperty("ojp.proxy.host", "localhost"); private static final String PREFETCH_CACHE_ADDRESS = OJP_PROXY_HOST + ":" + PREFETCH_CACHE_PORT; diff --git a/ojp-jdbc-driver/src/test/resources/cockroachdb_prefetch_cache_connections_with_record_counts.csv b/ojp-jdbc-driver/src/test/resources/cockroachdb_prefetch_cache_connections_with_record_counts.csv index a5d2dd22b..75e108955 100644 --- a/ojp-jdbc-driver/src/test/resources/cockroachdb_prefetch_cache_connections_with_record_counts.csv +++ b/ojp-jdbc-driver/src/test/resources/cockroachdb_prefetch_cache_connections_with_record_counts.csv @@ -1,5 +1,5 @@ -99,org.openjproxy.jdbc.Driver,jdbc:ojp[localhost:10594]_postgresql://localhost:26257/defaultdb?sslmode=disable,root, -100,org.openjproxy.jdbc.Driver,jdbc:ojp[localhost:10594]_postgresql://localhost:26257/defaultdb?sslmode=disable,root, -101,org.openjproxy.jdbc.Driver,jdbc:ojp[localhost:10594]_postgresql://localhost:26257/defaultdb?sslmode=disable,root, -567,org.openjproxy.jdbc.Driver,jdbc:ojp[localhost:10594]_postgresql://localhost:26257/defaultdb?sslmode=disable,root, 
-1000,org.openjproxy.jdbc.Driver,jdbc:ojp[localhost:10594]_postgresql://localhost:26257/defaultdb?sslmode=disable,root, +99,org.openjproxy.jdbc.Driver,jdbc:ojp[localhost:1059]_postgresql://localhost:26257/defaultdb?sslmode=disable,root, +100,org.openjproxy.jdbc.Driver,jdbc:ojp[localhost:1059]_postgresql://localhost:26257/defaultdb?sslmode=disable,root, +101,org.openjproxy.jdbc.Driver,jdbc:ojp[localhost:1059]_postgresql://localhost:26257/defaultdb?sslmode=disable,root, +567,org.openjproxy.jdbc.Driver,jdbc:ojp[localhost:1059]_postgresql://localhost:26257/defaultdb?sslmode=disable,root, +1000,org.openjproxy.jdbc.Driver,jdbc:ojp[localhost:1059]_postgresql://localhost:26257/defaultdb?sslmode=disable,root, diff --git a/ojp-jdbc-driver/src/test/resources/db2_prefetch_cache_connections_with_record_counts.csv b/ojp-jdbc-driver/src/test/resources/db2_prefetch_cache_connections_with_record_counts.csv index 5c9d3c61c..d21d2b7cd 100644 --- a/ojp-jdbc-driver/src/test/resources/db2_prefetch_cache_connections_with_record_counts.csv +++ b/ojp-jdbc-driver/src/test/resources/db2_prefetch_cache_connections_with_record_counts.csv @@ -1,5 +1,5 @@ -99,org.openjproxy.jdbc.Driver,jdbc:ojp[localhost:10594]_db2://localhost:50000/testdb,db2inst1,testpass -100,org.openjproxy.jdbc.Driver,jdbc:ojp[localhost:10594]_db2://localhost:50000/testdb,db2inst1,testpass -101,org.openjproxy.jdbc.Driver,jdbc:ojp[localhost:10594]_db2://localhost:50000/testdb,db2inst1,testpass -567,org.openjproxy.jdbc.Driver,jdbc:ojp[localhost:10594]_db2://localhost:50000/testdb,db2inst1,testpass -1000,org.openjproxy.jdbc.Driver,jdbc:ojp[localhost:10594]_db2://localhost:50000/testdb,db2inst1,testpass +99,org.openjproxy.jdbc.Driver,jdbc:ojp[localhost:1059]_db2://localhost:50000/testdb,db2inst1,testpass +100,org.openjproxy.jdbc.Driver,jdbc:ojp[localhost:1059]_db2://localhost:50000/testdb,db2inst1,testpass +101,org.openjproxy.jdbc.Driver,jdbc:ojp[localhost:1059]_db2://localhost:50000/testdb,db2inst1,testpass 
+567,org.openjproxy.jdbc.Driver,jdbc:ojp[localhost:1059]_db2://localhost:50000/testdb,db2inst1,testpass +1000,org.openjproxy.jdbc.Driver,jdbc:ojp[localhost:1059]_db2://localhost:50000/testdb,db2inst1,testpass diff --git a/ojp-jdbc-driver/src/test/resources/h2_prefetch_cache_connections_with_record_counts.csv b/ojp-jdbc-driver/src/test/resources/h2_prefetch_cache_connections_with_record_counts.csv index 04e81001b..c653232c4 100644 --- a/ojp-jdbc-driver/src/test/resources/h2_prefetch_cache_connections_with_record_counts.csv +++ b/ojp-jdbc-driver/src/test/resources/h2_prefetch_cache_connections_with_record_counts.csv @@ -1,5 +1,5 @@ -99,org.openjproxy.jdbc.Driver,jdbc:ojp[localhost:10594]_h2:~/test,sa, -100,org.openjproxy.jdbc.Driver,jdbc:ojp[localhost:10594]_h2:~/test,sa, -101,org.openjproxy.jdbc.Driver,jdbc:ojp[localhost:10594]_h2:~/test,sa, -567,org.openjproxy.jdbc.Driver,jdbc:ojp[localhost:10594]_h2:~/test,sa, -1000,org.openjproxy.jdbc.Driver,jdbc:ojp[localhost:10594]_h2:~/test,sa, +99,org.openjproxy.jdbc.Driver,jdbc:ojp[localhost:1059]_h2:~/test,sa, +100,org.openjproxy.jdbc.Driver,jdbc:ojp[localhost:1059]_h2:~/test,sa, +101,org.openjproxy.jdbc.Driver,jdbc:ojp[localhost:1059]_h2:~/test,sa, +567,org.openjproxy.jdbc.Driver,jdbc:ojp[localhost:1059]_h2:~/test,sa, +1000,org.openjproxy.jdbc.Driver,jdbc:ojp[localhost:1059]_h2:~/test,sa, diff --git a/ojp-jdbc-driver/src/test/resources/mariadb_prefetch_cache_connections_with_record_counts.csv b/ojp-jdbc-driver/src/test/resources/mariadb_prefetch_cache_connections_with_record_counts.csv index 93d6fd48e..f7a2f41f1 100644 --- a/ojp-jdbc-driver/src/test/resources/mariadb_prefetch_cache_connections_with_record_counts.csv +++ b/ojp-jdbc-driver/src/test/resources/mariadb_prefetch_cache_connections_with_record_counts.csv @@ -1,5 +1,5 @@ -99,org.openjproxy.jdbc.Driver,jdbc:ojp[localhost:10594]_mariadb://localhost:3307/defaultdb,testuser,testpassword 
-100,org.openjproxy.jdbc.Driver,jdbc:ojp[localhost:10594]_mariadb://localhost:3307/defaultdb,testuser,testpassword -101,org.openjproxy.jdbc.Driver,jdbc:ojp[localhost:10594]_mariadb://localhost:3307/defaultdb,testuser,testpassword -567,org.openjproxy.jdbc.Driver,jdbc:ojp[localhost:10594]_mariadb://localhost:3307/defaultdb,testuser,testpassword -1000,org.openjproxy.jdbc.Driver,jdbc:ojp[localhost:10594]_mariadb://localhost:3307/defaultdb,testuser,testpassword +99,org.openjproxy.jdbc.Driver,jdbc:ojp[localhost:1059]_mariadb://localhost:3307/defaultdb,testuser,testpassword +100,org.openjproxy.jdbc.Driver,jdbc:ojp[localhost:1059]_mariadb://localhost:3307/defaultdb,testuser,testpassword +101,org.openjproxy.jdbc.Driver,jdbc:ojp[localhost:1059]_mariadb://localhost:3307/defaultdb,testuser,testpassword +567,org.openjproxy.jdbc.Driver,jdbc:ojp[localhost:1059]_mariadb://localhost:3307/defaultdb,testuser,testpassword +1000,org.openjproxy.jdbc.Driver,jdbc:ojp[localhost:1059]_mariadb://localhost:3307/defaultdb,testuser,testpassword diff --git a/ojp-jdbc-driver/src/test/resources/mysql_prefetch_cache_connections_with_record_counts.csv b/ojp-jdbc-driver/src/test/resources/mysql_prefetch_cache_connections_with_record_counts.csv index 18d75f2fa..bddb835d2 100644 --- a/ojp-jdbc-driver/src/test/resources/mysql_prefetch_cache_connections_with_record_counts.csv +++ b/ojp-jdbc-driver/src/test/resources/mysql_prefetch_cache_connections_with_record_counts.csv @@ -1,5 +1,5 @@ -99,org.openjproxy.jdbc.Driver,jdbc:ojp[localhost:10594]_mysql://localhost:3306/defaultdb,testuser,testpassword -100,org.openjproxy.jdbc.Driver,jdbc:ojp[localhost:10594]_mysql://localhost:3306/defaultdb,testuser,testpassword -101,org.openjproxy.jdbc.Driver,jdbc:ojp[localhost:10594]_mysql://localhost:3306/defaultdb,testuser,testpassword -567,org.openjproxy.jdbc.Driver,jdbc:ojp[localhost:10594]_mysql://localhost:3306/defaultdb,testuser,testpassword 
-1000,org.openjproxy.jdbc.Driver,jdbc:ojp[localhost:10594]_mysql://localhost:3306/defaultdb,testuser,testpassword +99,org.openjproxy.jdbc.Driver,jdbc:ojp[localhost:1059]_mysql://localhost:3306/defaultdb,testuser,testpassword +100,org.openjproxy.jdbc.Driver,jdbc:ojp[localhost:1059]_mysql://localhost:3306/defaultdb,testuser,testpassword +101,org.openjproxy.jdbc.Driver,jdbc:ojp[localhost:1059]_mysql://localhost:3306/defaultdb,testuser,testpassword +567,org.openjproxy.jdbc.Driver,jdbc:ojp[localhost:1059]_mysql://localhost:3306/defaultdb,testuser,testpassword +1000,org.openjproxy.jdbc.Driver,jdbc:ojp[localhost:1059]_mysql://localhost:3306/defaultdb,testuser,testpassword diff --git a/ojp-jdbc-driver/src/test/resources/oracle_prefetch_cache_connections_with_record_counts.csv b/ojp-jdbc-driver/src/test/resources/oracle_prefetch_cache_connections_with_record_counts.csv index ffb4adc0c..757148ccb 100644 --- a/ojp-jdbc-driver/src/test/resources/oracle_prefetch_cache_connections_with_record_counts.csv +++ b/ojp-jdbc-driver/src/test/resources/oracle_prefetch_cache_connections_with_record_counts.csv @@ -1,5 +1,5 @@ -99,org.openjproxy.jdbc.Driver,jdbc:ojp[localhost:10594]_oracle:thin:@localhost:1521/XEPDB1,testuser,testpassword -100,org.openjproxy.jdbc.Driver,jdbc:ojp[localhost:10594]_oracle:thin:@localhost:1521/XEPDB1,testuser,testpassword -101,org.openjproxy.jdbc.Driver,jdbc:ojp[localhost:10594]_oracle:thin:@localhost:1521/XEPDB1,testuser,testpassword -567,org.openjproxy.jdbc.Driver,jdbc:ojp[localhost:10594]_oracle:thin:@localhost:1521/XEPDB1,testuser,testpassword -1000,org.openjproxy.jdbc.Driver,jdbc:ojp[localhost:10594]_oracle:thin:@localhost:1521/XEPDB1,testuser,testpassword +99,org.openjproxy.jdbc.Driver,jdbc:ojp[localhost:1059]_oracle:thin:@localhost:1521/XEPDB1,testuser,testpassword +100,org.openjproxy.jdbc.Driver,jdbc:ojp[localhost:1059]_oracle:thin:@localhost:1521/XEPDB1,testuser,testpassword 
+101,org.openjproxy.jdbc.Driver,jdbc:ojp[localhost:1059]_oracle:thin:@localhost:1521/XEPDB1,testuser,testpassword +567,org.openjproxy.jdbc.Driver,jdbc:ojp[localhost:1059]_oracle:thin:@localhost:1521/XEPDB1,testuser,testpassword +1000,org.openjproxy.jdbc.Driver,jdbc:ojp[localhost:1059]_oracle:thin:@localhost:1521/XEPDB1,testuser,testpassword diff --git a/ojp-jdbc-driver/src/test/resources/postgres_prefetch_cache_connections_with_record_counts.csv b/ojp-jdbc-driver/src/test/resources/postgres_prefetch_cache_connections_with_record_counts.csv index bd7d1f891..a06451d49 100644 --- a/ojp-jdbc-driver/src/test/resources/postgres_prefetch_cache_connections_with_record_counts.csv +++ b/ojp-jdbc-driver/src/test/resources/postgres_prefetch_cache_connections_with_record_counts.csv @@ -1,5 +1,5 @@ -99,org.openjproxy.jdbc.Driver,jdbc:ojp[localhost:10594]_postgresql://localhost:5432/defaultdb,testuser,testpassword -100,org.openjproxy.jdbc.Driver,jdbc:ojp[localhost:10594]_postgresql://localhost:5432/defaultdb,testuser,testpassword -101,org.openjproxy.jdbc.Driver,jdbc:ojp[localhost:10594]_postgresql://localhost:5432/defaultdb,testuser,testpassword -567,org.openjproxy.jdbc.Driver,jdbc:ojp[localhost:10594]_postgresql://localhost:5432/defaultdb,testuser,testpassword -1000,org.openjproxy.jdbc.Driver,jdbc:ojp[localhost:10594]_postgresql://localhost:5432/defaultdb,testuser,testpassword +99,org.openjproxy.jdbc.Driver,jdbc:ojp[localhost:1059]_postgresql://localhost:5432/defaultdb,testuser,testpassword +100,org.openjproxy.jdbc.Driver,jdbc:ojp[localhost:1059]_postgresql://localhost:5432/defaultdb,testuser,testpassword +101,org.openjproxy.jdbc.Driver,jdbc:ojp[localhost:1059]_postgresql://localhost:5432/defaultdb,testuser,testpassword +567,org.openjproxy.jdbc.Driver,jdbc:ojp[localhost:1059]_postgresql://localhost:5432/defaultdb,testuser,testpassword +1000,org.openjproxy.jdbc.Driver,jdbc:ojp[localhost:1059]_postgresql://localhost:5432/defaultdb,testuser,testpassword From 
c0da110302ab3803e76365e0aa38355b4c86eab1 Mon Sep 17 00:00:00 2001 From: "copilot-swe-agent[bot]" <198982749+Copilot@users.noreply.github.com> Date: Thu, 19 Mar 2026 09:51:32 +0000 Subject: [PATCH 22/22] docs: clarify two-tier config model for next-page prefetch cache across all three docs Co-authored-by: rrobetti <7221783+rrobetti@users.noreply.github.com> --- .../configuration/ojp-server-configuration.md | 5 ++- .../part2-chapter6-server-configuration.md | 33 +++++++++++-------- .../features/NEXT_PAGE_PREFETCH_CACHE.md | 13 ++++++-- 3 files changed, 34 insertions(+), 17 deletions(-) diff --git a/documents/configuration/ojp-server-configuration.md b/documents/configuration/ojp-server-configuration.md index 5017ae006..bde1b77cc 100644 --- a/documents/configuration/ojp-server-configuration.md +++ b/documents/configuration/ojp-server-configuration.md @@ -164,7 +164,10 @@ For full integration examples including Docker Compose setups, see the **[Teleme The prefetch cache transparently pre-executes the **next page query** in the background while the current page is being sent to the client. When the client requests the next page, the rows are served from memory instead of hitting the database again, significantly reducing perceived latency for paginated result sets. -The cache detects SQL pagination clauses automatically (`LIMIT/OFFSET`, `OFFSET … FETCH`, `FETCH FIRST … ROWS ONLY`, MySQL `LIMIT m, n`, and standalone `LIMIT n`). No client changes are needed — the feature is entirely transparent. +The cache detects SQL pagination clauses automatically (`LIMIT/OFFSET`, `OFFSET … FETCH`, `FETCH FIRST … ROWS ONLY`, MySQL `LIMIT m, n`, and standalone `LIMIT n`). + +> **Two-tier configuration model:** +> The cache uses a two-tier configuration model. The **server administrator** enables the feature globally and tunes its resource limits (TTL, max entries, timeouts). 
Each **client application** then controls, per datasource, whether that datasource uses the cache — without requiring a server restart. See the client-side settings below. | Property | Environment Variable | Type | Default | Description | Since | |---|---|---|---|---|---| diff --git a/documents/ebook/part2-chapter6-server-configuration.md b/documents/ebook/part2-chapter6-server-configuration.md index e98f5e8b0..8b63ce977 100644 --- a/documents/ebook/part2-chapter6-server-configuration.md +++ b/documents/ebook/part2-chapter6-server-configuration.md @@ -439,7 +439,12 @@ The cache key combines the datasource identifier and the normalised SQL text, so ### Configuration -The prefetch cache is **disabled by default**. Enable it with a single property: +The prefetch cache uses a **two-tier configuration model**: + +- **Server administrator** enables the global cache infrastructure and tunes resource limits (TTL, max entries, timeouts) in `ojp-server.properties` or as JVM system properties. +- **Client application** controls, per datasource, whether that datasource uses the cache by setting `ojp.nextPageCache.enabled` in its `ojp.properties` — without requiring a server restart. + +**Step 1 — Server administrator: enable the infrastructure** ```bash java -Duser.timezone=UTC \ @@ -447,6 +452,18 @@ java -Duser.timezone=UTC \ -jar ojp-server.jar ``` +**Step 2 — Client application: opt in per datasource** (`ojp.properties`) + +```properties +# Default datasource — explicitly opt in +ojp.nextPageCache.enabled=true + +# "random-access" datasource — opt out even though server has the cache enabled +random-access.ojp.nextPageCache.enabled=false +``` + +When a datasource does not set `ojp.nextPageCache.enabled`, the server's global `ojp.server.nextPageCache.enabled` value is used as the fallback. 
+ **Server-side settings (`ojp-server.properties` / JVM system properties):** | Property | Default | Description | @@ -466,19 +483,7 @@ java -Duser.timezone=UTC \ ### Per-Datasource Cache Control -The per-datasource `enabled` flag is a **client-side** connection property. Each datasource in the client application can independently opt in or out of the prefetch cache by setting `ojp.nextPageCache.enabled` in its `ojp.properties` file — no server restart needed: - -```properties -# ojp.properties — client application - -# Default datasource: explicitly enable the cache -ojp.nextPageCache.enabled=true - -# "random-access" datasource: disable the prefetch cache for random-access workloads -random-access.ojp.nextPageCache.enabled=false -``` - -**Per-datasource wait timeout (different DB response times):** +While each client datasource controls its `enabled` flag (shown in the "Configuration" section above), the server administrator can also tune the prefetch wait timeout on a per-datasource basis — useful when different databases have significantly different response times: ```bash java -Duser.timezone=UTC \ diff --git a/documents/features/NEXT_PAGE_PREFETCH_CACHE.md b/documents/features/NEXT_PAGE_PREFETCH_CACHE.md index b926a9f70..dab55eaf5 100644 --- a/documents/features/NEXT_PAGE_PREFETCH_CACHE.md +++ b/documents/features/NEXT_PAGE_PREFETCH_CACHE.md @@ -124,14 +124,23 @@ ojp.server.nextPageCache.datasource.oltp.prefetchWaitTimeoutMs=1000 ## Quick Start -**Enable with defaults:** +**Step 1 — Server administrator: enable the cache infrastructure** ```bash java -Duser.timezone=UTC \ -Dojp.server.nextPageCache.enabled=true \ -jar ojp-server.jar ``` -**Tuned for a reporting workload:** +**Step 2 — Client application: opt in per datasource** (`ojp.properties`) +```properties +# Default datasource: enable the prefetch cache +ojp.nextPageCache.enabled=true + +# "olap" datasource: disable cache for random-access workloads +olap.ojp.nextPageCache.enabled=false +``` + +**Tuned 
server settings for a reporting workload:** ```bash java -Duser.timezone=UTC \ -Dojp.server.nextPageCache.enabled=true \