From b8194f53d71d2e4d874ee6936cf313eedca987b2 Mon Sep 17 00:00:00 2001
From: "copilot-swe-agent[bot]" <198982749+Copilot@users.noreply.github.com>
Date: Tue, 17 Mar 2026 07:58:59 +0000
Subject: [PATCH 01/22] Initial plan
From bc3c983ad3f7e6df976e9cf55bd7ea45f0d23b89 Mon Sep 17 00:00:00 2001
From: "copilot-swe-agent[bot]" <198982749+Copilot@users.noreply.github.com>
Date: Tue, 17 Mar 2026 08:28:20 +0000
Subject: [PATCH 02/22] feat: add next-page prefetch cache for paginated SELECT
queries
Co-authored-by: rrobetti <7221783+rrobetti@users.noreply.github.com>
---
.../grpc/server/ServerConfiguration.java | 49 ++-
.../grpc/server/StatementServiceImpl.java | 101 +++++
.../grpc/server/paging/CachedPage.java | 58 +++
.../server/paging/NextPagePrefetchCache.java | 390 ++++++++++++++++++
.../grpc/server/paging/PageInfo.java | 49 +++
.../server/paging/PaginationDetector.java | 201 +++++++++
.../paging/NextPagePrefetchCacheTest.java | 241 +++++++++++
.../server/paging/PaginationDetectorTest.java | 277 +++++++++++++
8 files changed, 1365 insertions(+), 1 deletion(-)
create mode 100644 ojp-server/src/main/java/org/openjproxy/grpc/server/paging/CachedPage.java
create mode 100644 ojp-server/src/main/java/org/openjproxy/grpc/server/paging/NextPagePrefetchCache.java
create mode 100644 ojp-server/src/main/java/org/openjproxy/grpc/server/paging/PageInfo.java
create mode 100644 ojp-server/src/main/java/org/openjproxy/grpc/server/paging/PaginationDetector.java
create mode 100644 ojp-server/src/test/java/org/openjproxy/grpc/server/paging/NextPagePrefetchCacheTest.java
create mode 100644 ojp-server/src/test/java/org/openjproxy/grpc/server/paging/PaginationDetectorTest.java
diff --git a/ojp-server/src/main/java/org/openjproxy/grpc/server/ServerConfiguration.java b/ojp-server/src/main/java/org/openjproxy/grpc/server/ServerConfiguration.java
index c2893d67f..7fca87637 100644
--- a/ojp-server/src/main/java/org/openjproxy/grpc/server/ServerConfiguration.java
+++ b/ojp-server/src/main/java/org/openjproxy/grpc/server/ServerConfiguration.java
@@ -68,6 +68,12 @@ public class ServerConfiguration {
private static final String TELEMETRY_GRPC_METRICS_ENABLED_KEY = "ojp.telemetry.grpc.metrics.enabled";
private static final String TELEMETRY_POOL_METRICS_ENABLED_KEY = "ojp.telemetry.pool.metrics.enabled";
+ // Next-page prefetch cache configuration keys
+ private static final String NEXT_PAGE_CACHE_ENABLED_KEY = "ojp.server.nextPageCache.enabled";
+ private static final String NEXT_PAGE_CACHE_TTL_SECONDS_KEY = "ojp.server.nextPageCache.ttlSeconds";
+ private static final String NEXT_PAGE_CACHE_MAX_ENTRIES_KEY = "ojp.server.nextPageCache.maxEntries";
+ private static final String NEXT_PAGE_CACHE_PREFETCH_WAIT_TIMEOUT_MS_KEY = "ojp.server.nextPageCache.prefetchWaitTimeoutMs";
+
// TLS configuration keys
private static final String TLS_ENABLED_KEY = "ojp.server.tls.enabled";
private static final String TLS_KEYSTORE_PATH_KEY = "ojp.server.tls.keystore.path";
@@ -135,6 +141,12 @@ public class ServerConfiguration {
public static final boolean DEFAULT_TELEMETRY_GRPC_METRICS_ENABLED = true; // Enabled by default when OpenTelemetry is enabled
public static final boolean DEFAULT_TELEMETRY_POOL_METRICS_ENABLED = true; // Enabled by default when OpenTelemetry is enabled
+ // Next-page prefetch cache default values
+ public static final boolean DEFAULT_NEXT_PAGE_CACHE_ENABLED = false; // Disabled by default, opt-in
+ public static final long DEFAULT_NEXT_PAGE_CACHE_TTL_SECONDS = 300; // 5 minutes
+ public static final int DEFAULT_NEXT_PAGE_CACHE_MAX_ENTRIES = 100;
+ public static final long DEFAULT_NEXT_PAGE_CACHE_PREFETCH_WAIT_TIMEOUT_MS = 5000; // 5 seconds
+
// TLS default values
public static final boolean DEFAULT_TLS_ENABLED = false; // Disabled by default for backwards compatibility
public static final boolean DEFAULT_TLS_CLIENT_AUTH_REQUIRED = false; // mTLS disabled by default
@@ -211,6 +223,12 @@ public class ServerConfiguration {
private final boolean tlsClientAuthRequired;
+ // Next-page prefetch cache configuration
+ private final boolean nextPageCacheEnabled;
+ private final long nextPageCacheTtlSeconds;
+ private final int nextPageCacheMaxEntries;
+ private final long nextPageCachePrefetchWaitTimeoutMs;
+
public ServerConfiguration() {
this.serverPort = getIntProperty(SERVER_PORT_KEY, DEFAULT_SERVER_PORT);
this.prometheusPort = getIntProperty(PROMETHEUS_PORT_KEY, DEFAULT_PROMETHEUS_PORT);
@@ -274,6 +292,12 @@ public ServerConfiguration() {
this.telemetryGrpcMetricsEnabled = getBooleanProperty(TELEMETRY_GRPC_METRICS_ENABLED_KEY, DEFAULT_TELEMETRY_GRPC_METRICS_ENABLED);
this.telemetryPoolMetricsEnabled = getBooleanProperty(TELEMETRY_POOL_METRICS_ENABLED_KEY, DEFAULT_TELEMETRY_POOL_METRICS_ENABLED);
+ // Next-page prefetch cache configuration
+ this.nextPageCacheEnabled = getBooleanProperty(NEXT_PAGE_CACHE_ENABLED_KEY, DEFAULT_NEXT_PAGE_CACHE_ENABLED);
+ this.nextPageCacheTtlSeconds = getLongProperty(NEXT_PAGE_CACHE_TTL_SECONDS_KEY, DEFAULT_NEXT_PAGE_CACHE_TTL_SECONDS);
+ this.nextPageCacheMaxEntries = getIntProperty(NEXT_PAGE_CACHE_MAX_ENTRIES_KEY, DEFAULT_NEXT_PAGE_CACHE_MAX_ENTRIES);
+ this.nextPageCachePrefetchWaitTimeoutMs = getLongProperty(NEXT_PAGE_CACHE_PREFETCH_WAIT_TIMEOUT_MS_KEY, DEFAULT_NEXT_PAGE_CACHE_PREFETCH_WAIT_TIMEOUT_MS);
+
logConfigurationSummary();
}
@@ -416,6 +440,13 @@ private void logConfigurationSummary() {
logger.info(" Tracing Service Name: {}", tracingServiceName);
logger.info(" Tracing Sample Rate: {}", tracingSampleRate);
}
+ logger.info("Next-Page Prefetch Cache Configuration:");
+ logger.info(" Next-Page Cache Enabled: {}", nextPageCacheEnabled);
+ if (nextPageCacheEnabled) {
+ logger.info(" Next-Page Cache TTL: {} seconds", nextPageCacheTtlSeconds);
+ logger.info(" Next-Page Cache Max Entries: {}", nextPageCacheMaxEntries);
+ logger.info(" Next-Page Cache Prefetch Wait Timeout: {} ms", nextPageCachePrefetchWaitTimeoutMs);
+ }
}
/**
@@ -641,5 +672,21 @@ public boolean isTelemetryGrpcMetricsEnabled() {
public boolean isTelemetryPoolMetricsEnabled() {
return telemetryPoolMetricsEnabled;
}
-
+
+ public boolean isNextPageCacheEnabled() {
+ return nextPageCacheEnabled;
+ }
+
+ public long getNextPageCacheTtlSeconds() {
+ return nextPageCacheTtlSeconds;
+ }
+
+ public int getNextPageCacheMaxEntries() {
+ return nextPageCacheMaxEntries;
+ }
+
+ public long getNextPageCachePrefetchWaitTimeoutMs() {
+ return nextPageCachePrefetchWaitTimeoutMs;
+ }
+
}
\ No newline at end of file
diff --git a/ojp-server/src/main/java/org/openjproxy/grpc/server/StatementServiceImpl.java b/ojp-server/src/main/java/org/openjproxy/grpc/server/StatementServiceImpl.java
index 1aacb6a1b..ffb42dfdb 100644
--- a/ojp-server/src/main/java/org/openjproxy/grpc/server/StatementServiceImpl.java
+++ b/ojp-server/src/main/java/org/openjproxy/grpc/server/StatementServiceImpl.java
@@ -21,9 +21,12 @@
import lombok.extern.slf4j.Slf4j;
import org.apache.commons.collections4.CollectionUtils;
import org.apache.commons.lang3.StringUtils;
+import org.openjproxy.constants.CommonConstants;
import org.openjproxy.grpc.ProtoConverter;
+import org.openjproxy.grpc.dto.OpQueryResult;
import org.openjproxy.grpc.dto.Parameter;
import org.openjproxy.grpc.server.action.resource.CallResourceAction;
+import org.openjproxy.grpc.server.action.session.ResultSetHelper;
import org.openjproxy.grpc.server.action.session.TerminateSessionAction;
import org.openjproxy.grpc.server.action.transaction.CommitTransactionAction;
import org.openjproxy.grpc.server.action.transaction.RollbackTransactionAction;
@@ -35,6 +38,11 @@
import org.openjproxy.grpc.server.action.xa.XaRecoverAction;
import org.openjproxy.grpc.server.action.xa.XaRollbackAction;
import org.openjproxy.grpc.server.action.xa.XaStartAction;
+import org.openjproxy.grpc.server.paging.CachedPage;
+import org.openjproxy.grpc.server.paging.NextPagePrefetchCache;
+import org.openjproxy.grpc.server.paging.PageInfo;
+import org.openjproxy.grpc.server.paging.PaginationDetector;
+import org.openjproxy.grpc.server.resultset.ResultSetWrapper;
import org.openjproxy.grpc.server.statement.StatementFactory;
import org.openjproxy.xa.pool.XATransactionRegistry;
import org.openjproxy.xa.pool.spi.XAConnectionPoolProvider;
@@ -47,6 +55,7 @@
import java.sql.SQLDataException;
import java.sql.SQLException;
import java.sql.Statement;
+import java.util.ArrayList;
import java.util.List;
import java.util.Map;
import java.util.Optional;
@@ -72,6 +81,9 @@ public class StatementServiceImpl extends StatementServiceGrpc.StatementServiceI
// SQL Enhancer Engine for query optimization
private final org.openjproxy.grpc.server.sql.SqlEnhancerEngine sqlEnhancerEngine;
+ // Next-page prefetch cache for paginated queries (disabled by default)
+ private final NextPagePrefetchCache nextPagePrefetchCache;
+
// Multinode XA coordinator for distributing transaction limits
private static final MultinodeXaCoordinator xaCoordinator = new MultinodeXaCoordinator();
@@ -90,6 +102,12 @@ public StatementServiceImpl(SessionManager sessionManager, CircuitBreakerRegistr
// Server configuration for creating segregation managers
this.sqlEnhancerEngine = new org.openjproxy.grpc.server.sql.SqlEnhancerEngine(
serverConfiguration.isSqlEnhancerEnabled());
+ // Next-page prefetch cache (disabled by default)
+ this.nextPagePrefetchCache = new NextPagePrefetchCache(
+ serverConfiguration.isNextPageCacheEnabled(),
+ serverConfiguration.getNextPageCacheMaxEntries(),
+ serverConfiguration.getNextPageCacheTtlSeconds(),
+ serverConfiguration.getNextPageCachePrefetchWaitTimeoutMs());
initializeXAPoolProvider();
// Create SQL statement metrics from the registered OpenTelemetry instance (if available)
@@ -287,18 +305,101 @@ private void executeQueryInternal(StatementRequest request, StreamObserver
The method returns immediately. If an entry for {@code nextPageSql} - * already exists (either in-progress or completed), no new prefetch is started. - * Entries are evicted lazily when the cache exceeds {@code maxEntries}.
+ *The method returns immediately. If an entry for {@code datasourceId} + + * {@code nextPageSql} already exists (either in-progress or completed), no new + * prefetch is started. Entries are evicted lazily when the cache exceeds + * {@code maxEntries}.
* *BLOB/CLOB parameters are not supported; if any parameter has type * {@code BLOB} or {@code CLOB} the prefetch is silently skipped.
* * @param dataSource the DataSource from which to obtain a dedicated prefetch connection + * @param datasourceId the unique identifier of the datasource (e.g. connection hash); + * used to scope the cache entry so two datasources do not share pages * @param nextPageSql the SQL for the next page (produced by {@link PaginationDetector#buildNextPageSql}) * @param params the query parameters (may be null or empty for non-prepared queries) */ - public void prefetchAsync(DataSource dataSource, String nextPageSql, ListThe test is parameterized over several record counts (99, 100, 101, 567, 1000) to exercise + * boundary conditions around the 100-record page size. For each count the test: + *
This test is disabled by default and is activated by passing + * {@code -DenablePostgresPrefetchCacheTests=true} to the Maven Surefire plugin in CI. + * The target OJP server must already be running on port 10594 with the prefetch cache enabled. + */ +class PostgresPaginationCacheIntegrationTest { + + private static final Logger logger = LoggerFactory.getLogger(PostgresPaginationCacheIntegrationTest.class); + + /** Number of rows per page used throughout these tests. */ + private static final int PAGE_SIZE = 100; + + private static boolean isTestEnabled; + + @BeforeAll + static void checkTestConfiguration() { + isTestEnabled = Boolean.parseBoolean( + System.getProperty("enablePostgresPrefetchCacheTests", "false")); + } + + // ------------------------------------------------------------------------- + // Parameterized test – one run per row in the CSV + // ------------------------------------------------------------------------- + + /** + * Core pagination test. + * + *
The CSV provides five combinations of record count × connection details so that the + * same test method covers: a partial last page (99), exactly one full page (100), + * one full page plus one row (101), a non-round number (567), and a 10-page set (1000). + * + * @param recordCount total rows to insert and paginate over + * @param driverClass fully-qualified OJP driver class (loaded as a side-effect) + * @param url JDBC URL pointing at the prefetch-cache OJP server (port 10594) + * @param user database user + * @param pwd database password + */ + @ParameterizedTest + @CsvFileSource(resources = "/postgres_prefetch_cache_connections_with_record_counts.csv") + void testPaginationWithPrefetchCache(int recordCount, String driverClass, + String url, String user, String pwd) + throws SQLException, ClassNotFoundException { + + assumeFalse(!isTestEnabled, + "Postgres prefetch-cache tests are disabled " + + "(pass -DenablePostgresPrefetchCacheTests=true to enable)"); + + Class.forName(driverClass); + logger.info("Prefetch-cache pagination test: recordCount={}, url={}", recordCount, url); + + // Table name is unique per record-count so parallel executions don't collide + String tableName = "ojp_pfx_pg_" + recordCount; + + try (Connection conn = DriverManager.getConnection(url, user, pwd)) { + + // ------------------------------------------------------------------ + // 1. Setup: fresh table + batch insert + // ------------------------------------------------------------------ + createTable(conn, tableName); + insertRows(conn, tableName, recordCount); + + // ------------------------------------------------------------------ + // 2. 
Paginate and assert every value on every row + // ------------------------------------------------------------------ + int totalRetrieved = 0; + for (int offset = 0; offset < recordCount; offset += PAGE_SIZE) { + int expectedOnPage = Math.min(PAGE_SIZE, recordCount - offset); + totalRetrieved += assertPage(conn, tableName, offset, expectedOnPage); + } + + assertEquals(recordCount, totalRetrieved, + "Total rows retrieved across all pages must equal recordCount"); + + // ------------------------------------------------------------------ + // 3. Cleanup + // ------------------------------------------------------------------ + dropTable(conn, tableName); + } + } + + // ------------------------------------------------------------------------- + // Helpers + // ------------------------------------------------------------------------- + + /** + * Drops (if exists) and re-creates the test table. + * + *
Schema: + *
+ * id INT PRIMARY KEY – 1-based row identifier
+ * name VARCHAR NOT NULL – "record_{id}"
+ * val_int INT NOT NULL – id × 10
+ * val_bigint BIGINT NOT NULL – id × 1,000,000
+ * val_bool BOOLEAN NOT NULL – true when id is even
+ * val_text TEXT NOT NULL – "text_value_for_row_{id}"
+ * val_bytea BYTEA NOT NULL – four deterministic bytes derived from id
+ *
+ */
+ private static void createTable(Connection conn, String tableName) throws SQLException {
+ try (Statement stmt = conn.createStatement()) {
+ stmt.execute("DROP TABLE IF EXISTS " + tableName);
+ stmt.execute(
+ "CREATE TABLE " + tableName + " (" +
+ " id INT PRIMARY KEY," +
+ " name VARCHAR(100) NOT NULL," +
+ " val_int INT NOT NULL," +
+ " val_bigint BIGINT NOT NULL," +
+ " val_bool BOOLEAN NOT NULL," +
+ " val_text TEXT NOT NULL," +
+ " val_bytea BYTEA NOT NULL" +
+ ")");
+ }
+ logger.debug("Created table {}", tableName);
+ }
+
+ /**
+ * Inserts {@code recordCount} rows using a {@link PreparedStatement} batch for efficiency.
+ */
+ private static void insertRows(Connection conn, String tableName, int recordCount)
+ throws SQLException {
+ String sql = "INSERT INTO " + tableName +
+ " (id, name, val_int, val_bigint, val_bool, val_text, val_bytea)" +
+ " VALUES (?, ?, ?, ?, ?, ?, ?)";
+
+ try (PreparedStatement ps = conn.prepareStatement(sql)) {
+ for (int i = 1; i <= recordCount; i++) {
+ ps.setInt(1, i);
+ ps.setString(2, "record_" + i);
+ ps.setInt(3, i * 10);
+ ps.setLong(4, i * 1_000_000L);
+ ps.setBoolean(5, i % 2 == 0);
+ ps.setString(6, "text_value_for_row_" + i);
+ ps.setBytes(7, expectedBytea(i));
+ ps.addBatch();
+
+ // Flush in chunks to avoid oversized batches
+ if (i % 500 == 0) {
+ ps.executeBatch();
+ }
+ }
+ ps.executeBatch();
+ }
+ logger.debug("Inserted {} rows into {}", recordCount, tableName);
+ }
+
+ /**
+ * Queries one page ({@code LIMIT PAGE_SIZE OFFSET offset}), asserts every column value
+ * for every row on the page, and returns the number of rows actually returned.
+ */
+ private static int assertPage(Connection conn, String tableName,
+ int offset, int expectedRowsOnPage)
+ throws SQLException {
+
+ String sql = "SELECT id, name, val_int, val_bigint, val_bool, val_text, val_bytea" +
+ " FROM " + tableName +
+ " ORDER BY id" +
+ " LIMIT " + PAGE_SIZE + " OFFSET " + offset;
+
+ int rowsOnPage = 0;
+ try (PreparedStatement ps = conn.prepareStatement(sql);
+ ResultSet rs = ps.executeQuery()) {
+
+ while (rs.next()) {
+ int expectedId = offset + rowsOnPage + 1;
+ int id = rs.getInt("id");
+
+ assertEquals(expectedId, id,
+ "id mismatch at offset=" + offset + " row=" + rowsOnPage);
+ assertEquals("record_" + id, rs.getString("name"),
+ "name mismatch for id=" + id);
+ assertEquals(id * 10, rs.getInt("val_int"),
+ "val_int mismatch for id=" + id);
+ assertEquals(id * 1_000_000L, rs.getLong("val_bigint"),
+ "val_bigint mismatch for id=" + id);
+ assertEquals(id % 2 == 0, rs.getBoolean("val_bool"),
+ "val_bool mismatch for id=" + id);
+ assertEquals("text_value_for_row_" + id, rs.getString("val_text"),
+ "val_text mismatch for id=" + id);
+
+ // BYTEA: the prefetch cache materialises BINARY/VARBINARY as byte[].
+ // PostgreSQL JDBC may also represent BYTEA as its hex escape string
+ // (e.g. "\\x01020304") when retrieved via getObject(); both forms are
+ // accepted here and compared byte-for-byte.
+ assertBytea(expectedBytea(id), rs.getObject("val_bytea"),
+ "val_bytea for id=" + id);
+
+ rowsOnPage++;
+ }
+ }
+
+ assertEquals(expectedRowsOnPage, rowsOnPage,
+ "Page at offset=" + offset + " expected " + expectedRowsOnPage + " rows");
+ return rowsOnPage;
+ }
+
+ /** Drops the test table, ignoring errors (e.g., table does not exist). */
+ private static void dropTable(Connection conn, String tableName) {
+ try (Statement stmt = conn.createStatement()) {
+ stmt.execute("DROP TABLE IF EXISTS " + tableName);
+ logger.debug("Dropped table {}", tableName);
+ } catch (SQLException e) {
+ logger.warn("Could not drop table {}: {}", tableName, e.getMessage());
+ }
+ }
+
+ // -------------------------------------------------------------------------
+ // Data-generation helpers
+ // -------------------------------------------------------------------------
+
+ /**
+ * Returns four deterministic bytes for a given {@code rowId}:
+ * If a per-datasource override is configured via
+ * {@code ojp.server.nextPageCache.datasource.
Calling this method multiple times for the same {@code datasourceId} simply + * replaces the previously registered value. The registration is thread-safe.
+ * + * @param datasourceId the unique identifier of the datasource (connection hash) + * @param timeoutMs the maximum time in milliseconds to wait for an in-progress + * prefetch before falling back to a live DB query + */ + public void registerDatasourcePrefetchWaitTimeout(String datasourceId, long timeoutMs) { + if (datasourceId != null) { + datasourcePrefetchWaitTimeoutMs.put(datasourceId, timeoutMs); + log.debug("Registered per-datasource prefetchWaitTimeoutMs={} for datasourceId={}", + timeoutMs, datasourceId); + } + } + /** * Cancels this instance's periodic cleanup task on the shared executor. * The shared executor itself is left running so that other cache instances @@ -204,8 +232,11 @@ public OptionalThe dataSource name corresponds to the {@code ojp.datasource.name} property + * set in the client connection properties.
+ * + * @param connectionDetails the connection details whose properties to inspect + * @return the datasource name, or {@code "default"} when none is set */ - private static String extractDataSourceName(ConnectionDetails connectionDetails) { + public static String extractDataSourceName(ConnectionDetails connectionDetails) { if (connectionDetails.getPropertiesList().isEmpty()) { return "default"; } diff --git a/ojp-server/src/test/java/org/openjproxy/grpc/server/NextPageCacheConfigurationTest.java b/ojp-server/src/test/java/org/openjproxy/grpc/server/NextPageCacheConfigurationTest.java index 4721aa3f3..7a72b75d9 100644 --- a/ojp-server/src/test/java/org/openjproxy/grpc/server/NextPageCacheConfigurationTest.java +++ b/ojp-server/src/test/java/org/openjproxy/grpc/server/NextPageCacheConfigurationTest.java @@ -186,4 +186,82 @@ void defaultCleanupInterval_is60Seconds() { void defaultTtlSeconds_is60Seconds() { assertEquals(60L, ServerConfiguration.DEFAULT_NEXT_PAGE_CACHE_TTL_SECONDS); } + + // ---------------------------------------------------------------- + // Per-datasource prefetch wait timeout + // ---------------------------------------------------------------- + + @Test + void perDatasource_prefetchWaitTimeoutMs_isRespected() { + System.setProperty("ojp.server.nextPageCache.datasource.my-db.prefetchWaitTimeoutMs", "1500"); + + ServerConfiguration config = new ServerConfiguration(); + + assertEquals(1500L, config.getNextPageCachePrefetchWaitTimeoutMs("my-db")); + + System.clearProperty("ojp.server.nextPageCache.datasource.my-db.prefetchWaitTimeoutMs"); + } + + @Test + void perDatasource_prefetchWaitTimeoutMs_fallsBackToGlobalDefault_whenNotSet() { + System.setProperty(WAIT_TIMEOUT_MS_KEY, "8000"); + + ServerConfiguration config = new ServerConfiguration(); + + // Datasource "unknown" has no per-datasource property set + assertEquals(8000L, config.getNextPageCachePrefetchWaitTimeoutMs("unknown-ds")); + + System.clearProperty(WAIT_TIMEOUT_MS_KEY); + } + + @Test + 
void perDatasource_prefetchWaitTimeoutMs_fallsBackToGlobalDefault_forNullName() { + System.setProperty(WAIT_TIMEOUT_MS_KEY, "3000"); + + ServerConfiguration config = new ServerConfiguration(); + + assertEquals(3000L, config.getNextPageCachePrefetchWaitTimeoutMs(null)); + + System.clearProperty(WAIT_TIMEOUT_MS_KEY); + } + + @Test + void perDatasource_prefetchWaitTimeoutMs_fallsBackToGlobalDefault_forDefaultName() { + System.setProperty(WAIT_TIMEOUT_MS_KEY, "4000"); + + ServerConfiguration config = new ServerConfiguration(); + + assertEquals(4000L, config.getNextPageCachePrefetchWaitTimeoutMs("default")); + + System.clearProperty(WAIT_TIMEOUT_MS_KEY); + } + + @Test + void perDatasource_invalidPrefetchWaitTimeout_fallsBackToGlobalDefault() { + System.setProperty("ojp.server.nextPageCache.datasource.bad-ds.prefetchWaitTimeoutMs", "not-a-number"); + + ServerConfiguration config = new ServerConfiguration(); + + assertEquals(ServerConfiguration.DEFAULT_NEXT_PAGE_CACHE_PREFETCH_WAIT_TIMEOUT_MS, + config.getNextPageCachePrefetchWaitTimeoutMs("bad-ds")); + + System.clearProperty("ojp.server.nextPageCache.datasource.bad-ds.prefetchWaitTimeoutMs"); + } + + @Test + void perDatasource_multipleOverrides_areIndependent() { + System.setProperty("ojp.server.nextPageCache.datasource.ds-a.prefetchWaitTimeoutMs", "1000"); + System.setProperty("ojp.server.nextPageCache.datasource.ds-b.prefetchWaitTimeoutMs", "2000"); + System.setProperty(WAIT_TIMEOUT_MS_KEY, "9000"); + + ServerConfiguration config = new ServerConfiguration(); + + assertEquals(1000L, config.getNextPageCachePrefetchWaitTimeoutMs("ds-a")); + assertEquals(2000L, config.getNextPageCachePrefetchWaitTimeoutMs("ds-b")); + assertEquals(9000L, config.getNextPageCachePrefetchWaitTimeoutMs("ds-c")); // falls back to global + + System.clearProperty("ojp.server.nextPageCache.datasource.ds-a.prefetchWaitTimeoutMs"); + System.clearProperty("ojp.server.nextPageCache.datasource.ds-b.prefetchWaitTimeoutMs"); + 
System.clearProperty(WAIT_TIMEOUT_MS_KEY); + } } diff --git a/ojp-server/src/test/java/org/openjproxy/grpc/server/paging/NextPagePrefetchCacheTest.java b/ojp-server/src/test/java/org/openjproxy/grpc/server/paging/NextPagePrefetchCacheTest.java index e18723458..e0c5b22ce 100644 --- a/ojp-server/src/test/java/org/openjproxy/grpc/server/paging/NextPagePrefetchCacheTest.java +++ b/ojp-server/src/test/java/org/openjproxy/grpc/server/paging/NextPagePrefetchCacheTest.java @@ -447,4 +447,48 @@ void backgroundCleanup_evictsExpiredEntries() throws Exception { cache.shutdown(); } + + // ---------------------------------------------------------------- + // Per-datasource prefetch wait timeout + // ---------------------------------------------------------------- + + @Test + void registerDatasourcePrefetchWaitTimeout_ignoresNullId() { + NextPagePrefetchCache cache = enabledCache(); + // Null datasourceId should be silently ignored (no NullPointerException) + cache.registerDatasourcePrefetchWaitTimeout(null, 1000); + } + + @Test + void getIfReady_usesPerDatasourceTimeout_whenRegistered() throws Exception { + // enabled, maxEntries=100, ttlSeconds=60, globalTimeoutMs=1, cleanupInterval=0 (disabled) + NextPagePrefetchCache cache = new NextPagePrefetchCache(true, 100, 60, 1, 0); // global: 1ms + cache.registerDatasourcePrefetchWaitTimeout("ds-custom", 5_000); // per-ds: 5 s + + DataSource ds = mockDataSource(3); + String sql = "SELECT id FROM t LIMIT 10 OFFSET 0"; + cache.prefetchAsync(ds, "ds-custom", sql, List.of()); + + OptionalIf a per-datasource override is configured via
+ * {@code ojp.server.nextPageCache.datasource.
Calling this method multiple times for the same {@code datasourceId} simply + * replaces the previously registered value. The registration is thread-safe.
+ * + * @param datasourceId the unique identifier of the datasource (connection hash) + * @param cacheEnabled {@code true} to enable caching, {@code false} to disable it + * for this specific datasource + */ + public void registerDatasourceCacheEnabled(String datasourceId, boolean cacheEnabled) { + if (datasourceId != null) { + datasourceCacheEnabled.put(datasourceId, cacheEnabled); + log.debug("Registered per-datasource cacheEnabled={} for datasourceId={}", + cacheEnabled, datasourceId); + } + } + + /** + * Returns whether the cache is enabled for the given datasource. + * If a per-datasource override has been registered via + * {@link #registerDatasourceCacheEnabled}, that value takes precedence + * over the global {@link #enabled} flag. + * + * @param datasourceId the connection hash for the datasource; may be {@code null} + * @return {@code true} if caching should be used for this datasource + */ + public boolean isEnabledForDatasource(String datasourceId) { + if (datasourceId != null) { + Boolean override = datasourceCacheEnabled.get(datasourceId); + if (override != null) { + return override; + } + } + return enabled; + } + /** * Cancels this instance's periodic cleanup task on the shared executor. * The shared executor itself is left running so that other cache instances @@ -294,7 +341,7 @@ public OptionalIf a per-datasource override is configured via
- * {@code ojp.server.nextPageCache.datasource.
CockroachDB is PostgreSQL-wire-compatible, so it uses the same {@code LIMIT n OFFSET m} + * pagination syntax and {@code BYTEA} binary type as the PostgreSQL test. + * + *
The test is parameterized over several record counts (99, 100, 101, 567, 1000) to exercise + * boundary conditions around the 100-record page size. For each count the test: + *
This test is disabled by default and is activated by passing + * {@code -DenableCockroachDBPrefetchCacheTests=true} to the Maven Surefire plugin in CI. + * The target OJP server must already be running on port 10594 with the prefetch cache enabled. + */ +class CockroachDBPaginationCacheIntegrationTest { + + private static final Logger logger = LoggerFactory.getLogger(CockroachDBPaginationCacheIntegrationTest.class); + + /** Number of rows per page used throughout these tests. */ + private static final int PAGE_SIZE = 100; + + private static boolean isTestEnabled; + + @BeforeAll + static void checkTestConfiguration() { + isTestEnabled = Boolean.parseBoolean( + System.getProperty("enableCockroachDBPrefetchCacheTests", "false")); + } + + // ------------------------------------------------------------------------- + // Parameterized test – one run per row in the CSV + // ------------------------------------------------------------------------- + + /** + * Core pagination test for CockroachDB. 
+ */ + @ParameterizedTest + @CsvFileSource(resources = "/cockroachdb_prefetch_cache_connections_with_record_counts.csv") + void testPaginationWithPrefetchCache(int recordCount, String driverClass, + String url, String user, String pwd) + throws SQLException, ClassNotFoundException { + + assumeTrue(isTestEnabled, + "CockroachDB prefetch-cache tests are disabled " + + "(pass -DenableCockroachDBPrefetchCacheTests=true to enable)"); + + Class.forName(driverClass); + logger.info("Prefetch-cache pagination test: recordCount={}, url={}", recordCount, url); + + String tableName = "ojp_pfx_crdb_" + recordCount; + + try (Connection conn = DriverManager.getConnection(url, user, pwd)) { + + createTable(conn, tableName); + insertRows(conn, tableName, recordCount); + + int totalRetrieved = 0; + for (int offset = 0; offset < recordCount; offset += PAGE_SIZE) { + int expectedOnPage = Math.min(PAGE_SIZE, recordCount - offset); + totalRetrieved += assertPage(conn, tableName, offset, expectedOnPage); + } + + assertEquals(recordCount, totalRetrieved, + "Total rows retrieved across all pages must equal recordCount"); + + dropTable(conn, tableName); + } + } + + // ------------------------------------------------------------------------- + // Helpers + // ------------------------------------------------------------------------- + + /** + * Drops (if exists) and re-creates the test table. + * + *
Schema: + *
+ * id INT PRIMARY KEY – 1-based row identifier
+ * name VARCHAR NOT NULL – "record_{id}"
+ * val_int INT NOT NULL – id × 10
+ * val_bigint BIGINT NOT NULL – id × 1,000,000
+ * val_bool BOOLEAN NOT NULL – true when id is even
+ * val_text TEXT NOT NULL – "text_value_for_row_{id}"
+ * val_bytea BYTEA NOT NULL – four deterministic bytes derived from id
+ *
+ */
+ private static void createTable(Connection conn, String tableName) throws SQLException {
+ try (Statement stmt = conn.createStatement()) {
+ stmt.execute("DROP TABLE IF EXISTS " + tableName);
+ stmt.execute(
+ "CREATE TABLE " + tableName + " (" +
+ " id INT PRIMARY KEY," +
+ " name VARCHAR(100) NOT NULL," +
+ " val_int INT NOT NULL," +
+ " val_bigint BIGINT NOT NULL," +
+ " val_bool BOOLEAN NOT NULL," +
+ " val_text TEXT NOT NULL," +
+ " val_bytea BYTEA NOT NULL" +
+ ")");
+ }
+ logger.debug("Created table {}", tableName);
+ }
+
+ private static void insertRows(Connection conn, String tableName, int recordCount)
+ throws SQLException {
+ String sql = "INSERT INTO " + tableName +
+ " (id, name, val_int, val_bigint, val_bool, val_text, val_bytea)" +
+ " VALUES (?, ?, ?, ?, ?, ?, ?)";
+
+ try (PreparedStatement ps = conn.prepareStatement(sql)) {
+ for (int i = 1; i <= recordCount; i++) {
+ ps.setInt(1, i);
+ ps.setString(2, "record_" + i);
+ ps.setInt(3, i * 10);
+ ps.setLong(4, i * 1_000_000L);
+ ps.setBoolean(5, i % 2 == 0);
+ ps.setString(6, "text_value_for_row_" + i);
+ ps.setBytes(7, expectedBytea(i));
+ ps.addBatch();
+
+ if (i % 500 == 0) {
+ ps.executeBatch();
+ }
+ }
+ ps.executeBatch();
+ }
+ logger.debug("Inserted {} rows into {}", recordCount, tableName);
+ }
+
+ private static int assertPage(Connection conn, String tableName,
+ int offset, int expectedRowsOnPage)
+ throws SQLException {
+
+ String sql = "SELECT id, name, val_int, val_bigint, val_bool, val_text, val_bytea" +
+ " FROM " + tableName +
+ " ORDER BY id" +
+ " LIMIT " + PAGE_SIZE + " OFFSET " + offset;
+
+ int rowsOnPage = 0;
+ try (PreparedStatement ps = conn.prepareStatement(sql);
+ ResultSet rs = ps.executeQuery()) {
+
+ while (rs.next()) {
+ int expectedId = offset + rowsOnPage + 1;
+ int id = rs.getInt("id");
+
+ assertEquals(expectedId, id,
+ "id mismatch at offset=" + offset + " row=" + rowsOnPage);
+ assertEquals("record_" + id, rs.getString("name"),
+ "name mismatch for id=" + id);
+ assertEquals(id * 10, rs.getInt("val_int"),
+ "val_int mismatch for id=" + id);
+ assertEquals(id * 1_000_000L, rs.getLong("val_bigint"),
+ "val_bigint mismatch for id=" + id);
+ assertEquals(id % 2 == 0, rs.getBoolean("val_bool"),
+ "val_bool mismatch for id=" + id);
+ assertEquals("text_value_for_row_" + id, rs.getString("val_text"),
+ "val_text mismatch for id=" + id);
+
+ assertBytea(expectedBytea(id), rs.getObject("val_bytea"),
+ "val_bytea for id=" + id);
+
+ rowsOnPage++;
+ }
+ }
+
+ assertEquals(expectedRowsOnPage, rowsOnPage,
+ "Page at offset=" + offset + " expected " + expectedRowsOnPage + " rows");
+ return rowsOnPage;
+ }
+
+ private static void dropTable(Connection conn, String tableName) {
+ try (Statement stmt = conn.createStatement()) {
+ stmt.execute("DROP TABLE IF EXISTS " + tableName);
+ logger.debug("Dropped table {}", tableName);
+ } catch (SQLException e) {
+ logger.warn("Could not drop table {}: {}", tableName, e.getMessage());
+ }
+ }
+
+ // -------------------------------------------------------------------------
+ // Data-generation helpers
+ // -------------------------------------------------------------------------
+
+ private static byte[] expectedBytea(int rowId) {
+ return new byte[]{
+ (byte) (rowId & 0xFF),
+ (byte) ((rowId >> 8) & 0xFF),
+ (byte) ((rowId * 3) & 0xFF),
+ (byte) ((rowId * 7) & 0xFF)
+ };
+ }
+
+ /**
+ * Asserts that {@code actual} (which may be a {@code byte[]} or the hex-escape
+ * {@code String} {@code "\\xHH…"}) equals {@code expected} byte-for-byte.
+ */
+ private static void assertBytea(byte[] expected, Object actual, String columnLabel) {
+ assertNotNull(actual, columnLabel + " must not be null");
+
+ byte[] actualBytes;
+ if (actual instanceof byte[]) {
+ actualBytes = (byte[]) actual;
+ } else if (actual instanceof String) {
+ String s = (String) actual;
+ if (s.startsWith("\\x") || s.startsWith("\\X")) {
+ actualBytes = hexStringToBytes(s.substring(2));
+ } else {
+ actualBytes = s.getBytes(java.nio.charset.StandardCharsets.UTF_8);
+ }
+ } else {
+ actualBytes = fail(columnLabel + " has unexpected type " + actual.getClass().getName());
+ }
+
+ assertArrayEquals(expected, actualBytes, columnLabel + " bytes do not match");
+ }
+
+ private static byte[] hexStringToBytes(String hex) {
+ if (hex.isEmpty()) {
+ return new byte[0];
+ }
+ int len = hex.length();
+ byte[] data = new byte[len / 2];
+ for (int i = 0; i < len; i += 2) {
+ data[i / 2] = (byte) ((Character.digit(hex.charAt(i), 16) << 4)
+ + Character.digit(hex.charAt(i + 1), 16));
+ }
+ return data;
+ }
+}
diff --git a/ojp-jdbc-driver/src/test/java/openjproxy/jdbc/Db2PaginationCacheIntegrationTest.java b/ojp-jdbc-driver/src/test/java/openjproxy/jdbc/Db2PaginationCacheIntegrationTest.java
new file mode 100644
index 000000000..38c4f248d
--- /dev/null
+++ b/ojp-jdbc-driver/src/test/java/openjproxy/jdbc/Db2PaginationCacheIntegrationTest.java
@@ -0,0 +1,262 @@
+package openjproxy.jdbc;
+
+import org.junit.jupiter.api.BeforeAll;
+import org.junit.jupiter.params.ParameterizedTest;
+import org.junit.jupiter.params.provider.CsvFileSource;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.sql.Connection;
+import java.sql.DriverManager;
+import java.sql.PreparedStatement;
+import java.sql.ResultSet;
+import java.sql.SQLException;
+import java.sql.Statement;
+
+import static org.junit.jupiter.api.Assertions.assertArrayEquals;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertNotNull;
+import static org.junit.jupiter.api.Assumptions.assumeTrue;
+
+/**
+ * Integration test for the next-page prefetch cache feature with an IBM DB2 backend.
+ *
+ * DB2 uses the ANSI SQL {@code OFFSET m ROWS FETCH NEXT n ROWS ONLY} pagination syntax. + * + *
The test is parameterized over several record counts (99, 100, 101, 567, 1000) to exercise + * boundary conditions around the 100-record page size. For each count the test: + *
This test is disabled by default and is activated by passing + * {@code -DenableDb2PrefetchCacheTests=true} to the Maven Surefire plugin in CI. + * The target OJP server must already be running on port 10594 with the prefetch cache enabled. + */ +class Db2PaginationCacheIntegrationTest { + + private static final Logger logger = LoggerFactory.getLogger(Db2PaginationCacheIntegrationTest.class); + + /** Number of rows per page used throughout these tests. */ + private static final int PAGE_SIZE = 100; + + private static boolean isTestEnabled; + + @BeforeAll + static void checkTestConfiguration() { + isTestEnabled = Boolean.parseBoolean( + System.getProperty("enableDb2PrefetchCacheTests", "false")); + } + + // ------------------------------------------------------------------------- + // Parameterized test – one run per row in the CSV + // ------------------------------------------------------------------------- + + /** + * Core pagination test for DB2. + */ + @ParameterizedTest + @CsvFileSource(resources = "/db2_prefetch_cache_connections_with_record_counts.csv") + void testPaginationWithPrefetchCache(int recordCount, String driverClass, + String url, String user, String pwd) + throws SQLException, ClassNotFoundException { + + assumeTrue(isTestEnabled, + "DB2 prefetch-cache tests are disabled " + + "(pass -DenableDb2PrefetchCacheTests=true to enable)"); + + Class.forName(driverClass); + logger.info("Prefetch-cache pagination test: recordCount={}, url={}", recordCount, url); + + String tableName = "DB2INST1.ojp_pfx_db2_" + recordCount; + + try (Connection conn = DriverManager.getConnection(url, user, pwd)) { + + // DB2 requires explicit schema to avoid "object not found" errors + try (Statement schemaStmt = conn.createStatement()) { + schemaStmt.execute("SET SCHEMA DB2INST1"); + } + + createTable(conn, tableName); + insertRows(conn, tableName, recordCount); + + int totalRetrieved = 0; + for (int offset = 0; offset < recordCount; offset += PAGE_SIZE) { + int 
expectedOnPage = Math.min(PAGE_SIZE, recordCount - offset); + totalRetrieved += assertPage(conn, tableName, offset, expectedOnPage); + } + + assertEquals(recordCount, totalRetrieved, + "Total rows retrieved across all pages must equal recordCount"); + + dropTable(conn, tableName); + } + } + + // ------------------------------------------------------------------------- + // Helpers + // ------------------------------------------------------------------------- + + /** + * Drops (if exists) and re-creates the test table. + * + *
Schema: + *
+ * id INTEGER NOT NULL PRIMARY KEY – 1-based row identifier
+ * name VARCHAR(100) NOT NULL – "record_{id}"
+ * val_int INTEGER NOT NULL – id × 10
+ * val_bigint BIGINT NOT NULL – id × 1,000,000
+ * val_bool SMALLINT NOT NULL – 1 when id is even, else 0
+ * val_text VARCHAR(255) NOT NULL – "text_value_for_row_{id}"
+ * val_blob BLOB(1K) NOT NULL – four deterministic bytes derived from id
+ *
+ *
+ * Note: DB2 does not have a native BOOLEAN SQL type in older versions; {@code SMALLINT} (0/1) + * is used as a portable substitute. TEXT is replaced with VARCHAR(255). + */ + private static void createTable(Connection conn, String tableName) throws SQLException { + // Drop if exists (DB2 uses different DROP syntax) + try (Statement stmt = conn.createStatement()) { + stmt.execute("DROP TABLE " + tableName); + } catch (SQLException e) { + // Table does not exist – ignore + } + try (Statement stmt = conn.createStatement()) { + stmt.execute( + "CREATE TABLE " + tableName + " (" + + " id INTEGER NOT NULL," + + " name VARCHAR(100) NOT NULL," + + " val_int INTEGER NOT NULL," + + " val_bigint BIGINT NOT NULL," + + " val_bool SMALLINT NOT NULL," + + " val_text VARCHAR(255) NOT NULL," + + " val_blob BLOB(1K) NOT NULL," + + " PRIMARY KEY (id)" + + ")"); + } + logger.debug("Created table {}", tableName); + } + + private static void insertRows(Connection conn, String tableName, int recordCount) + throws SQLException { + String sql = "INSERT INTO " + tableName + + " (id, name, val_int, val_bigint, val_bool, val_text, val_blob)" + + " VALUES (?, ?, ?, ?, ?, ?, ?)"; + + try (PreparedStatement ps = conn.prepareStatement(sql)) { + for (int i = 1; i <= recordCount; i++) { + ps.setInt(1, i); + ps.setString(2, "record_" + i); + ps.setInt(3, i * 10); + ps.setLong(4, i * 1_000_000L); + ps.setInt(5, i % 2 == 0 ? 
1 : 0); + ps.setString(6, "text_value_for_row_" + i); + ps.setBytes(7, expectedBlob(i)); + ps.addBatch(); + + if (i % 500 == 0) { + ps.executeBatch(); + } + } + ps.executeBatch(); + } + logger.debug("Inserted {} rows into {}", recordCount, tableName); + } + + private static int assertPage(Connection conn, String tableName, + int offset, int expectedRowsOnPage) + throws SQLException { + + // DB2 uses OFFSET m ROWS FETCH NEXT n ROWS ONLY + String sql = "SELECT id, name, val_int, val_bigint, val_bool, val_text, val_blob" + + " FROM " + tableName + + " ORDER BY id" + + " OFFSET " + offset + " ROWS FETCH NEXT " + PAGE_SIZE + " ROWS ONLY"; + + int rowsOnPage = 0; + try (PreparedStatement ps = conn.prepareStatement(sql); + ResultSet rs = ps.executeQuery()) { + + while (rs.next()) { + int expectedId = offset + rowsOnPage + 1; + int id = rs.getInt("id"); + + assertEquals(expectedId, id, + "id mismatch at offset=" + offset + " row=" + rowsOnPage); + assertEquals("record_" + id, rs.getString("name"), + "name mismatch for id=" + id); + assertEquals(id * 10, rs.getInt("val_int"), + "val_int mismatch for id=" + id); + assertEquals(id * 1_000_000L, rs.getLong("val_bigint"), + "val_bigint mismatch for id=" + id); + assertEquals(id % 2 == 0 ? 1 : 0, rs.getInt("val_bool"), + "val_bool mismatch for id=" + id); + assertEquals("text_value_for_row_" + id, rs.getString("val_text"), + "val_text mismatch for id=" + id); + + byte[] actualBlob = toBlobBytes(rs, "val_blob", id); + assertNotNull(actualBlob, "val_blob for id=" + id + " must not be null"); + assertArrayEquals(expectedBlob(id), actualBlob, + "val_blob bytes do not match for id=" + id); + + rowsOnPage++; + } + } + + assertEquals(expectedRowsOnPage, rowsOnPage, + "Page at offset=" + offset + " expected " + expectedRowsOnPage + " rows"); + return rowsOnPage; + } + + /** + * Reads a BLOB/binary column as a {@code byte[]}. + * + *
The prefetch cache materialises BLOBs as {@code byte[]} ({@link java.sql.Types#BINARY} / + * {@code VARBINARY}) when serving from cache, whereas a live DB query may return a + * {@link java.sql.Blob} object. Both representations are handled here. + */ + private static byte[] toBlobBytes(ResultSet rs, String column, int id) throws SQLException { + Object obj = rs.getObject(column); + if (obj == null) { + return null; + } + if (obj instanceof byte[]) { + return (byte[]) obj; + } + if (obj instanceof java.sql.Blob) { + java.sql.Blob blob = (java.sql.Blob) obj; + return blob.getBytes(1, (int) blob.length()); + } + // Fallback: use getBytes + return rs.getBytes(column); + } + + private static void dropTable(Connection conn, String tableName) { + try (Statement stmt = conn.createStatement()) { + stmt.execute("DROP TABLE " + tableName); + logger.debug("Dropped table {}", tableName); + } catch (SQLException e) { + logger.warn("Could not drop table {}: {}", tableName, e.getMessage()); + } + } + + // ------------------------------------------------------------------------- + // Data-generation helpers + // ------------------------------------------------------------------------- + + private static byte[] expectedBlob(int rowId) { + return new byte[]{ + (byte) (rowId & 0xFF), + (byte) ((rowId >> 8) & 0xFF), + (byte) ((rowId * 3) & 0xFF), + (byte) ((rowId * 7) & 0xFF) + }; + } +} diff --git a/ojp-jdbc-driver/src/test/java/openjproxy/jdbc/H2PaginationCacheIntegrationTest.java b/ojp-jdbc-driver/src/test/java/openjproxy/jdbc/H2PaginationCacheIntegrationTest.java new file mode 100644 index 000000000..f8b6c79f2 --- /dev/null +++ b/ojp-jdbc-driver/src/test/java/openjproxy/jdbc/H2PaginationCacheIntegrationTest.java @@ -0,0 +1,225 @@ +package openjproxy.jdbc; + +import org.junit.jupiter.api.BeforeAll; +import org.junit.jupiter.params.ParameterizedTest; +import org.junit.jupiter.params.provider.CsvFileSource; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import 
java.sql.Connection; +import java.sql.DriverManager; +import java.sql.PreparedStatement; +import java.sql.ResultSet; +import java.sql.SQLException; +import java.sql.Statement; + +import static org.junit.jupiter.api.Assertions.assertArrayEquals; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertNotNull; +import static org.junit.jupiter.api.Assumptions.assumeTrue; + +/** + * Integration test for the next-page prefetch cache feature with an H2 backend. + * + *
H2 supports the {@code LIMIT n OFFSET m} pagination syntax. + * + *
The test is parameterized over several record counts (99, 100, 101, 567, 1000) to exercise + * boundary conditions around the 100-record page size. For each count the test: + *
This test is disabled by default and is activated by passing + * {@code -DenableH2PrefetchCacheTests=true} to the Maven Surefire plugin in CI. + * The target OJP server must already be running on port 10594 with the prefetch cache enabled. + */ +class H2PaginationCacheIntegrationTest { + + private static final Logger logger = LoggerFactory.getLogger(H2PaginationCacheIntegrationTest.class); + + /** Number of rows per page used throughout these tests. */ + private static final int PAGE_SIZE = 100; + + private static boolean isTestEnabled; + + @BeforeAll + static void checkTestConfiguration() { + isTestEnabled = Boolean.parseBoolean( + System.getProperty("enableH2PrefetchCacheTests", "false")); + } + + // ------------------------------------------------------------------------- + // Parameterized test – one run per row in the CSV + // ------------------------------------------------------------------------- + + /** + * Core pagination test for H2. + */ + @ParameterizedTest + @CsvFileSource(resources = "/h2_prefetch_cache_connections_with_record_counts.csv") + void testPaginationWithPrefetchCache(int recordCount, String driverClass, + String url, String user, String pwd) + throws SQLException, ClassNotFoundException { + + assumeTrue(isTestEnabled, + "H2 prefetch-cache tests are disabled " + + "(pass -DenableH2PrefetchCacheTests=true to enable)"); + + Class.forName(driverClass); + logger.info("Prefetch-cache pagination test: recordCount={}, url={}", recordCount, url); + + String tableName = "OJP_PFX_H2_" + recordCount; + + try (Connection conn = DriverManager.getConnection(url, user, pwd)) { + + createTable(conn, tableName); + insertRows(conn, tableName, recordCount); + + int totalRetrieved = 0; + for (int offset = 0; offset < recordCount; offset += PAGE_SIZE) { + int expectedOnPage = Math.min(PAGE_SIZE, recordCount - offset); + totalRetrieved += assertPage(conn, tableName, offset, expectedOnPage); + } + + assertEquals(recordCount, totalRetrieved, + "Total rows 
retrieved across all pages must equal recordCount"); + + dropTable(conn, tableName); + } + } + + // ------------------------------------------------------------------------- + // Helpers + // ------------------------------------------------------------------------- + + /** + * Drops (if exists) and re-creates the test table. + * + *
Schema: + *
+ * id INT PRIMARY KEY – 1-based row identifier
+ * name VARCHAR(100) NOT NULL – "record_{id}"
+ * val_int INT NOT NULL – id × 10
+ * val_bigint BIGINT NOT NULL – id × 1,000,000
+ * val_bool BOOLEAN NOT NULL – true when id is even
+ * val_text VARCHAR(255) NOT NULL – "text_value_for_row_{id}"
+ * val_binary VARBINARY(32) NOT NULL – four deterministic bytes derived from id
+ *
+ */
+ private static void createTable(Connection conn, String tableName) throws SQLException {
+ try (Statement stmt = conn.createStatement()) {
+ stmt.execute("DROP TABLE IF EXISTS " + tableName);
+ stmt.execute(
+ "CREATE TABLE " + tableName + " (" +
+ " id INT PRIMARY KEY," +
+ " name VARCHAR(100) NOT NULL," +
+ " val_int INT NOT NULL," +
+ " val_bigint BIGINT NOT NULL," +
+ " val_bool BOOLEAN NOT NULL," +
+ " val_text VARCHAR(255) NOT NULL," +
+ " val_binary VARBINARY(32) NOT NULL" +
+ ")");
+ }
+ logger.debug("Created table {}", tableName);
+ }
+
+ private static void insertRows(Connection conn, String tableName, int recordCount)
+ throws SQLException {
+ String sql = "INSERT INTO " + tableName +
+ " (id, name, val_int, val_bigint, val_bool, val_text, val_binary)" +
+ " VALUES (?, ?, ?, ?, ?, ?, ?)";
+
+ try (PreparedStatement ps = conn.prepareStatement(sql)) {
+ for (int i = 1; i <= recordCount; i++) {
+ ps.setInt(1, i);
+ ps.setString(2, "record_" + i);
+ ps.setInt(3, i * 10);
+ ps.setLong(4, i * 1_000_000L);
+ ps.setBoolean(5, i % 2 == 0);
+ ps.setString(6, "text_value_for_row_" + i);
+ ps.setBytes(7, expectedBinary(i));
+ ps.addBatch();
+
+ if (i % 500 == 0) {
+ ps.executeBatch();
+ }
+ }
+ ps.executeBatch();
+ }
+ logger.debug("Inserted {} rows into {}", recordCount, tableName);
+ }
+
+ private static int assertPage(Connection conn, String tableName,
+ int offset, int expectedRowsOnPage)
+ throws SQLException {
+
+ String sql = "SELECT id, name, val_int, val_bigint, val_bool, val_text, val_binary" +
+ " FROM " + tableName +
+ " ORDER BY id" +
+ " LIMIT " + PAGE_SIZE + " OFFSET " + offset;
+
+ int rowsOnPage = 0;
+ try (PreparedStatement ps = conn.prepareStatement(sql);
+ ResultSet rs = ps.executeQuery()) {
+
+ while (rs.next()) {
+ int expectedId = offset + rowsOnPage + 1;
+ int id = rs.getInt("id");
+
+ assertEquals(expectedId, id,
+ "id mismatch at offset=" + offset + " row=" + rowsOnPage);
+ assertEquals("record_" + id, rs.getString("name"),
+ "name mismatch for id=" + id);
+ assertEquals(id * 10, rs.getInt("val_int"),
+ "val_int mismatch for id=" + id);
+ assertEquals(id * 1_000_000L, rs.getLong("val_bigint"),
+ "val_bigint mismatch for id=" + id);
+ assertEquals(id % 2 == 0, rs.getBoolean("val_bool"),
+ "val_bool mismatch for id=" + id);
+ assertEquals("text_value_for_row_" + id, rs.getString("val_text"),
+ "val_text mismatch for id=" + id);
+
+ Object binObj = rs.getObject("val_binary");
+ assertNotNull(binObj, "val_binary for id=" + id + " must not be null");
+ byte[] actualBytes = binObj instanceof byte[] ? (byte[]) binObj
+ : rs.getBytes("val_binary");
+ assertArrayEquals(expectedBinary(id), actualBytes,
+ "val_binary bytes do not match for id=" + id);
+
+ rowsOnPage++;
+ }
+ }
+
+ assertEquals(expectedRowsOnPage, rowsOnPage,
+ "Page at offset=" + offset + " expected " + expectedRowsOnPage + " rows");
+ return rowsOnPage;
+ }
+
+ private static void dropTable(Connection conn, String tableName) {
+ try (Statement stmt = conn.createStatement()) {
+ stmt.execute("DROP TABLE IF EXISTS " + tableName);
+ logger.debug("Dropped table {}", tableName);
+ } catch (SQLException e) {
+ logger.warn("Could not drop table {}: {}", tableName, e.getMessage());
+ }
+ }
+
+ // -------------------------------------------------------------------------
+ // Data-generation helpers
+ // -------------------------------------------------------------------------
+
+ private static byte[] expectedBinary(int rowId) {
+ return new byte[]{
+ (byte) (rowId & 0xFF),
+ (byte) ((rowId >> 8) & 0xFF),
+ (byte) ((rowId * 3) & 0xFF),
+ (byte) ((rowId * 7) & 0xFF)
+ };
+ }
+}
diff --git a/ojp-jdbc-driver/src/test/java/openjproxy/jdbc/MySQLMariaDBPaginationCacheIntegrationTest.java b/ojp-jdbc-driver/src/test/java/openjproxy/jdbc/MySQLMariaDBPaginationCacheIntegrationTest.java
new file mode 100644
index 000000000..938eae010
--- /dev/null
+++ b/ojp-jdbc-driver/src/test/java/openjproxy/jdbc/MySQLMariaDBPaginationCacheIntegrationTest.java
@@ -0,0 +1,257 @@
+package openjproxy.jdbc;
+
+import org.junit.jupiter.api.BeforeAll;
+import org.junit.jupiter.params.ParameterizedTest;
+import org.junit.jupiter.params.provider.CsvFileSource;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.sql.Connection;
+import java.sql.DriverManager;
+import java.sql.PreparedStatement;
+import java.sql.ResultSet;
+import java.sql.SQLException;
+import java.sql.Statement;
+
+import static org.junit.jupiter.api.Assertions.assertArrayEquals;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertNotNull;
+import static org.junit.jupiter.api.Assumptions.assumeTrue;
+
+/**
+ * Integration test for the next-page prefetch cache feature with a MySQL backend.
+ *
+ * The test is parameterized over several record counts (99, 100, 101, 567, 1000) to exercise + * boundary conditions around the 100-record page size. For each count the test: + *
This test is disabled by default and is activated by passing + * {@code -DenableMySQLPrefetchCacheTests=true} to the Maven Surefire plugin in CI. + * The target OJP server must already be running on port 10594 with the prefetch cache enabled. + */ +class MySQLMariaDBPaginationCacheIntegrationTest { + + private static final Logger logger = LoggerFactory.getLogger(MySQLMariaDBPaginationCacheIntegrationTest.class); + + /** Number of rows per page used throughout these tests. */ + private static final int PAGE_SIZE = 100; + + private static boolean isMySQLTestEnabled; + private static boolean isMariaDBTestEnabled; + + @BeforeAll + static void checkTestConfiguration() { + isMySQLTestEnabled = Boolean.parseBoolean( + System.getProperty("enableMySQLPrefetchCacheTests", "false")); + isMariaDBTestEnabled = Boolean.parseBoolean( + System.getProperty("enableMariaDBPrefetchCacheTests", "false")); + } + + // ------------------------------------------------------------------------- + // Parameterized tests – one run per row in each CSV + // ------------------------------------------------------------------------- + + /** + * Core pagination test for MySQL. + */ + @ParameterizedTest + @CsvFileSource(resources = "/mysql_prefetch_cache_connections_with_record_counts.csv") + void testMySQLPaginationWithPrefetchCache(int recordCount, String driverClass, + String url, String user, String pwd) + throws SQLException, ClassNotFoundException { + + assumeTrue(isMySQLTestEnabled, + "MySQL prefetch-cache tests are disabled " + + "(pass -DenableMySQLPrefetchCacheTests=true to enable)"); + + runPaginationTest(recordCount, driverClass, url, user, pwd, "ojp_pfx_mysql_"); + } + + /** + * Core pagination test for MariaDB. 
+ */ + @ParameterizedTest + @CsvFileSource(resources = "/mariadb_prefetch_cache_connections_with_record_counts.csv") + void testMariaDBPaginationWithPrefetchCache(int recordCount, String driverClass, + String url, String user, String pwd) + throws SQLException, ClassNotFoundException { + + assumeTrue(isMariaDBTestEnabled, + "MariaDB prefetch-cache tests are disabled " + + "(pass -DenableMariaDBPrefetchCacheTests=true to enable)"); + + runPaginationTest(recordCount, driverClass, url, user, pwd, "ojp_pfx_maria_"); + } + + // ------------------------------------------------------------------------- + // Shared implementation + // ------------------------------------------------------------------------- + + private void runPaginationTest(int recordCount, String driverClass, + String url, String user, String pwd, + String tablePrefix) + throws SQLException, ClassNotFoundException { + + Class.forName(driverClass); + logger.info("Prefetch-cache pagination test: recordCount={}, url={}", recordCount, url); + + String tableName = tablePrefix + recordCount; + + try (Connection conn = DriverManager.getConnection(url, user, pwd)) { + + createTable(conn, tableName); + insertRows(conn, tableName, recordCount); + + int totalRetrieved = 0; + for (int offset = 0; offset < recordCount; offset += PAGE_SIZE) { + int expectedOnPage = Math.min(PAGE_SIZE, recordCount - offset); + totalRetrieved += assertPage(conn, tableName, offset, expectedOnPage); + } + + assertEquals(recordCount, totalRetrieved, + "Total rows retrieved across all pages must equal recordCount"); + + dropTable(conn, tableName); + } + } + + // ------------------------------------------------------------------------- + // Helpers + // ------------------------------------------------------------------------- + + /** + * Drops (if exists) and re-creates the test table. + * + *
Schema: + *
+ * id INT PRIMARY KEY – 1-based row identifier
+ * name VARCHAR(100) NOT NULL – "record_{id}"
+ * val_int INT NOT NULL – id × 10
+ * val_bigint BIGINT NOT NULL – id × 1,000,000
+ * val_bool TINYINT(1) NOT NULL – 1 when id is even, else 0
+ * val_text TEXT NOT NULL – "text_value_for_row_{id}"
+ * val_binary VARBINARY(32) NOT NULL – four deterministic bytes derived from id
+ *
+ */
+ // Recreates the table from scratch so each parameterized run starts clean.
+ private static void createTable(Connection conn, String tableName) throws SQLException {
+ try (Statement stmt = conn.createStatement()) {
+ stmt.execute("DROP TABLE IF EXISTS " + tableName);
+ stmt.execute(
+ "CREATE TABLE " + tableName + " (" +
+ " id INT PRIMARY KEY," +
+ " name VARCHAR(100) NOT NULL," +
+ " val_int INT NOT NULL," +
+ " val_bigint BIGINT NOT NULL," +
+ " val_bool TINYINT(1) NOT NULL," +
+ " val_text TEXT NOT NULL," +
+ " val_binary VARBINARY(32) NOT NULL" +
+ ")");
+ }
+ logger.debug("Created table {}", tableName);
+ }
+
+ // Bulk-inserts recordCount rows via JDBC batching; the batch is flushed
+ // every 500 rows to bound driver-side memory, with a final flush for the
+ // remainder when recordCount is not a multiple of 500.
+ private static void insertRows(Connection conn, String tableName, int recordCount)
+ throws SQLException {
+ String sql = "INSERT INTO " + tableName +
+ " (id, name, val_int, val_bigint, val_bool, val_text, val_binary)" +
+ " VALUES (?, ?, ?, ?, ?, ?, ?)";
+
+ try (PreparedStatement ps = conn.prepareStatement(sql)) {
+ for (int i = 1; i <= recordCount; i++) {
+ ps.setInt(1, i);
+ ps.setString(2, "record_" + i);
+ ps.setInt(3, i * 10);
+ ps.setLong(4, i * 1_000_000L);
+ ps.setInt(5, i % 2 == 0 ? 1 : 0); // TINYINT(1): 1 = even id, 0 = odd id
+ ps.setString(6, "text_value_for_row_" + i);
+ ps.setBytes(7, expectedBinary(i));
+ ps.addBatch();
+
+ if (i % 500 == 0) {
+ ps.executeBatch(); // periodic flush
+ }
+ }
+ ps.executeBatch(); // final flush for any remaining rows
+ }
+ logger.debug("Inserted {} rows into {}", recordCount, tableName);
+ }
+
+ /**
+ * Reads one page via MySQL/MariaDB LIMIT/OFFSET and verifies every column
+ * of every row on the page against the deterministic generator values.
+ *
+ * @return the number of rows actually seen on the page
+ */
+ private static int assertPage(Connection conn, String tableName,
+ int offset, int expectedRowsOnPage)
+ throws SQLException {
+
+ String sql = "SELECT id, name, val_int, val_bigint, val_bool, val_text, val_binary" +
+ " FROM " + tableName +
+ " ORDER BY id" +
+ " LIMIT " + PAGE_SIZE + " OFFSET " + offset;
+
+ int rowsOnPage = 0;
+ try (PreparedStatement ps = conn.prepareStatement(sql);
+ ResultSet rs = ps.executeQuery()) {
+
+ while (rs.next()) {
+ // Rows are ordered by id, so the expected id follows directly
+ // from the page offset and the position within the page.
+ int expectedId = offset + rowsOnPage + 1;
+ int id = rs.getInt("id");
+
+ assertEquals(expectedId, id,
+ "id mismatch at offset=" + offset + " row=" + rowsOnPage);
+ assertEquals("record_" + id, rs.getString("name"),
+ "name mismatch for id=" + id);
+ assertEquals(id * 10, rs.getInt("val_int"),
+ "val_int mismatch for id=" + id);
+ assertEquals(id * 1_000_000L, rs.getLong("val_bigint"),
+ "val_bigint mismatch for id=" + id);
+ // MySQL/MariaDB drivers map TINYINT(1) to java.lang.Boolean;
+ // calling getInt() on such a column throws
+ // NumberFormatException("false"). Use getBoolean(), consistent
+ // with the PostgreSQL/H2/CockroachDB tests.
+ assertEquals(id % 2 == 0, rs.getBoolean("val_bool"),
+ "val_bool mismatch for id=" + id);
+ assertEquals("text_value_for_row_" + id, rs.getString("val_text"),
+ "val_text mismatch for id=" + id);
+
+ // The prefetch cache may materialise VARBINARY as byte[]; a live
+ // query may return a driver-specific object, so fall back to
+ // getBytes() when getObject() is not already a byte[].
+ Object binaryObj = rs.getObject("val_binary");
+ assertNotNull(binaryObj, "val_binary for id=" + id + " must not be null");
+ byte[] actualBytes = binaryObj instanceof byte[] ? (byte[]) binaryObj
+ : rs.getBytes("val_binary");
+ assertArrayEquals(expectedBinary(id), actualBytes,
+ "val_binary bytes do not match for id=" + id);
+
+ rowsOnPage++;
+ }
+ }
+
+ assertEquals(expectedRowsOnPage, rowsOnPage,
+ "Page at offset=" + offset + " expected " + expectedRowsOnPage + " rows");
+ return rowsOnPage;
+ }
+
+ // Best-effort cleanup: drops the test table, logging (not failing) on
+ // SQLException so a cleanup problem never masks the real test outcome.
+ private static void dropTable(Connection conn, String tableName) {
+ try (Statement stmt = conn.createStatement()) {
+ stmt.execute("DROP TABLE IF EXISTS " + tableName);
+ logger.debug("Dropped table {}", tableName);
+ } catch (SQLException e) {
+ logger.warn("Could not drop table {}: {}", tableName, e.getMessage());
+ }
+ }
+
+ // -------------------------------------------------------------------------
+ // Data-generation helpers
+ // -------------------------------------------------------------------------
+
+ /**
+ * Returns four deterministic bytes for a given {@code rowId}.
+ *
+ * The same function is used at insert time and at verification time, so
+ * the binary column round-trip can be checked without storing fixtures.
+ */
+ private static byte[] expectedBinary(int rowId) {
+ return new byte[]{
+ (byte) (rowId & 0xFF), // low byte of the id
+ (byte) ((rowId >> 8) & 0xFF), // second byte of the id
+ (byte) ((rowId * 3) & 0xFF), // id-dependent filler byte
+ (byte) ((rowId * 7) & 0xFF) // id-dependent filler byte
+ };
+ }
+}
diff --git a/ojp-jdbc-driver/src/test/java/openjproxy/jdbc/OraclePaginationCacheIntegrationTest.java b/ojp-jdbc-driver/src/test/java/openjproxy/jdbc/OraclePaginationCacheIntegrationTest.java
new file mode 100644
index 000000000..31ce5cf9e
--- /dev/null
+++ b/ojp-jdbc-driver/src/test/java/openjproxy/jdbc/OraclePaginationCacheIntegrationTest.java
@@ -0,0 +1,259 @@
+package openjproxy.jdbc;
+
+import org.junit.jupiter.api.BeforeAll;
+import org.junit.jupiter.params.ParameterizedTest;
+import org.junit.jupiter.params.provider.CsvFileSource;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.sql.Connection;
+import java.sql.DriverManager;
+import java.sql.PreparedStatement;
+import java.sql.ResultSet;
+import java.sql.SQLException;
+import java.sql.Statement;
+
+import static org.junit.jupiter.api.Assertions.assertArrayEquals;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertNotNull;
+import static org.junit.jupiter.api.Assumptions.assumeTrue;
+
+/**
+ * Integration test for the next-page prefetch cache feature with an Oracle backend.
+ *
+ * Oracle 12c+ supports the ANSI SQL {@code OFFSET m ROWS FETCH NEXT n ROWS ONLY} pagination + * syntax, which is recognised by the OJP {@code PaginationDetector}. + * + *
The test is parameterized over several record counts (99, 100, 101, 567, 1000) to exercise + * boundary conditions around the 100-record page size. For each count the test: + *
This test is disabled by default and is activated by passing + * {@code -DenableOraclePrefetchCacheTests=true} to the Maven Surefire plugin in CI. + * The target OJP server must already be running on port 10594 with the prefetch cache enabled. + * + *
Oracle type notes: + *
Schema: + *
+ * id NUMBER(10) PRIMARY KEY – 1-based row identifier
+ * name VARCHAR2(100) NOT NULL – "record_{id}"
+ * val_int NUMBER(10) NOT NULL – id × 10
+ * val_bigint NUMBER(19,0) NOT NULL – id × 1,000,000
+ * val_bool NUMBER(1) NOT NULL – 1 when id is even, else 0
+ * val_text VARCHAR2(255) NOT NULL – "text_value_for_row_{id}"
+ * val_blob BLOB NOT NULL – four deterministic bytes derived from id
+ *
+ */
+ // Oracle's DROP TABLE has no IF EXISTS clause here, so the drop is
+ // attempted unconditionally and a failure (typically "table does not
+ // exist") is deliberately ignored before creating the table fresh.
+ private static void createTable(Connection conn, String tableName) throws SQLException {
+ try (Statement stmt = conn.createStatement()) {
+ stmt.execute("DROP TABLE " + tableName);
+ } catch (SQLException e) {
+ // Table does not exist – ignore
+ }
+ try (Statement stmt = conn.createStatement()) {
+ stmt.execute(
+ "CREATE TABLE " + tableName + " (" +
+ " id NUMBER(10) NOT NULL," +
+ " name VARCHAR2(100) NOT NULL," +
+ " val_int NUMBER(10) NOT NULL," +
+ " val_bigint NUMBER(19,0) NOT NULL," +
+ " val_bool NUMBER(1) NOT NULL," +
+ " val_text VARCHAR2(255) NOT NULL," +
+ " val_blob BLOB NOT NULL," +
+ " CONSTRAINT pk_" + tableName + " PRIMARY KEY (id)" +
+ ")");
+ }
+ logger.debug("Created table {}", tableName);
+ }
+
+ // Bulk-inserts recordCount rows via JDBC batching; the batch is flushed
+ // every 500 rows to bound driver-side memory, with a final flush for the
+ // remainder when recordCount is not a multiple of 500.
+ private static void insertRows(Connection conn, String tableName, int recordCount)
+ throws SQLException {
+ String sql = "INSERT INTO " + tableName +
+ " (id, name, val_int, val_bigint, val_bool, val_text, val_blob)" +
+ " VALUES (?, ?, ?, ?, ?, ?, ?)";
+
+ try (PreparedStatement ps = conn.prepareStatement(sql)) {
+ for (int i = 1; i <= recordCount; i++) {
+ ps.setInt(1, i);
+ ps.setString(2, "record_" + i);
+ ps.setInt(3, i * 10);
+ ps.setLong(4, i * 1_000_000L);
+ ps.setInt(5, i % 2 == 0 ? 1 : 0); // NUMBER(1): 1 = even id, 0 = odd id
+ ps.setString(6, "text_value_for_row_" + i);
+ ps.setBytes(7, expectedBlob(i)); // BLOB column written as raw bytes
+ ps.addBatch();
+
+ if (i % 500 == 0) {
+ ps.executeBatch(); // periodic flush
+ }
+ }
+ ps.executeBatch(); // final flush for any remaining rows
+ }
+ logger.debug("Inserted {} rows into {}", recordCount, tableName);
+ }
+
+ /**
+ * Reads one page using the Oracle 12c+ OFFSET/FETCH syntax and verifies
+ * every column of every row on the page.
+ *
+ * @return the number of rows actually seen on the page
+ */
+ private static int assertPage(Connection conn, String tableName,
+ int offset, int expectedRowsOnPage)
+ throws SQLException {
+
+ // Oracle 12c+ OFFSET/FETCH syntax
+ String sql = "SELECT id, name, val_int, val_bigint, val_bool, val_text, val_blob" +
+ " FROM " + tableName +
+ " ORDER BY id" +
+ " OFFSET " + offset + " ROWS FETCH NEXT " + PAGE_SIZE + " ROWS ONLY";
+
+ int rowsOnPage = 0;
+ try (PreparedStatement ps = conn.prepareStatement(sql);
+ ResultSet rs = ps.executeQuery()) {
+
+ while (rs.next()) {
+ // Rows are ordered by id, so the expected id follows directly
+ // from the page offset and the position within the page.
+ int expectedId = offset + rowsOnPage + 1;
+ int id = rs.getInt("id");
+
+ assertEquals(expectedId, id,
+ "id mismatch at offset=" + offset + " row=" + rowsOnPage);
+ assertEquals("record_" + id, rs.getString("name"),
+ "name mismatch for id=" + id);
+ assertEquals(id * 10, rs.getInt("val_int"),
+ "val_int mismatch for id=" + id);
+ assertEquals(id * 1_000_000L, rs.getLong("val_bigint"),
+ "val_bigint mismatch for id=" + id);
+ // val_bool is NUMBER(1), a numeric column, so getInt() applies
+ // here (unlike the MySQL TINYINT(1) / SQL Server BIT cases).
+ assertEquals(id % 2 == 0 ? 1 : 0, rs.getInt("val_bool"),
+ "val_bool mismatch for id=" + id);
+ assertEquals("text_value_for_row_" + id, rs.getString("val_text"),
+ "val_text mismatch for id=" + id);
+
+ // toBlobBytes handles both byte[] (cache-served) and java.sql.Blob
+ // (live query) representations of the BLOB column.
+ byte[] actualBlob = toBlobBytes(rs, "val_blob", id);
+ assertNotNull(actualBlob, "val_blob for id=" + id + " must not be null");
+ assertArrayEquals(expectedBlob(id), actualBlob,
+ "val_blob bytes do not match for id=" + id);
+
+ rowsOnPage++;
+ }
+ }
+
+ assertEquals(expectedRowsOnPage, rowsOnPage,
+ "Page at offset=" + offset + " expected " + expectedRowsOnPage + " rows");
+ return rowsOnPage;
+ }
+
+ /**
+ * Reads a BLOB column as a {@code byte[]}.
+ *
+ * The prefetch cache materialises BLOBs as {@code byte[]} when serving from cache, + * whereas a live DB query returns a {@link java.sql.Blob} object. Both are handled here. + */ + private static byte[] toBlobBytes(ResultSet rs, String column, int id) throws SQLException { + Object obj = rs.getObject(column); + if (obj == null) { + return null; + } + if (obj instanceof byte[]) { + return (byte[]) obj; + } + if (obj instanceof java.sql.Blob) { + java.sql.Blob blob = (java.sql.Blob) obj; + return blob.getBytes(1, (int) blob.length()); + } + return rs.getBytes(column); + } + + private static void dropTable(Connection conn, String tableName) { + try (Statement stmt = conn.createStatement()) { + stmt.execute("DROP TABLE " + tableName); + logger.debug("Dropped table {}", tableName); + } catch (SQLException e) { + logger.warn("Could not drop table {}: {}", tableName, e.getMessage()); + } + } + + // ------------------------------------------------------------------------- + // Data-generation helpers + // ------------------------------------------------------------------------- + + private static byte[] expectedBlob(int rowId) { + return new byte[]{ + (byte) (rowId & 0xFF), + (byte) ((rowId >> 8) & 0xFF), + (byte) ((rowId * 3) & 0xFF), + (byte) ((rowId * 7) & 0xFF) + }; + } +} diff --git a/ojp-jdbc-driver/src/test/java/openjproxy/jdbc/SQLServerPaginationCacheIntegrationTest.java b/ojp-jdbc-driver/src/test/java/openjproxy/jdbc/SQLServerPaginationCacheIntegrationTest.java new file mode 100644 index 000000000..58667b5fd --- /dev/null +++ b/ojp-jdbc-driver/src/test/java/openjproxy/jdbc/SQLServerPaginationCacheIntegrationTest.java @@ -0,0 +1,270 @@ +package openjproxy.jdbc; + +import openjproxy.jdbc.testutil.SQLServerPrefetchCacheConnectionProvider; +import openjproxy.jdbc.testutil.SQLServerTestContainer; +import org.junit.jupiter.api.BeforeAll; +import org.junit.jupiter.api.condition.EnabledIf; +import org.junit.jupiter.params.ParameterizedTest; +import 
org.junit.jupiter.params.provider.ArgumentsSource; +import org.junit.jupiter.params.provider.ValueSource; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.sql.Connection; +import java.sql.DriverManager; +import java.sql.PreparedStatement; +import java.sql.ResultSet; +import java.sql.SQLException; +import java.sql.Statement; + +import static org.junit.jupiter.api.Assertions.assertArrayEquals; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertNotNull; +import static org.junit.jupiter.api.Assumptions.assumeTrue; + +/** + * Integration test for the next-page prefetch cache feature with a SQL Server backend. + * + *
SQL Server uses the ANSI SQL {@code OFFSET m ROWS FETCH NEXT n ROWS ONLY} pagination syntax. + * The SQL Server container is managed by TestContainers; the test connects via an OJP prefetch-cache + * server on port 10594. + * + *
The test is parameterized over several record counts (99, 100, 101, 567, 1000) to exercise + * boundary conditions around the 100-record page size. For each count the test: + *
This test is disabled by default and is activated by passing + * {@code -DenableSqlServerPrefetchCacheTests=true} to the Maven Surefire plugin in CI. + * The target OJP server must already be running on port 10594 with the prefetch cache enabled. + * + *
SQL Server type notes: + *
The record count is provided via {@code @ValueSource} and the connection details via + * {@link SQLServerPrefetchCacheConnectionProvider}. JUnit 5 does not support mixing + * two argument sources in a single {@code @ParameterizedTest}, so the test obtains the + * connection from the shared TestContainer directly and iterates over record counts. + */ + @ParameterizedTest + @ValueSource(ints = {99, 100, 101, 567, 1000}) + void testPaginationWithPrefetchCache(int recordCount) throws SQLException { + + assumeTrue(isTestEnabled, + "SQL Server prefetch-cache tests are disabled " + + "(pass -DenableSqlServerPrefetchCacheTests=true to enable)"); + + // Build connection via the prefetch-cache connection provider + String[] connArgs = getConnectionArgs(); + String url = connArgs[0]; + String user = connArgs[1]; + String pwd = connArgs[2]; + + logger.info("Prefetch-cache pagination test: recordCount={}, url={}", recordCount, url); + + String tableName = "ojp_pfx_mssql_" + recordCount; + + try (Connection conn = DriverManager.getConnection(url, user, pwd)) { + + createTable(conn, tableName); + insertRows(conn, tableName, recordCount); + + int totalRetrieved = 0; + for (int offset = 0; offset < recordCount; offset += PAGE_SIZE) { + int expectedOnPage = Math.min(PAGE_SIZE, recordCount - offset); + totalRetrieved += assertPage(conn, tableName, offset, expectedOnPage); + } + + assertEquals(recordCount, totalRetrieved, + "Total rows retrieved across all pages must equal recordCount"); + + dropTable(conn, tableName); + } + } + + // ------------------------------------------------------------------------- + // Helpers + // ------------------------------------------------------------------------- + + /** Obtains [url, user, password] from the TestContainer via the prefetch-cache provider. 
*/ + private static String[] getConnectionArgs() { + SQLServerTestContainer.getInstance(); + String containerJdbcUrl = SQLServerTestContainer.getJdbcUrl(); + String username = SQLServerTestContainer.getUsername(); + String password = SQLServerTestContainer.getPassword(); + + String prefetchCachePort = System.getProperty("ojp.prefetch.cache.port", "10594"); + String ojpProxyHost = System.getProperty("ojp.proxy.host", "localhost"); + + // strip "jdbc:" prefix and wrap with OJP proxy + String urlWithoutPrefix = containerJdbcUrl.startsWith("jdbc:") + ? containerJdbcUrl.substring("jdbc:".length()) + : containerJdbcUrl; + if (!urlWithoutPrefix.toLowerCase().contains("databasename=")) { + urlWithoutPrefix = urlWithoutPrefix + ";databaseName=defaultdb"; + } + String ojpUrl = "jdbc:ojp[" + ojpProxyHost + ":" + prefetchCachePort + "]_" + urlWithoutPrefix; + + return new String[]{ojpUrl, username, password}; + } + + /** + * Drops (if exists) and re-creates the test table. + * + *
Schema: + *
+ * id INT PRIMARY KEY – 1-based row identifier
+ * name NVARCHAR(100) NOT NULL – "record_{id}"
+ * val_int INT NOT NULL – id × 10
+ * val_bigint BIGINT NOT NULL – id × 1,000,000
+ * val_bool BIT NOT NULL – 1 when id is even, else 0
+ * val_text NVARCHAR(255) NOT NULL – "text_value_for_row_{id}"
+ * val_binary VARBINARY(32) NOT NULL – four deterministic bytes derived from id
+ *
+ */
+ // Conditional drop via the OBJECT_ID(..., 'U') check (user-table lookup),
+ // then recreate so each parameterized run starts clean.
+ private static void createTable(Connection conn, String tableName) throws SQLException {
+ try (Statement stmt = conn.createStatement()) {
+ stmt.execute("IF OBJECT_ID('" + tableName + "', 'U') IS NOT NULL DROP TABLE " + tableName);
+ stmt.execute(
+ "CREATE TABLE " + tableName + " (" +
+ " id INT NOT NULL PRIMARY KEY," +
+ " name NVARCHAR(100) NOT NULL," +
+ " val_int INT NOT NULL," +
+ " val_bigint BIGINT NOT NULL," +
+ " val_bool BIT NOT NULL," +
+ " val_text NVARCHAR(255) NOT NULL," +
+ " val_binary VARBINARY(32) NOT NULL" +
+ ")");
+ }
+ logger.debug("Created table {}", tableName);
+ }
+
+ // Bulk-inserts recordCount rows via JDBC batching; the batch is flushed
+ // every 500 rows to bound driver-side memory, with a final flush for the
+ // remainder when recordCount is not a multiple of 500.
+ private static void insertRows(Connection conn, String tableName, int recordCount)
+ throws SQLException {
+ String sql = "INSERT INTO " + tableName +
+ " (id, name, val_int, val_bigint, val_bool, val_text, val_binary)" +
+ " VALUES (?, ?, ?, ?, ?, ?, ?)";
+
+ try (PreparedStatement ps = conn.prepareStatement(sql)) {
+ for (int i = 1; i <= recordCount; i++) {
+ ps.setInt(1, i);
+ ps.setString(2, "record_" + i);
+ ps.setInt(3, i * 10);
+ ps.setLong(4, i * 1_000_000L);
+ ps.setInt(5, i % 2 == 0 ? 1 : 0); // BIT: 1 = even id, 0 = odd id
+ ps.setString(6, "text_value_for_row_" + i);
+ ps.setBytes(7, expectedBinary(i));
+ ps.addBatch();
+
+ if (i % 500 == 0) {
+ ps.executeBatch(); // periodic flush
+ }
+ }
+ ps.executeBatch(); // final flush for any remaining rows
+ }
+ logger.debug("Inserted {} rows into {}", recordCount, tableName);
+ }
+
+ /**
+ * Reads one page using SQL Server's OFFSET/FETCH syntax and verifies every
+ * column of every row on the page against the deterministic generator values.
+ *
+ * @return the number of rows actually seen on the page
+ */
+ private static int assertPage(Connection conn, String tableName,
+ int offset, int expectedRowsOnPage)
+ throws SQLException {
+
+ // SQL Server: ORDER BY is required when using OFFSET/FETCH
+ String sql = "SELECT id, name, val_int, val_bigint, val_bool, val_text, val_binary" +
+ " FROM " + tableName +
+ " ORDER BY id" +
+ " OFFSET " + offset + " ROWS FETCH NEXT " + PAGE_SIZE + " ROWS ONLY";
+
+ int rowsOnPage = 0;
+ try (PreparedStatement ps = conn.prepareStatement(sql);
+ ResultSet rs = ps.executeQuery()) {
+
+ while (rs.next()) {
+ // Rows are ordered by id, so the expected id follows directly
+ // from the page offset and the position within the page.
+ int expectedId = offset + rowsOnPage + 1;
+ int id = rs.getInt("id");
+
+ assertEquals(expectedId, id,
+ "id mismatch at offset=" + offset + " row=" + rowsOnPage);
+ assertEquals("record_" + id, rs.getString("name"),
+ "name mismatch for id=" + id);
+ assertEquals(id * 10, rs.getInt("val_int"),
+ "val_int mismatch for id=" + id);
+ assertEquals(id * 1_000_000L, rs.getLong("val_bigint"),
+ "val_bigint mismatch for id=" + id);
+ // SQL Server BIT is mapped to java.lang.Boolean by the JDBC
+ // driver; calling getInt() on such a column throws
+ // NumberFormatException("false"). Use getBoolean(), consistent
+ // with the PostgreSQL/H2/CockroachDB tests.
+ assertEquals(id % 2 == 0, rs.getBoolean("val_bool"),
+ "val_bool mismatch for id=" + id);
+ assertEquals("text_value_for_row_" + id, rs.getString("val_text"),
+ "val_text mismatch for id=" + id);
+
+ // The prefetch cache may materialise VARBINARY as byte[]; a live
+ // query may return a driver-specific object, so fall back to
+ // getBytes() when getObject() is not already a byte[].
+ Object binObj = rs.getObject("val_binary");
+ assertNotNull(binObj, "val_binary for id=" + id + " must not be null");
+ byte[] actualBytes = binObj instanceof byte[] ? (byte[]) binObj
+ : rs.getBytes("val_binary");
+ assertArrayEquals(expectedBinary(id), actualBytes,
+ "val_binary bytes do not match for id=" + id);
+
+ rowsOnPage++;
+ }
+ }
+
+ assertEquals(expectedRowsOnPage, rowsOnPage,
+ "Page at offset=" + offset + " expected " + expectedRowsOnPage + " rows");
+ return rowsOnPage;
+ }
+
+ // Best-effort cleanup using the OBJECT_ID conditional-drop idiom; failures
+ // are logged rather than thrown so cleanup never masks the test outcome.
+ private static void dropTable(Connection conn, String tableName) {
+ try (Statement stmt = conn.createStatement()) {
+ stmt.execute("IF OBJECT_ID('" + tableName + "', 'U') IS NOT NULL DROP TABLE " + tableName);
+ logger.debug("Dropped table {}", tableName);
+ } catch (SQLException e) {
+ logger.warn("Could not drop table {}: {}", tableName, e.getMessage());
+ }
+ }
+
+ // -------------------------------------------------------------------------
+ // Data-generation helpers
+ // -------------------------------------------------------------------------
+
+ // Returns four deterministic bytes derived from rowId so the binary column
+ // written at insert time can be recomputed and verified at read time.
+ private static byte[] expectedBinary(int rowId) {
+ return new byte[]{
+ (byte) (rowId & 0xFF), // low byte of the id
+ (byte) ((rowId >> 8) & 0xFF), // second byte of the id
+ (byte) ((rowId * 3) & 0xFF), // id-dependent filler byte
+ (byte) ((rowId * 7) & 0xFF) // id-dependent filler byte
+ };
+ }
+}
diff --git a/ojp-jdbc-driver/src/test/java/openjproxy/jdbc/testutil/SQLServerPrefetchCacheConnectionProvider.java b/ojp-jdbc-driver/src/test/java/openjproxy/jdbc/testutil/SQLServerPrefetchCacheConnectionProvider.java
new file mode 100644
index 000000000..9ee45b248
--- /dev/null
+++ b/ojp-jdbc-driver/src/test/java/openjproxy/jdbc/testutil/SQLServerPrefetchCacheConnectionProvider.java
@@ -0,0 +1,74 @@
+package openjproxy.jdbc.testutil;
+
+import org.jetbrains.annotations.NotNull;
+import org.junit.jupiter.api.extension.ExtensionContext;
+import org.junit.jupiter.params.provider.Arguments;
+import org.junit.jupiter.params.provider.ArgumentsProvider;
+
+import java.util.stream.Stream;
+
+/**
+ * Custom {@link ArgumentsProvider} for SQL Server prefetch-cache integration tests.
+ *
+ * Provides connection details pointing to the OJP prefetch-cache server on port 10594
+ * (instead of the standard port 1059 used by {@link SQLServerConnectionProvider}).
+ * The actual SQL Server instance is still supplied by {@link SQLServerTestContainer}.
+ */
+public class SQLServerPrefetchCacheConnectionProvider implements ArgumentsProvider {
+
+ private static final String JDBC_PREFIX = "jdbc:";
+
+ /** The OJP server with the prefetch cache enabled runs on this port in CI. */
+ private static final String PREFETCH_CACHE_PORT =
+ System.getProperty("ojp.prefetch.cache.port", "10594");
+ private static final String OJP_PROXY_HOST =
+ System.getProperty("ojp.proxy.host", "localhost");
+ private static final String PREFETCH_CACHE_ADDRESS = OJP_PROXY_HOST + ":" + PREFETCH_CACHE_PORT;
+
+ /**
+ * Supplies (driverClass, ojpUrl, username, password) for the parameterized
+ * test, or an empty stream (skipping the test) when the SQL Server
+ * TestContainer is disabled.
+ */
+ @Override
+ public Stream<? extends Arguments> provideArguments(ExtensionContext context) {
+ if (!SQLServerTestContainer.isEnabled()) {
+ return Stream.empty();
+ }
+
+ ConnectionProps result = getConnectionProps();
+ return Stream.of(
+ Arguments.of(result.driverClass, result.ojpUrl, result.username, result.password)
+ );
+ }
+
+ /**
+ * Builds OJP-proxied connection properties from the running TestContainer:
+ * strips the "jdbc:" prefix, appends a default databaseName when absent,
+ * and wraps the URL with the OJP prefetch-cache proxy address.
+ */
+ @NotNull
+ private static ConnectionProps getConnectionProps() {
+ SQLServerTestContainer.getInstance(); // ensure the container is started
+
+ String containerJdbcUrl = SQLServerTestContainer.getJdbcUrl();
+ String username = SQLServerTestContainer.getUsername();
+ String password = SQLServerTestContainer.getPassword();
+
+ String driverClass = "org.openjproxy.jdbc.Driver";
+ String urlWithoutPrefix = containerJdbcUrl.startsWith(JDBC_PREFIX)
+ ? containerJdbcUrl.substring(JDBC_PREFIX.length())
+ : containerJdbcUrl;
+
+ if (!urlWithoutPrefix.toLowerCase().contains("databasename=")) {
+ urlWithoutPrefix = urlWithoutPrefix + ";databaseName=defaultdb";
+ }
+
+ String ojpUrl = JDBC_PREFIX + "ojp[" + PREFETCH_CACHE_ADDRESS + "]_" + urlWithoutPrefix;
+ return new ConnectionProps(username, password, driverClass, ojpUrl);
+ }
+
+ /** Simple immutable holder for the four connection arguments. */
+ private static class ConnectionProps {
+ private final String username;
+ private final String password;
+ private final String driverClass;
+ private final String ojpUrl;
+
+ ConnectionProps(String username, String password, String driverClass, String ojpUrl) {
+ this.username = username;
+ this.password = password;
+ this.driverClass = driverClass;
+ this.ojpUrl = ojpUrl;
+ }
+ }
+}
diff --git a/ojp-jdbc-driver/src/test/resources/cockroachdb_prefetch_cache_connections_with_record_counts.csv b/ojp-jdbc-driver/src/test/resources/cockroachdb_prefetch_cache_connections_with_record_counts.csv
new file mode 100644
index 000000000..a5d2dd22b
--- /dev/null
+++ b/ojp-jdbc-driver/src/test/resources/cockroachdb_prefetch_cache_connections_with_record_counts.csv
@@ -0,0 +1,5 @@
+99,org.openjproxy.jdbc.Driver,jdbc:ojp[localhost:10594]_postgresql://localhost:26257/defaultdb?sslmode=disable,root,
+100,org.openjproxy.jdbc.Driver,jdbc:ojp[localhost:10594]_postgresql://localhost:26257/defaultdb?sslmode=disable,root,
+101,org.openjproxy.jdbc.Driver,jdbc:ojp[localhost:10594]_postgresql://localhost:26257/defaultdb?sslmode=disable,root,
+567,org.openjproxy.jdbc.Driver,jdbc:ojp[localhost:10594]_postgresql://localhost:26257/defaultdb?sslmode=disable,root,
+1000,org.openjproxy.jdbc.Driver,jdbc:ojp[localhost:10594]_postgresql://localhost:26257/defaultdb?sslmode=disable,root,
diff --git a/ojp-jdbc-driver/src/test/resources/db2_prefetch_cache_connections_with_record_counts.csv b/ojp-jdbc-driver/src/test/resources/db2_prefetch_cache_connections_with_record_counts.csv
new file mode 100644
index 000000000..5c9d3c61c
--- /dev/null
+++ b/ojp-jdbc-driver/src/test/resources/db2_prefetch_cache_connections_with_record_counts.csv
@@ -0,0 +1,5 @@
+99,org.openjproxy.jdbc.Driver,jdbc:ojp[localhost:10594]_db2://localhost:50000/testdb,db2inst1,testpass
+100,org.openjproxy.jdbc.Driver,jdbc:ojp[localhost:10594]_db2://localhost:50000/testdb,db2inst1,testpass
+101,org.openjproxy.jdbc.Driver,jdbc:ojp[localhost:10594]_db2://localhost:50000/testdb,db2inst1,testpass
+567,org.openjproxy.jdbc.Driver,jdbc:ojp[localhost:10594]_db2://localhost:50000/testdb,db2inst1,testpass
+1000,org.openjproxy.jdbc.Driver,jdbc:ojp[localhost:10594]_db2://localhost:50000/testdb,db2inst1,testpass
diff --git a/ojp-jdbc-driver/src/test/resources/h2_prefetch_cache_connections_with_record_counts.csv b/ojp-jdbc-driver/src/test/resources/h2_prefetch_cache_connections_with_record_counts.csv
new file mode 100644
index 000000000..04e81001b
--- /dev/null
+++ b/ojp-jdbc-driver/src/test/resources/h2_prefetch_cache_connections_with_record_counts.csv
@@ -0,0 +1,5 @@
+99,org.openjproxy.jdbc.Driver,jdbc:ojp[localhost:10594]_h2:~/test,sa,
+100,org.openjproxy.jdbc.Driver,jdbc:ojp[localhost:10594]_h2:~/test,sa,
+101,org.openjproxy.jdbc.Driver,jdbc:ojp[localhost:10594]_h2:~/test,sa,
+567,org.openjproxy.jdbc.Driver,jdbc:ojp[localhost:10594]_h2:~/test,sa,
+1000,org.openjproxy.jdbc.Driver,jdbc:ojp[localhost:10594]_h2:~/test,sa,
diff --git a/ojp-jdbc-driver/src/test/resources/mariadb_prefetch_cache_connections_with_record_counts.csv b/ojp-jdbc-driver/src/test/resources/mariadb_prefetch_cache_connections_with_record_counts.csv
new file mode 100644
index 000000000..93d6fd48e
--- /dev/null
+++ b/ojp-jdbc-driver/src/test/resources/mariadb_prefetch_cache_connections_with_record_counts.csv
@@ -0,0 +1,5 @@
+99,org.openjproxy.jdbc.Driver,jdbc:ojp[localhost:10594]_mariadb://localhost:3307/defaultdb,testuser,testpassword
+100,org.openjproxy.jdbc.Driver,jdbc:ojp[localhost:10594]_mariadb://localhost:3307/defaultdb,testuser,testpassword
+101,org.openjproxy.jdbc.Driver,jdbc:ojp[localhost:10594]_mariadb://localhost:3307/defaultdb,testuser,testpassword
+567,org.openjproxy.jdbc.Driver,jdbc:ojp[localhost:10594]_mariadb://localhost:3307/defaultdb,testuser,testpassword
+1000,org.openjproxy.jdbc.Driver,jdbc:ojp[localhost:10594]_mariadb://localhost:3307/defaultdb,testuser,testpassword
diff --git a/ojp-jdbc-driver/src/test/resources/mysql_prefetch_cache_connections_with_record_counts.csv b/ojp-jdbc-driver/src/test/resources/mysql_prefetch_cache_connections_with_record_counts.csv
new file mode 100644
index 000000000..18d75f2fa
--- /dev/null
+++ b/ojp-jdbc-driver/src/test/resources/mysql_prefetch_cache_connections_with_record_counts.csv
@@ -0,0 +1,5 @@
+99,org.openjproxy.jdbc.Driver,jdbc:ojp[localhost:10594]_mysql://localhost:3306/defaultdb,testuser,testpassword
+100,org.openjproxy.jdbc.Driver,jdbc:ojp[localhost:10594]_mysql://localhost:3306/defaultdb,testuser,testpassword
+101,org.openjproxy.jdbc.Driver,jdbc:ojp[localhost:10594]_mysql://localhost:3306/defaultdb,testuser,testpassword
+567,org.openjproxy.jdbc.Driver,jdbc:ojp[localhost:10594]_mysql://localhost:3306/defaultdb,testuser,testpassword
+1000,org.openjproxy.jdbc.Driver,jdbc:ojp[localhost:10594]_mysql://localhost:3306/defaultdb,testuser,testpassword
diff --git a/ojp-jdbc-driver/src/test/resources/oracle_prefetch_cache_connections_with_record_counts.csv b/ojp-jdbc-driver/src/test/resources/oracle_prefetch_cache_connections_with_record_counts.csv
new file mode 100644
index 000000000..ffb4adc0c
--- /dev/null
+++ b/ojp-jdbc-driver/src/test/resources/oracle_prefetch_cache_connections_with_record_counts.csv
@@ -0,0 +1,5 @@
+99,org.openjproxy.jdbc.Driver,jdbc:ojp[localhost:10594]_oracle:thin:@localhost:1521/XEPDB1,testuser,testpassword
+100,org.openjproxy.jdbc.Driver,jdbc:ojp[localhost:10594]_oracle:thin:@localhost:1521/XEPDB1,testuser,testpassword
+101,org.openjproxy.jdbc.Driver,jdbc:ojp[localhost:10594]_oracle:thin:@localhost:1521/XEPDB1,testuser,testpassword
+567,org.openjproxy.jdbc.Driver,jdbc:ojp[localhost:10594]_oracle:thin:@localhost:1521/XEPDB1,testuser,testpassword
+1000,org.openjproxy.jdbc.Driver,jdbc:ojp[localhost:10594]_oracle:thin:@localhost:1521/XEPDB1,testuser,testpassword
From d7cf13c02658f8e61587eb6394a453b21c736446 Mon Sep 17 00:00:00 2001
From: "copilot-swe-agent[bot]" <198982749+Copilot@users.noreply.github.com>
Date: Wed, 18 Mar 2026 22:37:11 +0000
Subject: [PATCH 19/22] fix(test): use getBoolean() for boolean-typed columns
in pagination cache tests
MariaDB TINYINT(1) and SQL Server BIT are both mapped to Java Boolean by
their JDBC drivers. Calling rs.getInt() on these columns threw
NumberFormatException("false"). Changed both tests to use getBoolean()
with `id % 2 == 0`, consistent with PostgreSQL/H2/CockroachDB tests.
Co-authored-by: rrobetti <7221783+rrobetti@users.noreply.github.com>
---
.../jdbc/MySQLMariaDBPaginationCacheIntegrationTest.java | 4 ++--
.../jdbc/SQLServerPaginationCacheIntegrationTest.java | 4 ++--
2 files changed, 4 insertions(+), 4 deletions(-)
diff --git a/ojp-jdbc-driver/src/test/java/openjproxy/jdbc/MySQLMariaDBPaginationCacheIntegrationTest.java b/ojp-jdbc-driver/src/test/java/openjproxy/jdbc/MySQLMariaDBPaginationCacheIntegrationTest.java
index 938eae010..95e2edbdc 100644
--- a/ojp-jdbc-driver/src/test/java/openjproxy/jdbc/MySQLMariaDBPaginationCacheIntegrationTest.java
+++ b/ojp-jdbc-driver/src/test/java/openjproxy/jdbc/MySQLMariaDBPaginationCacheIntegrationTest.java
@@ -136,7 +136,7 @@ private void runPaginationTest(int recordCount, String driverClass,
* name VARCHAR(100) NOT NULL – "record_{id}"
* val_int INT NOT NULL – id × 10
* val_bigint BIGINT NOT NULL – id × 1,000,000
- * val_bool TINYINT(1) NOT NULL – 1 when id is even, else 0
+ * val_bool TINYINT(1) NOT NULL – true (1) when id is even, else false (0)
* val_text TEXT NOT NULL – "text_value_for_row_{id}"
* val_binary VARBINARY(32) NOT NULL – four deterministic bytes derived from id
*
@@ -209,7 +209,7 @@ private static int assertPage(Connection conn, String tableName,
"val_int mismatch for id=" + id);
assertEquals(id * 1_000_000L, rs.getLong("val_bigint"),
"val_bigint mismatch for id=" + id);
- assertEquals(id % 2 == 0 ? 1 : 0, rs.getInt("val_bool"),
+ assertEquals(id % 2 == 0, rs.getBoolean("val_bool"),
"val_bool mismatch for id=" + id);
assertEquals("text_value_for_row_" + id, rs.getString("val_text"),
"val_text mismatch for id=" + id);
diff --git a/ojp-jdbc-driver/src/test/java/openjproxy/jdbc/SQLServerPaginationCacheIntegrationTest.java b/ojp-jdbc-driver/src/test/java/openjproxy/jdbc/SQLServerPaginationCacheIntegrationTest.java
index 58667b5fd..cd62a0002 100644
--- a/ojp-jdbc-driver/src/test/java/openjproxy/jdbc/SQLServerPaginationCacheIntegrationTest.java
+++ b/ojp-jdbc-driver/src/test/java/openjproxy/jdbc/SQLServerPaginationCacheIntegrationTest.java
@@ -151,7 +151,7 @@ private static String[] getConnectionArgs() {
* name NVARCHAR(100) NOT NULL – "record_{id}"
* val_int INT NOT NULL – id × 10
* val_bigint BIGINT NOT NULL – id × 1,000,000
- * val_bool BIT NOT NULL – 1 when id is even, else 0
+ * val_bool BIT NOT NULL – true (1) when id is even, else false (0)
* val_text NVARCHAR(255) NOT NULL – "text_value_for_row_{id}"
* val_binary VARBINARY(32) NOT NULL – four deterministic bytes derived from id
*
@@ -225,7 +225,7 @@ private static int assertPage(Connection conn, String tableName,
"val_int mismatch for id=" + id);
assertEquals(id * 1_000_000L, rs.getLong("val_bigint"),
"val_bigint mismatch for id=" + id);
- assertEquals(id % 2 == 0 ? 1 : 0, rs.getInt("val_bool"),
+ assertEquals(id % 2 == 0, rs.getBoolean("val_bool"),
"val_bool mismatch for id=" + id);
assertEquals("text_value_for_row_" + id, rs.getString("val_text"),
"val_text mismatch for id=" + id);
From 4ba935404b6377e2f2aae062d744ddadb6cb772b Mon Sep 17 00:00:00 2001
From: "copilot-swe-agent[bot]" <198982749+Copilot@users.noreply.github.com>
Date: Thu, 19 Mar 2026 07:57:25 +0000
Subject: [PATCH 20/22] fix: forward ojp.nextPageCache.* client properties to
server; update docs and tests for client-side config
Co-authored-by: rrobetti <7221783+rrobetti@users.noreply.github.com>
---
.../configuration/ojp-server-configuration.md | 7 ++++-
.../part2-chapter6-server-configuration.md | 14 +++++++--
.../features/NEXT_PAGE_PREFETCH_CACHE.md | 31 +++++++++++++------
.../jdbc/DatasourcePropertiesLoader.java | 8 +++--
...roachDBPaginationCacheIntegrationTest.java | 9 ++++--
.../Db2PaginationCacheIntegrationTest.java | 9 ++++--
.../H2PaginationCacheIntegrationTest.java | 9 ++++--
...MariaDBPaginationCacheIntegrationTest.java | 9 ++++--
.../OraclePaginationCacheIntegrationTest.java | 9 ++++--
...ostgresPaginationCacheIntegrationTest.java | 9 ++++--
...LServerPaginationCacheIntegrationTest.java | 9 ++++--
.../src/test/resources/ojp.properties | 4 +++
12 files changed, 97 insertions(+), 30 deletions(-)
diff --git a/documents/configuration/ojp-server-configuration.md b/documents/configuration/ojp-server-configuration.md
index fed2b3347..5017ae006 100644
--- a/documents/configuration/ojp-server-configuration.md
+++ b/documents/configuration/ojp-server-configuration.md
@@ -177,9 +177,14 @@ The cache detects SQL pagination clauses automatically (`LIMIT/OFFSET`, `OFFSET
> **Per-datasource `enabled` is a client-side setting.**
> Each datasource in the client application can independently opt in or out of the prefetch cache
-> by setting `ojp.nextPageCache.enabled=false` in its `ojp.properties`:
+> by setting `ojp.nextPageCache.enabled` in its `ojp.properties`. The value is sent to the server
+> at connection time; when absent, the server's global flag applies as the fallback.
> ```properties
> # ojp.properties — client application
+>
+> # Default datasource: explicitly enable the cache
+> ojp.nextPageCache.enabled=true
+>
> # Disable the prefetch cache for the "random-access" datasource
> random-access.ojp.nextPageCache.enabled=false
> ```
diff --git a/documents/ebook/part2-chapter6-server-configuration.md b/documents/ebook/part2-chapter6-server-configuration.md
index 868807d2c..e98f5e8b0 100644
--- a/documents/ebook/part2-chapter6-server-configuration.md
+++ b/documents/ebook/part2-chapter6-server-configuration.md
@@ -447,7 +447,7 @@ java -Duser.timezone=UTC \
-jar ojp-server.jar
```
-**All prefetch cache settings:**
+**Server-side settings (`ojp-server.properties` / JVM system properties):**
| Property | Default | Description |
|---|---|---|
@@ -458,15 +458,23 @@ java -Duser.timezone=UTC \
| `ojp.server.nextPageCache.cleanupIntervalSeconds` | `60` | Interval (seconds) between background eviction sweeps |
| `ojp.server.nextPageCache.datasource.<datasource>.enabled` | *(unset)* | Per-datasource override of the global enabled flag |
 * This test is disabled by default and is activated by passing
* {@code -DenableCockroachDBPrefetchCacheTests=true} to the Maven Surefire plugin in CI.
- * The target OJP server must already be running on port 10594 with the prefetch cache enabled.
+ * The target OJP server must already be running on port 10594 with
+ * {@code ojp.server.nextPageCache.enabled=true}. The client-side per-datasource flag
+ * {@code ojp.nextPageCache.enabled=true} is set in {@code ojp.properties} and is sent to
+ * the server at connection time to explicitly opt this datasource into the cache.
*/
class CockroachDBPaginationCacheIntegrationTest {
diff --git a/ojp-jdbc-driver/src/test/java/openjproxy/jdbc/Db2PaginationCacheIntegrationTest.java b/ojp-jdbc-driver/src/test/java/openjproxy/jdbc/Db2PaginationCacheIntegrationTest.java
index 38c4f248d..e88472a52 100644
--- a/ojp-jdbc-driver/src/test/java/openjproxy/jdbc/Db2PaginationCacheIntegrationTest.java
+++ b/ojp-jdbc-driver/src/test/java/openjproxy/jdbc/Db2PaginationCacheIntegrationTest.java
@@ -30,7 +30,9 @@
* including a {@code BLOB} column.
* This test is disabled by default and is activated by passing
* {@code -DenableDb2PrefetchCacheTests=true} to the Maven Surefire plugin in CI.
- * The target OJP server must already be running on port 10594 with the prefetch cache enabled.
+ * The target OJP server must already be running on port 10594 with
+ * {@code ojp.server.nextPageCache.enabled=true}. The client-side per-datasource flag
+ * {@code ojp.nextPageCache.enabled=true} is set in {@code ojp.properties} and is sent to
+ * the server at connection time to explicitly opt this datasource into the cache.
*/
class Db2PaginationCacheIntegrationTest {
diff --git a/ojp-jdbc-driver/src/test/java/openjproxy/jdbc/H2PaginationCacheIntegrationTest.java b/ojp-jdbc-driver/src/test/java/openjproxy/jdbc/H2PaginationCacheIntegrationTest.java
index f8b6c79f2..4ea88b3b6 100644
--- a/ojp-jdbc-driver/src/test/java/openjproxy/jdbc/H2PaginationCacheIntegrationTest.java
+++ b/ojp-jdbc-driver/src/test/java/openjproxy/jdbc/H2PaginationCacheIntegrationTest.java
@@ -29,7 +29,9 @@
* This test is disabled by default and is activated by passing
* {@code -DenableH2PrefetchCacheTests=true} to the Maven Surefire plugin in CI.
- * The target OJP server must already be running on port 10594 with the prefetch cache enabled.
+ * The target OJP server must already be running on port 10594 with
+ * {@code ojp.server.nextPageCache.enabled=true}. The client-side per-datasource flag
+ * {@code ojp.nextPageCache.enabled=true} is set in {@code ojp.properties} and is sent to
+ * the server at connection time to explicitly opt this datasource into the cache.
*/
class H2PaginationCacheIntegrationTest {
diff --git a/ojp-jdbc-driver/src/test/java/openjproxy/jdbc/MySQLMariaDBPaginationCacheIntegrationTest.java b/ojp-jdbc-driver/src/test/java/openjproxy/jdbc/MySQLMariaDBPaginationCacheIntegrationTest.java
index 95e2edbdc..5f8d9acb5 100644
--- a/ojp-jdbc-driver/src/test/java/openjproxy/jdbc/MySQLMariaDBPaginationCacheIntegrationTest.java
+++ b/ojp-jdbc-driver/src/test/java/openjproxy/jdbc/MySQLMariaDBPaginationCacheIntegrationTest.java
@@ -27,7 +27,9 @@
* This test is disabled by default and is activated by passing
* {@code -DenableMySQLPrefetchCacheTests=true} to the Maven Surefire plugin in CI.
- * The target OJP server must already be running on port 10594 with the prefetch cache enabled.
+ * The target OJP server must already be running on port 10594 with
+ * {@code ojp.server.nextPageCache.enabled=true}. The client-side per-datasource flag
+ * {@code ojp.nextPageCache.enabled=true} is set in {@code ojp.properties} and is sent to
+ * the server at connection time to explicitly opt this datasource into the cache.
*/
class MySQLMariaDBPaginationCacheIntegrationTest {
diff --git a/ojp-jdbc-driver/src/test/java/openjproxy/jdbc/OraclePaginationCacheIntegrationTest.java b/ojp-jdbc-driver/src/test/java/openjproxy/jdbc/OraclePaginationCacheIntegrationTest.java
index 31ce5cf9e..d4550380e 100644
--- a/ojp-jdbc-driver/src/test/java/openjproxy/jdbc/OraclePaginationCacheIntegrationTest.java
+++ b/ojp-jdbc-driver/src/test/java/openjproxy/jdbc/OraclePaginationCacheIntegrationTest.java
@@ -30,7 +30,9 @@
* This test is disabled by default and is activated by passing
* {@code -DenableOraclePrefetchCacheTests=true} to the Maven Surefire plugin in CI.
- * The target OJP server must already be running on port 10594 with the prefetch cache enabled.
+ * The target OJP server must already be running on port 10594 with
+ * {@code ojp.server.nextPageCache.enabled=true}. The client-side per-datasource flag
+ * {@code ojp.nextPageCache.enabled=true} is set in {@code ojp.properties} and is sent to
+ * the server at connection time to explicitly opt this datasource into the cache.
*
* Oracle type notes:
* This test is disabled by default and is activated by passing
* {@code -DenablePostgresPrefetchCacheTests=true} to the Maven Surefire plugin in CI.
- * The target OJP server must already be running on port 10594 with the prefetch cache enabled.
+ * The target OJP server must already be running on port 10594 with
+ * {@code ojp.server.nextPageCache.enabled=true}. The client-side per-datasource flag
+ * {@code ojp.nextPageCache.enabled=true} is set in {@code ojp.properties} and is sent to
+ * the server at connection time to explicitly opt this datasource into the cache.
*/
class PostgresPaginationCacheIntegrationTest {
diff --git a/ojp-jdbc-driver/src/test/java/openjproxy/jdbc/SQLServerPaginationCacheIntegrationTest.java b/ojp-jdbc-driver/src/test/java/openjproxy/jdbc/SQLServerPaginationCacheIntegrationTest.java
index cd62a0002..50a0bab87 100644
--- a/ojp-jdbc-driver/src/test/java/openjproxy/jdbc/SQLServerPaginationCacheIntegrationTest.java
+++ b/ojp-jdbc-driver/src/test/java/openjproxy/jdbc/SQLServerPaginationCacheIntegrationTest.java
@@ -35,7 +35,9 @@
* This test is disabled by default and is activated by passing
* {@code -DenableSqlServerPrefetchCacheTests=true} to the Maven Surefire plugin in CI.
- * The target OJP server must already be running on port 10594 with the prefetch cache enabled.
+ * The target OJP server must already be running on port 10594 with
+ * {@code ojp.server.nextPageCache.enabled=true}. The client-side per-datasource flag
+ * {@code ojp.nextPageCache.enabled=true} is set in {@code ojp.properties} and is sent to
+ * the server at connection time to explicitly opt this datasource into the cache.
*
* SQL Server type notes:
* This test is disabled by default and is activated by passing
* {@code -DenableCockroachDBPrefetchCacheTests=true} to the Maven Surefire plugin in CI.
- * The target OJP server must already be running on port 10594 with
+ * The target OJP server must already be running on the default port (1059) with
* {@code ojp.server.nextPageCache.enabled=true}. The client-side per-datasource flag
* {@code ojp.nextPageCache.enabled=true} is set in {@code ojp.properties} and is sent to
* the server at connection time to explicitly opt this datasource into the cache.
diff --git a/ojp-jdbc-driver/src/test/java/openjproxy/jdbc/Db2PaginationCacheIntegrationTest.java b/ojp-jdbc-driver/src/test/java/openjproxy/jdbc/Db2PaginationCacheIntegrationTest.java
index e88472a52..6772df9b9 100644
--- a/ojp-jdbc-driver/src/test/java/openjproxy/jdbc/Db2PaginationCacheIntegrationTest.java
+++ b/ojp-jdbc-driver/src/test/java/openjproxy/jdbc/Db2PaginationCacheIntegrationTest.java
@@ -30,7 +30,7 @@
* including a {@code BLOB} column. This test is disabled by default and is activated by passing
* {@code -DenableDb2PrefetchCacheTests=true} to the Maven Surefire plugin in CI.
- * The target OJP server must already be running on port 10594 with
+ * The target OJP server must already be running on the default port (1059) with
* {@code ojp.server.nextPageCache.enabled=true}. The client-side per-datasource flag
* {@code ojp.nextPageCache.enabled=true} is set in {@code ojp.properties} and is sent to
* the server at connection time to explicitly opt this datasource into the cache.
diff --git a/ojp-jdbc-driver/src/test/java/openjproxy/jdbc/H2PaginationCacheIntegrationTest.java b/ojp-jdbc-driver/src/test/java/openjproxy/jdbc/H2PaginationCacheIntegrationTest.java
index 4ea88b3b6..5ad694b4b 100644
--- a/ojp-jdbc-driver/src/test/java/openjproxy/jdbc/H2PaginationCacheIntegrationTest.java
+++ b/ojp-jdbc-driver/src/test/java/openjproxy/jdbc/H2PaginationCacheIntegrationTest.java
@@ -29,7 +29,7 @@
* This test is disabled by default and is activated by passing
* {@code -DenableH2PrefetchCacheTests=true} to the Maven Surefire plugin in CI.
- * The target OJP server must already be running on port 10594 with
+ * The target OJP server must already be running on the default port (1059) with
* {@code ojp.server.nextPageCache.enabled=true}. The client-side per-datasource flag
* {@code ojp.nextPageCache.enabled=true} is set in {@code ojp.properties} and is sent to
* the server at connection time to explicitly opt this datasource into the cache.
diff --git a/ojp-jdbc-driver/src/test/java/openjproxy/jdbc/MySQLMariaDBPaginationCacheIntegrationTest.java b/ojp-jdbc-driver/src/test/java/openjproxy/jdbc/MySQLMariaDBPaginationCacheIntegrationTest.java
index 5f8d9acb5..745cd39b6 100644
--- a/ojp-jdbc-driver/src/test/java/openjproxy/jdbc/MySQLMariaDBPaginationCacheIntegrationTest.java
+++ b/ojp-jdbc-driver/src/test/java/openjproxy/jdbc/MySQLMariaDBPaginationCacheIntegrationTest.java
@@ -27,7 +27,7 @@
* This test is disabled by default and is activated by passing
* {@code -DenableMySQLPrefetchCacheTests=true} to the Maven Surefire plugin in CI.
- * The target OJP server must already be running on port 10594 with
+ * The target OJP server must already be running on the default port (1059) with
* {@code ojp.server.nextPageCache.enabled=true}. The client-side per-datasource flag
* {@code ojp.nextPageCache.enabled=true} is set in {@code ojp.properties} and is sent to
* the server at connection time to explicitly opt this datasource into the cache.
diff --git a/ojp-jdbc-driver/src/test/java/openjproxy/jdbc/OraclePaginationCacheIntegrationTest.java b/ojp-jdbc-driver/src/test/java/openjproxy/jdbc/OraclePaginationCacheIntegrationTest.java
index d4550380e..3a989cad9 100644
--- a/ojp-jdbc-driver/src/test/java/openjproxy/jdbc/OraclePaginationCacheIntegrationTest.java
+++ b/ojp-jdbc-driver/src/test/java/openjproxy/jdbc/OraclePaginationCacheIntegrationTest.java
@@ -30,7 +30,7 @@
* This test is disabled by default and is activated by passing
* {@code -DenableOraclePrefetchCacheTests=true} to the Maven Surefire plugin in CI.
- * The target OJP server must already be running on port 10594 with
+ * The target OJP server must already be running on the default port (1059) with
* {@code ojp.server.nextPageCache.enabled=true}. The client-side per-datasource flag
* {@code ojp.nextPageCache.enabled=true} is set in {@code ojp.properties} and is sent to
* the server at connection time to explicitly opt this datasource into the cache.
diff --git a/ojp-jdbc-driver/src/test/java/openjproxy/jdbc/PostgresPaginationCacheIntegrationTest.java b/ojp-jdbc-driver/src/test/java/openjproxy/jdbc/PostgresPaginationCacheIntegrationTest.java
index 9271bda5f..5cbec47d1 100644
--- a/ojp-jdbc-driver/src/test/java/openjproxy/jdbc/PostgresPaginationCacheIntegrationTest.java
+++ b/ojp-jdbc-driver/src/test/java/openjproxy/jdbc/PostgresPaginationCacheIntegrationTest.java
@@ -28,7 +28,7 @@
* This test is disabled by default and is activated by passing
* {@code -DenablePostgresPrefetchCacheTests=true} to the Maven Surefire plugin in CI.
- * The target OJP server must already be running on port 10594 with
+ * The target OJP server must already be running on the default port (1059) with
* {@code ojp.server.nextPageCache.enabled=true}. The client-side per-datasource flag
* {@code ojp.nextPageCache.enabled=true} is set in {@code ojp.properties} and is sent to
* the server at connection time to explicitly opt this datasource into the cache.
@@ -71,7 +71,7 @@ static void checkTestConfiguration() {
*
* @param recordCount total rows to insert and paginate over
* @param driverClass fully-qualified OJP driver class (loaded as a side-effect)
- * @param url JDBC URL pointing at the prefetch-cache OJP server (port 10594)
+ * @param url JDBC URL pointing at the prefetch-cache OJP server (default port 1059)
* @param user database user
* @param pwd database password
*/
diff --git a/ojp-jdbc-driver/src/test/java/openjproxy/jdbc/SQLServerPaginationCacheIntegrationTest.java b/ojp-jdbc-driver/src/test/java/openjproxy/jdbc/SQLServerPaginationCacheIntegrationTest.java
index 50a0bab87..42c399b0e 100644
--- a/ojp-jdbc-driver/src/test/java/openjproxy/jdbc/SQLServerPaginationCacheIntegrationTest.java
+++ b/ojp-jdbc-driver/src/test/java/openjproxy/jdbc/SQLServerPaginationCacheIntegrationTest.java
@@ -27,7 +27,7 @@
*
* SQL Server uses the ANSI SQL {@code OFFSET m ROWS FETCH NEXT n ROWS ONLY} pagination syntax.
* The SQL Server container is managed by TestContainers; the test connects via an OJP prefetch-cache
- * server on port 10594.
+ * server on the default port (1059).
*
* The test is parameterized over several record counts (99, 100, 101, 567, 1000) to exercise
* boundary conditions around the 100-record page size. For each count the test:
@@ -35,7 +35,7 @@
* This test is disabled by default and is activated by passing
* {@code -DenableSqlServerPrefetchCacheTests=true} to the Maven Surefire plugin in CI.
- * The target OJP server must already be running on port 10594 with
+ * The target OJP server must already be running on the default port (1059) with
* {@code ojp.server.nextPageCache.enabled=true}. The client-side per-datasource flag
* {@code ojp.nextPageCache.enabled=true} is set in {@code ojp.properties} and is sent to
* the server at connection time to explicitly opt this datasource into the cache.
@@ -132,7 +132,7 @@ private static String[] getConnectionArgs() {
String username = SQLServerTestContainer.getUsername();
String password = SQLServerTestContainer.getPassword();
- String prefetchCachePort = System.getProperty("ojp.prefetch.cache.port", "10594");
+ String prefetchCachePort = System.getProperty("ojp.prefetch.cache.port", "1059");
String ojpProxyHost = System.getProperty("ojp.proxy.host", "localhost");
// strip "jdbc:" prefix and wrap with OJP proxy
diff --git a/ojp-jdbc-driver/src/test/java/openjproxy/jdbc/testutil/SQLServerPrefetchCacheConnectionProvider.java b/ojp-jdbc-driver/src/test/java/openjproxy/jdbc/testutil/SQLServerPrefetchCacheConnectionProvider.java
index 9ee45b248..18ad828b4 100644
--- a/ojp-jdbc-driver/src/test/java/openjproxy/jdbc/testutil/SQLServerPrefetchCacheConnectionProvider.java
+++ b/ojp-jdbc-driver/src/test/java/openjproxy/jdbc/testutil/SQLServerPrefetchCacheConnectionProvider.java
@@ -10,17 +10,17 @@
/**
* Custom {@link ArgumentsProvider} for SQL Server prefetch-cache integration tests.
*
- * Provides connection details pointing to the OJP prefetch-cache server on port 10594
- * (instead of the standard port 1059 used by {@link SQLServerConnectionProvider}).
+ * Provides connection details pointing to the OJP server (default port 1059) with the
+ * next-page prefetch cache enabled via the client-side property {@code ojp.nextPageCache.enabled}.
* The actual SQL Server instance is still supplied by {@link SQLServerTestContainer}.
*/
public class SQLServerPrefetchCacheConnectionProvider implements ArgumentsProvider {
private static final String JDBC_PREFIX = "jdbc:";
- /** The OJP server with the prefetch cache enabled runs on this port in CI. */
+ /** OJP server host:port used for prefetch-cache tests (defaults to standard port 1059). */
private static final String PREFETCH_CACHE_PORT =
- System.getProperty("ojp.prefetch.cache.port", "10594");
+ System.getProperty("ojp.prefetch.cache.port", "1059");
private static final String OJP_PROXY_HOST =
System.getProperty("ojp.proxy.host", "localhost");
private static final String PREFETCH_CACHE_ADDRESS = OJP_PROXY_HOST + ":" + PREFETCH_CACHE_PORT;
diff --git a/ojp-jdbc-driver/src/test/resources/cockroachdb_prefetch_cache_connections_with_record_counts.csv b/ojp-jdbc-driver/src/test/resources/cockroachdb_prefetch_cache_connections_with_record_counts.csv
index a5d2dd22b..75e108955 100644
--- a/ojp-jdbc-driver/src/test/resources/cockroachdb_prefetch_cache_connections_with_record_counts.csv
+++ b/ojp-jdbc-driver/src/test/resources/cockroachdb_prefetch_cache_connections_with_record_counts.csv
@@ -1,5 +1,5 @@
-99,org.openjproxy.jdbc.Driver,jdbc:ojp[localhost:10594]_postgresql://localhost:26257/defaultdb?sslmode=disable,root,
-100,org.openjproxy.jdbc.Driver,jdbc:ojp[localhost:10594]_postgresql://localhost:26257/defaultdb?sslmode=disable,root,
-101,org.openjproxy.jdbc.Driver,jdbc:ojp[localhost:10594]_postgresql://localhost:26257/defaultdb?sslmode=disable,root,
-567,org.openjproxy.jdbc.Driver,jdbc:ojp[localhost:10594]_postgresql://localhost:26257/defaultdb?sslmode=disable,root,
-1000,org.openjproxy.jdbc.Driver,jdbc:ojp[localhost:10594]_postgresql://localhost:26257/defaultdb?sslmode=disable,root,
+99,org.openjproxy.jdbc.Driver,jdbc:ojp[localhost:1059]_postgresql://localhost:26257/defaultdb?sslmode=disable,root,
+100,org.openjproxy.jdbc.Driver,jdbc:ojp[localhost:1059]_postgresql://localhost:26257/defaultdb?sslmode=disable,root,
+101,org.openjproxy.jdbc.Driver,jdbc:ojp[localhost:1059]_postgresql://localhost:26257/defaultdb?sslmode=disable,root,
+567,org.openjproxy.jdbc.Driver,jdbc:ojp[localhost:1059]_postgresql://localhost:26257/defaultdb?sslmode=disable,root,
+1000,org.openjproxy.jdbc.Driver,jdbc:ojp[localhost:1059]_postgresql://localhost:26257/defaultdb?sslmode=disable,root,
diff --git a/ojp-jdbc-driver/src/test/resources/db2_prefetch_cache_connections_with_record_counts.csv b/ojp-jdbc-driver/src/test/resources/db2_prefetch_cache_connections_with_record_counts.csv
index 5c9d3c61c..d21d2b7cd 100644
--- a/ojp-jdbc-driver/src/test/resources/db2_prefetch_cache_connections_with_record_counts.csv
+++ b/ojp-jdbc-driver/src/test/resources/db2_prefetch_cache_connections_with_record_counts.csv
@@ -1,5 +1,5 @@
-99,org.openjproxy.jdbc.Driver,jdbc:ojp[localhost:10594]_db2://localhost:50000/testdb,db2inst1,testpass
-100,org.openjproxy.jdbc.Driver,jdbc:ojp[localhost:10594]_db2://localhost:50000/testdb,db2inst1,testpass
-101,org.openjproxy.jdbc.Driver,jdbc:ojp[localhost:10594]_db2://localhost:50000/testdb,db2inst1,testpass
-567,org.openjproxy.jdbc.Driver,jdbc:ojp[localhost:10594]_db2://localhost:50000/testdb,db2inst1,testpass
-1000,org.openjproxy.jdbc.Driver,jdbc:ojp[localhost:10594]_db2://localhost:50000/testdb,db2inst1,testpass
+99,org.openjproxy.jdbc.Driver,jdbc:ojp[localhost:1059]_db2://localhost:50000/testdb,db2inst1,testpass
+100,org.openjproxy.jdbc.Driver,jdbc:ojp[localhost:1059]_db2://localhost:50000/testdb,db2inst1,testpass
+101,org.openjproxy.jdbc.Driver,jdbc:ojp[localhost:1059]_db2://localhost:50000/testdb,db2inst1,testpass
+567,org.openjproxy.jdbc.Driver,jdbc:ojp[localhost:1059]_db2://localhost:50000/testdb,db2inst1,testpass
+1000,org.openjproxy.jdbc.Driver,jdbc:ojp[localhost:1059]_db2://localhost:50000/testdb,db2inst1,testpass
diff --git a/ojp-jdbc-driver/src/test/resources/h2_prefetch_cache_connections_with_record_counts.csv b/ojp-jdbc-driver/src/test/resources/h2_prefetch_cache_connections_with_record_counts.csv
index 04e81001b..c653232c4 100644
--- a/ojp-jdbc-driver/src/test/resources/h2_prefetch_cache_connections_with_record_counts.csv
+++ b/ojp-jdbc-driver/src/test/resources/h2_prefetch_cache_connections_with_record_counts.csv
@@ -1,5 +1,5 @@
-99,org.openjproxy.jdbc.Driver,jdbc:ojp[localhost:10594]_h2:~/test,sa,
-100,org.openjproxy.jdbc.Driver,jdbc:ojp[localhost:10594]_h2:~/test,sa,
-101,org.openjproxy.jdbc.Driver,jdbc:ojp[localhost:10594]_h2:~/test,sa,
-567,org.openjproxy.jdbc.Driver,jdbc:ojp[localhost:10594]_h2:~/test,sa,
-1000,org.openjproxy.jdbc.Driver,jdbc:ojp[localhost:10594]_h2:~/test,sa,
+99,org.openjproxy.jdbc.Driver,jdbc:ojp[localhost:1059]_h2:~/test,sa,
+100,org.openjproxy.jdbc.Driver,jdbc:ojp[localhost:1059]_h2:~/test,sa,
+101,org.openjproxy.jdbc.Driver,jdbc:ojp[localhost:1059]_h2:~/test,sa,
+567,org.openjproxy.jdbc.Driver,jdbc:ojp[localhost:1059]_h2:~/test,sa,
+1000,org.openjproxy.jdbc.Driver,jdbc:ojp[localhost:1059]_h2:~/test,sa,
diff --git a/ojp-jdbc-driver/src/test/resources/mariadb_prefetch_cache_connections_with_record_counts.csv b/ojp-jdbc-driver/src/test/resources/mariadb_prefetch_cache_connections_with_record_counts.csv
index 93d6fd48e..f7a2f41f1 100644
--- a/ojp-jdbc-driver/src/test/resources/mariadb_prefetch_cache_connections_with_record_counts.csv
+++ b/ojp-jdbc-driver/src/test/resources/mariadb_prefetch_cache_connections_with_record_counts.csv
@@ -1,5 +1,5 @@
-99,org.openjproxy.jdbc.Driver,jdbc:ojp[localhost:10594]_mariadb://localhost:3307/defaultdb,testuser,testpassword
-100,org.openjproxy.jdbc.Driver,jdbc:ojp[localhost:10594]_mariadb://localhost:3307/defaultdb,testuser,testpassword
-101,org.openjproxy.jdbc.Driver,jdbc:ojp[localhost:10594]_mariadb://localhost:3307/defaultdb,testuser,testpassword
-567,org.openjproxy.jdbc.Driver,jdbc:ojp[localhost:10594]_mariadb://localhost:3307/defaultdb,testuser,testpassword
-1000,org.openjproxy.jdbc.Driver,jdbc:ojp[localhost:10594]_mariadb://localhost:3307/defaultdb,testuser,testpassword
+99,org.openjproxy.jdbc.Driver,jdbc:ojp[localhost:1059]_mariadb://localhost:3307/defaultdb,testuser,testpassword
+100,org.openjproxy.jdbc.Driver,jdbc:ojp[localhost:1059]_mariadb://localhost:3307/defaultdb,testuser,testpassword
+101,org.openjproxy.jdbc.Driver,jdbc:ojp[localhost:1059]_mariadb://localhost:3307/defaultdb,testuser,testpassword
+567,org.openjproxy.jdbc.Driver,jdbc:ojp[localhost:1059]_mariadb://localhost:3307/defaultdb,testuser,testpassword
+1000,org.openjproxy.jdbc.Driver,jdbc:ojp[localhost:1059]_mariadb://localhost:3307/defaultdb,testuser,testpassword
diff --git a/ojp-jdbc-driver/src/test/resources/mysql_prefetch_cache_connections_with_record_counts.csv b/ojp-jdbc-driver/src/test/resources/mysql_prefetch_cache_connections_with_record_counts.csv
index 18d75f2fa..bddb835d2 100644
--- a/ojp-jdbc-driver/src/test/resources/mysql_prefetch_cache_connections_with_record_counts.csv
+++ b/ojp-jdbc-driver/src/test/resources/mysql_prefetch_cache_connections_with_record_counts.csv
@@ -1,5 +1,5 @@
-99,org.openjproxy.jdbc.Driver,jdbc:ojp[localhost:10594]_mysql://localhost:3306/defaultdb,testuser,testpassword
-100,org.openjproxy.jdbc.Driver,jdbc:ojp[localhost:10594]_mysql://localhost:3306/defaultdb,testuser,testpassword
-101,org.openjproxy.jdbc.Driver,jdbc:ojp[localhost:10594]_mysql://localhost:3306/defaultdb,testuser,testpassword
-567,org.openjproxy.jdbc.Driver,jdbc:ojp[localhost:10594]_mysql://localhost:3306/defaultdb,testuser,testpassword
-1000,org.openjproxy.jdbc.Driver,jdbc:ojp[localhost:10594]_mysql://localhost:3306/defaultdb,testuser,testpassword
+99,org.openjproxy.jdbc.Driver,jdbc:ojp[localhost:1059]_mysql://localhost:3306/defaultdb,testuser,testpassword
+100,org.openjproxy.jdbc.Driver,jdbc:ojp[localhost:1059]_mysql://localhost:3306/defaultdb,testuser,testpassword
+101,org.openjproxy.jdbc.Driver,jdbc:ojp[localhost:1059]_mysql://localhost:3306/defaultdb,testuser,testpassword
+567,org.openjproxy.jdbc.Driver,jdbc:ojp[localhost:1059]_mysql://localhost:3306/defaultdb,testuser,testpassword
+1000,org.openjproxy.jdbc.Driver,jdbc:ojp[localhost:1059]_mysql://localhost:3306/defaultdb,testuser,testpassword
diff --git a/ojp-jdbc-driver/src/test/resources/oracle_prefetch_cache_connections_with_record_counts.csv b/ojp-jdbc-driver/src/test/resources/oracle_prefetch_cache_connections_with_record_counts.csv
index ffb4adc0c..757148ccb 100644
--- a/ojp-jdbc-driver/src/test/resources/oracle_prefetch_cache_connections_with_record_counts.csv
+++ b/ojp-jdbc-driver/src/test/resources/oracle_prefetch_cache_connections_with_record_counts.csv
@@ -1,5 +1,5 @@
-99,org.openjproxy.jdbc.Driver,jdbc:ojp[localhost:10594]_oracle:thin:@localhost:1521/XEPDB1,testuser,testpassword
-100,org.openjproxy.jdbc.Driver,jdbc:ojp[localhost:10594]_oracle:thin:@localhost:1521/XEPDB1,testuser,testpassword
-101,org.openjproxy.jdbc.Driver,jdbc:ojp[localhost:10594]_oracle:thin:@localhost:1521/XEPDB1,testuser,testpassword
-567,org.openjproxy.jdbc.Driver,jdbc:ojp[localhost:10594]_oracle:thin:@localhost:1521/XEPDB1,testuser,testpassword
-1000,org.openjproxy.jdbc.Driver,jdbc:ojp[localhost:10594]_oracle:thin:@localhost:1521/XEPDB1,testuser,testpassword
+99,org.openjproxy.jdbc.Driver,jdbc:ojp[localhost:1059]_oracle:thin:@localhost:1521/XEPDB1,testuser,testpassword
+100,org.openjproxy.jdbc.Driver,jdbc:ojp[localhost:1059]_oracle:thin:@localhost:1521/XEPDB1,testuser,testpassword
+101,org.openjproxy.jdbc.Driver,jdbc:ojp[localhost:1059]_oracle:thin:@localhost:1521/XEPDB1,testuser,testpassword
+567,org.openjproxy.jdbc.Driver,jdbc:ojp[localhost:1059]_oracle:thin:@localhost:1521/XEPDB1,testuser,testpassword
+1000,org.openjproxy.jdbc.Driver,jdbc:ojp[localhost:1059]_oracle:thin:@localhost:1521/XEPDB1,testuser,testpassword
diff --git a/ojp-jdbc-driver/src/test/resources/postgres_prefetch_cache_connections_with_record_counts.csv b/ojp-jdbc-driver/src/test/resources/postgres_prefetch_cache_connections_with_record_counts.csv
index bd7d1f891..a06451d49 100644
--- a/ojp-jdbc-driver/src/test/resources/postgres_prefetch_cache_connections_with_record_counts.csv
+++ b/ojp-jdbc-driver/src/test/resources/postgres_prefetch_cache_connections_with_record_counts.csv
@@ -1,5 +1,5 @@
-99,org.openjproxy.jdbc.Driver,jdbc:ojp[localhost:10594]_postgresql://localhost:5432/defaultdb,testuser,testpassword
-100,org.openjproxy.jdbc.Driver,jdbc:ojp[localhost:10594]_postgresql://localhost:5432/defaultdb,testuser,testpassword
-101,org.openjproxy.jdbc.Driver,jdbc:ojp[localhost:10594]_postgresql://localhost:5432/defaultdb,testuser,testpassword
-567,org.openjproxy.jdbc.Driver,jdbc:ojp[localhost:10594]_postgresql://localhost:5432/defaultdb,testuser,testpassword
-1000,org.openjproxy.jdbc.Driver,jdbc:ojp[localhost:10594]_postgresql://localhost:5432/defaultdb,testuser,testpassword
+99,org.openjproxy.jdbc.Driver,jdbc:ojp[localhost:1059]_postgresql://localhost:5432/defaultdb,testuser,testpassword
+100,org.openjproxy.jdbc.Driver,jdbc:ojp[localhost:1059]_postgresql://localhost:5432/defaultdb,testuser,testpassword
+101,org.openjproxy.jdbc.Driver,jdbc:ojp[localhost:1059]_postgresql://localhost:5432/defaultdb,testuser,testpassword
+567,org.openjproxy.jdbc.Driver,jdbc:ojp[localhost:1059]_postgresql://localhost:5432/defaultdb,testuser,testpassword
+1000,org.openjproxy.jdbc.Driver,jdbc:ojp[localhost:1059]_postgresql://localhost:5432/defaultdb,testuser,testpassword
From c0da110302ab3803e76365e0aa38355b4c86eab1 Mon Sep 17 00:00:00 2001
From: "copilot-swe-agent[bot]" <198982749+Copilot@users.noreply.github.com>
Date: Thu, 19 Mar 2026 09:51:32 +0000
Subject: [PATCH 22/22] docs: clarify two-tier config model for next-page
prefetch cache across all three docs
Co-authored-by: rrobetti <7221783+rrobetti@users.noreply.github.com>
---
.../configuration/ojp-server-configuration.md | 5 ++-
.../part2-chapter6-server-configuration.md | 33 +++++++++++--------
.../features/NEXT_PAGE_PREFETCH_CACHE.md | 13 ++++++--
3 files changed, 34 insertions(+), 17 deletions(-)
diff --git a/documents/configuration/ojp-server-configuration.md b/documents/configuration/ojp-server-configuration.md
index 5017ae006..bde1b77cc 100644
--- a/documents/configuration/ojp-server-configuration.md
+++ b/documents/configuration/ojp-server-configuration.md
@@ -164,7 +164,10 @@ For full integration examples including Docker Compose setups, see the **[Teleme
The prefetch cache transparently pre-executes the **next page query** in the background while the current page is being sent to the client. When the client requests the next page, the rows are served from memory instead of hitting the database again, significantly reducing perceived latency for paginated result sets.
-The cache detects SQL pagination clauses automatically (`LIMIT/OFFSET`, `OFFSET … FETCH`, `FETCH FIRST … ROWS ONLY`, MySQL `LIMIT m, n`, and standalone `LIMIT n`). No client changes are needed — the feature is entirely transparent.
+The cache detects SQL pagination clauses automatically (`LIMIT/OFFSET`, `OFFSET … FETCH`, `FETCH FIRST … ROWS ONLY`, MySQL `LIMIT m, n`, and standalone `LIMIT n`).
+
+> **Two-tier configuration model:**
+> The cache uses a two-tier configuration model. The **server administrator** enables the feature globally and tunes its resource limits (TTL, max entries, timeouts). Each **client application** then controls, per datasource, whether that datasource uses the cache — without requiring a server restart. See the client-side settings below.
| Property | Environment Variable | Type | Default | Description | Since |
|---|---|---|---|---|---|
diff --git a/documents/ebook/part2-chapter6-server-configuration.md b/documents/ebook/part2-chapter6-server-configuration.md
index e98f5e8b0..8b63ce977 100644
--- a/documents/ebook/part2-chapter6-server-configuration.md
+++ b/documents/ebook/part2-chapter6-server-configuration.md
@@ -439,7 +439,12 @@ The cache key combines the datasource identifier and the normalised SQL text, so
### Configuration
-The prefetch cache is **disabled by default**. Enable it with a single property:
+The prefetch cache uses a **two-tier configuration model**:
+
+- **Server administrator** enables the global cache infrastructure and tunes resource limits (TTL, max entries, timeouts) in `ojp-server.properties` or as JVM system properties.
+- **Client application** controls, per datasource, whether that datasource uses the cache by setting `ojp.nextPageCache.enabled` in its `ojp.properties` — without requiring a server restart.
+
+**Step 1 — Server administrator: enable the infrastructure**
```bash
java -Duser.timezone=UTC \
@@ -447,6 +452,18 @@ java -Duser.timezone=UTC \
-jar ojp-server.jar
```
+**Step 2 — Client application: opt in per datasource** (`ojp.properties`)
+
+```properties
+# Default datasource — explicitly opt in
+ojp.nextPageCache.enabled=true
+
+# "random-access" datasource — opt out even though server has the cache enabled
+random-access.ojp.nextPageCache.enabled=false
+```
+
+When a datasource does not set `ojp.nextPageCache.enabled`, the server's global `ojp.server.nextPageCache.enabled` value is used as the fallback.
+
**Server-side settings (`ojp-server.properties` / JVM system properties):**
| Property | Default | Description |
@@ -466,19 +483,7 @@ java -Duser.timezone=UTC \
### Per-Datasource Cache Control
-The per-datasource `enabled` flag is a **client-side** connection property. Each datasource in the client application can independently opt in or out of the prefetch cache by setting `ojp.nextPageCache.enabled` in its `ojp.properties` file — no server restart needed:
-
-```properties
-# ojp.properties — client application
-
-# Default datasource: explicitly enable the cache
-ojp.nextPageCache.enabled=true
-
-# "random-access" datasource: disable the prefetch cache for random-access workloads
-random-access.ojp.nextPageCache.enabled=false
-```
-
-**Per-datasource wait timeout (different DB response times):**
+While each client datasource controls its `enabled` flag (shown in the "Configuration" section above), the server administrator can also tune the prefetch wait timeout on a per-datasource basis — useful when different databases have significantly different response times:
```bash
java -Duser.timezone=UTC \
diff --git a/documents/features/NEXT_PAGE_PREFETCH_CACHE.md b/documents/features/NEXT_PAGE_PREFETCH_CACHE.md
index b926a9f70..dab55eaf5 100644
--- a/documents/features/NEXT_PAGE_PREFETCH_CACHE.md
+++ b/documents/features/NEXT_PAGE_PREFETCH_CACHE.md
@@ -124,14 +124,23 @@ ojp.server.nextPageCache.datasource.oltp.prefetchWaitTimeoutMs=1000
## Quick Start
-**Enable with defaults:**
+**Step 1 — Server administrator: enable the cache infrastructure**
```bash
java -Duser.timezone=UTC \
-Dojp.server.nextPageCache.enabled=true \
-jar ojp-server.jar
```
-**Tuned for a reporting workload:**
+**Step 2 — Client application: opt in per datasource** (`ojp.properties`)
+```properties
+# Default datasource: enable the prefetch cache
+ojp.nextPageCache.enabled=true
+
+# "olap" datasource: disable cache for random-access workloads
+olap.ojp.nextPageCache.enabled=false
+```
+
+**Tuned server settings for a reporting workload:**
```bash
java -Duser.timezone=UTC \
-Dojp.server.nextPageCache.enabled=true \
diff --git a/ojp-jdbc-driver/src/test/java/openjproxy/jdbc/PostgresPaginationCacheIntegrationTest.java b/ojp-jdbc-driver/src/test/java/openjproxy/jdbc/PostgresPaginationCacheIntegrationTest.java
index 0e85469c6..9271bda5f 100644
--- a/ojp-jdbc-driver/src/test/java/openjproxy/jdbc/PostgresPaginationCacheIntegrationTest.java
+++ b/ojp-jdbc-driver/src/test/java/openjproxy/jdbc/PostgresPaginationCacheIntegrationTest.java
@@ -28,7 +28,9 @@
*
diff --git a/ojp-jdbc-driver/src/test/resources/ojp.properties b/ojp-jdbc-driver/src/test/resources/ojp.properties
index 0330b4734..bacc62b5b 100644
--- a/ojp-jdbc-driver/src/test/resources/ojp.properties
+++ b/ojp-jdbc-driver/src/test/resources/ojp.properties
@@ -5,6 +5,10 @@ ojp.connection.pool.idleTimeout=2000
ojp.connection.pool.maxLifetime=1200000
ojp.connection.pool.connectionTimeout=20000
+# Enable the next-page prefetch cache for this client (client-side per-datasource setting).
+# The server must also have ojp.server.nextPageCache.enabled=true for this to take effect.
+ojp.nextPageCache.enabled=true
+
# Multinode-specific configuration for non-XA connections
multinode.ojp.connection.pool.maximumPoolSize=22
multinode.ojp.connection.pool.minimumIdle=20
From 3a930568021152eef5504721758b45508e8cc4cf Mon Sep 17 00:00:00 2001
From: "copilot-swe-agent[bot]" <198982749+Copilot@users.noreply.github.com>
Date: Thu, 19 Mar 2026 09:25:18 +0000
Subject: [PATCH 21/22] refactor(ci): merge prefetch-cache server into main OJP
server; update CSV ports and test docs
Co-authored-by: rrobetti <7221783+rrobetti@users.noreply.github.com>
---
.github/workflows/main.yml | 98 ++-----------------
...roachDBPaginationCacheIntegrationTest.java | 4 +-
.../Db2PaginationCacheIntegrationTest.java | 4 +-
.../H2PaginationCacheIntegrationTest.java | 4 +-
...MariaDBPaginationCacheIntegrationTest.java | 4 +-
.../OraclePaginationCacheIntegrationTest.java | 4 +-
...ostgresPaginationCacheIntegrationTest.java | 6 +-
...LServerPaginationCacheIntegrationTest.java | 8 +-
...ServerPrefetchCacheConnectionProvider.java | 8 +-
...h_cache_connections_with_record_counts.csv | 10 +-
...h_cache_connections_with_record_counts.csv | 10 +-
...h_cache_connections_with_record_counts.csv | 10 +-
...h_cache_connections_with_record_counts.csv | 10 +-
...h_cache_connections_with_record_counts.csv | 10 +-
...h_cache_connections_with_record_counts.csv | 10 +-
...h_cache_connections_with_record_counts.csv | 10 +-
16 files changed, 64 insertions(+), 146 deletions(-)
diff --git a/.github/workflows/main.yml b/.github/workflows/main.yml
index 574333ae2..8243449ba 100644
--- a/.github/workflows/main.yml
+++ b/.github/workflows/main.yml
@@ -98,16 +98,9 @@ jobs:
run: |
docker run -d --name ojp-server \
--network host \
- -e JAVA_TOOL_OPTIONS="-Dojp.server.slowQuerySegregation.enabled=true" \
+ -e JAVA_TOOL_OPTIONS="-Dojp.server.slowQuerySegregation.enabled=true -Dojp.server.nextPageCache.enabled=true -Dojp.server.nextPageCache.ttlSeconds=60 -Dojp.server.nextPageCache.prefetchWaitTimeoutMs=5000" \
rrobetti/ojp:0.4.1-SNAPSHOT
- # Pagination-cache integration tests run against this server (port 10594)
- - name: Start OJP Server container (prefetch cache on port 10594)
- run: |
- docker run -d --name ojp-server-prefetch-cache \
- --network host \
- -e JAVA_TOOL_OPTIONS="-Dojp.server.port=10594 -Dojp.prometheus.port=9164 -Dojp.server.slowQuerySegregation.enabled=true -Dojp.server.nextPageCache.enabled=true -Dojp.server.nextPageCache.ttlSeconds=60 -Dojp.server.nextPageCache.prefetchWaitTimeoutMs=5000" \
- rrobetti/ojp:0.4.1-SNAPSHOT
- name: Wait for ojp-server to start
run: sleep 10
@@ -131,9 +124,6 @@ jobs:
if: always() # ensures it runs even if previous steps fail
run: |
docker logs ojp-server 2>&1 || echo "ojp-server container not found"
- echo ""
- echo "=== OJP Server (with prefetch cache) log ==="
- docker logs ojp-server-prefetch-cache 2>&1 || echo "ojp-server-prefetch-cache container not found"
# ===========================================================================
# JOB 2: PostgreSQL Integration Tests
@@ -217,7 +207,7 @@ jobs:
run: |
docker run -d --name ojp-server \
--network host \
- -e JAVA_TOOL_OPTIONS="-Dojp.server.slowQuerySegregation.enabled=true" \
+ -e JAVA_TOOL_OPTIONS="-Dojp.server.slowQuerySegregation.enabled=true -Dojp.server.nextPageCache.enabled=true -Dojp.server.nextPageCache.ttlSeconds=60 -Dojp.server.nextPageCache.prefetchWaitTimeoutMs=5000" \
rrobetti/ojp:0.4.1-SNAPSHOT
# Start second OJP server WITH SQL enhancer enabled in OPTIMIZE mode
@@ -232,15 +222,6 @@ jobs:
-e JAVA_TOOL_OPTIONS="-Dojp.server.port=10593 -Dojp.prometheus.port=9163 -Dojp.server.slowQuerySegregation.enabled=true -Dojp.sql.enhancer.enabled=true -Dojp.sql.enhancer.mode=OPTIMIZE -Dojp.sql.enhancer.dialect=POSTGRESQL" \
rrobetti/ojp:0.4.1-SNAPSHOT
- # Start third OJP server WITH next-page prefetch cache enabled
- # Pagination-cache integration tests run against this server (port 10594)
- - name: Start OJP Server container (prefetch cache on port 10594)
- run: |
- docker run -d --name ojp-server-prefetch-cache \
- --network host \
- -e JAVA_TOOL_OPTIONS="-Dojp.server.port=10594 -Dojp.prometheus.port=9164 -Dojp.server.slowQuerySegregation.enabled=true -Dojp.server.nextPageCache.enabled=true -Dojp.server.nextPageCache.ttlSeconds=60 -Dojp.server.nextPageCache.prefetchWaitTimeoutMs=5000" \
- rrobetti/ojp:0.4.1-SNAPSHOT
-
- name: Wait for ojp-server to start
run: sleep 10
@@ -288,9 +269,6 @@ jobs:
echo ""
echo "=== OJP Server (with SQL enhancer) log ==="
docker logs ojp-server-enhancer 2>&1 || echo "ojp-server-enhancer container not found"
- echo ""
- echo "=== OJP Server (with prefetch cache) log ==="
- docker logs ojp-server-prefetch-cache 2>&1 || echo "ojp-server-prefetch-cache container not found"
# ===========================================================================
# JOB 3: MySQL Integration Tests
@@ -365,16 +343,9 @@ jobs:
run: |
docker run -d --name ojp-server \
--network host \
- -e JAVA_TOOL_OPTIONS="-Dojp.server.slowQuerySegregation.enabled=true" \
+ -e JAVA_TOOL_OPTIONS="-Dojp.server.slowQuerySegregation.enabled=true -Dojp.server.nextPageCache.enabled=true -Dojp.server.nextPageCache.ttlSeconds=60 -Dojp.server.nextPageCache.prefetchWaitTimeoutMs=5000" \
rrobetti/ojp:0.4.1-SNAPSHOT
- # Pagination-cache integration tests run against this server (port 10594)
- - name: Start OJP Server container (prefetch cache on port 10594)
- run: |
- docker run -d --name ojp-server-prefetch-cache \
- --network host \
- -e JAVA_TOOL_OPTIONS="-Dojp.server.port=10594 -Dojp.prometheus.port=9164 -Dojp.server.slowQuerySegregation.enabled=true -Dojp.server.nextPageCache.enabled=true -Dojp.server.nextPageCache.ttlSeconds=60 -Dojp.server.nextPageCache.prefetchWaitTimeoutMs=5000" \
- rrobetti/ojp:0.4.1-SNAPSHOT
- name: Wait for ojp-server to start
run: sleep 10
@@ -397,9 +368,6 @@ jobs:
if: always()
run: |
docker logs ojp-server 2>&1 || echo "ojp-server container not found"
- echo ""
- echo "=== OJP Server (with prefetch cache) log ==="
- docker logs ojp-server-prefetch-cache 2>&1 || echo "ojp-server-prefetch-cache container not found"
# ===========================================================================
# ===========================================================================
@@ -473,16 +441,9 @@ jobs:
run: |
docker run -d --name ojp-server \
--network host \
- -e JAVA_TOOL_OPTIONS="-Dojp.server.slowQuerySegregation.enabled=true" \
+ -e JAVA_TOOL_OPTIONS="-Dojp.server.slowQuerySegregation.enabled=true -Dojp.server.nextPageCache.enabled=true -Dojp.server.nextPageCache.ttlSeconds=60 -Dojp.server.nextPageCache.prefetchWaitTimeoutMs=5000" \
rrobetti/ojp:0.4.1-SNAPSHOT
- # Pagination-cache integration tests run against this server (port 10594)
- - name: Start OJP Server container (prefetch cache on port 10594)
- run: |
- docker run -d --name ojp-server-prefetch-cache \
- --network host \
- -e JAVA_TOOL_OPTIONS="-Dojp.server.port=10594 -Dojp.prometheus.port=9164 -Dojp.server.slowQuerySegregation.enabled=true -Dojp.server.nextPageCache.enabled=true -Dojp.server.nextPageCache.ttlSeconds=60 -Dojp.server.nextPageCache.prefetchWaitTimeoutMs=5000" \
- rrobetti/ojp:0.4.1-SNAPSHOT
- name: Wait for ojp-server to start
run: sleep 10
@@ -505,9 +466,6 @@ jobs:
if: always()
run: |
docker logs ojp-server 2>&1 || echo "ojp-server container not found"
- echo ""
- echo "=== OJP Server (with prefetch cache) log ==="
- docker logs ojp-server-prefetch-cache 2>&1 || echo "ojp-server-prefetch-cache container not found"
# ===========================================================================
# JOB 5: CockroachDB Integration Tests
@@ -572,16 +530,9 @@ jobs:
run: |
docker run -d --name ojp-server \
--network host \
- -e JAVA_TOOL_OPTIONS="-Dojp.server.slowQuerySegregation.enabled=true" \
+ -e JAVA_TOOL_OPTIONS="-Dojp.server.slowQuerySegregation.enabled=true -Dojp.server.nextPageCache.enabled=true -Dojp.server.nextPageCache.ttlSeconds=60 -Dojp.server.nextPageCache.prefetchWaitTimeoutMs=5000" \
rrobetti/ojp:0.4.1-SNAPSHOT
- # Pagination-cache integration tests run against this server (port 10594)
- - name: Start OJP Server container (prefetch cache on port 10594)
- run: |
- docker run -d --name ojp-server-prefetch-cache \
- --network host \
- -e JAVA_TOOL_OPTIONS="-Dojp.server.port=10594 -Dojp.prometheus.port=9164 -Dojp.server.slowQuerySegregation.enabled=true -Dojp.server.nextPageCache.enabled=true -Dojp.server.nextPageCache.ttlSeconds=60 -Dojp.server.nextPageCache.prefetchWaitTimeoutMs=5000" \
- rrobetti/ojp:0.4.1-SNAPSHOT
- name: Wait for ojp-server to start
run: sleep 10
@@ -604,9 +555,6 @@ jobs:
if: always()
run: |
docker logs ojp-server 2>&1 || echo "ojp-server container not found"
- echo ""
- echo "=== OJP Server (with prefetch cache) log ==="
- docker logs ojp-server-prefetch-cache 2>&1 || echo "ojp-server-prefetch-cache container not found"
# ===========================================================================
# JOB 6: DB2 Integration Tests
@@ -752,16 +700,9 @@ jobs:
run: |
docker run -d --name ojp-server \
--network host \
- -e JAVA_TOOL_OPTIONS="-Dojp.server.slowQuerySegregation.enabled=true" \
+ -e JAVA_TOOL_OPTIONS="-Dojp.server.slowQuerySegregation.enabled=true -Dojp.server.nextPageCache.enabled=true -Dojp.server.nextPageCache.ttlSeconds=60 -Dojp.server.nextPageCache.prefetchWaitTimeoutMs=5000" \
rrobetti/ojp:0.4.1-SNAPSHOT
- # Pagination-cache integration tests run against this server (port 10594)
- - name: Start OJP Server container (prefetch cache on port 10594)
- run: |
- docker run -d --name ojp-server-prefetch-cache \
- --network host \
- -e JAVA_TOOL_OPTIONS="-Dojp.server.port=10594 -Dojp.prometheus.port=9164 -Dojp.server.slowQuerySegregation.enabled=true -Dojp.server.nextPageCache.enabled=true -Dojp.server.nextPageCache.ttlSeconds=60 -Dojp.server.nextPageCache.prefetchWaitTimeoutMs=5000" \
- rrobetti/ojp:0.4.1-SNAPSHOT
- name: Wait for ojp-server to start
run: sleep 10
@@ -784,9 +725,6 @@ jobs:
if: always()
run: |
docker logs ojp-server 2>&1 || echo "ojp-server container not found"
- echo ""
- echo "=== OJP Server (with prefetch cache) log ==="
- docker logs ojp-server-prefetch-cache 2>&1 || echo "ojp-server-prefetch-cache container not found"
# ===========================================================================
# JOB 7: Multinode Integration Tests
@@ -1760,16 +1698,9 @@ jobs:
run: |
docker run -d --name ojp-server \
--network host \
- -e JAVA_TOOL_OPTIONS="-Dojp.server.slowQuerySegregation.enabled=true" \
+ -e JAVA_TOOL_OPTIONS="-Dojp.server.slowQuerySegregation.enabled=true -Dojp.server.nextPageCache.enabled=true -Dojp.server.nextPageCache.ttlSeconds=60 -Dojp.server.nextPageCache.prefetchWaitTimeoutMs=5000" \
rrobetti/ojp:0.4.1-SNAPSHOT
- # Pagination-cache integration tests run against this server (port 10594)
- - name: Start OJP Server container (prefetch cache on port 10594)
- run: |
- docker run -d --name ojp-server-prefetch-cache \
- --network host \
- -e JAVA_TOOL_OPTIONS="-Dojp.server.port=10594 -Dojp.prometheus.port=9164 -Dojp.server.slowQuerySegregation.enabled=true -Dojp.server.nextPageCache.enabled=true -Dojp.server.nextPageCache.ttlSeconds=60 -Dojp.server.nextPageCache.prefetchWaitTimeoutMs=5000" \
- rrobetti/ojp:0.4.1-SNAPSHOT
- name: Wait for ojp-server to start
run: sleep 10
@@ -1792,9 +1723,6 @@ jobs:
if: always() # ensures it runs even if previous steps fail
run: |
docker logs ojp-server 2>&1 || echo "ojp-server container not found"
- echo ""
- echo "=== OJP Server (with prefetch cache) log ==="
- docker logs ojp-server-prefetch-cache 2>&1 || echo "ojp-server-prefetch-cache container not found"
# ===========================================================================
# JOB 10: SQL Server Integration Tests
@@ -1875,16 +1803,9 @@ jobs:
run: |
docker run -d --name ojp-server \
--network host \
- -e JAVA_TOOL_OPTIONS="-Dojp.server.slowQuerySegregation.enabled=true" \
+ -e JAVA_TOOL_OPTIONS="-Dojp.server.slowQuerySegregation.enabled=true -Dojp.server.nextPageCache.enabled=true -Dojp.server.nextPageCache.ttlSeconds=60 -Dojp.server.nextPageCache.prefetchWaitTimeoutMs=5000" \
rrobetti/ojp:0.4.1-SNAPSHOT
- # Pagination-cache integration tests run against this server (port 10594)
- - name: Start OJP Server container (prefetch cache on port 10594)
- run: |
- docker run -d --name ojp-server-prefetch-cache \
- --network host \
- -e JAVA_TOOL_OPTIONS="-Dojp.server.port=10594 -Dojp.prometheus.port=9164 -Dojp.server.slowQuerySegregation.enabled=true -Dojp.server.nextPageCache.enabled=true -Dojp.server.nextPageCache.ttlSeconds=60 -Dojp.server.nextPageCache.prefetchWaitTimeoutMs=5000" \
- rrobetti/ojp:0.4.1-SNAPSHOT
- name: Wait for ojp-server to start
run: sleep 10
@@ -1913,9 +1834,6 @@ jobs:
if: always() # ensures it runs even if previous steps fail
run: |
docker logs ojp-server 2>&1 || echo "ojp-server container not found"
- echo ""
- echo "=== OJP Server (with prefetch cache) log ==="
- docker logs ojp-server-prefetch-cache 2>&1 || echo "ojp-server-prefetch-cache container not found"
# ===========================================================================
# JOB 11: Notify Integration Repository
diff --git a/ojp-jdbc-driver/src/test/java/openjproxy/jdbc/CockroachDBPaginationCacheIntegrationTest.java b/ojp-jdbc-driver/src/test/java/openjproxy/jdbc/CockroachDBPaginationCacheIntegrationTest.java
index f0ee7aec0..00442d192 100644
--- a/ojp-jdbc-driver/src/test/java/openjproxy/jdbc/CockroachDBPaginationCacheIntegrationTest.java
+++ b/ojp-jdbc-driver/src/test/java/openjproxy/jdbc/CockroachDBPaginationCacheIntegrationTest.java
@@ -31,7 +31,7 @@
*